Diffstat
86 files changed, 47508 insertions, 0 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig new file mode 100644 index 000000000..8d7001712 --- /dev/null +++ b/drivers/base/Kconfig @@ -0,0 +1,213 @@ +# SPDX-License-Identifier: GPL-2.0 +menu "Generic Driver Options" + +config UEVENT_HELPER + bool "Support for uevent helper" + help + The uevent helper program is forked by the kernel for + every uevent. + Before the switch to the netlink-based uevent source, this was + used to hook hotplug scripts into kernel device events. It + usually pointed to a shell script at /sbin/hotplug. + This should not be used today, because usual systems create + many events at bootup or device discovery in a very short time + frame. One forked process per event can create so many processes + that it creates a high system load, or on smaller systems + it is known to create out-of-memory situations during bootup. + +config UEVENT_HELPER_PATH + string "path to uevent helper" + depends on UEVENT_HELPER + default "" + help + To disable user space helper program execution at by default + specify an empty string here. This setting can still be altered + via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper + later at runtime. + +config DEVTMPFS + bool "Maintain a devtmpfs filesystem to mount at /dev" + help + This creates a tmpfs/ramfs filesystem instance early at bootup. + In this filesystem, the kernel driver core maintains device + nodes with their default names and permissions for all + registered devices with an assigned major/minor number. + Userspace can modify the filesystem content as needed, add + symlinks, and apply needed permissions. + It provides a fully functional /dev directory, where usually + udev runs on top, managing permissions and adding meaningful + symlinks. + In very limited environments, it may provide a sufficient + functional /dev without any further help. It also allows simple + rescue systems, and reliably handles dynamic major/minor numbers. + + Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs + file system will be used instead. + +config DEVTMPFS_MOUNT + bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs" + depends on DEVTMPFS + help + This will instruct the kernel to automatically mount the + devtmpfs filesystem at /dev, directly after the kernel has + mounted the root filesystem. The behavior can be overridden + with the commandline parameter: devtmpfs.mount=0|1. + This option does not affect initramfs based booting, here + the devtmpfs filesystem always needs to be mounted manually + after the rootfs is mounted. + With this option enabled, it allows to bring up a system in + rescue mode with init=/bin/sh, even when the /dev directory + on the rootfs is completely empty. + +config STANDALONE + bool "Select only drivers that don't need compile-time external firmware" + default y + help + Select this option if you don't have magic firmware for drivers that + need it. + + If unsure, say Y. + +config PREVENT_FIRMWARE_BUILD + bool "Disable drivers features which enable custom firmware building" + default y + help + Say yes to disable driver features which enable building a custom + driver firmware at kernel build time. These drivers do not use the + kernel firmware API to load firmware (CONFIG_FW_LOADER), instead they + use their own custom loading mechanism. The required firmware is + usually shipped with the driver, building the driver firmware + should only be needed if you have an updated firmware source. 
+ + Firmware should not be being built as part of kernel, these days + you should always prevent this and say Y here. There are only two + old drivers which enable building of its firmware at kernel build + time: + + o CONFIG_WANXL through CONFIG_WANXL_BUILD_FIRMWARE + o CONFIG_SCSI_AIC79XX through CONFIG_AIC79XX_BUILD_FIRMWARE + +source "drivers/base/firmware_loader/Kconfig" + +config WANT_DEV_COREDUMP + bool + help + Drivers should "select" this option if they desire to use the + device coredump mechanism. + +config ALLOW_DEV_COREDUMP + bool "Allow device coredump" if EXPERT + default y + help + This option controls if the device coredump mechanism is available or + not; if disabled, the mechanism will be omitted even if drivers that + can use it are enabled. + Say 'N' for more sensitive systems or systems that don't want + to ever access the information to not have the code, nor keep any + data. + + If unsure, say Y. + +config DEV_COREDUMP + bool + default y if WANT_DEV_COREDUMP + depends on ALLOW_DEV_COREDUMP + +config DEBUG_DRIVER + bool "Driver Core verbose debug messages" + depends on DEBUG_KERNEL + help + Say Y here if you want the Driver core to produce a bunch of + debug messages to the system log. Select this if you are having a + problem with the driver core and want to see more of what is + going on. + + If you are unsure about this, say N here. + +config DEBUG_DEVRES + bool "Managed device resources verbose debug messages" + depends on DEBUG_KERNEL + help + This option enables kernel parameter devres.log. If set to + non-zero, devres debug messages are printed. Select this if + you are having a problem with devres or want to debug + resource management for a managed device. devres.log can be + switched on and off from sysfs node. + + If you are unsure about this, Say N here. + +config DEBUG_TEST_DRIVER_REMOVE + bool "Test driver remove calls during probe (UNSTABLE)" + depends on DEBUG_KERNEL + help + Say Y here if you want the Driver core to test driver remove functions + by calling probe, remove, probe. This tests the remove path without + having to unbind the driver or unload the driver module. + + This option is expected to find errors and may render your system + unusable. You should say N here unless you are explicitly looking to + test this functionality. + +config PM_QOS_KUNIT_TEST + bool "KUnit Test for PM QoS features" if !KUNIT_ALL_TESTS + depends on KUNIT=y + default KUNIT_ALL_TESTS + +config HMEM_REPORTING + bool + default n + depends on NUMA + help + Enable reporting for heterogenous memory access attributes under + their non-uniform memory nodes. + +source "drivers/base/test/Kconfig" + +config SYS_HYPERVISOR + bool + default n + +config GENERIC_CPU_DEVICES + bool + default n + +config GENERIC_CPU_AUTOPROBE + bool + +config GENERIC_CPU_VULNERABILITIES + bool + +config SOC_BUS + bool + select GLOB + +source "drivers/base/regmap/Kconfig" + +config DMA_SHARED_BUFFER + bool + default n + select IRQ_WORK + help + This option enables the framework for buffer-sharing between + multiple drivers. A buffer is associated with a file using driver + APIs extension; the file's descriptor can then be passed on to other + driver. + +config DMA_FENCE_TRACE + bool "Enable verbose DMA_FENCE_TRACE messages" + depends on DMA_SHARED_BUFFER + help + Enable the DMA_FENCE_TRACE printks. This will add extra + spam to the console log, but will make it easier to diagnose + lockup related problems for dma-buffers shared across multiple + devices. 
+ +config GENERIC_ARCH_TOPOLOGY + bool + help + Enable support for architectures common topology code: e.g., parsing + CPU capacity information from DT, usage of such information for + appropriate scaling, sysfs interface for reading capacity values at + runtime. + +endmenu diff --git a/drivers/base/Makefile b/drivers/base/Makefile new file mode 100644 index 000000000..41369fc70 --- /dev/null +++ b/drivers/base/Makefile @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for the Linux device tree + +obj-y := component.o core.o bus.o dd.o syscore.o \ + driver.o class.o platform.o \ + cpu.o firmware.o init.o map.o devres.o \ + attribute_container.o transport_class.o \ + topology.o container.o property.o cacheinfo.o \ + swnode.o +obj-$(CONFIG_DEVTMPFS) += devtmpfs.o +obj-y += power/ +obj-$(CONFIG_ISA_BUS_API) += isa.o +obj-y += firmware_loader/ +obj-$(CONFIG_NUMA) += node.o +obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o +ifeq ($(CONFIG_SYSFS),y) +obj-$(CONFIG_MODULES) += module.o +endif +obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o +obj-$(CONFIG_REGMAP) += regmap/ +obj-$(CONFIG_SOC_BUS) += soc.o +obj-$(CONFIG_PINCTRL) += pinctrl.o +obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o +obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o +obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o + +obj-y += test/ + +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG + diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c new file mode 100644 index 000000000..51647926e --- /dev/null +++ b/drivers/base/arch_topology.c @@ -0,0 +1,618 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Arch specific cpu topology information + * + * Copyright (C) 2016, ARM Ltd. + * Written by: Juri Lelli, ARM Ltd. + */ + +#include <linux/acpi.h> +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/sched/topology.h> +#include <linux/cpuset.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/sched.h> +#include <linux/smp.h> + +bool topology_scale_freq_invariant(void) +{ + return cpufreq_supports_freq_invariance() || + arch_freq_counters_available(cpu_online_mask); +} + +__weak bool arch_freq_counters_available(const struct cpumask *cpus) +{ + return false; +} +DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; + +void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq, + unsigned long max_freq) +{ + unsigned long scale; + int i; + + if (WARN_ON_ONCE(!cur_freq || !max_freq)) + return; + + /* + * If the use of counters for FIE is enabled, just return as we don't + * want to update the scale factor with information from CPUFREQ. + * Instead the scale factor will be updated from arch_scale_freq_tick. 
+ */ + if (arch_freq_counters_available(cpus)) + return; + + scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq; + + for_each_cpu(i, cpus) + per_cpu(freq_scale, i) = scale; +} + +DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE; + +void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) +{ + per_cpu(cpu_scale, cpu) = capacity; +} + +DEFINE_PER_CPU(unsigned long, thermal_pressure); + +void topology_set_thermal_pressure(const struct cpumask *cpus, + unsigned long th_pressure) +{ + int cpu; + + for_each_cpu(cpu, cpus) + WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); +} + +static ssize_t cpu_capacity_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + + return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); +} + +static void update_topology_flags_workfn(struct work_struct *work); +static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn); + +static DEVICE_ATTR_RO(cpu_capacity); + +static int register_cpu_capacity_sysctl(void) +{ + int i; + struct device *cpu; + + for_each_possible_cpu(i) { + cpu = get_cpu_device(i); + if (!cpu) { + pr_err("%s: too early to get CPU%d device!\n", + __func__, i); + continue; + } + device_create_file(cpu, &dev_attr_cpu_capacity); + } + + return 0; +} +subsys_initcall(register_cpu_capacity_sysctl); + +static int update_topology; + +int topology_update_cpu_topology(void) +{ + return update_topology; +} + +/* + * Updating the sched_domains can't be done directly from cpufreq callbacks + * due to locking, so queue the work for later. + */ +static void update_topology_flags_workfn(struct work_struct *work) +{ + update_topology = 1; + rebuild_sched_domains(); + pr_debug("sched_domain hierarchy rebuilt, flags updated\n"); + update_topology = 0; +} + +static DEFINE_PER_CPU(u32, freq_factor) = 1; +static u32 *raw_capacity; + +static int free_raw_capacity(void) +{ + kfree(raw_capacity); + raw_capacity = NULL; + + return 0; +} + +void topology_normalize_cpu_scale(void) +{ + u64 capacity; + u64 capacity_scale; + int cpu; + + if (!raw_capacity) + return; + + capacity_scale = 1; + for_each_possible_cpu(cpu) { + capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity_scale = max(capacity, capacity_scale); + } + + pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale); + for_each_possible_cpu(cpu) { + capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu); + capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT, + capacity_scale); + topology_set_cpu_scale(cpu, capacity); + pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n", + cpu, topology_get_cpu_scale(cpu)); + } +} + +bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) +{ + struct clk *cpu_clk; + static bool cap_parsing_failed; + int ret; + u32 cpu_capacity; + + if (cap_parsing_failed) + return false; + + ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", + &cpu_capacity); + if (!ret) { + if (!raw_capacity) { + raw_capacity = kcalloc(num_possible_cpus(), + sizeof(*raw_capacity), + GFP_KERNEL); + if (!raw_capacity) { + cap_parsing_failed = true; + return false; + } + } + raw_capacity[cpu] = cpu_capacity; + pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n", + cpu_node, raw_capacity[cpu]); + + /* + * Update freq_factor for calculating early boot cpu capacities. 
+ * For non-clk CPU DVFS mechanism, there's no way to get the + * frequency value now, assuming they are running at the same + * frequency (by keeping the initial freq_factor value). + */ + cpu_clk = of_clk_get(cpu_node, 0); + if (!PTR_ERR_OR_ZERO(cpu_clk)) { + per_cpu(freq_factor, cpu) = + clk_get_rate(cpu_clk) / 1000; + clk_put(cpu_clk); + } + } else { + if (raw_capacity) { + pr_err("cpu_capacity: missing %pOF raw capacity\n", + cpu_node); + pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n"); + } + cap_parsing_failed = true; + free_raw_capacity(); + } + + return !ret; +} + +#ifdef CONFIG_CPU_FREQ +static cpumask_var_t cpus_to_visit; +static void parsing_done_workfn(struct work_struct *work); +static DECLARE_WORK(parsing_done_work, parsing_done_workfn); + +static int +init_cpu_capacity_callback(struct notifier_block *nb, + unsigned long val, + void *data) +{ + struct cpufreq_policy *policy = data; + int cpu; + + if (!raw_capacity) + return 0; + + if (val != CPUFREQ_CREATE_POLICY) + return 0; + + pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n", + cpumask_pr_args(policy->related_cpus), + cpumask_pr_args(cpus_to_visit)); + + cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); + + for_each_cpu(cpu, policy->related_cpus) + per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000; + + if (cpumask_empty(cpus_to_visit)) { + topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); + free_raw_capacity(); + pr_debug("cpu_capacity: parsing done\n"); + schedule_work(&parsing_done_work); + } + + return 0; +} + +static struct notifier_block init_cpu_capacity_notifier = { + .notifier_call = init_cpu_capacity_callback, +}; + +static int __init register_cpufreq_notifier(void) +{ + int ret; + + /* + * on ACPI-based systems we need to use the default cpu capacity + * until we have the necessary code to parse the cpu capacity, so + * skip registering cpufreq notifier. + */ + if (!acpi_disabled || !raw_capacity) + return -EINVAL; + + if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) + return -ENOMEM; + + cpumask_copy(cpus_to_visit, cpu_possible_mask); + + ret = cpufreq_register_notifier(&init_cpu_capacity_notifier, + CPUFREQ_POLICY_NOTIFIER); + + if (ret) + free_cpumask_var(cpus_to_visit); + + return ret; +} +core_initcall(register_cpufreq_notifier); + +static void parsing_done_workfn(struct work_struct *work) +{ + cpufreq_unregister_notifier(&init_cpu_capacity_notifier, + CPUFREQ_POLICY_NOTIFIER); + free_cpumask_var(cpus_to_visit); +} + +#else +core_initcall(free_raw_capacity); +#endif + +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) +/* + * This function returns the logic cpu number of the node. + * There are basically three kinds of return values: + * (1) logic cpu number which is > 0. + * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but + * there is no possible logical CPU in the kernel to match. This happens + * when CONFIG_NR_CPUS is configure to be smaller than the number of + * CPU nodes in DT. We need to just ignore this case. 
+ * (3) -1 if the node does not exist in the device tree + */ +static int __init get_cpu_for_node(struct device_node *node) +{ + struct device_node *cpu_node; + int cpu; + + cpu_node = of_parse_phandle(node, "cpu", 0); + if (!cpu_node) + return -1; + + cpu = of_cpu_node_to_id(cpu_node); + if (cpu >= 0) + topology_parse_cpu_capacity(cpu_node, cpu); + else + pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n", + cpu_node, cpumask_pr_args(cpu_possible_mask)); + + of_node_put(cpu_node); + return cpu; +} + +static int __init parse_core(struct device_node *core, int package_id, + int core_id) +{ + char name[20]; + bool leaf = true; + int i = 0; + int cpu; + struct device_node *t; + + do { + snprintf(name, sizeof(name), "thread%d", i); + t = of_get_child_by_name(core, name); + if (t) { + leaf = false; + cpu = get_cpu_for_node(t); + if (cpu >= 0) { + cpu_topology[cpu].package_id = package_id; + cpu_topology[cpu].core_id = core_id; + cpu_topology[cpu].thread_id = i; + } else if (cpu != -ENODEV) { + pr_err("%pOF: Can't get CPU for thread\n", t); + of_node_put(t); + return -EINVAL; + } + of_node_put(t); + } + i++; + } while (t); + + cpu = get_cpu_for_node(core); + if (cpu >= 0) { + if (!leaf) { + pr_err("%pOF: Core has both threads and CPU\n", + core); + return -EINVAL; + } + + cpu_topology[cpu].package_id = package_id; + cpu_topology[cpu].core_id = core_id; + } else if (leaf && cpu != -ENODEV) { + pr_err("%pOF: Can't get CPU for leaf core\n", core); + return -EINVAL; + } + + return 0; +} + +static int __init parse_cluster(struct device_node *cluster, int depth) +{ + char name[20]; + bool leaf = true; + bool has_cores = false; + struct device_node *c; + static int package_id __initdata; + int core_id = 0; + int i, ret; + + /* + * First check for child clusters; we currently ignore any + * information about the nesting of clusters and present the + * scheduler with a flat list of them. + */ + i = 0; + do { + snprintf(name, sizeof(name), "cluster%d", i); + c = of_get_child_by_name(cluster, name); + if (c) { + leaf = false; + ret = parse_cluster(c, depth + 1); + of_node_put(c); + if (ret != 0) + return ret; + } + i++; + } while (c); + + /* Now check for cores */ + i = 0; + do { + snprintf(name, sizeof(name), "core%d", i); + c = of_get_child_by_name(cluster, name); + if (c) { + has_cores = true; + + if (depth == 0) { + pr_err("%pOF: cpu-map children should be clusters\n", + c); + of_node_put(c); + return -EINVAL; + } + + if (leaf) { + ret = parse_core(c, package_id, core_id++); + } else { + pr_err("%pOF: Non-leaf cluster with core %s\n", + cluster, name); + ret = -EINVAL; + } + + of_node_put(c); + if (ret != 0) + return ret; + } + i++; + } while (c); + + if (leaf && !has_cores) + pr_warn("%pOF: empty cluster\n", cluster); + + if (leaf) + package_id++; + + return 0; +} + +static int __init parse_dt_topology(void) +{ + struct device_node *cn, *map; + int ret = 0; + int cpu; + + cn = of_find_node_by_path("/cpus"); + if (!cn) { + pr_err("No CPU information found in DT\n"); + return 0; + } + + /* + * When topology is provided cpu-map is essentially a root + * cluster with restricted subnodes. + */ + map = of_get_child_by_name(cn, "cpu-map"); + if (!map) + goto out; + + ret = parse_cluster(map, 0); + if (ret != 0) + goto out_map; + + topology_normalize_cpu_scale(); + + /* + * Check that all cores are in the topology; the SMP code will + * only mark cores described in the DT as possible. 
+ */ + for_each_possible_cpu(cpu) + if (cpu_topology[cpu].package_id == -1) + ret = -EINVAL; + +out_map: + of_node_put(map); +out: + of_node_put(cn); + return ret; +} +#endif + +/* + * cpu topology table + */ +struct cpu_topology cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); + +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); + + /* Find the smaller of NUMA, core or LLC siblings */ + if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { + /* not numa in package, lets use the package siblings */ + core_mask = &cpu_topology[cpu].core_sibling; + } + if (cpu_topology[cpu].llc_id != -1) { + if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) + core_mask = &cpu_topology[cpu].llc_sibling; + } + + return core_mask; +} + +void update_siblings_masks(unsigned int cpuid) +{ + struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; + int cpu; + + /* update core and thread sibling masks */ + for_each_online_cpu(cpu) { + cpu_topo = &cpu_topology[cpu]; + + if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) { + cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); + cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); + } + + if (cpuid_topo->package_id != cpu_topo->package_id) + continue; + + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); + + if (cpuid_topo->core_id != cpu_topo->core_id) + continue; + + cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); + } +} + +static void clear_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpumask_clear(&cpu_topo->llc_sibling); + cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); + + cpumask_clear(&cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); +} + +void __init reset_cpu_topology(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) { + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpu_topo->thread_id = -1; + cpu_topo->core_id = -1; + cpu_topo->package_id = -1; + cpu_topo->llc_id = -1; + + clear_cpu_topology(cpu); + } +} + +void remove_cpu_topology(unsigned int cpu) +{ + int sibling; + + for_each_cpu(sibling, topology_core_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, topology_llc_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); + + clear_cpu_topology(cpu); +} + +__weak int __init parse_acpi_topology(void) +{ + return 0; +} + +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) +void __init init_cpu_topology(void) +{ + reset_cpu_topology(); + + /* + * Discard anything that was parsed if we hit an error so we + * don't use partial information. 
+ */ + if (parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) + reset_cpu_topology(); +} + +void store_cpu_topology(unsigned int cpuid) +{ + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; + + if (cpuid_topo->package_id != -1) + goto topology_populated; + + cpuid_topo->thread_id = -1; + cpuid_topo->core_id = cpuid; + cpuid_topo->package_id = cpu_to_node(cpuid); + + pr_debug("CPU%u: package %d core %d thread %d\n", + cpuid, cpuid_topo->package_id, cpuid_topo->core_id, + cpuid_topo->thread_id); + +topology_populated: + update_siblings_masks(cpuid); +} +#endif diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c new file mode 100644 index 000000000..f7bd0f4db --- /dev/null +++ b/drivers/base/attribute_container.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * attribute_container.c - implementation of a simple container for classes + * + * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> + * + * The basic idea here is to enable a device to be attached to an + * aritrary numer of classes without having to allocate storage for them. + * Instead, the contained classes select the devices they need to attach + * to via a matching function. + */ + +#include <linux/attribute_container.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> + +#include "base.h" + +/* This is a private structure used to tie the classdev and the + * container .. it should never be visible outside this file */ +struct internal_container { + struct klist_node node; + struct attribute_container *cont; + struct device classdev; +}; + +static void internal_container_klist_get(struct klist_node *n) +{ + struct internal_container *ic = + container_of(n, struct internal_container, node); + get_device(&ic->classdev); +} + +static void internal_container_klist_put(struct klist_node *n) +{ + struct internal_container *ic = + container_of(n, struct internal_container, node); + put_device(&ic->classdev); +} + + +/** + * attribute_container_classdev_to_container - given a classdev, return the container + * + * @classdev: the class device created by attribute_container_add_device. + * + * Returns the container associated with this classdev. + */ +struct attribute_container * +attribute_container_classdev_to_container(struct device *classdev) +{ + struct internal_container *ic = + container_of(classdev, struct internal_container, classdev); + return ic->cont; +} +EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container); + +static LIST_HEAD(attribute_container_list); + +static DEFINE_MUTEX(attribute_container_mutex); + +/** + * attribute_container_register - register an attribute container + * + * @cont: The container to register. This must be allocated by the + * callee and should also be zeroed by it. 
+ */ +int +attribute_container_register(struct attribute_container *cont) +{ + INIT_LIST_HEAD(&cont->node); + klist_init(&cont->containers, internal_container_klist_get, + internal_container_klist_put); + + mutex_lock(&attribute_container_mutex); + list_add_tail(&cont->node, &attribute_container_list); + mutex_unlock(&attribute_container_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(attribute_container_register); + +/** + * attribute_container_unregister - remove a container registration + * + * @cont: previously registered container to remove + */ +int +attribute_container_unregister(struct attribute_container *cont) +{ + int retval = -EBUSY; + + mutex_lock(&attribute_container_mutex); + spin_lock(&cont->containers.k_lock); + if (!list_empty(&cont->containers.k_list)) + goto out; + retval = 0; + list_del(&cont->node); + out: + spin_unlock(&cont->containers.k_lock); + mutex_unlock(&attribute_container_mutex); + return retval; + +} +EXPORT_SYMBOL_GPL(attribute_container_unregister); + +/* private function used as class release */ +static void attribute_container_release(struct device *classdev) +{ + struct internal_container *ic + = container_of(classdev, struct internal_container, classdev); + struct device *dev = classdev->parent; + + kfree(ic); + put_device(dev); +} + +/** + * attribute_container_add_device - see if any container is interested in dev + * + * @dev: device to add attributes to + * @fn: function to trigger addition of class device. + * + * This function allocates storage for the class device(s) to be + * attached to dev (one for each matching attribute_container). If no + * fn is provided, the code will simply register the class device via + * device_add. If a function is provided, it is expected to add + * the class device at the appropriate time. One of the things that + * might be necessary is to allocate and initialise the classdev and + * then add it a later time. To do this, call this routine for + * allocation and initialisation and then use + * attribute_container_device_trigger() to call device_add() on + * it. Note: after this, the class device contains a reference to dev + * which is not relinquished until the release of the classdev. + */ +void +attribute_container_add_device(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)) +{ + struct attribute_container *cont; + + mutex_lock(&attribute_container_mutex); + list_for_each_entry(cont, &attribute_container_list, node) { + struct internal_container *ic; + + if (attribute_container_no_classdevs(cont)) + continue; + + if (!cont->match(cont, dev)) + continue; + + ic = kzalloc(sizeof(*ic), GFP_KERNEL); + if (!ic) { + dev_err(dev, "failed to allocate class container\n"); + continue; + } + + ic->cont = cont; + device_initialize(&ic->classdev); + ic->classdev.parent = get_device(dev); + ic->classdev.class = cont->class; + cont->class->dev_release = attribute_container_release; + dev_set_name(&ic->classdev, "%s", dev_name(dev)); + if (fn) + fn(cont, dev, &ic->classdev); + else + attribute_container_add_class_device(&ic->classdev); + klist_add_tail(&ic->node, &cont->containers); + } + mutex_unlock(&attribute_container_mutex); +} + +/* FIXME: can't break out of this unless klist_iter_exit is also + * called before doing the break + */ +#define klist_for_each_entry(pos, head, member, iter) \ + for (klist_iter_init(head, iter); (pos = ({ \ + struct klist_node *n = klist_next(iter); \ + n ? 
container_of(n, typeof(*pos), member) : \ + ({ klist_iter_exit(iter) ; NULL; }); \ + })) != NULL;) + + +/** + * attribute_container_remove_device - make device eligible for removal. + * + * @dev: The generic device + * @fn: A function to call to remove the device + * + * This routine triggers device removal. If fn is NULL, then it is + * simply done via device_unregister (note that if something + * still has a reference to the classdev, then the memory occupied + * will not be freed until the classdev is released). If you want a + * two phase release: remove from visibility and then delete the + * device, then you should use this routine with a fn that calls + * device_del() and then use attribute_container_device_trigger() + * to do the final put on the classdev. + */ +void +attribute_container_remove_device(struct device *dev, + void (*fn)(struct attribute_container *, + struct device *, + struct device *)) +{ + struct attribute_container *cont; + + mutex_lock(&attribute_container_mutex); + list_for_each_entry(cont, &attribute_container_list, node) { + struct internal_container *ic; + struct klist_iter iter; + + if (attribute_container_no_classdevs(cont)) + continue; + + if (!cont->match(cont, dev)) + continue; + + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (dev != ic->classdev.parent) + continue; + klist_del(&ic->node); + if (fn) + fn(cont, dev, &ic->classdev); + else { + attribute_container_remove_attrs(&ic->classdev); + device_unregister(&ic->classdev); + } + } + } + mutex_unlock(&attribute_container_mutex); +} + +static int +do_attribute_container_device_trigger_safe(struct device *dev, + struct attribute_container *cont, + int (*fn)(struct attribute_container *, + struct device *, struct device *), + int (*undo)(struct attribute_container *, + struct device *, struct device *)) +{ + int ret; + struct internal_container *ic, *failed; + struct klist_iter iter; + + if (attribute_container_no_classdevs(cont)) + return fn(cont, dev, NULL); + + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (dev == ic->classdev.parent) { + ret = fn(cont, dev, &ic->classdev); + if (ret) { + failed = ic; + klist_iter_exit(&iter); + goto fail; + } + } + } + return 0; + +fail: + if (!undo) + return ret; + + /* Attempt to undo the work partially done. */ + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (ic == failed) { + klist_iter_exit(&iter); + break; + } + if (dev == ic->classdev.parent) + undo(cont, dev, &ic->classdev); + } + return ret; +} + +/** + * attribute_container_device_trigger_safe - execute a trigger for each + * matching classdev or fail all of them. + * + * @dev: The generic device to run the trigger for + * @fn the function to execute for each classdev. + * @undo A function to undo the work previously done in case of error + * + * This function is a safe version of + * attribute_container_device_trigger. It stops on the first error and + * undo the partial work that has been done, on previous classdev. It + * is guaranteed that either they all succeeded, or none of them + * succeeded. 
+ */ +int +attribute_container_device_trigger_safe(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *), + int (*undo)(struct attribute_container *, + struct device *, + struct device *)) +{ + struct attribute_container *cont, *failed = NULL; + int ret = 0; + + mutex_lock(&attribute_container_mutex); + + list_for_each_entry(cont, &attribute_container_list, node) { + + if (!cont->match(cont, dev)) + continue; + + ret = do_attribute_container_device_trigger_safe(dev, cont, + fn, undo); + if (ret) { + failed = cont; + break; + } + } + + if (ret && !WARN_ON(!undo)) { + list_for_each_entry(cont, &attribute_container_list, node) { + + if (failed == cont) + break; + + if (!cont->match(cont, dev)) + continue; + + do_attribute_container_device_trigger_safe(dev, cont, + undo, NULL); + } + } + + mutex_unlock(&attribute_container_mutex); + return ret; + +} + +/** + * attribute_container_device_trigger - execute a trigger for each matching classdev + * + * @dev: The generic device to run the trigger for + * @fn the function to execute for each classdev. + * + * This function is for executing a trigger when you need to know both + * the container and the classdev. If you only care about the + * container, then use attribute_container_trigger() instead. + */ +void +attribute_container_device_trigger(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *, + struct device *)) +{ + struct attribute_container *cont; + + mutex_lock(&attribute_container_mutex); + list_for_each_entry(cont, &attribute_container_list, node) { + struct internal_container *ic; + struct klist_iter iter; + + if (!cont->match(cont, dev)) + continue; + + if (attribute_container_no_classdevs(cont)) { + fn(cont, dev, NULL); + continue; + } + + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (dev == ic->classdev.parent) + fn(cont, dev, &ic->classdev); + } + } + mutex_unlock(&attribute_container_mutex); +} + +/** + * attribute_container_trigger - trigger a function for each matching container + * + * @dev: The generic device to activate the trigger for + * @fn: the function to trigger + * + * This routine triggers a function that only needs to know the + * matching containers (not the classdev) associated with a device. + * It is more lightweight than attribute_container_device_trigger, so + * should be used in preference unless the triggering function + * actually needs to know the classdev. 
+ */ +void +attribute_container_trigger(struct device *dev, + int (*fn)(struct attribute_container *, + struct device *)) +{ + struct attribute_container *cont; + + mutex_lock(&attribute_container_mutex); + list_for_each_entry(cont, &attribute_container_list, node) { + if (cont->match(cont, dev)) + fn(cont, dev); + } + mutex_unlock(&attribute_container_mutex); +} + +/** + * attribute_container_add_attrs - add attributes + * + * @classdev: The class device + * + * This simply creates all the class device sysfs files from the + * attributes listed in the container + */ +int +attribute_container_add_attrs(struct device *classdev) +{ + struct attribute_container *cont = + attribute_container_classdev_to_container(classdev); + struct device_attribute **attrs = cont->attrs; + int i, error; + + BUG_ON(attrs && cont->grp); + + if (!attrs && !cont->grp) + return 0; + + if (cont->grp) + return sysfs_create_group(&classdev->kobj, cont->grp); + + for (i = 0; attrs[i]; i++) { + sysfs_attr_init(&attrs[i]->attr); + error = device_create_file(classdev, attrs[i]); + if (error) + return error; + } + + return 0; +} + +/** + * attribute_container_add_class_device - same function as device_add + * + * @classdev: the class device to add + * + * This performs essentially the same function as device_add except for + * attribute containers, namely add the classdev to the system and then + * create the attribute files + */ +int +attribute_container_add_class_device(struct device *classdev) +{ + int error = device_add(classdev); + + if (error) + return error; + return attribute_container_add_attrs(classdev); +} + +/** + * attribute_container_add_class_device_adapter - simple adapter for triggers + * + * This function is identical to attribute_container_add_class_device except + * that it is designed to be called from the triggers + */ +int +attribute_container_add_class_device_adapter(struct attribute_container *cont, + struct device *dev, + struct device *classdev) +{ + return attribute_container_add_class_device(classdev); +} + +/** + * attribute_container_remove_attrs - remove any attribute files + * + * @classdev: The class device to remove the files from + * + */ +void +attribute_container_remove_attrs(struct device *classdev) +{ + struct attribute_container *cont = + attribute_container_classdev_to_container(classdev); + struct device_attribute **attrs = cont->attrs; + int i; + + if (!attrs && !cont->grp) + return; + + if (cont->grp) { + sysfs_remove_group(&classdev->kobj, cont->grp); + return ; + } + + for (i = 0; attrs[i]; i++) + device_remove_file(classdev, attrs[i]); +} + +/** + * attribute_container_class_device_del - equivalent of class_device_del + * + * @classdev: the class device + * + * This function simply removes all the attribute files and then calls + * device_del. + */ +void +attribute_container_class_device_del(struct device *classdev) +{ + attribute_container_remove_attrs(classdev); + device_del(classdev); +} + +/** + * attribute_container_find_class_device - find the corresponding class_device + * + * @cont: the container + * @dev: the generic device + * + * Looks up the device in the container's list of class devices and returns + * the corresponding class_device. 
+ */ +struct device * +attribute_container_find_class_device(struct attribute_container *cont, + struct device *dev) +{ + struct device *cdev = NULL; + struct internal_container *ic; + struct klist_iter iter; + + klist_for_each_entry(ic, &cont->containers, node, &iter) { + if (ic->classdev.parent == dev) { + cdev = &ic->classdev; + /* FIXME: must exit iterator then break */ + klist_iter_exit(&iter); + break; + } + } + + return cdev; +} +EXPORT_SYMBOL_GPL(attribute_container_find_class_device); diff --git a/drivers/base/base.h b/drivers/base/base.h new file mode 100644 index 000000000..91cfb8405 --- /dev/null +++ b/drivers/base/base.h @@ -0,0 +1,199 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org> + * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2008-2012 Novell Inc. + * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org> + * Copyright (c) 2012-2019 Linux Foundation + * + * Core driver model functions and structures that should not be + * shared outside of the drivers/base/ directory. + * + */ +#include <linux/notifier.h> + +/** + * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure. + * + * @subsys - the struct kset that defines this subsystem + * @devices_kset - the subsystem's 'devices' directory + * @interfaces - list of subsystem interfaces associated + * @mutex - protect the devices, and interfaces lists. + * + * @drivers_kset - the list of drivers associated + * @klist_devices - the klist to iterate over the @devices_kset + * @klist_drivers - the klist to iterate over the @drivers_kset + * @bus_notifier - the bus notifier list for anything that cares about things + * on this bus. + * @bus - pointer back to the struct bus_type that this structure is associated + * with. + * + * @glue_dirs - "glue" directory to put in-between the parent device to + * avoid namespace conflicts + * @class - pointer back to the struct class that this structure is associated + * with. + * + * This structure is the one that is the actual kobject allowing struct + * bus_type/class to be statically allocated safely. Nothing outside of the + * driver core should ever touch these fields. + */ +struct subsys_private { + struct kset subsys; + struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; + + struct kset *drivers_kset; + struct klist klist_devices; + struct klist klist_drivers; + struct blocking_notifier_head bus_notifier; + unsigned int drivers_autoprobe:1; + struct bus_type *bus; + + struct kset glue_dirs; + struct class *class; +}; +#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj) + +struct driver_private { + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + struct module_kobject *mkobj; + struct device_driver *driver; +}; +#define to_driver(obj) container_of(obj, struct driver_private, kobj) + +/** + * struct device_private - structure to hold the private to the driver core portions of the device structure. 
+ * + * @klist_children - klist containing all children of this device + * @knode_parent - node in sibling list + * @knode_driver - node in driver list + * @knode_bus - node in bus list + * @knode_class - node in class list + * @deferred_probe - entry in deferred_probe_list which is used to retry the + * binding of drivers which were unable to get all the resources needed by + * the device; typically because it depends on another driver getting + * probed first. + * @async_driver - pointer to device driver awaiting probe via async_probe + * @device - pointer back to the struct device that this structure is + * associated with. + * @dead - This device is currently either in the process of or has been + * removed from the system. Any asynchronous events scheduled for this + * device should exit without taking any action. + * + * Nothing outside of the driver core should ever touch these fields. + */ +struct device_private { + struct klist klist_children; + struct klist_node knode_parent; + struct klist_node knode_driver; + struct klist_node knode_bus; + struct klist_node knode_class; + struct list_head deferred_probe; + struct device_driver *async_driver; + char *deferred_probe_reason; + struct device *device; + u8 dead:1; +}; +#define to_device_private_parent(obj) \ + container_of(obj, struct device_private, knode_parent) +#define to_device_private_driver(obj) \ + container_of(obj, struct device_private, knode_driver) +#define to_device_private_bus(obj) \ + container_of(obj, struct device_private, knode_bus) +#define to_device_private_class(obj) \ + container_of(obj, struct device_private, knode_class) + +/* initialisation functions */ +extern int devices_init(void); +extern int buses_init(void); +extern int classes_init(void); +extern int firmware_init(void); +#ifdef CONFIG_SYS_HYPERVISOR +extern int hypervisor_init(void); +#else +static inline int hypervisor_init(void) { return 0; } +#endif +extern int platform_bus_init(void); +extern void cpu_dev_init(void); +extern void container_dev_init(void); + +struct kobject *virtual_device_parent(struct device *dev); + +extern int bus_add_device(struct device *dev); +extern void bus_probe_device(struct device *dev); +extern void bus_remove_device(struct device *dev); + +extern int bus_add_driver(struct device_driver *drv); +extern void bus_remove_driver(struct device_driver *drv); +extern void device_release_driver_internal(struct device *dev, + struct device_driver *drv, + struct device *parent); + +extern void driver_detach(struct device_driver *drv); +extern int driver_probe_device(struct device_driver *drv, struct device *dev); +extern void driver_deferred_probe_del(struct device *dev); +extern void device_set_deferred_probe_reason(const struct device *dev, + struct va_format *vaf); +static inline int driver_match_device(struct device_driver *drv, + struct device *dev) +{ + return drv->bus->match ? 
drv->bus->match(dev, drv) : 1; +} +extern bool driver_allows_async_probing(struct device_driver *drv); + +extern int driver_add_groups(struct device_driver *drv, + const struct attribute_group **groups); +extern void driver_remove_groups(struct device_driver *drv, + const struct attribute_group **groups); +int device_driver_attach(struct device_driver *drv, struct device *dev); +void device_driver_detach(struct device *dev); + +extern char *make_class_name(const char *name, struct kobject *kobj); + +extern int devres_release_all(struct device *dev); +extern void device_block_probing(void); +extern void device_unblock_probing(void); + +/* /sys/devices directory */ +extern struct kset *devices_kset; +extern void devices_kset_move_last(struct device *dev); + +#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) +extern void module_add_driver(struct module *mod, struct device_driver *drv); +extern void module_remove_driver(struct device_driver *drv); +#else +static inline void module_add_driver(struct module *mod, + struct device_driver *drv) { } +static inline void module_remove_driver(struct device_driver *drv) { } +#endif + +#ifdef CONFIG_DEVTMPFS +extern int devtmpfs_init(void); +#else +static inline int devtmpfs_init(void) { return 0; } +#endif + +/* Device links support */ +extern int device_links_read_lock(void); +extern void device_links_read_unlock(int idx); +extern int device_links_read_lock_held(void); +extern int device_links_check_suppliers(struct device *dev); +extern void device_links_driver_bound(struct device *dev); +extern void device_links_driver_cleanup(struct device *dev); +extern void device_links_no_driver(struct device *dev); +extern bool device_links_busy(struct device *dev); +extern void device_links_unbind_consumers(struct device *dev); + +/* device pm support */ +void device_pm_move_to_tail(struct device *dev); + +#ifdef CONFIG_DEVTMPFS +int devtmpfs_create_node(struct device *dev); +int devtmpfs_delete_node(struct device *dev); +#else +static inline int devtmpfs_create_node(struct device *dev) { return 0; } +static inline int devtmpfs_delete_node(struct device *dev) { return 0; } +#endif diff --git a/drivers/base/bus.c b/drivers/base/bus.c new file mode 100644 index 000000000..df85e928b --- /dev/null +++ b/drivers/base/bus.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * bus.c - bus driver management + * + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2007 Novell Inc. 
+ */ + +#include <linux/async.h> +#include <linux/device/bus.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/mutex.h> +#include <linux/sysfs.h> +#include "base.h" +#include "power/power.h" + +/* /sys/devices/system */ +static struct kset *system_kset; + +#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) + +/* + * sysfs bindings for drivers + */ + +#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr) + +#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = \ + __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) + +static int __must_check bus_rescan_devices_helper(struct device *dev, + void *data); + +static struct bus_type *bus_get(struct bus_type *bus) +{ + if (bus) { + kset_get(&bus->p->subsys); + return bus; + } + return NULL; +} + +static void bus_put(struct bus_type *bus) +{ + if (bus) + kset_put(&bus->p->subsys); +} + +static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct driver_attribute *drv_attr = to_drv_attr(attr); + struct driver_private *drv_priv = to_driver(kobj); + ssize_t ret = -EIO; + + if (drv_attr->show) + ret = drv_attr->show(drv_priv->driver, buf); + return ret; +} + +static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct driver_attribute *drv_attr = to_drv_attr(attr); + struct driver_private *drv_priv = to_driver(kobj); + ssize_t ret = -EIO; + + if (drv_attr->store) + ret = drv_attr->store(drv_priv->driver, buf, count); + return ret; +} + +static const struct sysfs_ops driver_sysfs_ops = { + .show = drv_attr_show, + .store = drv_attr_store, +}; + +static void driver_release(struct kobject *kobj) +{ + struct driver_private *drv_priv = to_driver(kobj); + + pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__); + kfree(drv_priv); +} + +static struct kobj_type driver_ktype = { + .sysfs_ops = &driver_sysfs_ops, + .release = driver_release, +}; + +/* + * sysfs bindings for buses + */ +static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct bus_attribute *bus_attr = to_bus_attr(attr); + struct subsys_private *subsys_priv = to_subsys_private(kobj); + ssize_t ret = 0; + + if (bus_attr->show) + ret = bus_attr->show(subsys_priv->bus, buf); + return ret; +} + +static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct bus_attribute *bus_attr = to_bus_attr(attr); + struct subsys_private *subsys_priv = to_subsys_private(kobj); + ssize_t ret = 0; + + if (bus_attr->store) + ret = bus_attr->store(subsys_priv->bus, buf, count); + return ret; +} + +static const struct sysfs_ops bus_sysfs_ops = { + .show = bus_attr_show, + .store = bus_attr_store, +}; + +int bus_create_file(struct bus_type *bus, struct bus_attribute *attr) +{ + int error; + if (bus_get(bus)) { + error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr); + bus_put(bus); + } else + error = -EINVAL; + return error; +} +EXPORT_SYMBOL_GPL(bus_create_file); + +void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr) +{ + if (bus_get(bus)) { + sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr); + bus_put(bus); + } +} +EXPORT_SYMBOL_GPL(bus_remove_file); + +static void bus_release(struct kobject *kobj) +{ + struct subsys_private *priv = 
to_subsys_private(kobj); + struct bus_type *bus = priv->bus; + + kfree(priv); + bus->p = NULL; +} + +static struct kobj_type bus_ktype = { + .sysfs_ops = &bus_sysfs_ops, + .release = bus_release, +}; + +static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) +{ + struct kobj_type *ktype = get_ktype(kobj); + + if (ktype == &bus_ktype) + return 1; + return 0; +} + +static const struct kset_uevent_ops bus_uevent_ops = { + .filter = bus_uevent_filter, +}; + +static struct kset *bus_kset; + +/* Manually detach a device from its associated driver. */ +static ssize_t unbind_store(struct device_driver *drv, const char *buf, + size_t count) +{ + struct bus_type *bus = bus_get(drv->bus); + struct device *dev; + int err = -ENODEV; + + dev = bus_find_device_by_name(bus, NULL, buf); + if (dev && dev->driver == drv) { + device_driver_detach(dev); + err = count; + } + put_device(dev); + bus_put(bus); + return err; +} +static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store); + +/* + * Manually attach a device to a driver. + * Note: the driver must want to bind to the device, + * it is not possible to override the driver's id table. + */ +static ssize_t bind_store(struct device_driver *drv, const char *buf, + size_t count) +{ + struct bus_type *bus = bus_get(drv->bus); + struct device *dev; + int err = -ENODEV; + + dev = bus_find_device_by_name(bus, NULL, buf); + if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { + err = device_driver_attach(drv, dev); + + if (err > 0) { + /* success */ + err = count; + } else if (err == 0) { + /* driver didn't accept device */ + err = -ENODEV; + } + } + put_device(dev); + bus_put(bus); + return err; +} +static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); + +static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) +{ + return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe); +} + +static ssize_t drivers_autoprobe_store(struct bus_type *bus, + const char *buf, size_t count) +{ + if (buf[0] == '0') + bus->p->drivers_autoprobe = 0; + else + bus->p->drivers_autoprobe = 1; + return count; +} + +static ssize_t drivers_probe_store(struct bus_type *bus, + const char *buf, size_t count) +{ + struct device *dev; + int err = -EINVAL; + + dev = bus_find_device_by_name(bus, NULL, buf); + if (!dev) + return -ENODEV; + if (bus_rescan_devices_helper(dev, NULL) == 0) + err = count; + put_device(dev); + return err; +} + +static struct device *next_device(struct klist_iter *i) +{ + struct klist_node *n = klist_next(i); + struct device *dev = NULL; + struct device_private *dev_prv; + + if (n) { + dev_prv = to_device_private_bus(n); + dev = dev_prv->device; + } + return dev; +} + +/** + * bus_for_each_dev - device iterator. + * @bus: bus type. + * @start: device to start iterating from. + * @data: data for the callback. + * @fn: function to be called for each device. + * + * Iterate over @bus's list of devices, and call @fn for each, + * passing it @data. If @start is not NULL, we use that device to + * begin iterating from. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + * + * NOTE: The device that returns a non-zero value is not retained + * in any way, nor is its refcount incremented. If the caller needs + * to retain this data, it should do so, and increment the reference + * count in the supplied callback. 
+ */ +int bus_for_each_dev(struct bus_type *bus, struct device *start, + void *data, int (*fn)(struct device *, void *)) +{ + struct klist_iter i; + struct device *dev; + int error = 0; + + if (!bus || !bus->p) + return -EINVAL; + + klist_iter_init_node(&bus->p->klist_devices, &i, + (start ? &start->p->knode_bus : NULL)); + while (!error && (dev = next_device(&i))) + error = fn(dev, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(bus_for_each_dev); + +/** + * bus_find_device - device iterator for locating a particular device. + * @bus: bus type + * @start: Device to begin with + * @data: Data to pass to match function + * @match: Callback function to check device + * + * This is similar to the bus_for_each_dev() function above, but it + * returns a reference to a device that is 'found' for later use, as + * determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero, this function will + * return to the caller and not iterate over any more devices. + */ +struct device *bus_find_device(struct bus_type *bus, + struct device *start, const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct klist_iter i; + struct device *dev; + + if (!bus || !bus->p) + return NULL; + + klist_iter_init_node(&bus->p->klist_devices, &i, + (start ? &start->p->knode_bus : NULL)); + while ((dev = next_device(&i))) + if (match(dev, data) && get_device(dev)) + break; + klist_iter_exit(&i); + return dev; +} +EXPORT_SYMBOL_GPL(bus_find_device); + +/** + * subsys_find_device_by_id - find a device with a specific enumeration number + * @subsys: subsystem + * @id: index 'id' in struct device + * @hint: device to check first + * + * Check the hint's next object and if it is a match return it directly, + * otherwise, fall back to a full list search. Either way a reference for + * the returned object is taken. + */ +struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id, + struct device *hint) +{ + struct klist_iter i; + struct device *dev; + + if (!subsys) + return NULL; + + if (hint) { + klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus); + dev = next_device(&i); + if (dev && dev->id == id && get_device(dev)) { + klist_iter_exit(&i); + return dev; + } + klist_iter_exit(&i); + } + + klist_iter_init_node(&subsys->p->klist_devices, &i, NULL); + while ((dev = next_device(&i))) { + if (dev->id == id && get_device(dev)) { + klist_iter_exit(&i); + return dev; + } + } + klist_iter_exit(&i); + return NULL; +} +EXPORT_SYMBOL_GPL(subsys_find_device_by_id); + +static struct device_driver *next_driver(struct klist_iter *i) +{ + struct klist_node *n = klist_next(i); + struct driver_private *drv_priv; + + if (n) { + drv_priv = container_of(n, struct driver_private, knode_bus); + return drv_priv->driver; + } + return NULL; +} + +/** + * bus_for_each_drv - driver iterator + * @bus: bus we're dealing with. + * @start: driver to start iterating on. + * @data: data to pass to the callback. + * @fn: function to call for each driver. + * + * This is nearly identical to the device iterator above. + * We iterate over each driver that belongs to @bus, and call + * @fn for each. If @fn returns anything but 0, we break out + * and return it. If @start is not NULL, we use it as the head + * of the list. + * + * NOTE: we don't return the driver that returns a non-zero + * value, nor do we leave the reference count incremented for that + * driver. 
If the caller needs to know that info, it must set it + * in the callback. It must also be sure to increment the refcount + * so it doesn't disappear before returning to the caller. + */ +int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, + void *data, int (*fn)(struct device_driver *, void *)) +{ + struct klist_iter i; + struct device_driver *drv; + int error = 0; + + if (!bus) + return -EINVAL; + + klist_iter_init_node(&bus->p->klist_drivers, &i, + start ? &start->p->knode_bus : NULL); + while ((drv = next_driver(&i)) && !error) + error = fn(drv, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(bus_for_each_drv); + +/** + * bus_add_device - add device to bus + * @dev: device being added + * + * - Add device's bus attributes. + * - Create links to device's bus. + * - Add the device to its bus's list of devices. + */ +int bus_add_device(struct device *dev) +{ + struct bus_type *bus = bus_get(dev->bus); + int error = 0; + + if (bus) { + pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev)); + error = device_add_groups(dev, bus->dev_groups); + if (error) + goto out_put; + error = sysfs_create_link(&bus->p->devices_kset->kobj, + &dev->kobj, dev_name(dev)); + if (error) + goto out_groups; + error = sysfs_create_link(&dev->kobj, + &dev->bus->p->subsys.kobj, "subsystem"); + if (error) + goto out_subsys; + klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices); + } + return 0; + +out_subsys: + sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); +out_groups: + device_remove_groups(dev, bus->dev_groups); +out_put: + bus_put(dev->bus); + return error; +} + +/** + * bus_probe_device - probe drivers for a new device + * @dev: device to probe + * + * - Automatically probe for a driver if the bus allows it. + */ +void bus_probe_device(struct device *dev) +{ + struct bus_type *bus = dev->bus; + struct subsys_interface *sif; + + if (!bus) + return; + + if (bus->p->drivers_autoprobe) + device_initial_probe(dev); + + mutex_lock(&bus->p->mutex); + list_for_each_entry(sif, &bus->p->interfaces, node) + if (sif->add_dev) + sif->add_dev(dev, sif); + mutex_unlock(&bus->p->mutex); +} + +/** + * bus_remove_device - remove device from bus + * @dev: device to be removed + * + * - Remove device from all interfaces. + * - Remove symlink from bus' directory. + * - Delete device from bus's list. + * - Detach from its driver. + * - Drop reference taken in bus_add_device(). 
+ */ +void bus_remove_device(struct device *dev) +{ + struct bus_type *bus = dev->bus; + struct subsys_interface *sif; + + if (!bus) + return; + + mutex_lock(&bus->p->mutex); + list_for_each_entry(sif, &bus->p->interfaces, node) + if (sif->remove_dev) + sif->remove_dev(dev, sif); + mutex_unlock(&bus->p->mutex); + + sysfs_remove_link(&dev->kobj, "subsystem"); + sysfs_remove_link(&dev->bus->p->devices_kset->kobj, + dev_name(dev)); + device_remove_groups(dev, dev->bus->dev_groups); + if (klist_node_attached(&dev->p->knode_bus)) + klist_del(&dev->p->knode_bus); + + pr_debug("bus: '%s': remove device %s\n", + dev->bus->name, dev_name(dev)); + device_release_driver(dev); + bus_put(dev->bus); +} + +static int __must_check add_bind_files(struct device_driver *drv) +{ + int ret; + + ret = driver_create_file(drv, &driver_attr_unbind); + if (ret == 0) { + ret = driver_create_file(drv, &driver_attr_bind); + if (ret) + driver_remove_file(drv, &driver_attr_unbind); + } + return ret; +} + +static void remove_bind_files(struct device_driver *drv) +{ + driver_remove_file(drv, &driver_attr_bind); + driver_remove_file(drv, &driver_attr_unbind); +} + +static BUS_ATTR_WO(drivers_probe); +static BUS_ATTR_RW(drivers_autoprobe); + +static int add_probe_files(struct bus_type *bus) +{ + int retval; + + retval = bus_create_file(bus, &bus_attr_drivers_probe); + if (retval) + goto out; + + retval = bus_create_file(bus, &bus_attr_drivers_autoprobe); + if (retval) + bus_remove_file(bus, &bus_attr_drivers_probe); +out: + return retval; +} + +static void remove_probe_files(struct bus_type *bus) +{ + bus_remove_file(bus, &bus_attr_drivers_autoprobe); + bus_remove_file(bus, &bus_attr_drivers_probe); +} + +static ssize_t uevent_store(struct device_driver *drv, const char *buf, + size_t count) +{ + int rc; + + rc = kobject_synth_uevent(&drv->p->kobj, buf, count); + return rc ? rc : count; +} +static DRIVER_ATTR_WO(uevent); + +/** + * bus_add_driver - Add a driver to the bus. + * @drv: driver. + */ +int bus_add_driver(struct device_driver *drv) +{ + struct bus_type *bus; + struct driver_private *priv; + int error = 0; + + bus = bus_get(drv->bus); + if (!bus) + return -EINVAL; + + pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + error = -ENOMEM; + goto out_put_bus; + } + klist_init(&priv->klist_devices, NULL, NULL); + priv->driver = drv; + drv->p = priv; + priv->kobj.kset = bus->p->drivers_kset; + error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL, + "%s", drv->name); + if (error) + goto out_unregister; + + klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); + if (drv->bus->p->drivers_autoprobe) { + error = driver_attach(drv); + if (error) + goto out_del_list; + } + module_add_driver(drv->owner, drv); + + error = driver_create_file(drv, &driver_attr_uevent); + if (error) { + printk(KERN_ERR "%s: uevent attr (%s) failed\n", + __func__, drv->name); + } + error = driver_add_groups(drv, bus->drv_groups); + if (error) { + /* How the hell do we get out of this pickle? 
Give up */ + printk(KERN_ERR "%s: driver_create_groups(%s) failed\n", + __func__, drv->name); + } + + if (!drv->suppress_bind_attrs) { + error = add_bind_files(drv); + if (error) { + /* Ditto */ + printk(KERN_ERR "%s: add_bind_files(%s) failed\n", + __func__, drv->name); + } + } + + return 0; + +out_del_list: + klist_del(&priv->knode_bus); +out_unregister: + kobject_put(&priv->kobj); + /* drv->p is freed in driver_release() */ + drv->p = NULL; +out_put_bus: + bus_put(bus); + return error; +} + +/** + * bus_remove_driver - delete driver from bus's knowledge. + * @drv: driver. + * + * Detach the driver from the devices it controls, and remove + * it from its bus's list of drivers. Finally, we drop the reference + * to the bus we took in bus_add_driver(). + */ +void bus_remove_driver(struct device_driver *drv) +{ + if (!drv->bus) + return; + + if (!drv->suppress_bind_attrs) + remove_bind_files(drv); + driver_remove_groups(drv, drv->bus->drv_groups); + driver_remove_file(drv, &driver_attr_uevent); + klist_remove(&drv->p->knode_bus); + pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name); + driver_detach(drv); + module_remove_driver(drv); + kobject_put(&drv->p->kobj); + bus_put(drv->bus); +} + +/* Helper for bus_rescan_devices's iter */ +static int __must_check bus_rescan_devices_helper(struct device *dev, + void *data) +{ + int ret = 0; + + if (!dev->driver) { + if (dev->parent && dev->bus->need_parent_lock) + device_lock(dev->parent); + ret = device_attach(dev); + if (dev->parent && dev->bus->need_parent_lock) + device_unlock(dev->parent); + } + return ret < 0 ? ret : 0; +} + +/** + * bus_rescan_devices - rescan devices on the bus for possible drivers + * @bus: the bus to scan. + * + * This function will look for devices on the bus with no driver + * attached and rescan it against existing drivers to see if it matches + * any by calling device_attach() for the unbound devices. + */ +int bus_rescan_devices(struct bus_type *bus) +{ + return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper); +} +EXPORT_SYMBOL_GPL(bus_rescan_devices); + +/** + * device_reprobe - remove driver for a device and probe for a new driver + * @dev: the device to reprobe + * + * This function detaches the attached driver (if any) for the given + * device and restarts the driver probing process. It is intended + * to use if probing criteria changed during a devices lifetime and + * driver attachment should change accordingly. + */ +int device_reprobe(struct device *dev) +{ + if (dev->driver) + device_driver_detach(dev); + return bus_rescan_devices_helper(dev, NULL); +} +EXPORT_SYMBOL_GPL(device_reprobe); + +/** + * find_bus - locate bus by name. + * @name: name of bus. + * + * Call kset_find_obj() to iterate over list of buses to + * find a bus by name. Return bus if found. + * + * Note that kset_find_obj increments bus' reference count. + */ +#if 0 +struct bus_type *find_bus(char *name) +{ + struct kobject *k = kset_find_obj(bus_kset, name); + return k ? 
to_bus(k) : NULL; +} +#endif /* 0 */ + +static int bus_add_groups(struct bus_type *bus, + const struct attribute_group **groups) +{ + return sysfs_create_groups(&bus->p->subsys.kobj, groups); +} + +static void bus_remove_groups(struct bus_type *bus, + const struct attribute_group **groups) +{ + sysfs_remove_groups(&bus->p->subsys.kobj, groups); +} + +static void klist_devices_get(struct klist_node *n) +{ + struct device_private *dev_prv = to_device_private_bus(n); + struct device *dev = dev_prv->device; + + get_device(dev); +} + +static void klist_devices_put(struct klist_node *n) +{ + struct device_private *dev_prv = to_device_private_bus(n); + struct device *dev = dev_prv->device; + + put_device(dev); +} + +static ssize_t bus_uevent_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int rc; + + rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); + return rc ? rc : count; +} +/* + * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() + * here, but can not use it as earlier in the file we have + * DEVICE_ATTR_WO(uevent), which would cause a clash with the with the store + * function name. + */ +static struct bus_attribute bus_attr_uevent = __ATTR(uevent, S_IWUSR, NULL, + bus_uevent_store); + +/** + * bus_register - register a driver-core subsystem + * @bus: bus to register + * + * Once we have that, we register the bus with the kobject + * infrastructure, then register the children subsystems it has: + * the devices and drivers that belong to the subsystem. + */ +int bus_register(struct bus_type *bus) +{ + int retval; + struct subsys_private *priv; + struct lock_class_key *key = &bus->lock_key; + + priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->bus = bus; + bus->p = priv; + + BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier); + + retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name); + if (retval) + goto out; + + priv->subsys.kobj.kset = bus_kset; + priv->subsys.kobj.ktype = &bus_ktype; + priv->drivers_autoprobe = 1; + + retval = kset_register(&priv->subsys); + if (retval) + goto out; + + retval = bus_create_file(bus, &bus_attr_uevent); + if (retval) + goto bus_uevent_fail; + + priv->devices_kset = kset_create_and_add("devices", NULL, + &priv->subsys.kobj); + if (!priv->devices_kset) { + retval = -ENOMEM; + goto bus_devices_fail; + } + + priv->drivers_kset = kset_create_and_add("drivers", NULL, + &priv->subsys.kobj); + if (!priv->drivers_kset) { + retval = -ENOMEM; + goto bus_drivers_fail; + } + + INIT_LIST_HEAD(&priv->interfaces); + __mutex_init(&priv->mutex, "subsys mutex", key); + klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put); + klist_init(&priv->klist_drivers, NULL, NULL); + + retval = add_probe_files(bus); + if (retval) + goto bus_probe_files_fail; + + retval = bus_add_groups(bus, bus->bus_groups); + if (retval) + goto bus_groups_fail; + + pr_debug("bus: '%s': registered\n", bus->name); + return 0; + +bus_groups_fail: + remove_probe_files(bus); +bus_probe_files_fail: + kset_unregister(bus->p->drivers_kset); +bus_drivers_fail: + kset_unregister(bus->p->devices_kset); +bus_devices_fail: + bus_remove_file(bus, &bus_attr_uevent); +bus_uevent_fail: + kset_unregister(&bus->p->subsys); +out: + kfree(bus->p); + bus->p = NULL; + return retval; +} +EXPORT_SYMBOL_GPL(bus_register); + +/** + * bus_unregister - remove a bus from the system + * @bus: bus. + * + * Unregister the child subsystems and the bus itself. 
+ * Finally, we call bus_put() to release the refcount + */ +void bus_unregister(struct bus_type *bus) +{ + pr_debug("bus: '%s': unregistering\n", bus->name); + if (bus->dev_root) + device_unregister(bus->dev_root); + bus_remove_groups(bus, bus->bus_groups); + remove_probe_files(bus); + kset_unregister(bus->p->drivers_kset); + kset_unregister(bus->p->devices_kset); + bus_remove_file(bus, &bus_attr_uevent); + kset_unregister(&bus->p->subsys); +} +EXPORT_SYMBOL_GPL(bus_unregister); + +int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&bus->p->bus_notifier, nb); +} +EXPORT_SYMBOL_GPL(bus_register_notifier); + +int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb); +} +EXPORT_SYMBOL_GPL(bus_unregister_notifier); + +struct kset *bus_get_kset(struct bus_type *bus) +{ + return &bus->p->subsys; +} +EXPORT_SYMBOL_GPL(bus_get_kset); + +struct klist *bus_get_device_klist(struct bus_type *bus) +{ + return &bus->p->klist_devices; +} +EXPORT_SYMBOL_GPL(bus_get_device_klist); + +/* + * Yes, this forcibly breaks the klist abstraction temporarily. It + * just wants to sort the klist, not change reference counts and + * take/drop locks rapidly in the process. It does all this while + * holding the lock for the list, so objects can't otherwise be + * added/removed while we're swizzling. + */ +static void device_insertion_sort_klist(struct device *a, struct list_head *list, + int (*compare)(const struct device *a, + const struct device *b)) +{ + struct klist_node *n; + struct device_private *dev_prv; + struct device *b; + + list_for_each_entry(n, list, n_node) { + dev_prv = to_device_private_bus(n); + b = dev_prv->device; + if (compare(a, b) <= 0) { + list_move_tail(&a->p->knode_bus.n_node, + &b->p->knode_bus.n_node); + return; + } + } + list_move_tail(&a->p->knode_bus.n_node, list); +} + +void bus_sort_breadthfirst(struct bus_type *bus, + int (*compare)(const struct device *a, + const struct device *b)) +{ + LIST_HEAD(sorted_devices); + struct klist_node *n, *tmp; + struct device_private *dev_prv; + struct device *dev; + struct klist *device_klist; + + device_klist = bus_get_device_klist(bus); + + spin_lock(&device_klist->k_lock); + list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) { + dev_prv = to_device_private_bus(n); + dev = dev_prv->device; + device_insertion_sort_klist(dev, &sorted_devices, compare); + } + list_splice(&sorted_devices, &device_klist->k_list); + spin_unlock(&device_klist->k_lock); +} +EXPORT_SYMBOL_GPL(bus_sort_breadthfirst); + +/** + * subsys_dev_iter_init - initialize subsys device iterator + * @iter: subsys iterator to initialize + * @subsys: the subsys we wanna iterate over + * @start: the device to start iterating from, if any + * @type: device_type of the devices to iterate over, NULL for all + * + * Initialize subsys iterator @iter such that it iterates over devices + * of @subsys. If @start is set, the list iteration will start there, + * otherwise if it is NULL, the iteration starts at the beginning of + * the list. 
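+ *
+ * Minimal usage sketch (my_subsys is a hypothetical bus_type); each
+ * device returned by the iterator is already referenced, so no extra
+ * get_device()/put_device() is needed inside the loop:
+ *
+ *	struct subsys_dev_iter iter;
+ *	struct device *dev;
+ *
+ *	subsys_dev_iter_init(&iter, &my_subsys, NULL, NULL);
+ *	while ((dev = subsys_dev_iter_next(&iter)))
+ *		dev_info(dev, "found\n");
+ *	subsys_dev_iter_exit(&iter);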
+ */ +void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys, + struct device *start, const struct device_type *type) +{ + struct klist_node *start_knode = NULL; + + if (start) + start_knode = &start->p->knode_bus; + klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode); + iter->type = type; +} +EXPORT_SYMBOL_GPL(subsys_dev_iter_init); + +/** + * subsys_dev_iter_next - iterate to the next device + * @iter: subsys iterator to proceed + * + * Proceed @iter to the next device and return it. Returns NULL if + * iteration is complete. + * + * The returned device is referenced and won't be released till + * iterator is proceed to the next device or exited. The caller is + * free to do whatever it wants to do with the device including + * calling back into subsys code. + */ +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) +{ + struct klist_node *knode; + struct device *dev; + + for (;;) { + knode = klist_next(&iter->ki); + if (!knode) + return NULL; + dev = to_device_private_bus(knode)->device; + if (!iter->type || iter->type == dev->type) + return dev; + } +} +EXPORT_SYMBOL_GPL(subsys_dev_iter_next); + +/** + * subsys_dev_iter_exit - finish iteration + * @iter: subsys iterator to finish + * + * Finish an iteration. Always call this function after iteration is + * complete whether the iteration ran till the end or not. + */ +void subsys_dev_iter_exit(struct subsys_dev_iter *iter) +{ + klist_iter_exit(&iter->ki); +} +EXPORT_SYMBOL_GPL(subsys_dev_iter_exit); + +int subsys_interface_register(struct subsys_interface *sif) +{ + struct bus_type *subsys; + struct subsys_dev_iter iter; + struct device *dev; + + if (!sif || !sif->subsys) + return -ENODEV; + + subsys = bus_get(sif->subsys); + if (!subsys) + return -EINVAL; + + mutex_lock(&subsys->p->mutex); + list_add_tail(&sif->node, &subsys->p->interfaces); + if (sif->add_dev) { + subsys_dev_iter_init(&iter, subsys, NULL, NULL); + while ((dev = subsys_dev_iter_next(&iter))) + sif->add_dev(dev, sif); + subsys_dev_iter_exit(&iter); + } + mutex_unlock(&subsys->p->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(subsys_interface_register); + +void subsys_interface_unregister(struct subsys_interface *sif) +{ + struct bus_type *subsys; + struct subsys_dev_iter iter; + struct device *dev; + + if (!sif || !sif->subsys) + return; + + subsys = sif->subsys; + + mutex_lock(&subsys->p->mutex); + list_del_init(&sif->node); + if (sif->remove_dev) { + subsys_dev_iter_init(&iter, subsys, NULL, NULL); + while ((dev = subsys_dev_iter_next(&iter))) + sif->remove_dev(dev, sif); + subsys_dev_iter_exit(&iter); + } + mutex_unlock(&subsys->p->mutex); + + bus_put(subsys); +} +EXPORT_SYMBOL_GPL(subsys_interface_unregister); + +static void system_root_device_release(struct device *dev) +{ + kfree(dev); +} + +static int subsys_register(struct bus_type *subsys, + const struct attribute_group **groups, + struct kobject *parent_of_root) +{ + struct device *dev; + int err; + + err = bus_register(subsys); + if (err < 0) + return err; + + dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!dev) { + err = -ENOMEM; + goto err_dev; + } + + err = dev_set_name(dev, "%s", subsys->name); + if (err < 0) + goto err_name; + + dev->kobj.parent = parent_of_root; + dev->groups = groups; + dev->release = system_root_device_release; + + err = device_register(dev); + if (err < 0) + goto err_dev_reg; + + subsys->dev_root = dev; + return 0; + +err_dev_reg: + put_device(dev); + dev = NULL; +err_name: + kfree(dev); +err_dev: + 
bus_unregister(subsys); + return err; +} + +/** + * subsys_system_register - register a subsystem at /sys/devices/system/ + * @subsys: system subsystem + * @groups: default attributes for the root device + * + * All 'system' subsystems have a /sys/devices/system/<name> root device + * with the name of the subsystem. The root device can carry subsystem- + * wide attributes. All registered devices are below this single root + * device and are named after the subsystem with a simple enumeration + * number appended. The registered devices are not explicitly named; + * only 'id' in the device needs to be set. + * + * Do not use this interface for anything new, it exists for compatibility + * with bad ideas only. New subsystems should use plain subsystems; and + * add the subsystem-wide attributes should be added to the subsystem + * directory itself and not some create fake root-device placed in + * /sys/devices/system/<name>. + */ +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups) +{ + return subsys_register(subsys, groups, &system_kset->kobj); +} +EXPORT_SYMBOL_GPL(subsys_system_register); + +/** + * subsys_virtual_register - register a subsystem at /sys/devices/virtual/ + * @subsys: virtual subsystem + * @groups: default attributes for the root device + * + * All 'virtual' subsystems have a /sys/devices/system/<name> root device + * with the name of the subystem. The root device can carry subsystem-wide + * attributes. All registered devices are below this single root device. + * There's no restriction on device naming. This is for kernel software + * constructs which need sysfs interface. + */ +int subsys_virtual_register(struct bus_type *subsys, + const struct attribute_group **groups) +{ + struct kobject *virtual_dir; + + virtual_dir = virtual_device_parent(NULL); + if (!virtual_dir) + return -ENOMEM; + + return subsys_register(subsys, groups, virtual_dir); +} +EXPORT_SYMBOL_GPL(subsys_virtual_register); + +int __init buses_init(void) +{ + bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL); + if (!bus_kset) + return -ENOMEM; + + system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj); + if (!system_kset) + return -ENOMEM; + + return 0; +} diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c new file mode 100644 index 000000000..bfc095956 --- /dev/null +++ b/drivers/base/cacheinfo.c @@ -0,0 +1,674 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * cacheinfo support - processor cache information via sysfs + * + * Based on arch/x86/kernel/cpu/intel_cacheinfo.c + * Author: Sudeep Holla <sudeep.holla@arm.com> + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/cacheinfo.h> +#include <linux/compiler.h> +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/sysfs.h> + +/* pointer to per cpu cacheinfo */ +static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo); +#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) +#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves) +#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list) + +struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) +{ + return ci_cacheinfo(cpu); +} + +#ifdef CONFIG_OF +static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, + struct cacheinfo *sib_leaf) +{ + return sib_leaf->fw_token == this_leaf->fw_token; +} + 
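+/*
+ * Illustrative sketch of how other kernel code can consume the per-CPU
+ * data above (the caller and the printout are hypothetical): fetch the
+ * descriptor with get_cpu_cacheinfo() and walk its leaves, which are
+ * valid once detect_cache_attributes() has run for that CPU.
+ *
+ *	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < ci->num_leaves; i++)
+ *		pr_info("L%u cache: %u bytes\n",
+ *			ci->info_list[i].level, ci->info_list[i].size);
+ */
+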
+/* OF properties to query for a given cache type */ +struct cache_type_info { + const char *size_prop; + const char *line_size_props[2]; + const char *nr_sets_prop; +}; + +static const struct cache_type_info cache_type_info[] = { + { + .size_prop = "cache-size", + .line_size_props = { "cache-line-size", + "cache-block-size", }, + .nr_sets_prop = "cache-sets", + }, { + .size_prop = "i-cache-size", + .line_size_props = { "i-cache-line-size", + "i-cache-block-size", }, + .nr_sets_prop = "i-cache-sets", + }, { + .size_prop = "d-cache-size", + .line_size_props = { "d-cache-line-size", + "d-cache-block-size", }, + .nr_sets_prop = "d-cache-sets", + }, +}; + +static inline int get_cacheinfo_idx(enum cache_type type) +{ + if (type == CACHE_TYPE_UNIFIED) + return 0; + return type; +} + +static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) +{ + const char *propname; + int ct_idx; + + ct_idx = get_cacheinfo_idx(this_leaf->type); + propname = cache_type_info[ct_idx].size_prop; + + of_property_read_u32(np, propname, &this_leaf->size); +} + +/* not cache_line_size() because that's a macro in include/linux/cache.h */ +static void cache_get_line_size(struct cacheinfo *this_leaf, + struct device_node *np) +{ + int i, lim, ct_idx; + + ct_idx = get_cacheinfo_idx(this_leaf->type); + lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props); + + for (i = 0; i < lim; i++) { + int ret; + u32 line_size; + const char *propname; + + propname = cache_type_info[ct_idx].line_size_props[i]; + ret = of_property_read_u32(np, propname, &line_size); + if (!ret) { + this_leaf->coherency_line_size = line_size; + break; + } + } +} + +static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) +{ + const char *propname; + int ct_idx; + + ct_idx = get_cacheinfo_idx(this_leaf->type); + propname = cache_type_info[ct_idx].nr_sets_prop; + + of_property_read_u32(np, propname, &this_leaf->number_of_sets); +} + +static void cache_associativity(struct cacheinfo *this_leaf) +{ + unsigned int line_size = this_leaf->coherency_line_size; + unsigned int nr_sets = this_leaf->number_of_sets; + unsigned int size = this_leaf->size; + + /* + * If the cache is fully associative, there is no need to + * check the other properties. 
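+ *
+ * Otherwise ways = (size / nr_sets) / line_size; e.g. a 32 KiB
+ * cache with 64 sets and 64-byte lines works out to
+ * (32768 / 64) / 64 = 8 ways (numbers purely illustrative).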
+ */ + if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0)) + this_leaf->ways_of_associativity = (size / nr_sets) / line_size; +} + +static bool cache_node_is_unified(struct cacheinfo *this_leaf, + struct device_node *np) +{ + return of_property_read_bool(np, "cache-unified"); +} + +static void cache_of_set_props(struct cacheinfo *this_leaf, + struct device_node *np) +{ + /* + * init_cache_level must setup the cache level correctly + * overriding the architecturally specified levels, so + * if type is NONE at this stage, it should be unified + */ + if (this_leaf->type == CACHE_TYPE_NOCACHE && + cache_node_is_unified(this_leaf, np)) + this_leaf->type = CACHE_TYPE_UNIFIED; + cache_size(this_leaf, np); + cache_get_line_size(this_leaf, np); + cache_nr_sets(this_leaf, np); + cache_associativity(this_leaf); +} + +static int cache_setup_of_node(unsigned int cpu) +{ + struct device_node *np; + struct cacheinfo *this_leaf; + struct device *cpu_dev = get_cpu_device(cpu); + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + unsigned int index = 0; + + /* skip if fw_token is already populated */ + if (this_cpu_ci->info_list->fw_token) { + return 0; + } + + if (!cpu_dev) { + pr_err("No cpu device for CPU %d\n", cpu); + return -ENODEV; + } + np = cpu_dev->of_node; + if (!np) { + pr_err("Failed to find cpu%d device node\n", cpu); + return -ENOENT; + } + + while (index < cache_leaves(cpu)) { + this_leaf = this_cpu_ci->info_list + index; + if (this_leaf->level != 1) + np = of_find_next_cache_node(np); + else + np = of_node_get(np);/* cpu node itself */ + if (!np) + break; + cache_of_set_props(this_leaf, np); + this_leaf->fw_token = np; + index++; + } + + if (index != cache_leaves(cpu)) /* not all OF nodes populated */ + return -ENOENT; + + return 0; +} +#else +static inline int cache_setup_of_node(unsigned int cpu) { return 0; } +static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, + struct cacheinfo *sib_leaf) +{ + /* + * For non-DT/ACPI systems, assume unique level 1 caches, system-wide + * shared caches for all other levels. 
This will be used only if + * arch specific code has not populated shared_cpu_map + */ + return !(this_leaf->level == 1); +} +#endif + +int __weak cache_setup_acpi(unsigned int cpu) +{ + return -ENOTSUPP; +} + +unsigned int coherency_max_size; + +static int cache_shared_cpu_map_setup(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf, *sib_leaf; + unsigned int index; + int ret = 0; + + if (this_cpu_ci->cpu_map_populated) + return 0; + + if (of_have_populated_dt()) + ret = cache_setup_of_node(cpu); + else if (!acpi_disabled) + ret = cache_setup_acpi(cpu); + + if (ret) + return ret; + + for (index = 0; index < cache_leaves(cpu); index++) { + unsigned int i; + + this_leaf = this_cpu_ci->info_list + index; + /* skip if shared_cpu_map is already populated */ + if (!cpumask_empty(&this_leaf->shared_cpu_map)) + continue; + + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + for_each_online_cpu(i) { + struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); + + if (i == cpu || !sib_cpu_ci->info_list) + continue;/* skip if itself or no cacheinfo */ + sib_leaf = sib_cpu_ci->info_list + index; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { + cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); + cpumask_set_cpu(i, &this_leaf->shared_cpu_map); + } + } + /* record the maximum cache line size */ + if (this_leaf->coherency_line_size > coherency_max_size) + coherency_max_size = this_leaf->coherency_line_size; + } + + return 0; +} + +static void cache_shared_cpu_map_remove(unsigned int cpu) +{ + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf, *sib_leaf; + unsigned int sibling, index; + + for (index = 0; index < cache_leaves(cpu); index++) { + this_leaf = this_cpu_ci->info_list + index; + for_each_cpu(sibling, &this_leaf->shared_cpu_map) { + struct cpu_cacheinfo *sib_cpu_ci; + + if (sibling == cpu) /* skip itself */ + continue; + + sib_cpu_ci = get_cpu_cacheinfo(sibling); + if (!sib_cpu_ci->info_list) + continue; + + sib_leaf = sib_cpu_ci->info_list + index; + cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); + cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); + } + if (of_have_populated_dt()) + of_node_put(this_leaf->fw_token); + } +} + +static void free_cache_attributes(unsigned int cpu) +{ + if (!per_cpu_cacheinfo(cpu)) + return; + + cache_shared_cpu_map_remove(cpu); + + kfree(per_cpu_cacheinfo(cpu)); + per_cpu_cacheinfo(cpu) = NULL; +} + +int __weak init_cache_level(unsigned int cpu) +{ + return -ENOENT; +} + +int __weak populate_cache_leaves(unsigned int cpu) +{ + return -ENOENT; +} + +static int detect_cache_attributes(unsigned int cpu) +{ + int ret; + + if (init_cache_level(cpu) || !cache_leaves(cpu)) + return -ENOENT; + + per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), + sizeof(struct cacheinfo), GFP_KERNEL); + if (per_cpu_cacheinfo(cpu) == NULL) + return -ENOMEM; + + /* + * populate_cache_leaves() may completely setup the cache leaves and + * shared_cpu_map or it may leave it partially setup. 
+ */ + ret = populate_cache_leaves(cpu); + if (ret) + goto free_ci; + /* + * For systems using DT for cache hierarchy, fw_token + * and shared_cpu_map will be set up here only if they are + * not populated already + */ + ret = cache_shared_cpu_map_setup(cpu); + if (ret) { + pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu); + goto free_ci; + } + + return 0; + +free_ci: + free_cache_attributes(cpu); + return ret; +} + +/* pointer to cpuX/cache device */ +static DEFINE_PER_CPU(struct device *, ci_cache_dev); +#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu)) + +static cpumask_t cache_dev_map; + +/* pointer to array of devices for cpuX/cache/indexY */ +static DEFINE_PER_CPU(struct device **, ci_index_dev); +#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu)) +#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx]) + +#define show_one(file_name, object) \ +static ssize_t file_name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ + return sysfs_emit(buf, "%u\n", this_leaf->object); \ +} + +show_one(id, id); +show_one(level, level); +show_one(coherency_line_size, coherency_line_size); +show_one(number_of_sets, number_of_sets); +show_one(physical_line_partition, physical_line_partition); +show_one(ways_of_associativity, ways_of_associativity); + +static ssize_t size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10); +} + +static ssize_t shared_cpu_map_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + const struct cpumask *mask = &this_leaf->shared_cpu_map; + + return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask); +} + +static ssize_t shared_cpu_list_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + const struct cpumask *mask = &this_leaf->shared_cpu_map; + + return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask); +} + +static ssize_t type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + const char *output; + + switch (this_leaf->type) { + case CACHE_TYPE_DATA: + output = "Data"; + break; + case CACHE_TYPE_INST: + output = "Instruction"; + break; + case CACHE_TYPE_UNIFIED: + output = "Unified"; + break; + default: + return -EINVAL; + } + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t allocation_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + unsigned int ci_attr = this_leaf->attributes; + const char *output; + + if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE)) + output = "ReadWriteAllocate"; + else if (ci_attr & CACHE_READ_ALLOCATE) + output = "ReadAllocate"; + else if (ci_attr & CACHE_WRITE_ALLOCATE) + output = "WriteAllocate"; + else + return 0; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t write_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + unsigned int ci_attr = this_leaf->attributes; + int n = 0; + + if (ci_attr & CACHE_WRITE_THROUGH) + n = sysfs_emit(buf, "WriteThrough\n"); + else if (ci_attr & CACHE_WRITE_BACK) + n = sysfs_emit(buf, "WriteBack\n"); + return n; +} + +static 
DEVICE_ATTR_RO(id); +static DEVICE_ATTR_RO(level); +static DEVICE_ATTR_RO(type); +static DEVICE_ATTR_RO(coherency_line_size); +static DEVICE_ATTR_RO(ways_of_associativity); +static DEVICE_ATTR_RO(number_of_sets); +static DEVICE_ATTR_RO(size); +static DEVICE_ATTR_RO(allocation_policy); +static DEVICE_ATTR_RO(write_policy); +static DEVICE_ATTR_RO(shared_cpu_map); +static DEVICE_ATTR_RO(shared_cpu_list); +static DEVICE_ATTR_RO(physical_line_partition); + +static struct attribute *cache_default_attrs[] = { + &dev_attr_id.attr, + &dev_attr_type.attr, + &dev_attr_level.attr, + &dev_attr_shared_cpu_map.attr, + &dev_attr_shared_cpu_list.attr, + &dev_attr_coherency_line_size.attr, + &dev_attr_ways_of_associativity.attr, + &dev_attr_number_of_sets.attr, + &dev_attr_size.attr, + &dev_attr_allocation_policy.attr, + &dev_attr_write_policy.attr, + &dev_attr_physical_line_partition.attr, + NULL +}; + +static umode_t +cache_default_attrs_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + struct device *dev = kobj_to_dev(kobj); + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + const struct cpumask *mask = &this_leaf->shared_cpu_map; + umode_t mode = attr->mode; + + if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID)) + return mode; + if ((attr == &dev_attr_type.attr) && this_leaf->type) + return mode; + if ((attr == &dev_attr_level.attr) && this_leaf->level) + return mode; + if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask)) + return mode; + if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask)) + return mode; + if ((attr == &dev_attr_coherency_line_size.attr) && + this_leaf->coherency_line_size) + return mode; + if ((attr == &dev_attr_ways_of_associativity.attr) && + this_leaf->size) /* allow 0 = full associativity */ + return mode; + if ((attr == &dev_attr_number_of_sets.attr) && + this_leaf->number_of_sets) + return mode; + if ((attr == &dev_attr_size.attr) && this_leaf->size) + return mode; + if ((attr == &dev_attr_write_policy.attr) && + (this_leaf->attributes & CACHE_WRITE_POLICY_MASK)) + return mode; + if ((attr == &dev_attr_allocation_policy.attr) && + (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK)) + return mode; + if ((attr == &dev_attr_physical_line_partition.attr) && + this_leaf->physical_line_partition) + return mode; + + return 0; +} + +static const struct attribute_group cache_default_group = { + .attrs = cache_default_attrs, + .is_visible = cache_default_attrs_is_visible, +}; + +static const struct attribute_group *cache_default_groups[] = { + &cache_default_group, + NULL, +}; + +static const struct attribute_group *cache_private_groups[] = { + &cache_default_group, + NULL, /* Place holder for private group */ + NULL, +}; + +const struct attribute_group * +__weak cache_get_priv_group(struct cacheinfo *this_leaf) +{ + return NULL; +} + +static const struct attribute_group ** +cache_get_attribute_groups(struct cacheinfo *this_leaf) +{ + const struct attribute_group *priv_group = + cache_get_priv_group(this_leaf); + + if (!priv_group) + return cache_default_groups; + + if (!cache_private_groups[1]) + cache_private_groups[1] = priv_group; + + return cache_private_groups; +} + +/* Add/Remove cache interface for CPU device */ +static void cpu_cache_sysfs_exit(unsigned int cpu) +{ + int i; + struct device *ci_dev; + + if (per_cpu_index_dev(cpu)) { + for (i = 0; i < cache_leaves(cpu); i++) { + ci_dev = per_cache_index_dev(cpu, i); + if (!ci_dev) + continue; + device_unregister(ci_dev); + } + 
kfree(per_cpu_index_dev(cpu)); + per_cpu_index_dev(cpu) = NULL; + } + device_unregister(per_cpu_cache_dev(cpu)); + per_cpu_cache_dev(cpu) = NULL; +} + +static int cpu_cache_sysfs_init(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + if (per_cpu_cacheinfo(cpu) == NULL) + return -ENOENT; + + per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache"); + if (IS_ERR(per_cpu_cache_dev(cpu))) + return PTR_ERR(per_cpu_cache_dev(cpu)); + + /* Allocate all required memory */ + per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu), + sizeof(struct device *), GFP_KERNEL); + if (unlikely(per_cpu_index_dev(cpu) == NULL)) + goto err_out; + + return 0; + +err_out: + cpu_cache_sysfs_exit(cpu); + return -ENOMEM; +} + +static int cache_add_dev(unsigned int cpu) +{ + unsigned int i; + int rc; + struct device *ci_dev, *parent; + struct cacheinfo *this_leaf; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + const struct attribute_group **cache_groups; + + rc = cpu_cache_sysfs_init(cpu); + if (unlikely(rc < 0)) + return rc; + + parent = per_cpu_cache_dev(cpu); + for (i = 0; i < cache_leaves(cpu); i++) { + this_leaf = this_cpu_ci->info_list + i; + if (this_leaf->disable_sysfs) + continue; + if (this_leaf->type == CACHE_TYPE_NOCACHE) + break; + cache_groups = cache_get_attribute_groups(this_leaf); + ci_dev = cpu_device_create(parent, this_leaf, cache_groups, + "index%1u", i); + if (IS_ERR(ci_dev)) { + rc = PTR_ERR(ci_dev); + goto err; + } + per_cache_index_dev(cpu, i) = ci_dev; + } + cpumask_set_cpu(cpu, &cache_dev_map); + + return 0; +err: + cpu_cache_sysfs_exit(cpu); + return rc; +} + +static int cacheinfo_cpu_online(unsigned int cpu) +{ + int rc = detect_cache_attributes(cpu); + + if (rc) + return rc; + rc = cache_add_dev(cpu); + if (rc) + free_cache_attributes(cpu); + return rc; +} + +static int cacheinfo_cpu_pre_down(unsigned int cpu) +{ + if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map)) + cpu_cache_sysfs_exit(cpu); + + free_cache_attributes(cpu); + return 0; +} + +static int __init cacheinfo_sysfs_init(void) +{ + return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE, + "base/cacheinfo:online", + cacheinfo_cpu_online, cacheinfo_cpu_pre_down); +} +device_initcall(cacheinfo_sysfs_init); diff --git a/drivers/base/class.c b/drivers/base/class.c new file mode 100644 index 000000000..ef7ff822b --- /dev/null +++ b/drivers/base/class.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * class.c - basic device class management + * + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * Copyright (c) 2003-2004 Greg Kroah-Hartman + * Copyright (c) 2003-2004 IBM Corp. 
+ */ + +#include <linux/device/class.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/string.h> +#include <linux/kdev_t.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/genhd.h> +#include <linux/mutex.h> +#include "base.h" + +#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr) + +static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct class_attribute *class_attr = to_class_attr(attr); + struct subsys_private *cp = to_subsys_private(kobj); + ssize_t ret = -EIO; + + if (class_attr->show) + ret = class_attr->show(cp->class, class_attr, buf); + return ret; +} + +static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct class_attribute *class_attr = to_class_attr(attr); + struct subsys_private *cp = to_subsys_private(kobj); + ssize_t ret = -EIO; + + if (class_attr->store) + ret = class_attr->store(cp->class, class_attr, buf, count); + return ret; +} + +static void class_release(struct kobject *kobj) +{ + struct subsys_private *cp = to_subsys_private(kobj); + struct class *class = cp->class; + + pr_debug("class '%s': release.\n", class->name); + + if (class->class_release) + class->class_release(class); + else + pr_debug("class '%s' does not have a release() function, " + "be careful\n", class->name); + + kfree(cp); +} + +static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj) +{ + struct subsys_private *cp = to_subsys_private(kobj); + struct class *class = cp->class; + + return class->ns_type; +} + +static const struct sysfs_ops class_sysfs_ops = { + .show = class_attr_show, + .store = class_attr_store, +}; + +static struct kobj_type class_ktype = { + .sysfs_ops = &class_sysfs_ops, + .release = class_release, + .child_ns_type = class_child_ns_type, +}; + +/* Hotplug events for classes go to the class subsys */ +static struct kset *class_kset; + + +int class_create_file_ns(struct class *cls, const struct class_attribute *attr, + const void *ns) +{ + int error; + + if (cls) + error = sysfs_create_file_ns(&cls->p->subsys.kobj, + &attr->attr, ns); + else + error = -EINVAL; + return error; +} + +void class_remove_file_ns(struct class *cls, const struct class_attribute *attr, + const void *ns) +{ + if (cls) + sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns); +} + +static struct class *class_get(struct class *cls) +{ + if (cls) + kset_get(&cls->p->subsys); + return cls; +} + +static void class_put(struct class *cls) +{ + if (cls) + kset_put(&cls->p->subsys); +} + +static struct device *klist_class_to_dev(struct klist_node *n) +{ + struct device_private *p = to_device_private_class(n); + return p->device; +} + +static void klist_class_dev_get(struct klist_node *n) +{ + struct device *dev = klist_class_to_dev(n); + + get_device(dev); +} + +static void klist_class_dev_put(struct klist_node *n) +{ + struct device *dev = klist_class_to_dev(n); + + put_device(dev); +} + +static int class_add_groups(struct class *cls, + const struct attribute_group **groups) +{ + return sysfs_create_groups(&cls->p->subsys.kobj, groups); +} + +static void class_remove_groups(struct class *cls, + const struct attribute_group **groups) +{ + return sysfs_remove_groups(&cls->p->subsys.kobj, groups); +} + +int __class_register(struct class *cls, struct lock_class_key *key) +{ + struct subsys_private *cp; + int error; + + pr_debug("device class '%s': registering\n", cls->name); + + cp = 
kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) + return -ENOMEM; + klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put); + INIT_LIST_HEAD(&cp->interfaces); + kset_init(&cp->glue_dirs); + __mutex_init(&cp->mutex, "subsys mutex", key); + error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name); + if (error) { + kfree(cp); + return error; + } + + /* set the default /sys/dev directory for devices of this class */ + if (!cls->dev_kobj) + cls->dev_kobj = sysfs_dev_char_kobj; + +#if defined(CONFIG_BLOCK) + /* let the block class directory show up in the root of sysfs */ + if (!sysfs_deprecated || cls != &block_class) + cp->subsys.kobj.kset = class_kset; +#else + cp->subsys.kobj.kset = class_kset; +#endif + cp->subsys.kobj.ktype = &class_ktype; + cp->class = cls; + cls->p = cp; + + error = kset_register(&cp->subsys); + if (error) { + kfree(cp); + return error; + } + error = class_add_groups(class_get(cls), cls->class_groups); + class_put(cls); + if (error) { + kobject_del(&cp->subsys.kobj); + kfree_const(cp->subsys.kobj.name); + kfree(cp); + } + return error; +} +EXPORT_SYMBOL_GPL(__class_register); + +void class_unregister(struct class *cls) +{ + pr_debug("device class '%s': unregistering\n", cls->name); + class_remove_groups(cls, cls->class_groups); + kset_unregister(&cls->p->subsys); +} + +static void class_create_release(struct class *cls) +{ + pr_debug("%s called for %s\n", __func__, cls->name); + kfree(cls); +} + +/** + * class_create - create a struct class structure + * @owner: pointer to the module that is to "own" this struct class + * @name: pointer to a string for the name of this class. + * @key: the lock_class_key for this class; used by mutex lock debugging + * + * This is used to create a struct class pointer that can then be used + * in calls to device_create(). + * + * Returns &struct class pointer on success, or ERR_PTR() on error. + * + * Note, the pointer created here is to be destroyed when finished by + * making a call to class_destroy(). + */ +struct class *__class_create(struct module *owner, const char *name, + struct lock_class_key *key) +{ + struct class *cls; + int retval; + + cls = kzalloc(sizeof(*cls), GFP_KERNEL); + if (!cls) { + retval = -ENOMEM; + goto error; + } + + cls->name = name; + cls->owner = owner; + cls->class_release = class_create_release; + + retval = __class_register(cls, key); + if (retval) + goto error; + + return cls; + +error: + kfree(cls); + return ERR_PTR(retval); +} +EXPORT_SYMBOL_GPL(__class_create); + +/** + * class_destroy - destroys a struct class structure + * @cls: pointer to the struct class that is to be destroyed + * + * Note, the pointer to be destroyed must have been created with a call + * to class_create(). + */ +void class_destroy(struct class *cls) +{ + if ((cls == NULL) || (IS_ERR(cls))) + return; + + class_unregister(cls); +} + +/** + * class_dev_iter_init - initialize class device iterator + * @iter: class iterator to initialize + * @class: the class we wanna iterate over + * @start: the device to start iterating from, if any + * @type: device_type of the devices to iterate over, NULL for all + * + * Initialize class iterator @iter such that it iterates over devices + * of @class. If @start is set, the list iteration will start there, + * otherwise if it is NULL, the iteration starts at the beginning of + * the list. 
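+ *
+ * Usage follows the same pattern as the other driver-core iterators;
+ * a sketch with a hypothetical my_class:
+ *
+ *	struct class_dev_iter iter;
+ *	struct device *dev;
+ *
+ *	class_dev_iter_init(&iter, &my_class, NULL, NULL);
+ *	while ((dev = class_dev_iter_next(&iter)))
+ *		dev_info(dev, "in class\n");
+ *	class_dev_iter_exit(&iter);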
+ */ +void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, + struct device *start, const struct device_type *type) +{ + struct klist_node *start_knode = NULL; + + if (start) + start_knode = &start->p->knode_class; + klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); + iter->type = type; +} +EXPORT_SYMBOL_GPL(class_dev_iter_init); + +/** + * class_dev_iter_next - iterate to the next device + * @iter: class iterator to proceed + * + * Proceed @iter to the next device and return it. Returns NULL if + * iteration is complete. + * + * The returned device is referenced and won't be released till + * iterator is proceed to the next device or exited. The caller is + * free to do whatever it wants to do with the device including + * calling back into class code. + */ +struct device *class_dev_iter_next(struct class_dev_iter *iter) +{ + struct klist_node *knode; + struct device *dev; + + while (1) { + knode = klist_next(&iter->ki); + if (!knode) + return NULL; + dev = klist_class_to_dev(knode); + if (!iter->type || iter->type == dev->type) + return dev; + } +} +EXPORT_SYMBOL_GPL(class_dev_iter_next); + +/** + * class_dev_iter_exit - finish iteration + * @iter: class iterator to finish + * + * Finish an iteration. Always call this function after iteration is + * complete whether the iteration ran till the end or not. + */ +void class_dev_iter_exit(struct class_dev_iter *iter) +{ + klist_iter_exit(&iter->ki); +} +EXPORT_SYMBOL_GPL(class_dev_iter_exit); + +/** + * class_for_each_device - device iterator + * @class: the class we're iterating + * @start: the device to start with in the list, if any. + * @data: data for the callback + * @fn: function to be called for each device + * + * Iterate over @class's list of devices, and call @fn for each, + * passing it @data. If @start is set, the list iteration will start + * there, otherwise if it is NULL, the iteration starts at the + * beginning of the list. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + * + * @fn is allowed to do anything including calling back into class + * code. There's no locking restriction. + */ +int class_for_each_device(struct class *class, struct device *start, + void *data, int (*fn)(struct device *, void *)) +{ + struct class_dev_iter iter; + struct device *dev; + int error = 0; + + if (!class) + return -EINVAL; + if (!class->p) { + WARN(1, "%s called for class '%s' before it was initialized", + __func__, class->name); + return -EINVAL; + } + + class_dev_iter_init(&iter, class, start, NULL); + while ((dev = class_dev_iter_next(&iter))) { + error = fn(dev, data); + if (error) + break; + } + class_dev_iter_exit(&iter); + + return error; +} +EXPORT_SYMBOL_GPL(class_for_each_device); + +/** + * class_find_device - device iterator for locating a particular device + * @class: the class we're iterating + * @start: Device to begin with + * @data: data for the match function + * @match: function to check device + * + * This is similar to the class_for_each_dev() function above, but it + * returns a reference to a device that is 'found' for later use, as + * determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero, this function will + * return to the caller and not iterate over any more devices. + * + * Note, you will need to drop the reference with put_device() after use. 
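+ *
+ * A lookup by name might look like this sketch (match_name and my_class
+ * are hypothetical):
+ *
+ *	static int match_name(struct device *dev, const void *data)
+ *	{
+ *		return sysfs_streq(dev_name(dev), data);
+ *	}
+ *
+ *	struct device *dev;
+ *
+ *	dev = class_find_device(&my_class, NULL, "foo0", match_name);
+ *	if (dev) {
+ *		...
+ *		put_device(dev);
+ *	}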
+ * + * @match is allowed to do anything including calling back into class + * code. There's no locking restriction. + */ +struct device *class_find_device(struct class *class, struct device *start, + const void *data, + int (*match)(struct device *, const void *)) +{ + struct class_dev_iter iter; + struct device *dev; + + if (!class) + return NULL; + if (!class->p) { + WARN(1, "%s called for class '%s' before it was initialized", + __func__, class->name); + return NULL; + } + + class_dev_iter_init(&iter, class, start, NULL); + while ((dev = class_dev_iter_next(&iter))) { + if (match(dev, data)) { + get_device(dev); + break; + } + } + class_dev_iter_exit(&iter); + + return dev; +} +EXPORT_SYMBOL_GPL(class_find_device); + +int class_interface_register(struct class_interface *class_intf) +{ + struct class *parent; + struct class_dev_iter iter; + struct device *dev; + + if (!class_intf || !class_intf->class) + return -ENODEV; + + parent = class_get(class_intf->class); + if (!parent) + return -EINVAL; + + mutex_lock(&parent->p->mutex); + list_add_tail(&class_intf->node, &parent->p->interfaces); + if (class_intf->add_dev) { + class_dev_iter_init(&iter, parent, NULL, NULL); + while ((dev = class_dev_iter_next(&iter))) + class_intf->add_dev(dev, class_intf); + class_dev_iter_exit(&iter); + } + mutex_unlock(&parent->p->mutex); + + return 0; +} + +void class_interface_unregister(struct class_interface *class_intf) +{ + struct class *parent = class_intf->class; + struct class_dev_iter iter; + struct device *dev; + + if (!parent) + return; + + mutex_lock(&parent->p->mutex); + list_del_init(&class_intf->node); + if (class_intf->remove_dev) { + class_dev_iter_init(&iter, parent, NULL, NULL); + while ((dev = class_dev_iter_next(&iter))) + class_intf->remove_dev(dev, class_intf); + class_dev_iter_exit(&iter); + } + mutex_unlock(&parent->p->mutex); + + class_put(parent); +} + +ssize_t show_class_attr_string(struct class *class, + struct class_attribute *attr, char *buf) +{ + struct class_attribute_string *cs; + + cs = container_of(attr, struct class_attribute_string, attr); + return sysfs_emit(buf, "%s\n", cs->str); +} + +EXPORT_SYMBOL_GPL(show_class_attr_string); + +struct class_compat { + struct kobject *kobj; +}; + +/** + * class_compat_register - register a compatibility class + * @name: the name of the class + * + * Compatibility class are meant as a temporary user-space compatibility + * workaround when converting a family of class devices to a bus devices. 
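+ *
+ * Sketch of the intended flow (my_compat and dev are hypothetical): the
+ * compat class is registered once, then each converted device is mirrored
+ * into it so existing user space can still find it under the old class
+ * directory:
+ *
+ *	my_compat = class_compat_register("foo");
+ *	...
+ *	class_compat_create_link(my_compat, dev, dev->parent);
+ *	...
+ *	class_compat_remove_link(my_compat, dev, dev->parent);
+ *	class_compat_unregister(my_compat);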
+ */ +struct class_compat *class_compat_register(const char *name) +{ + struct class_compat *cls; + + cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL); + if (!cls) + return NULL; + cls->kobj = kobject_create_and_add(name, &class_kset->kobj); + if (!cls->kobj) { + kfree(cls); + return NULL; + } + return cls; +} +EXPORT_SYMBOL_GPL(class_compat_register); + +/** + * class_compat_unregister - unregister a compatibility class + * @cls: the class to unregister + */ +void class_compat_unregister(struct class_compat *cls) +{ + kobject_put(cls->kobj); + kfree(cls); +} +EXPORT_SYMBOL_GPL(class_compat_unregister); + +/** + * class_compat_create_link - create a compatibility class device link to + * a bus device + * @cls: the compatibility class + * @dev: the target bus device + * @device_link: an optional device to which a "device" link should be created + */ +int class_compat_create_link(struct class_compat *cls, struct device *dev, + struct device *device_link) +{ + int error; + + error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev)); + if (error) + return error; + + /* + * Optionally add a "device" link (typically to the parent), as a + * class device would have one and we want to provide as much + * backwards compatibility as possible. + */ + if (device_link) { + error = sysfs_create_link(&dev->kobj, &device_link->kobj, + "device"); + if (error) + sysfs_remove_link(cls->kobj, dev_name(dev)); + } + + return error; +} +EXPORT_SYMBOL_GPL(class_compat_create_link); + +/** + * class_compat_remove_link - remove a compatibility class device link to + * a bus device + * @cls: the compatibility class + * @dev: the target bus device + * @device_link: an optional device to which a "device" link was previously + * created + */ +void class_compat_remove_link(struct class_compat *cls, struct device *dev, + struct device *device_link) +{ + if (device_link) + sysfs_remove_link(&dev->kobj, "device"); + sysfs_remove_link(cls->kobj, dev_name(dev)); +} +EXPORT_SYMBOL_GPL(class_compat_remove_link); + +int __init classes_init(void) +{ + class_kset = kset_create_and_add("class", NULL, NULL); + if (!class_kset) + return -ENOMEM; + return 0; +} + +EXPORT_SYMBOL_GPL(class_create_file_ns); +EXPORT_SYMBOL_GPL(class_remove_file_ns); +EXPORT_SYMBOL_GPL(class_unregister); +EXPORT_SYMBOL_GPL(class_destroy); + +EXPORT_SYMBOL_GPL(class_interface_register); +EXPORT_SYMBOL_GPL(class_interface_unregister); diff --git a/drivers/base/component.c b/drivers/base/component.c new file mode 100644 index 000000000..dcfbe7251 --- /dev/null +++ b/drivers/base/component.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Componentized device handling. + * + * This is work in progress. We gather up the component devices into a list, + * and bind them when instructed. At the moment, we're specific to the DRM + * subsystem, and only handles one master device, but this doesn't have to be + * the case. + */ +#include <linux/component.h> +#include <linux/device.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/debugfs.h> + +/** + * DOC: overview + * + * The component helper allows drivers to collect a pile of sub-devices, + * including their bound drivers, into an aggregate driver. Various subsystems + * already provide functions to get hold of such components, e.g. + * of_clk_get_by_name(). 
The component helper can be used when such a + * subsystem-specific way to find a device is not available: The component + * helper fills the niche of aggregate drivers for specific hardware, where + * further standardization into a subsystem would not be practical. The common + * example is when a logical device (e.g. a DRM display driver) is spread around + * the SoC on various components (scanout engines, blending blocks, transcoders + * for various outputs and so on). + * + * The component helper also doesn't solve runtime dependencies, e.g. for system + * suspend and resume operations. See also :ref:`device links<device_link>`. + * + * Components are registered using component_add() and unregistered with + * component_del(), usually from the driver's probe and disconnect functions. + * + * Aggregate drivers first assemble a component match list of what they need + * using component_match_add(). This is then registered as an aggregate driver + * using component_master_add_with_match(), and unregistered using + * component_master_del(). + */ + +struct component; + +struct component_match_array { + void *data; + int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); + void (*release)(struct device *, void *); + struct component *component; + bool duplicate; +}; + +struct component_match { + size_t alloc; + size_t num; + struct component_match_array *compare; +}; + +struct master { + struct list_head node; + bool bound; + + const struct component_master_ops *ops; + struct device *dev; + struct component_match *match; + struct dentry *dentry; +}; + +struct component { + struct list_head node; + struct master *master; + bool bound; + + const struct component_ops *ops; + int subcomponent; + struct device *dev; +}; + +static DEFINE_MUTEX(component_mutex); +static LIST_HEAD(component_list); +static LIST_HEAD(masters); + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *component_debugfs_dir; + +static int component_devices_show(struct seq_file *s, void *data) +{ + struct master *m = s->private; + struct component_match *match = m->match; + size_t i; + + mutex_lock(&component_mutex); + seq_printf(s, "%-40s %20s\n", "master name", "status"); + seq_puts(s, "-------------------------------------------------------------\n"); + seq_printf(s, "%-40s %20s\n\n", + dev_name(m->dev), m->bound ? "bound" : "not bound"); + + seq_printf(s, "%-40s %20s\n", "device name", "status"); + seq_puts(s, "-------------------------------------------------------------\n"); + for (i = 0; i < match->num; i++) { + struct component *component = match->compare[i].component; + + seq_printf(s, "%-40s %20s\n", + component ? dev_name(component->dev) : "(unknown)", + component ? (component->bound ? 
"bound" : "not bound") : "not registered"); + } + mutex_unlock(&component_mutex); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(component_devices); + +static int __init component_debug_init(void) +{ + component_debugfs_dir = debugfs_create_dir("device_component", NULL); + + return 0; +} + +core_initcall(component_debug_init); + +static void component_master_debugfs_add(struct master *m) +{ + m->dentry = debugfs_create_file(dev_name(m->dev), 0444, + component_debugfs_dir, + m, &component_devices_fops); +} + +static void component_master_debugfs_del(struct master *m) +{ + debugfs_remove(m->dentry); + m->dentry = NULL; +} + +#else + +static void component_master_debugfs_add(struct master *m) +{ } + +static void component_master_debugfs_del(struct master *m) +{ } + +#endif + +static struct master *__master_find(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *m; + + list_for_each_entry(m, &masters, node) + if (m->dev == dev && (!ops || m->ops == ops)) + return m; + + return NULL; +} + +static struct component *find_component(struct master *master, + struct component_match_array *mc) +{ + struct component *c; + + list_for_each_entry(c, &component_list, node) { + if (c->master && c->master != master) + continue; + + if (mc->compare && mc->compare(c->dev, mc->data)) + return c; + + if (mc->compare_typed && + mc->compare_typed(c->dev, c->subcomponent, mc->data)) + return c; + } + + return NULL; +} + +static int find_components(struct master *master) +{ + struct component_match *match = master->match; + size_t i; + int ret = 0; + + /* + * Scan the array of match functions and attach + * any components which are found to this master. + */ + for (i = 0; i < match->num; i++) { + struct component_match_array *mc = &match->compare[i]; + struct component *c; + + dev_dbg(master->dev, "Looking for component %zu\n", i); + + if (match->compare[i].component) + continue; + + c = find_component(master, mc); + if (!c) { + ret = -ENXIO; + break; + } + + dev_dbg(master->dev, "found component %s, duplicate %u\n", dev_name(c->dev), !!c->master); + + /* Attach this component to the master */ + match->compare[i].duplicate = !!c->master; + match->compare[i].component = c; + c->master = master; + } + return ret; +} + +/* Detach component from associated master */ +static void remove_component(struct master *master, struct component *c) +{ + size_t i; + + /* Detach the component from this master. */ + for (i = 0; i < master->match->num; i++) + if (master->match->compare[i].component == c) + master->match->compare[i].component = NULL; +} + +/* + * Try to bring up a master. If component is NULL, we're interested in + * this master, otherwise it's a component which must be present to try + * and bring up the master. + * + * Returns 1 for successful bringup, 0 if not ready, or -ve errno. 
+ */ +static int try_to_bring_up_master(struct master *master, + struct component *component) +{ + int ret; + + dev_dbg(master->dev, "trying to bring up master\n"); + + if (find_components(master)) { + dev_dbg(master->dev, "master has incomplete components\n"); + return 0; + } + + if (component && component->master != master) { + dev_dbg(master->dev, "master is not for this component (%s)\n", + dev_name(component->dev)); + return 0; + } + + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + /* Found all components */ + ret = master->ops->bind(master->dev); + if (ret < 0) { + devres_release_group(master->dev, NULL); + if (ret != -EPROBE_DEFER) + dev_info(master->dev, "master bind failed: %d\n", ret); + return ret; + } + + master->bound = true; + return 1; +} + +static int try_to_bring_up_masters(struct component *component) +{ + struct master *m; + int ret = 0; + + list_for_each_entry(m, &masters, node) { + if (!m->bound) { + ret = try_to_bring_up_master(m, component); + if (ret != 0) + break; + } + } + + return ret; +} + +static void take_down_master(struct master *master) +{ + if (master->bound) { + master->ops->unbind(master->dev); + devres_release_group(master->dev, NULL); + master->bound = false; + } +} + +static void component_match_release(struct device *master, + struct component_match *match) +{ + unsigned int i; + + for (i = 0; i < match->num; i++) { + struct component_match_array *mc = &match->compare[i]; + + if (mc->release) + mc->release(master, mc->data); + } + + kfree(match->compare); +} + +static void devm_component_match_release(struct device *dev, void *res) +{ + component_match_release(dev, res); +} + +static int component_match_realloc(struct device *dev, + struct component_match *match, size_t num) +{ + struct component_match_array *new; + + if (match->alloc == num) + return 0; + + new = kmalloc_array(num, sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + if (match->compare) { + memcpy(new, match->compare, sizeof(*new) * + min(match->num, num)); + kfree(match->compare); + } + match->compare = new; + match->alloc = num; + + return 0; +} + +static void __component_match_add(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), + int (*compare_typed)(struct device *, int, void *), + void *compare_data) +{ + struct component_match *match = *matchptr; + + if (IS_ERR(match)) + return; + + if (!match) { + match = devres_alloc(devm_component_match_release, + sizeof(*match), GFP_KERNEL); + if (!match) { + *matchptr = ERR_PTR(-ENOMEM); + return; + } + + devres_add(master, match); + + *matchptr = match; + } + + if (match->num == match->alloc) { + size_t new_size = match->alloc + 16; + int ret; + + ret = component_match_realloc(master, match, new_size); + if (ret) { + *matchptr = ERR_PTR(ret); + return; + } + } + + match->compare[match->num].compare = compare; + match->compare[match->num].compare_typed = compare_typed; + match->compare[match->num].release = release; + match->compare[match->num].data = compare_data; + match->compare[match->num].component = NULL; + match->num++; +} + +/** + * component_match_add_release - add a component match entry with release callback + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @release: release function for @compare_data + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * 
Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions, where upon @release will be called to free any references held by + * @compare_data, e.g. when @compare_data is a &device_node that must be + * released with of_node_put(). + * + * See also component_match_add() and component_match_add_typed(). + */ +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), void *compare_data) +{ + __component_match_add(master, matchptr, release, compare, NULL, + compare_data); +} +EXPORT_SYMBOL(component_match_add_release); + +/** + * component_match_add_typed - add a component match entry for a typed component + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @compare_typed: compare function to match against all typed components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add_typed(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions. + * + * See also component_match_add_release() and component_match_add_typed(). + */ +void component_match_add_typed(struct device *master, + struct component_match **matchptr, + int (*compare_typed)(struct device *, int, void *), void *compare_data) +{ + __component_match_add(master, matchptr, NULL, NULL, compare_typed, + compare_data); +} +EXPORT_SYMBOL(component_match_add_typed); + +static void free_master(struct master *master) +{ + struct component_match *match = master->match; + int i; + + component_master_debugfs_del(master); + list_del(&master->node); + + if (match) { + for (i = 0; i < match->num; i++) { + struct component *c = match->compare[i].component; + if (c) + c->master = NULL; + } + } + + kfree(master); +} + +/** + * component_master_add_with_match - register an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * @match: component match list for the aggregate driver + * + * Registers a new aggregate driver consisting of the components added to @match + * by calling one of the component_match_add() functions. Once all components in + * @match are available, it will be assembled by calling + * &component_master_ops.bind from @ops. Must be unregistered by calling + * component_master_del(). 
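+ *
+ * Illustrative sketch, not part of this patch: an aggregate driver's probe
+ * might build its match list and register itself roughly as follows. The
+ * names my_release_of_node(), my_compare_of_node(), my_master_ops and the
+ * node pointer np (assumed to hold a reference, e.g. from of_parse_phandle())
+ * are hypothetical.
+ *
+ *	static void my_release_of_node(struct device *dev, void *data)
+ *	{
+ *		of_node_put(data);
+ *	}
+ *
+ *	static int my_compare_of_node(struct device *dev, void *data)
+ *	{
+ *		return dev->of_node == data;
+ *	}
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		struct component_match *match = NULL;
+ *
+ *		component_match_add_release(&pdev->dev, &match,
+ *					    my_release_of_node,
+ *					    my_compare_of_node, np);
+ *		if (IS_ERR(match))
+ *			return PTR_ERR(match);
+ *
+ *		return component_master_add_with_match(&pdev->dev,
+ *						       &my_master_ops, match);
+ *	}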
+ */ +int component_master_add_with_match(struct device *dev, + const struct component_master_ops *ops, + struct component_match *match) +{ + struct master *master; + int ret; + + /* Reallocate the match array for its true size */ + ret = component_match_realloc(dev, match, match->num); + if (ret) + return ret; + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return -ENOMEM; + + master->dev = dev; + master->ops = ops; + master->match = match; + + component_master_debugfs_add(master); + /* Add to the list of available masters. */ + mutex_lock(&component_mutex); + list_add(&master->node, &masters); + + ret = try_to_bring_up_master(master, NULL); + + if (ret < 0) + free_master(master); + + mutex_unlock(&component_mutex); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(component_master_add_with_match); + +/** + * component_master_del - unregister an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * + * Unregisters an aggregate driver registered with + * component_master_add_with_match(). If necessary the aggregate driver is first + * disassembled by calling &component_master_ops.unbind from @ops. + */ +void component_master_del(struct device *dev, + const struct component_master_ops *ops) +{ + struct master *master; + + mutex_lock(&component_mutex); + master = __master_find(dev, ops); + if (master) { + take_down_master(master); + free_master(master); + } + mutex_unlock(&component_mutex); +} +EXPORT_SYMBOL_GPL(component_master_del); + +static void component_unbind(struct component *component, + struct master *master, void *data) +{ + WARN_ON(!component->bound); + + if (component->ops && component->ops->unbind) + component->ops->unbind(component->dev, master->dev, data); + component->bound = false; + + /* Release all resources claimed in the binding of this component */ + devres_release_group(component->dev, component); +} + +/** + * component_unbind_all - unbind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Unbinds all components of the aggregate @dev by passing @data to their + * &component_ops.unbind functions. Should be called from + * &component_master_ops.unbind. + */ +void component_unbind_all(struct device *master_dev, void *data) +{ + struct master *master; + struct component *c; + size_t i; + + WARN_ON(!mutex_is_locked(&component_mutex)); + + master = __master_find(master_dev, NULL); + if (!master) + return; + + /* Unbind components in reverse order */ + for (i = master->match->num; i--; ) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + component_unbind(c, master, data); + } +} +EXPORT_SYMBOL_GPL(component_unbind_all); + +static int component_bind(struct component *component, struct master *master, + void *data) +{ + int ret; + + /* + * Each component initialises inside its own devres group. + * This allows us to roll-back a failed component without + * affecting anything else. + */ + if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + /* + * Also open a group for the device itself: this allows us + * to release the resources claimed against the sub-device + * at the appropriate moment. 
+ */ + if (!devres_open_group(component->dev, component, GFP_KERNEL)) { + devres_release_group(master->dev, NULL); + return -ENOMEM; + } + + dev_dbg(master->dev, "binding %s (ops %ps)\n", + dev_name(component->dev), component->ops); + + ret = component->ops->bind(component->dev, master->dev, data); + if (!ret) { + component->bound = true; + + /* + * Close the component device's group so that resources + * allocated in the binding are encapsulated for removal + * at unbind. Remove the group on the DRM device as we + * can clean those resources up independently. + */ + devres_close_group(component->dev, NULL); + devres_remove_group(master->dev, NULL); + + dev_info(master->dev, "bound %s (ops %ps)\n", + dev_name(component->dev), component->ops); + } else { + devres_release_group(component->dev, NULL); + devres_release_group(master->dev, NULL); + + if (ret != -EPROBE_DEFER) + dev_err(master->dev, "failed to bind %s (ops %ps): %d\n", + dev_name(component->dev), component->ops, ret); + } + + return ret; +} + +/** + * component_bind_all - bind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Binds all components of the aggregate @dev by passing @data to their + * &component_ops.bind functions. Should be called from + * &component_master_ops.bind. + */ +int component_bind_all(struct device *master_dev, void *data) +{ + struct master *master; + struct component *c; + size_t i; + int ret = 0; + + WARN_ON(!mutex_is_locked(&component_mutex)); + + master = __master_find(master_dev, NULL); + if (!master) + return -EINVAL; + + /* Bind components in match order */ + for (i = 0; i < master->match->num; i++) + if (!master->match->compare[i].duplicate) { + c = master->match->compare[i].component; + ret = component_bind(c, master, data); + if (ret) + break; + } + + if (ret != 0) { + for (; i > 0; i--) + if (!master->match->compare[i - 1].duplicate) { + c = master->match->compare[i - 1].component; + component_unbind(c, master, data); + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(component_bind_all); + +static int __component_add(struct device *dev, const struct component_ops *ops, + int subcomponent) +{ + struct component *component; + int ret; + + component = kzalloc(sizeof(*component), GFP_KERNEL); + if (!component) + return -ENOMEM; + + component->ops = ops; + component->dev = dev; + component->subcomponent = subcomponent; + + dev_dbg(dev, "adding component (ops %ps)\n", ops); + + mutex_lock(&component_mutex); + list_add_tail(&component->node, &component_list); + + ret = try_to_bring_up_masters(component); + if (ret < 0) { + if (component->master) + remove_component(component->master, component); + list_del(&component->node); + + kfree(component); + } + mutex_unlock(&component_mutex); + + return ret < 0 ? ret : 0; +} + +/** + * component_add_typed - register a component + * @dev: component device + * @ops: component callbacks + * @subcomponent: nonzero identifier for subcomponents + * + * Register a new component for @dev. Functions in @ops will be call when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * @subcomponent must be nonzero and is used to differentiate between multiple + * components registerd on the same device @dev. These components are match + * using component_match_add_typed(). + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). 
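+ *
+ * Illustrative sketch, not part of this patch: a driver that exposes two
+ * distinct roles of one device as typed components. The ops structures and
+ * role identifiers are hypothetical; only the nonzero @subcomponent values
+ * matter for matching via component_match_add_typed().
+ *
+ *	enum { MY_ROLE_ENCODER = 1, MY_ROLE_BRIDGE };
+ *
+ *	static int my_probe(struct platform_device *pdev)
+ *	{
+ *		int ret;
+ *
+ *		ret = component_add_typed(&pdev->dev, &my_encoder_ops,
+ *					  MY_ROLE_ENCODER);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return component_add_typed(&pdev->dev, &my_bridge_ops,
+ *					   MY_ROLE_BRIDGE);
+ *	}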
+ * + * See also component_add(). + */ +int component_add_typed(struct device *dev, const struct component_ops *ops, + int subcomponent) +{ + if (WARN_ON(subcomponent == 0)) + return -EINVAL; + + return __component_add(dev, ops, subcomponent); +} +EXPORT_SYMBOL_GPL(component_add_typed); + +/** + * component_add - register a component + * @dev: component device + * @ops: component callbacks + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add_typed() for a variant that allows multipled different + * components on the same device. + */ +int component_add(struct device *dev, const struct component_ops *ops) +{ + return __component_add(dev, ops, 0); +} +EXPORT_SYMBOL_GPL(component_add); + +/** + * component_del - unregister a component + * @dev: component device + * @ops: component callbacks + * + * Unregister a component added with component_add(). If the component is bound + * into an aggregate driver, this will force the entire aggregate driver, including + * all its components, to be unbound. + */ +void component_del(struct device *dev, const struct component_ops *ops) +{ + struct component *c, *component = NULL; + + mutex_lock(&component_mutex); + list_for_each_entry(c, &component_list, node) + if (c->dev == dev && c->ops == ops) { + list_del(&c->node); + component = c; + break; + } + + if (component && component->master) { + take_down_master(component->master); + remove_component(component->master, component); + } + + mutex_unlock(&component_mutex); + + WARN_ON(!component); + kfree(component); +} +EXPORT_SYMBOL_GPL(component_del); diff --git a/drivers/base/container.c b/drivers/base/container.c new file mode 100644 index 000000000..1ba42d2d3 --- /dev/null +++ b/drivers/base/container.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System bus type for containers. + * + * Copyright (C) 2013, Intel Corporation + * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + */ + +#include <linux/container.h> + +#include "base.h" + +#define CONTAINER_BUS_NAME "container" + +static int trivial_online(struct device *dev) +{ + return 0; +} + +static int container_offline(struct device *dev) +{ + struct container_dev *cdev = to_container_dev(dev); + + return cdev->offline ? cdev->offline(cdev) : 0; +} + +struct bus_type container_subsys = { + .name = CONTAINER_BUS_NAME, + .dev_name = CONTAINER_BUS_NAME, + .online = trivial_online, + .offline = container_offline, +}; + +void __init container_dev_init(void) +{ + int ret; + + ret = subsys_system_register(&container_subsys, NULL); + if (ret) + pr_err("%s() failed: %d\n", __func__, ret); +} diff --git a/drivers/base/core.c b/drivers/base/core.c new file mode 100644 index 000000000..d98cab88c --- /dev/null +++ b/drivers/base/core.c @@ -0,0 +1,4424 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/core.c - core driver model code (device registration, etc) + * + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2006 Novell, Inc. 
+ */ + +#include <linux/acpi.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/fwnode.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/kdev_t.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/genhd.h> +#include <linux/mutex.h> +#include <linux/pm_runtime.h> +#include <linux/netdevice.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/sysfs.h> + +#include "base.h" +#include "power/power.h" + +#ifdef CONFIG_SYSFS_DEPRECATED +#ifdef CONFIG_SYSFS_DEPRECATED_V2 +long sysfs_deprecated = 1; +#else +long sysfs_deprecated = 0; +#endif +static int __init sysfs_deprecated_setup(char *arg) +{ + return kstrtol(arg, 10, &sysfs_deprecated); +} +early_param("sysfs.deprecated", sysfs_deprecated_setup); +#endif + +/* Device links support. */ +static LIST_HEAD(wait_for_suppliers); +static DEFINE_MUTEX(wfs_lock); +static LIST_HEAD(deferred_sync); +static unsigned int defer_sync_state_count = 1; +static unsigned int defer_fw_devlink_count; +static LIST_HEAD(deferred_fw_devlink); +static DEFINE_MUTEX(defer_fw_devlink_lock); +static bool fw_devlink_is_permissive(void); + +#ifdef CONFIG_SRCU +static DEFINE_MUTEX(device_links_lock); +DEFINE_STATIC_SRCU(device_links_srcu); + +static inline void device_links_write_lock(void) +{ + mutex_lock(&device_links_lock); +} + +static inline void device_links_write_unlock(void) +{ + mutex_unlock(&device_links_lock); +} + +int device_links_read_lock(void) __acquires(&device_links_srcu) +{ + return srcu_read_lock(&device_links_srcu); +} + +void device_links_read_unlock(int idx) __releases(&device_links_srcu) +{ + srcu_read_unlock(&device_links_srcu, idx); +} + +int device_links_read_lock_held(void) +{ + return srcu_read_lock_held(&device_links_srcu); +} + +static void device_link_synchronize_removal(void) +{ + synchronize_srcu(&device_links_srcu); +} +#else /* !CONFIG_SRCU */ +static DECLARE_RWSEM(device_links_lock); + +static inline void device_links_write_lock(void) +{ + down_write(&device_links_lock); +} + +static inline void device_links_write_unlock(void) +{ + up_write(&device_links_lock); +} + +int device_links_read_lock(void) +{ + down_read(&device_links_lock); + return 0; +} + +void device_links_read_unlock(int not_used) +{ + up_read(&device_links_lock); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +int device_links_read_lock_held(void) +{ + return lockdep_is_held(&device_links_lock); +} +#endif + +static inline void device_link_synchronize_removal(void) +{ +} +#endif /* !CONFIG_SRCU */ + +static bool device_is_ancestor(struct device *dev, struct device *target) +{ + while (target->parent) { + target = target->parent; + if (dev == target) + return true; + } + return false; +} + +/** + * device_is_dependent - Check if one device depends on another one + * @dev: Device to check dependencies for. + * @target: Device to check against. + * + * Check if @target depends on @dev or any device dependent on it (its child or + * its consumer etc). Return 1 if that is the case or 0 otherwise. + */ +int device_is_dependent(struct device *dev, void *target) +{ + struct device_link *link; + int ret; + + /* + * The "ancestors" check is needed to catch the case when the target + * device has not been completely initialized yet and it is still + * missing from the list of children of its parent device. 
+ */ + if (dev == target || device_is_ancestor(dev, target)) + return 1; + + ret = device_for_each_child(dev, target, device_is_dependent); + if (ret) + return ret; + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + continue; + + if (link->consumer == target) + return 1; + + ret = device_is_dependent(link->consumer, target); + if (ret) + break; + } + return ret; +} + +static void device_link_init_status(struct device_link *link, + struct device *consumer, + struct device *supplier) +{ + switch (supplier->links.status) { + case DL_DEV_PROBING: + switch (consumer->links.status) { + case DL_DEV_PROBING: + /* + * A consumer driver can create a link to a supplier + * that has not completed its probing yet as long as it + * knows that the supplier is already functional (for + * example, it has just acquired some resources from the + * supplier). + */ + link->status = DL_STATE_CONSUMER_PROBE; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } + break; + case DL_DEV_DRIVER_BOUND: + switch (consumer->links.status) { + case DL_DEV_PROBING: + link->status = DL_STATE_CONSUMER_PROBE; + break; + case DL_DEV_DRIVER_BOUND: + link->status = DL_STATE_ACTIVE; + break; + default: + link->status = DL_STATE_AVAILABLE; + break; + } + break; + case DL_DEV_UNBINDING: + link->status = DL_STATE_SUPPLIER_UNBIND; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } +} + +static int device_reorder_to_tail(struct device *dev, void *not_used) +{ + struct device_link *link; + + /* + * Devices that have not been registered yet will be put to the ends + * of the lists during the registration, so skip them here. + */ + if (device_is_registered(dev)) + devices_kset_move_last(dev); + + if (device_pm_initialized(dev)) + device_pm_move_last(dev); + + device_for_each_child(dev, NULL, device_reorder_to_tail); + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (link->flags == (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED)) + continue; + device_reorder_to_tail(link->consumer, NULL); + } + + return 0; +} + +/** + * device_pm_move_to_tail - Move set of devices to the end of device lists + * @dev: Device to move + * + * This is a device_reorder_to_tail() wrapper taking the requisite locks. + * + * It moves the @dev along with all of its children and all of its consumers + * to the ends of the device_kset and dpm_list, recursively. 
+ */ +void device_pm_move_to_tail(struct device *dev) +{ + int idx; + + idx = device_links_read_lock(); + device_pm_lock(); + device_reorder_to_tail(dev, NULL); + device_pm_unlock(); + device_links_read_unlock(idx); +} + +#define to_devlink(dev) container_of((dev), struct device_link, link_dev) + +static ssize_t status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *output; + + switch (to_devlink(dev)->status) { + case DL_STATE_NONE: + output = "not tracked"; + break; + case DL_STATE_DORMANT: + output = "dormant"; + break; + case DL_STATE_AVAILABLE: + output = "available"; + break; + case DL_STATE_CONSUMER_PROBE: + output = "consumer probing"; + break; + case DL_STATE_ACTIVE: + output = "active"; + break; + case DL_STATE_SUPPLIER_UNBIND: + output = "supplier unbinding"; + break; + default: + output = "unknown"; + break; + } + + return sysfs_emit(buf, "%s\n", output); +} +static DEVICE_ATTR_RO(status); + +static ssize_t auto_remove_on_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + const char *output; + + if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + output = "supplier unbind"; + else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) + output = "consumer unbind"; + else + output = "never"; + + return sysfs_emit(buf, "%s\n", output); +} +static DEVICE_ATTR_RO(auto_remove_on); + +static ssize_t runtime_pm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + + return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME)); +} +static DEVICE_ATTR_RO(runtime_pm); + +static ssize_t sync_state_only_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct device_link *link = to_devlink(dev); + + return sysfs_emit(buf, "%d\n", + !!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); +} +static DEVICE_ATTR_RO(sync_state_only); + +static struct attribute *devlink_attrs[] = { + &dev_attr_status.attr, + &dev_attr_auto_remove_on.attr, + &dev_attr_runtime_pm.attr, + &dev_attr_sync_state_only.attr, + NULL, +}; +ATTRIBUTE_GROUPS(devlink); + +static void device_link_release_fn(struct work_struct *work) +{ + struct device_link *link = container_of(work, struct device_link, rm_work); + + /* Ensure that all references to the link object have been dropped. */ + device_link_synchronize_removal(); + + pm_runtime_release_supplier(link); + pm_request_idle(link->supplier); + + put_device(link->consumer); + put_device(link->supplier); + kfree(link); +} + +static void devlink_dev_release(struct device *dev) +{ + struct device_link *link = to_devlink(dev); + + INIT_WORK(&link->rm_work, device_link_release_fn); + /* + * It may take a while to complete this work because of the SRCU + * synchronization in device_link_release_fn() and if the consumer or + * supplier devices get deleted when it runs, so put it into the "long" + * workqueue. 
+ */ + queue_work(system_long_wq, &link->rm_work); +} + +static struct class devlink_class = { + .name = "devlink", + .owner = THIS_MODULE, + .dev_groups = devlink_groups, + .dev_release = devlink_dev_release, +}; + +static int devlink_add_symlinks(struct device *dev, + struct class_interface *class_intf) +{ + int ret; + size_t len; + struct device_link *link = to_devlink(dev); + struct device *sup = link->supplier; + struct device *con = link->consumer; + char *buf; + + len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), + strlen(dev_bus_name(con)) + strlen(dev_name(con))); + len += strlen(":"); + len += strlen("supplier:") + 1; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier"); + if (ret) + goto out; + + ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer"); + if (ret) + goto err_con; + + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); + if (ret) + goto err_con_dev; + + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); + if (ret) + goto err_sup_dev; + + goto out; + +err_sup_dev: + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + sysfs_remove_link(&sup->kobj, buf); +err_con_dev: + sysfs_remove_link(&link->link_dev.kobj, "consumer"); +err_con: + sysfs_remove_link(&link->link_dev.kobj, "supplier"); +out: + kfree(buf); + return ret; +} + +static void devlink_remove_symlinks(struct device *dev, + struct class_interface *class_intf) +{ + struct device_link *link = to_devlink(dev); + size_t len; + struct device *sup = link->supplier; + struct device *con = link->consumer; + char *buf; + + sysfs_remove_link(&link->link_dev.kobj, "consumer"); + sysfs_remove_link(&link->link_dev.kobj, "supplier"); + + len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), + strlen(dev_bus_name(con)) + strlen(dev_name(con))); + len += strlen(":"); + len += strlen("supplier:") + 1; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) { + WARN(1, "Unable to properly free device link symlinks!\n"); + return; + } + + if (device_is_registered(con)) { + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); + sysfs_remove_link(&con->kobj, buf); + } + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); + sysfs_remove_link(&sup->kobj, buf); + kfree(buf); +} + +static struct class_interface devlink_class_intf = { + .class = &devlink_class, + .add_dev = devlink_add_symlinks, + .remove_dev = devlink_remove_symlinks, +}; + +static int __init devlink_class_init(void) +{ + int ret; + + ret = class_register(&devlink_class); + if (ret) + return ret; + + ret = class_interface_register(&devlink_class_intf); + if (ret) + class_unregister(&devlink_class); + + return ret; +} +postcore_initcall(devlink_class_init); + +#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \ + DL_FLAG_AUTOREMOVE_SUPPLIER | \ + DL_FLAG_AUTOPROBE_CONSUMER | \ + DL_FLAG_SYNC_STATE_ONLY) + +#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \ + DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE) + +/** + * device_link_add - Create a link between two devices. + * @consumer: Consumer end of the link. + * @supplier: Supplier end of the link. + * @flags: Link flags. + * + * The caller is responsible for the proper synchronization of the link creation + * with runtime PM. 
First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
+ * expected to release the link returned by it directly with the help of either
+ * device_link_del() or device_link_remove().
+ *
+ * If that flag is not set, however, the caller of this function is handing the
+ * management of the link over to the driver core entirely and its return value
+ * can only be used to check whether or not the link is present. In that case,
+ * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
+ * flags can be used to indicate to the driver core when the link can be safely
+ * deleted. Namely, setting one of them in @flags indicates to the driver core
+ * that the link is not going to be used (by the given caller of this function)
+ * after unbinding the consumer or supplier driver, respectively, from its
+ * device, so the link can be deleted at that point. If none of them is set,
+ * the link will be maintained until one of the devices pointed to by it (either
+ * the consumer or the supplier) is unregistered.
+ *
+ * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
+ * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
+ * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
+ * be used to request the driver core to automatically probe for a consumer
+ * driver after successfully binding a driver to the supplier device.
+ *
+ * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
+ * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
+ * the same time is invalid and will cause NULL to be returned upfront.
+ * However, if a device link between the given @consumer and @supplier pair
+ * exists already when this function is called for them, the existing link will
+ * be returned regardless of its current type and status (the link's flags may
+ * be modified then). The caller of this function is then expected to treat
+ * the link as though it has just been created, so (in particular) if
+ * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
+ * explicitly when not needed any more (as stated above).
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
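+ *
+ * Illustrative sketch, not part of this patch: a consumer driver creating a
+ * managed link to a supplier it has looked up during probe. The device
+ * pointers consumer_dev and supplier_dev are hypothetical.
+ *
+ *	struct device_link *dl;
+ *
+ *	dl = device_link_add(consumer_dev, supplier_dev,
+ *			     DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
+ *	if (!dl)
+ *		return -EINVAL;
+ *
+ * Because DL_FLAG_STATELESS is not set here, the driver core manages the
+ * link and drops it automatically when the consumer driver unbinds.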
+ */ +struct device_link *device_link_add(struct device *consumer, + struct device *supplier, u32 flags) +{ + struct device_link *link; + + if (!consumer || !supplier || consumer == supplier || + flags & ~DL_ADD_VALID_FLAGS || + (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) || + (flags & DL_FLAG_SYNC_STATE_ONLY && + flags != DL_FLAG_SYNC_STATE_ONLY) || + (flags & DL_FLAG_AUTOPROBE_CONSUMER && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER))) + return NULL; + + if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { + if (pm_runtime_get_sync(supplier) < 0) { + pm_runtime_put_noidle(supplier); + return NULL; + } + } + + if (!(flags & DL_FLAG_STATELESS)) + flags |= DL_FLAG_MANAGED; + + device_links_write_lock(); + device_pm_lock(); + + /* + * If the supplier has not been fully registered yet or there is a + * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and + * the supplier already in the graph, return NULL. If the link is a + * SYNC_STATE_ONLY link, we don't check for reverse dependencies + * because it only affects sync_state() callbacks. + */ + if (!device_pm_initialized(supplier) + || (!(flags & DL_FLAG_SYNC_STATE_ONLY) && + device_is_dependent(consumer, supplier))) { + link = NULL; + goto out; + } + + /* + * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed + * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both + * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer != consumer) + continue; + + if (flags & DL_FLAG_PM_RUNTIME) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + pm_runtime_new_link(consumer); + link->flags |= DL_FLAG_PM_RUNTIME; + } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + } + + if (flags & DL_FLAG_STATELESS) { + kref_get(&link->kref); + if (link->flags & DL_FLAG_SYNC_STATE_ONLY && + !(link->flags & DL_FLAG_STATELESS)) { + link->flags |= DL_FLAG_STATELESS; + goto reorder; + } else { + link->flags |= DL_FLAG_STATELESS; + goto out; + } + } + + /* + * If the life time of the link following from the new flags is + * longer than indicated by the flags of the existing link, + * update the existing link to stay around longer. 
+ */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; + } + } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { + link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER); + } + if (!(link->flags & DL_FLAG_MANAGED)) { + kref_get(&link->kref); + link->flags |= DL_FLAG_MANAGED; + device_link_init_status(link, consumer, supplier); + } + if (link->flags & DL_FLAG_SYNC_STATE_ONLY && + !(flags & DL_FLAG_SYNC_STATE_ONLY)) { + link->flags &= ~DL_FLAG_SYNC_STATE_ONLY; + goto reorder; + } + + goto out; + } + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + goto out; + + refcount_set(&link->rpm_active, 1); + + get_device(supplier); + link->supplier = supplier; + INIT_LIST_HEAD(&link->s_node); + get_device(consumer); + link->consumer = consumer; + INIT_LIST_HEAD(&link->c_node); + link->flags = flags; + kref_init(&link->kref); + + link->link_dev.class = &devlink_class; + device_set_pm_not_required(&link->link_dev); + dev_set_name(&link->link_dev, "%s:%s--%s:%s", + dev_bus_name(supplier), dev_name(supplier), + dev_bus_name(consumer), dev_name(consumer)); + if (device_register(&link->link_dev)) { + put_device(&link->link_dev); + link = NULL; + goto out; + } + + if (flags & DL_FLAG_PM_RUNTIME) { + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + + pm_runtime_new_link(consumer); + } + + /* Determine the initial link state. */ + if (flags & DL_FLAG_STATELESS) + link->status = DL_STATE_NONE; + else + device_link_init_status(link, consumer, supplier); + + /* + * Some callers expect the link creation during consumer driver probe to + * resume the supplier even without DL_FLAG_RPM_ACTIVE. + */ + if (link->status == DL_STATE_CONSUMER_PROBE && + flags & DL_FLAG_PM_RUNTIME) + pm_runtime_resume(supplier); + + list_add_tail_rcu(&link->s_node, &supplier->links.consumers); + list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); + + if (flags & DL_FLAG_SYNC_STATE_ONLY) { + dev_dbg(consumer, + "Linked as a sync state only consumer to %s\n", + dev_name(supplier)); + goto out; + } + +reorder: + /* + * Move the consumer and all of the devices depending on it to the end + * of dpm_list and the devices_kset list. + * + * It is necessary to hold dpm_list locked throughout all that or else + * we may end up suspending with a wrong ordering of it. + */ + device_reorder_to_tail(consumer, NULL); + + dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); + +out: + device_pm_unlock(); + device_links_write_unlock(); + + if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) + pm_runtime_put(supplier); + + return link; +} +EXPORT_SYMBOL_GPL(device_link_add); + +/** + * device_link_wait_for_supplier - Add device to wait_for_suppliers list + * @consumer: Consumer device + * + * Marks the @consumer device as waiting for suppliers to become available by + * adding it to the wait_for_suppliers list. The consumer device will never be + * probed until it's removed from the wait_for_suppliers list. + * + * The caller is responsible for adding the links to the supplier devices once + * they are available and removing the @consumer device from the + * wait_for_suppliers list once links to all the suppliers have been created. + * + * This function is NOT meant to be called from the probe function of the + * consumer but rather from code that creates/adds the consumer device. 
+ */ +static void device_link_wait_for_supplier(struct device *consumer, + bool need_for_probe) +{ + mutex_lock(&wfs_lock); + list_add_tail(&consumer->links.needs_suppliers, &wait_for_suppliers); + consumer->links.need_for_probe = need_for_probe; + mutex_unlock(&wfs_lock); +} + +static void device_link_wait_for_mandatory_supplier(struct device *consumer) +{ + device_link_wait_for_supplier(consumer, true); +} + +static void device_link_wait_for_optional_supplier(struct device *consumer) +{ + device_link_wait_for_supplier(consumer, false); +} + +/** + * device_link_add_missing_supplier_links - Add links from consumer devices to + * supplier devices, leaving any + * consumer with inactive suppliers on + * the wait_for_suppliers list + * + * Loops through all consumers waiting on suppliers and tries to add all their + * supplier links. If that succeeds, the consumer device is removed from + * wait_for_suppliers list. Otherwise, they are left in the wait_for_suppliers + * list. Devices left on the wait_for_suppliers list will not be probed. + * + * The fwnode add_links callback is expected to return 0 if it has found and + * added all the supplier links for the consumer device. It should return an + * error if it isn't able to do so. + * + * The caller of device_link_wait_for_supplier() is expected to call this once + * it's aware of potential suppliers becoming available. + */ +static void device_link_add_missing_supplier_links(void) +{ + struct device *dev, *tmp; + + mutex_lock(&wfs_lock); + list_for_each_entry_safe(dev, tmp, &wait_for_suppliers, + links.needs_suppliers) { + int ret = fwnode_call_int_op(dev->fwnode, add_links, dev); + if (!ret) + list_del_init(&dev->links.needs_suppliers); + else if (ret != -ENODEV || fw_devlink_is_permissive()) + dev->links.need_for_probe = false; + } + mutex_unlock(&wfs_lock); +} + +#ifdef CONFIG_SRCU +static void __device_link_del(struct kref *kref) +{ + struct device_link *link = container_of(kref, struct device_link, kref); + + dev_dbg(link->consumer, "Dropping the link to %s\n", + dev_name(link->supplier)); + + pm_runtime_drop_link(link); + + list_del_rcu(&link->s_node); + list_del_rcu(&link->c_node); + device_unregister(&link->link_dev); +} +#else /* !CONFIG_SRCU */ +static void __device_link_del(struct kref *kref) +{ + struct device_link *link = container_of(kref, struct device_link, kref); + + dev_info(link->consumer, "Dropping the link to %s\n", + dev_name(link->supplier)); + + pm_runtime_drop_link(link); + + list_del(&link->s_node); + list_del(&link->c_node); + device_unregister(&link->link_dev); +} +#endif /* !CONFIG_SRCU */ + +static void device_link_put_kref(struct device_link *link) +{ + if (link->flags & DL_FLAG_STATELESS) + kref_put(&link->kref, __device_link_del); + else + WARN(1, "Unable to drop a managed device link reference\n"); +} + +/** + * device_link_del - Delete a stateless link between two devices. + * @link: Device link to delete. + * + * The caller must ensure proper synchronization of this function with runtime + * PM. If the link was added multiple times, it needs to be deleted as often. + * Care is required for hotplugged devices: Their links are purged on removal + * and calling device_link_del() is then no longer allowed. + */ +void device_link_del(struct device_link *link) +{ + device_links_write_lock(); + device_link_put_kref(link); + device_links_write_unlock(); +} +EXPORT_SYMBOL_GPL(device_link_del); + +/** + * device_link_remove - Delete a stateless link between two devices. + * @consumer: Consumer end of the link. 
+ * @supplier: Supplier end of the link. + * + * The caller must ensure proper synchronization of this function with runtime + * PM. + */ +void device_link_remove(void *consumer, struct device *supplier) +{ + struct device_link *link; + + if (WARN_ON(consumer == supplier)) + return; + + device_links_write_lock(); + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer == consumer) { + device_link_put_kref(link); + break; + } + } + + device_links_write_unlock(); +} +EXPORT_SYMBOL_GPL(device_link_remove); + +static void device_links_missing_supplier(struct device *dev) +{ + struct device_link *link; + + list_for_each_entry(link, &dev->links.suppliers, c_node) { + if (link->status != DL_STATE_CONSUMER_PROBE) + continue; + + if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { + WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + } else { + WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + } +} + +/** + * device_links_check_suppliers - Check presence of supplier drivers. + * @dev: Consumer device. + * + * Check links from this device to any suppliers. Walk the list of the device's + * links to suppliers and see if all of them are available. If not, simply + * return -EPROBE_DEFER. + * + * We need to guarantee that the supplier will not go away after the check has + * been positive here. It only can go away in __device_release_driver() and + * that function checks the device's links to consumers. This means we need to + * mark the link as "consumer probe in progress" to make the supplier removal + * wait for us to complete (or bad things may happen). + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ +int device_links_check_suppliers(struct device *dev) +{ + struct device_link *link; + int ret = 0; + + /* + * Device waiting for supplier to become available is not allowed to + * probe. + */ + mutex_lock(&wfs_lock); + if (!list_empty(&dev->links.needs_suppliers) && + dev->links.need_for_probe) { + mutex_unlock(&wfs_lock); + return -EPROBE_DEFER; + } + mutex_unlock(&wfs_lock); + + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.suppliers, c_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + if (link->status != DL_STATE_AVAILABLE && + !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { + device_links_missing_supplier(dev); + ret = -EPROBE_DEFER; + break; + } + WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE); + } + dev->links.status = DL_DEV_PROBING; + + device_links_write_unlock(); + return ret; +} + +/** + * __device_links_queue_sync_state - Queue a device for sync_state() callback + * @dev: Device to call sync_state() on + * @list: List head to queue the @dev on + * + * Queues a device for a sync_state() callback when the device links write lock + * isn't held. This allows the sync_state() execution flow to use device links + * APIs. The caller must ensure this function is called with + * device_links_write_lock() held. + * + * This function does a get_device() to make sure the device is not freed while + * on this list. + * + * So the caller must also ensure that device_links_flush_sync_list() is called + * as soon as the caller releases device_links_write_lock(). This is necessary + * to make sure the sync_state() is called in a timely fashion and the + * put_device() is called on this device. 
+ */ +static void __device_links_queue_sync_state(struct device *dev, + struct list_head *list) +{ + struct device_link *link; + + if (!dev_has_sync_state(dev)) + return; + if (dev->state_synced) + return; + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + if (link->status != DL_STATE_ACTIVE) + return; + } + + /* + * Set the flag here to avoid adding the same device to a list more + * than once. This can happen if new consumers get added to the device + * and probed before the list is flushed. + */ + dev->state_synced = true; + + if (WARN_ON(!list_empty(&dev->links.defer_hook))) + return; + + get_device(dev); + list_add_tail(&dev->links.defer_hook, list); +} + +/** + * device_links_flush_sync_list - Call sync_state() on a list of devices + * @list: List of devices to call sync_state() on + * @dont_lock_dev: Device for which lock is already held by the caller + * + * Calls sync_state() on all the devices that have been queued for it. This + * function is used in conjunction with __device_links_queue_sync_state(). The + * @dont_lock_dev parameter is useful when this function is called from a + * context where a device lock is already held. + */ +static void device_links_flush_sync_list(struct list_head *list, + struct device *dont_lock_dev) +{ + struct device *dev, *tmp; + + list_for_each_entry_safe(dev, tmp, list, links.defer_hook) { + list_del_init(&dev->links.defer_hook); + + if (dev != dont_lock_dev) + device_lock(dev); + + if (dev->bus->sync_state) + dev->bus->sync_state(dev); + else if (dev->driver && dev->driver->sync_state) + dev->driver->sync_state(dev); + + if (dev != dont_lock_dev) + device_unlock(dev); + + put_device(dev); + } +} + +void device_links_supplier_sync_state_pause(void) +{ + device_links_write_lock(); + defer_sync_state_count++; + device_links_write_unlock(); +} + +void device_links_supplier_sync_state_resume(void) +{ + struct device *dev, *tmp; + LIST_HEAD(sync_list); + + device_links_write_lock(); + if (!defer_sync_state_count) { + WARN(true, "Unmatched sync_state pause/resume!"); + goto out; + } + defer_sync_state_count--; + if (defer_sync_state_count) + goto out; + + list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) { + /* + * Delete from deferred_sync list before queuing it to + * sync_list because defer_hook is used for both lists. 
+ */ + list_del_init(&dev->links.defer_hook); + __device_links_queue_sync_state(dev, &sync_list); + } +out: + device_links_write_unlock(); + + device_links_flush_sync_list(&sync_list, NULL); +} + +static int sync_state_resume_initcall(void) +{ + device_links_supplier_sync_state_resume(); + return 0; +} +late_initcall(sync_state_resume_initcall); + +static void __device_links_supplier_defer_sync(struct device *sup) +{ + if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup)) + list_add_tail(&sup->links.defer_hook, &deferred_sync); +} + +static void device_link_drop_managed(struct device_link *link) +{ + link->flags &= ~DL_FLAG_MANAGED; + WRITE_ONCE(link->status, DL_STATE_NONE); + kref_put(&link->kref, __device_link_del); +} + +static ssize_t waiting_for_supplier_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + bool val; + + device_lock(dev); + mutex_lock(&wfs_lock); + val = !list_empty(&dev->links.needs_suppliers) + && dev->links.need_for_probe; + mutex_unlock(&wfs_lock); + device_unlock(dev); + return sysfs_emit(buf, "%u\n", val); +} +static DEVICE_ATTR_RO(waiting_for_supplier); + +/** + * device_links_driver_bound - Update device links after probing its driver. + * @dev: Device to update the links for. + * + * The probe has been successful, so update links from this device to any + * consumers by changing their status to "available". + * + * Also change the status of @dev's links to suppliers to "active". + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ +void device_links_driver_bound(struct device *dev) +{ + struct device_link *link, *ln; + LIST_HEAD(sync_list); + + /* + * If a device probes successfully, it's expected to have created all + * the device links it needs to or make new device links as it needs + * them. So, it no longer needs to wait on any suppliers. + */ + mutex_lock(&wfs_lock); + list_del_init(&dev->links.needs_suppliers); + mutex_unlock(&wfs_lock); + device_remove_file(dev, &dev_attr_waiting_for_supplier); + + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + /* + * Links created during consumer probe may be in the "consumer + * probe" state to start with if the supplier is still probing + * when they are created and they may become "active" if the + * consumer probe returns first. Skip them here. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + continue; + + WARN_ON(link->status != DL_STATE_DORMANT); + WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + + if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + driver_deferred_probe_add(link->consumer); + } + + if (defer_sync_state_count) + __device_links_supplier_defer_sync(dev); + else + __device_links_queue_sync_state(dev, &sync_list); + + list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) { + struct device *supplier; + + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + supplier = link->supplier; + if (link->flags & DL_FLAG_SYNC_STATE_ONLY) { + /* + * When DL_FLAG_SYNC_STATE_ONLY is set, it means no + * other DL_MANAGED_LINK_FLAGS have been set. So, it's + * save to drop the managed link completely. 
+ */ + device_link_drop_managed(link); + } else { + WARN_ON(link->status != DL_STATE_CONSUMER_PROBE); + WRITE_ONCE(link->status, DL_STATE_ACTIVE); + } + + /* + * This needs to be done even for the deleted + * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last + * device link that was preventing the supplier from getting a + * sync_state() call. + */ + if (defer_sync_state_count) + __device_links_supplier_defer_sync(supplier); + else + __device_links_queue_sync_state(supplier, &sync_list); + } + + dev->links.status = DL_DEV_DRIVER_BOUND; + + device_links_write_unlock(); + + device_links_flush_sync_list(&sync_list, dev); +} + +/** + * __device_links_no_driver - Update links of a device without a driver. + * @dev: Device without a drvier. + * + * Delete all non-persistent links from this device to any suppliers. + * + * Persistent links stay around, but their status is changed to "available", + * unless they already are in the "supplier unbind in progress" state in which + * case they need not be updated. + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ +static void __device_links_no_driver(struct device *dev) +{ + struct device_link *link, *ln; + + list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + device_link_drop_managed(link); + continue; + } + + if (link->status != DL_STATE_CONSUMER_PROBE && + link->status != DL_STATE_ACTIVE) + continue; + + if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) { + WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + } else { + WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY)); + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + } + + dev->links.status = DL_DEV_NO_DRIVER; +} + +/** + * device_links_no_driver - Update links after failing driver probe. + * @dev: Device whose driver has just failed to probe. + * + * Clean up leftover links to consumers for @dev and invoke + * %__device_links_no_driver() to update links to suppliers for it as + * appropriate. + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ +void device_links_no_driver(struct device *dev) +{ + struct device_link *link; + + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + /* + * The probe has failed, so if the status of the link is + * "consumer probe" or "active", it must have been added by + * a probing consumer while this device was still probing. + * Change its state to "dormant", as it represents a valid + * relationship, but it is not functionally meaningful. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + + __device_links_no_driver(dev); + + device_links_write_unlock(); +} + +/** + * device_links_driver_cleanup - Update links after driver removal. + * @dev: Device whose driver has just gone away. + * + * Update links to consumers for @dev by changing their status to "dormant" and + * invoke %__device_links_no_driver() to update links to suppliers for it as + * appropriate. + * + * Links without the DL_FLAG_MANAGED flag set are ignored. 
+ */ +void device_links_driver_cleanup(struct device *dev) +{ + struct device_link *link, *ln; + + device_links_write_lock(); + + list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER); + WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND); + + /* + * autoremove the links between this @dev and its consumer + * devices that are not active, i.e. where the link state + * has moved to DL_STATE_SUPPLIER_UNBIND. + */ + if (link->status == DL_STATE_SUPPLIER_UNBIND && + link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + device_link_drop_managed(link); + + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + + list_del_init(&dev->links.defer_hook); + __device_links_no_driver(dev); + + device_links_write_unlock(); +} + +/** + * device_links_busy - Check if there are any busy links to consumers. + * @dev: Device to check. + * + * Check each consumer of the device and return 'true' if its link's status + * is one of "consumer probe" or "active" (meaning that the given consumer is + * probing right now or its driver is present). Otherwise, change the link + * state to "supplier unbind" to prevent the consumer from being probed + * successfully going forward. + * + * Return 'false' if there are no probing or active consumers. + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ +bool device_links_busy(struct device *dev) +{ + struct device_link *link; + bool ret = false; + + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (!(link->flags & DL_FLAG_MANAGED)) + continue; + + if (link->status == DL_STATE_CONSUMER_PROBE + || link->status == DL_STATE_ACTIVE) { + ret = true; + break; + } + WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); + } + + dev->links.status = DL_DEV_UNBINDING; + + device_links_write_unlock(); + return ret; +} + +/** + * device_links_unbind_consumers - Force unbind consumers of the given device. + * @dev: Device to unbind the consumers of. + * + * Walk the list of links to consumers for @dev and if any of them is in the + * "consumer probe" state, wait for all device probes in progress to complete + * and start over. + * + * If that's not the case, change the status of the link to "supplier unbind" + * and check if the link was in the "active" state. If so, force the consumer + * driver to unbind and start over (the consumer will not re-probe as we have + * changed the state of the link already). + * + * Links without the DL_FLAG_MANAGED flag set are ignored. + */ +void device_links_unbind_consumers(struct device *dev) +{ + struct device_link *link; + + start: + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + enum device_link_state status; + + if (!(link->flags & DL_FLAG_MANAGED) || + link->flags & DL_FLAG_SYNC_STATE_ONLY) + continue; + + status = link->status; + if (status == DL_STATE_CONSUMER_PROBE) { + device_links_write_unlock(); + + wait_for_device_probe(); + goto start; + } + WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND); + if (status == DL_STATE_ACTIVE) { + struct device *consumer = link->consumer; + + get_device(consumer); + + device_links_write_unlock(); + + device_release_driver_internal(consumer, NULL, + consumer->parent); + put_device(consumer); + goto start; + } + } + + device_links_write_unlock(); +} + +/** + * device_links_purge - Delete existing links to other devices. + * @dev: Target device. 
+ */ +static void device_links_purge(struct device *dev) +{ + struct device_link *link, *ln; + + if (dev->class == &devlink_class) + return; + + mutex_lock(&wfs_lock); + list_del_init(&dev->links.needs_suppliers); + mutex_unlock(&wfs_lock); + + /* + * Delete all of the remaining links from this device to any other + * devices (either consumers or suppliers). + */ + device_links_write_lock(); + + list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) { + WARN_ON(link->status == DL_STATE_ACTIVE); + __device_link_del(&link->kref); + } + + list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) { + WARN_ON(link->status != DL_STATE_DORMANT && + link->status != DL_STATE_NONE); + __device_link_del(&link->kref); + } + + device_links_write_unlock(); +} + +static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; +static int __init fw_devlink_setup(char *arg) +{ + if (!arg) + return -EINVAL; + + if (strcmp(arg, "off") == 0) { + fw_devlink_flags = 0; + } else if (strcmp(arg, "permissive") == 0) { + fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY; + } else if (strcmp(arg, "on") == 0) { + fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER; + } else if (strcmp(arg, "rpm") == 0) { + fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER | + DL_FLAG_PM_RUNTIME; + } + return 0; +} +early_param("fw_devlink", fw_devlink_setup); + +u32 fw_devlink_get_flags(void) +{ + return fw_devlink_flags; +} + +static bool fw_devlink_is_permissive(void) +{ + return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY; +} + +static void fw_devlink_link_device(struct device *dev) +{ + int fw_ret; + + if (!fw_devlink_flags) + return; + + mutex_lock(&defer_fw_devlink_lock); + if (!defer_fw_devlink_count) + device_link_add_missing_supplier_links(); + + /* + * The device's fwnode not having add_links() doesn't affect if other + * consumers can find this device as a supplier. So, this check is + * intentionally placed after device_link_add_missing_supplier_links(). + */ + if (!fwnode_has_op(dev->fwnode, add_links)) + goto out; + + /* + * If fw_devlink is being deferred, assume all devices have mandatory + * suppliers they need to link to later. Then, when the fw_devlink is + * resumed, all these devices will get a chance to try and link to any + * suppliers they have. + */ + if (!defer_fw_devlink_count) { + fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev); + if (fw_ret == -ENODEV && fw_devlink_is_permissive()) + fw_ret = -EAGAIN; + } else { + fw_ret = -ENODEV; + /* + * defer_hook is not used to add device to deferred_sync list + * until device is bound. Since deferred fw devlink also blocks + * probing, same list hook can be used for deferred_fw_devlink. + */ + list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink); + } + + if (fw_ret == -ENODEV) + device_link_wait_for_mandatory_supplier(dev); + else if (fw_ret) + device_link_wait_for_optional_supplier(dev); + +out: + mutex_unlock(&defer_fw_devlink_lock); +} + +/** + * fw_devlink_pause - Pause parsing of fwnode to create device links + * + * Calling this function defers any fwnode parsing to create device links until + * fw_devlink_resume() is called. Both these functions are ref counted and the + * caller needs to match the calls. + * + * While fw_devlink is paused: + * - Any device that is added won't have its fwnode parsed to create device + * links. + * - The probe of the device will also be deferred during this period. + * - Any devices that were already added, but waiting for suppliers won't be + * able to link to newly added devices. 
+ *
+ * Once fw_devlink_resume() is called:
+ * - All the fwnodes that were not parsed will be parsed.
+ * - All the devices that were deferred probing will be reattempted if they
+ * aren't waiting for any more suppliers.
+ *
+ * This pair of functions is mainly meant to optimize the parsing of fwnodes
+ * when a lot of devices that need to link to each other are added in a short
+ * interval of time. For example, adding all the top level devices in a system.
+ *
+ * For example, if N devices are added and:
+ * - All the consumers are added before their suppliers
+ * - All the suppliers of the N devices are part of the N devices
+ *
+ * Then:
+ *
+ * - With the use of fw_devlink_pause() and fw_devlink_resume(), each device
+ * will only need one parsing of its fwnode because it is guaranteed to find
+ * all the supplier devices already registered and ready to link to. It won't
+ * have to do another pass later to find one or more suppliers it couldn't
+ * find in the first parse of the fwnode. So, we'll only need O(N) fwnode
+ * parses.
+ *
+ * - Without the use of fw_devlink_pause() and fw_devlink_resume(), we would
+ * end up doing O(N^2) parses of fwnodes because every device that's added is
+ * guaranteed to trigger a parse of the fwnode of every device added before
+ * it. This O(N^2) parse is made worse by the fact that when a fwnode of a
+ * device is parsed, all its descendant devices might need to have their
+ * fwnodes parsed too (even if the devices themselves aren't added).
+ */
+void fw_devlink_pause(void)
+{
+ mutex_lock(&defer_fw_devlink_lock);
+ defer_fw_devlink_count++;
+ mutex_unlock(&defer_fw_devlink_lock);
+}
+
+/** fw_devlink_resume - Resume parsing of fwnode to create device links
+ *
+ * This function is used in conjunction with fw_devlink_pause() and is ref
+ * counted. See documentation for fw_devlink_pause() for more details.
+ */
+void fw_devlink_resume(void)
+{
+ struct device *dev, *tmp;
+ LIST_HEAD(probe_list);
+
+ mutex_lock(&defer_fw_devlink_lock);
+ if (!defer_fw_devlink_count) {
+ WARN(true, "Unmatched fw_devlink pause/resume!");
+ goto out;
+ }
+
+ defer_fw_devlink_count--;
+ if (defer_fw_devlink_count)
+ goto out;
+
+ device_link_add_missing_supplier_links();
+ list_splice_tail_init(&deferred_fw_devlink, &probe_list);
+out:
+ mutex_unlock(&defer_fw_devlink_lock);
+
+ /*
+ * bus_probe_device() can cause new devices to get added and they'll
+ * try to grab defer_fw_devlink_lock. So, this needs to be done outside
+ * the defer_fw_devlink_lock.
+ */
+ list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
+ list_del_init(&dev->links.defer_hook);
+ bus_probe_device(dev);
+ }
+}
+/* Device links support end. */
+
+int (*platform_notify)(struct device *dev) = NULL;
+int (*platform_notify_remove)(struct device *dev) = NULL;
+static struct kobject *dev_kobj;
+struct kobject *sysfs_dev_char_kobj;
+struct kobject *sysfs_dev_block_kobj;
+
+static DEFINE_MUTEX(device_hotplug_lock);
+
+void lock_device_hotplug(void)
+{
+ mutex_lock(&device_hotplug_lock);
+}
+
+void unlock_device_hotplug(void)
+{
+ mutex_unlock(&device_hotplug_lock);
+}
+
+int lock_device_hotplug_sysfs(void)
+{
+ if (mutex_trylock(&device_hotplug_lock))
+ return 0;
+
+ /* Avoid busy looping (5 ms of sleep should do.
*/ + msleep(5); + return restart_syscall(); +} + +#ifdef CONFIG_BLOCK +static inline int device_is_not_partition(struct device *dev) +{ + return !(dev->type == &part_type); +} +#else +static inline int device_is_not_partition(struct device *dev) +{ + return 1; +} +#endif + +static int +device_platform_notify(struct device *dev, enum kobject_action action) +{ + int ret; + + ret = acpi_platform_notify(dev, action); + if (ret) + return ret; + + ret = software_node_notify(dev, action); + if (ret) + return ret; + + if (platform_notify && action == KOBJ_ADD) + platform_notify(dev); + else if (platform_notify_remove && action == KOBJ_REMOVE) + platform_notify_remove(dev); + return 0; +} + +/** + * dev_driver_string - Return a device's driver name, if at all possible + * @dev: struct device to get the name of + * + * Will return the device's driver's name if it is bound to a device. If + * the device is not bound to a driver, it will return the name of the bus + * it is attached to. If it is not attached to a bus either, an empty + * string will be returned. + */ +const char *dev_driver_string(const struct device *dev) +{ + struct device_driver *drv; + + /* dev->driver can change to NULL underneath us because of unbinding, + * so be careful about accessing it. dev->bus and dev->class should + * never change once they are set, so they don't need special care. + */ + drv = READ_ONCE(dev->driver); + return drv ? drv->name : dev_bus_name(dev); +} +EXPORT_SYMBOL(dev_driver_string); + +#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) + +static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct device_attribute *dev_attr = to_dev_attr(attr); + struct device *dev = kobj_to_dev(kobj); + ssize_t ret = -EIO; + + if (dev_attr->show) + ret = dev_attr->show(dev, dev_attr, buf); + if (ret >= (ssize_t)PAGE_SIZE) { + printk("dev_attr_show: %pS returned bad count\n", + dev_attr->show); + } + return ret; +} + +static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct device_attribute *dev_attr = to_dev_attr(attr); + struct device *dev = kobj_to_dev(kobj); + ssize_t ret = -EIO; + + if (dev_attr->store) + ret = dev_attr->store(dev, dev_attr, buf, count); + return ret; +} + +static const struct sysfs_ops dev_sysfs_ops = { + .show = dev_attr_show, + .store = dev_attr_store, +}; + +#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr) + +ssize_t device_store_ulong(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + int ret; + unsigned long new; + + ret = kstrtoul(buf, 0, &new); + if (ret) + return ret; + *(unsigned long *)(ea->var) = new; + /* Always return full write size even if we didn't consume all */ + return size; +} +EXPORT_SYMBOL_GPL(device_store_ulong); + +ssize_t device_show_ulong(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var)); +} +EXPORT_SYMBOL_GPL(device_show_ulong); + +ssize_t device_store_int(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + int ret; + long new; + + ret = kstrtol(buf, 0, &new); + if (ret) + return ret; + + if (new > INT_MAX || new < INT_MIN) + return -EINVAL; + *(int *)(ea->var) = new; + /* Always return full write size even if 
we didn't consume all */ + return size; +} +EXPORT_SYMBOL_GPL(device_store_int); + +ssize_t device_show_int(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + + return sysfs_emit(buf, "%d\n", *(int *)(ea->var)); +} +EXPORT_SYMBOL_GPL(device_show_int); + +ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + + if (strtobool(buf, ea->var) < 0) + return -EINVAL; + + return size; +} +EXPORT_SYMBOL_GPL(device_store_bool); + +ssize_t device_show_bool(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + + return sysfs_emit(buf, "%d\n", *(bool *)(ea->var)); +} +EXPORT_SYMBOL_GPL(device_show_bool); + +/** + * device_release - free device structure. + * @kobj: device's kobject. + * + * This is called once the reference count for the object + * reaches 0. We forward the call to the device's release + * method, which should handle actually freeing the structure. + */ +static void device_release(struct kobject *kobj) +{ + struct device *dev = kobj_to_dev(kobj); + struct device_private *p = dev->p; + + /* + * Some platform devices are driven without driver attached + * and managed resources may have been acquired. Make sure + * all resources are released. + * + * Drivers still can add resources into device after device + * is deleted but alive, so release devres here to avoid + * possible memory leak. + */ + devres_release_all(dev); + + kfree(dev->dma_range_map); + + if (dev->release) + dev->release(dev); + else if (dev->type && dev->type->release) + dev->type->release(dev); + else if (dev->class && dev->class->dev_release) + dev->class->dev_release(dev); + else + WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. 
See Documentation/core-api/kobject.rst.\n", + dev_name(dev)); + kfree(p); +} + +static const void *device_namespace(struct kobject *kobj) +{ + struct device *dev = kobj_to_dev(kobj); + const void *ns = NULL; + + if (dev->class && dev->class->ns_type) + ns = dev->class->namespace(dev); + + return ns; +} + +static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid) +{ + struct device *dev = kobj_to_dev(kobj); + + if (dev->class && dev->class->get_ownership) + dev->class->get_ownership(dev, uid, gid); +} + +static struct kobj_type device_ktype = { + .release = device_release, + .sysfs_ops = &dev_sysfs_ops, + .namespace = device_namespace, + .get_ownership = device_get_ownership, +}; + + +static int dev_uevent_filter(struct kset *kset, struct kobject *kobj) +{ + struct kobj_type *ktype = get_ktype(kobj); + + if (ktype == &device_ktype) { + struct device *dev = kobj_to_dev(kobj); + if (dev->bus) + return 1; + if (dev->class) + return 1; + } + return 0; +} + +static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj) +{ + struct device *dev = kobj_to_dev(kobj); + + if (dev->bus) + return dev->bus->name; + if (dev->class) + return dev->class->name; + return NULL; +} + +static int dev_uevent(struct kset *kset, struct kobject *kobj, + struct kobj_uevent_env *env) +{ + struct device *dev = kobj_to_dev(kobj); + int retval = 0; + + /* add device node properties if present */ + if (MAJOR(dev->devt)) { + const char *tmp; + const char *name; + umode_t mode = 0; + kuid_t uid = GLOBAL_ROOT_UID; + kgid_t gid = GLOBAL_ROOT_GID; + + add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); + add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); + name = device_get_devnode(dev, &mode, &uid, &gid, &tmp); + if (name) { + add_uevent_var(env, "DEVNAME=%s", name); + if (mode) + add_uevent_var(env, "DEVMODE=%#o", mode & 0777); + if (!uid_eq(uid, GLOBAL_ROOT_UID)) + add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid)); + if (!gid_eq(gid, GLOBAL_ROOT_GID)) + add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid)); + kfree(tmp); + } + } + + if (dev->type && dev->type->name) + add_uevent_var(env, "DEVTYPE=%s", dev->type->name); + + if (dev->driver) + add_uevent_var(env, "DRIVER=%s", dev->driver->name); + + /* Add common DT information about the device */ + of_device_uevent(dev, env); + + /* have the bus specific function add its stuff */ + if (dev->bus && dev->bus->uevent) { + retval = dev->bus->uevent(dev, env); + if (retval) + pr_debug("device: '%s': %s: bus uevent() returned %d\n", + dev_name(dev), __func__, retval); + } + + /* have the class specific function add its stuff */ + if (dev->class && dev->class->dev_uevent) { + retval = dev->class->dev_uevent(dev, env); + if (retval) + pr_debug("device: '%s': %s: class uevent() " + "returned %d\n", dev_name(dev), + __func__, retval); + } + + /* have the device type specific function add its stuff */ + if (dev->type && dev->type->uevent) { + retval = dev->type->uevent(dev, env); + if (retval) + pr_debug("device: '%s': %s: dev_type uevent() " + "returned %d\n", dev_name(dev), + __func__, retval); + } + + return retval; +} + +static const struct kset_uevent_ops device_uevent_ops = { + .filter = dev_uevent_filter, + .name = dev_uevent_name, + .uevent = dev_uevent, +}; + +static ssize_t uevent_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct kobject *top_kobj; + struct kset *kset; + struct kobj_uevent_env *env = NULL; + int i; + int len = 0; + int retval; + + /* search the kset, the device belongs 
to */ + top_kobj = &dev->kobj; + while (!top_kobj->kset && top_kobj->parent) + top_kobj = top_kobj->parent; + if (!top_kobj->kset) + goto out; + + kset = top_kobj->kset; + if (!kset->uevent_ops || !kset->uevent_ops->uevent) + goto out; + + /* respect filter */ + if (kset->uevent_ops && kset->uevent_ops->filter) + if (!kset->uevent_ops->filter(kset, &dev->kobj)) + goto out; + + env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL); + if (!env) + return -ENOMEM; + + /* let the kset specific function add its keys */ + retval = kset->uevent_ops->uevent(kset, &dev->kobj, env); + if (retval) + goto out; + + /* copy keys to file */ + for (i = 0; i < env->envp_idx; i++) + len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]); +out: + kfree(env); + return len; +} + +static ssize_t uevent_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int rc; + + rc = kobject_synth_uevent(&dev->kobj, buf, count); + + if (rc) { + dev_err(dev, "uevent: failed to send synthetic uevent\n"); + return rc; + } + + return count; +} +static DEVICE_ATTR_RW(uevent); + +static ssize_t online_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + bool val; + + device_lock(dev); + val = !dev->offline; + device_unlock(dev); + return sysfs_emit(buf, "%u\n", val); +} + +static ssize_t online_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + bool val; + int ret; + + ret = strtobool(buf, &val); + if (ret < 0) + return ret; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + ret = val ? device_online(dev) : device_offline(dev); + unlock_device_hotplug(); + return ret < 0 ? ret : count; +} +static DEVICE_ATTR_RW(online); + +static ssize_t removable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + const char *loc; + + switch (dev->removable) { + case DEVICE_REMOVABLE: + loc = "removable"; + break; + case DEVICE_FIXED: + loc = "fixed"; + break; + default: + loc = "unknown"; + } + return sysfs_emit(buf, "%s\n", loc); +} +static DEVICE_ATTR_RO(removable); + +int device_add_groups(struct device *dev, const struct attribute_group **groups) +{ + return sysfs_create_groups(&dev->kobj, groups); +} +EXPORT_SYMBOL_GPL(device_add_groups); + +void device_remove_groups(struct device *dev, + const struct attribute_group **groups) +{ + sysfs_remove_groups(&dev->kobj, groups); +} +EXPORT_SYMBOL_GPL(device_remove_groups); + +union device_attr_group_devres { + const struct attribute_group *group; + const struct attribute_group **groups; +}; + +static int devm_attr_group_match(struct device *dev, void *res, void *data) +{ + return ((union device_attr_group_devres *)res)->group == data; +} + +static void devm_attr_group_remove(struct device *dev, void *res) +{ + union device_attr_group_devres *devres = res; + const struct attribute_group *group = devres->group; + + dev_dbg(dev, "%s: removing group %p\n", __func__, group); + sysfs_remove_group(&dev->kobj, group); +} + +static void devm_attr_groups_remove(struct device *dev, void *res) +{ + union device_attr_group_devres *devres = res; + const struct attribute_group **groups = devres->groups; + + dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); + sysfs_remove_groups(&dev->kobj, groups); +} + +/** + * devm_device_add_group - given a device, create a managed attribute group + * @dev: The device to create the group for + * @grp: The attribute group to create + * + * This function creates a group for the first time. 
It will explicitly + * warn and error if any of the attribute files being created already exist. + * + * Returns 0 on success or error code on failure. + */ +int devm_device_add_group(struct device *dev, const struct attribute_group *grp) +{ + union device_attr_group_devres *devres; + int error; + + devres = devres_alloc(devm_attr_group_remove, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + error = sysfs_create_group(&dev->kobj, grp); + if (error) { + devres_free(devres); + return error; + } + + devres->group = grp; + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_device_add_group); + +/** + * devm_device_remove_group: remove a managed group from a device + * @dev: device to remove the group from + * @grp: group to remove + * + * This function removes a group of attributes from a device. The attributes + * previously have to have been created for this group, otherwise it will fail. + */ +void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp) +{ + WARN_ON(devres_release(dev, devm_attr_group_remove, + devm_attr_group_match, + /* cast away const */ (void *)grp)); +} +EXPORT_SYMBOL_GPL(devm_device_remove_group); + +/** + * devm_device_add_groups - create a bunch of managed attribute groups + * @dev: The device to create the group for + * @groups: The attribute groups to create, NULL terminated + * + * This function creates a bunch of managed attribute groups. If an error + * occurs when creating a group, all previously created groups will be + * removed, unwinding everything back to the original state when this + * function was called. It will explicitly warn and error if any of the + * attribute files being created already exist. + * + * Returns 0 on success or error code from sysfs_create_group on failure. + */ +int devm_device_add_groups(struct device *dev, + const struct attribute_group **groups) +{ + union device_attr_group_devres *devres; + int error; + + devres = devres_alloc(devm_attr_groups_remove, + sizeof(*devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + error = sysfs_create_groups(&dev->kobj, groups); + if (error) { + devres_free(devres); + return error; + } + + devres->groups = groups; + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_device_add_groups); + +/** + * devm_device_remove_groups - remove a list of managed groups + * + * @dev: The device for the groups to be removed from + * @groups: NULL terminated list of groups to be removed + * + * If groups is not NULL, remove the specified groups from the device. 
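+ *
+ * Editorial example, not part of this patch (the foo_* names are made up and
+ * a foo_value_show() callback is assumed to exist elsewhere):
+ *
+ *   static DEVICE_ATTR_RO(foo_value);
+ *
+ *   static struct attribute *foo_attrs[] = {
+ *           &dev_attr_foo_value.attr,
+ *           NULL
+ *   };
+ *   ATTRIBUTE_GROUPS(foo);
+ *
+ *   static int foo_probe(struct platform_device *pdev)
+ *   {
+ *           return devm_device_add_groups(&pdev->dev, foo_groups);
+ *   }
+ *
+ * With devm_device_add_groups() the groups are removed automatically on
+ * driver detach, so an explicit devm_device_remove_groups() call is only
+ * needed when the attributes must disappear earlier than that.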
+ */ +void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups) +{ + WARN_ON(devres_release(dev, devm_attr_groups_remove, + devm_attr_group_match, + /* cast away const */ (void *)groups)); +} +EXPORT_SYMBOL_GPL(devm_device_remove_groups); + +static int device_add_attrs(struct device *dev) +{ + struct class *class = dev->class; + const struct device_type *type = dev->type; + int error; + + if (class) { + error = device_add_groups(dev, class->dev_groups); + if (error) + return error; + } + + if (type) { + error = device_add_groups(dev, type->groups); + if (error) + goto err_remove_class_groups; + } + + error = device_add_groups(dev, dev->groups); + if (error) + goto err_remove_type_groups; + + if (device_supports_offline(dev) && !dev->offline_disabled) { + error = device_create_file(dev, &dev_attr_online); + if (error) + goto err_remove_dev_groups; + } + + if (fw_devlink_flags && !fw_devlink_is_permissive()) { + error = device_create_file(dev, &dev_attr_waiting_for_supplier); + if (error) + goto err_remove_dev_online; + } + + if (dev_removable_is_valid(dev)) { + error = device_create_file(dev, &dev_attr_removable); + if (error) + goto err_remove_dev_waiting_for_supplier; + } + + return 0; + + err_remove_dev_waiting_for_supplier: + device_remove_file(dev, &dev_attr_waiting_for_supplier); + err_remove_dev_online: + device_remove_file(dev, &dev_attr_online); + err_remove_dev_groups: + device_remove_groups(dev, dev->groups); + err_remove_type_groups: + if (type) + device_remove_groups(dev, type->groups); + err_remove_class_groups: + if (class) + device_remove_groups(dev, class->dev_groups); + + return error; +} + +static void device_remove_attrs(struct device *dev) +{ + struct class *class = dev->class; + const struct device_type *type = dev->type; + + device_remove_file(dev, &dev_attr_removable); + device_remove_file(dev, &dev_attr_waiting_for_supplier); + device_remove_file(dev, &dev_attr_online); + device_remove_groups(dev, dev->groups); + + if (type) + device_remove_groups(dev, type->groups); + + if (class) + device_remove_groups(dev, class->dev_groups); +} + +static ssize_t dev_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return print_dev_t(buf, dev->devt); +} +static DEVICE_ATTR_RO(dev); + +/* /sys/devices/ */ +struct kset *devices_kset; + +/** + * devices_kset_move_before - Move device in the devices_kset's list. + * @deva: Device to move. + * @devb: Device @deva should come before. + */ +static void devices_kset_move_before(struct device *deva, struct device *devb) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s before %s\n", + dev_name(deva), dev_name(devb)); + spin_lock(&devices_kset->list_lock); + list_move_tail(&deva->kobj.entry, &devb->kobj.entry); + spin_unlock(&devices_kset->list_lock); +} + +/** + * devices_kset_move_after - Move device in the devices_kset's list. + * @deva: Device to move + * @devb: Device @deva should come after. + */ +static void devices_kset_move_after(struct device *deva, struct device *devb) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s after %s\n", + dev_name(deva), dev_name(devb)); + spin_lock(&devices_kset->list_lock); + list_move(&deva->kobj.entry, &devb->kobj.entry); + spin_unlock(&devices_kset->list_lock); +} + +/** + * devices_kset_move_last - move the device to the end of devices_kset's list. 
+ * @dev: device to move + */ +void devices_kset_move_last(struct device *dev) +{ + if (!devices_kset) + return; + pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev)); + spin_lock(&devices_kset->list_lock); + list_move_tail(&dev->kobj.entry, &devices_kset->list); + spin_unlock(&devices_kset->list_lock); +} + +/** + * device_create_file - create sysfs attribute file for device. + * @dev: device. + * @attr: device attribute descriptor. + */ +int device_create_file(struct device *dev, + const struct device_attribute *attr) +{ + int error = 0; + + if (dev) { + WARN(((attr->attr.mode & S_IWUGO) && !attr->store), + "Attribute %s: write permission without 'store'\n", + attr->attr.name); + WARN(((attr->attr.mode & S_IRUGO) && !attr->show), + "Attribute %s: read permission without 'show'\n", + attr->attr.name); + error = sysfs_create_file(&dev->kobj, &attr->attr); + } + + return error; +} +EXPORT_SYMBOL_GPL(device_create_file); + +/** + * device_remove_file - remove sysfs attribute file. + * @dev: device. + * @attr: device attribute descriptor. + */ +void device_remove_file(struct device *dev, + const struct device_attribute *attr) +{ + if (dev) + sysfs_remove_file(&dev->kobj, &attr->attr); +} +EXPORT_SYMBOL_GPL(device_remove_file); + +/** + * device_remove_file_self - remove sysfs attribute file from its own method. + * @dev: device. + * @attr: device attribute descriptor. + * + * See kernfs_remove_self() for details. + */ +bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr) +{ + if (dev) + return sysfs_remove_file_self(&dev->kobj, &attr->attr); + else + return false; +} +EXPORT_SYMBOL_GPL(device_remove_file_self); + +/** + * device_create_bin_file - create sysfs binary attribute file for device. + * @dev: device. + * @attr: device binary attribute descriptor. + */ +int device_create_bin_file(struct device *dev, + const struct bin_attribute *attr) +{ + int error = -EINVAL; + if (dev) + error = sysfs_create_bin_file(&dev->kobj, attr); + return error; +} +EXPORT_SYMBOL_GPL(device_create_bin_file); + +/** + * device_remove_bin_file - remove sysfs binary attribute file + * @dev: device. + * @attr: device binary attribute descriptor. + */ +void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr) +{ + if (dev) + sysfs_remove_bin_file(&dev->kobj, attr); +} +EXPORT_SYMBOL_GPL(device_remove_bin_file); + +static void klist_children_get(struct klist_node *n) +{ + struct device_private *p = to_device_private_parent(n); + struct device *dev = p->device; + + get_device(dev); +} + +static void klist_children_put(struct klist_node *n) +{ + struct device_private *p = to_device_private_parent(n); + struct device *dev = p->device; + + put_device(dev); +} + +/** + * device_initialize - init device structure. + * @dev: device. + * + * This prepares the device for use by other layers by initializing + * its fields. + * It is the first half of device_register(), if called by + * that function, though it can also be called separately, so one + * may use @dev's fields. In particular, get_device()/put_device() + * may be used for reference counting of @dev after calling this + * function. + * + * All fields in @dev must be initialized by the caller to 0, except + * for those explicitly set to some other value. The simplest + * approach is to use kzalloc() to allocate the structure containing + * @dev. + * + * NOTE: Use put_device() to give up your reference instead of freeing + * @dev directly once you have called this function. 
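+ *
+ * A minimal usage sketch (editorial illustration, not part of this patch;
+ * struct foo, foo_release() and foo->id are made up):
+ *
+ *   foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ *   if (!foo)
+ *           return -ENOMEM;
+ *
+ *   device_initialize(&foo->dev);
+ *   foo->dev.release = foo_release;     // frees the containing struct foo
+ *   dev_set_name(&foo->dev, "foo%u", foo->id);
+ *   err = device_add(&foo->dev);
+ *   if (err)
+ *           put_device(&foo->dev);      // never kfree() @dev at this point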
+ */ +void device_initialize(struct device *dev) +{ + dev->kobj.kset = devices_kset; + kobject_init(&dev->kobj, &device_ktype); + INIT_LIST_HEAD(&dev->dma_pools); + mutex_init(&dev->mutex); +#ifdef CONFIG_PROVE_LOCKING + mutex_init(&dev->lockdep_mutex); +#endif + lockdep_set_novalidate_class(&dev->mutex); + spin_lock_init(&dev->devres_lock); + INIT_LIST_HEAD(&dev->devres_head); + device_pm_init(dev); + set_dev_node(dev, -1); +#ifdef CONFIG_GENERIC_MSI_IRQ + raw_spin_lock_init(&dev->msi_lock); + INIT_LIST_HEAD(&dev->msi_list); +#endif + INIT_LIST_HEAD(&dev->links.consumers); + INIT_LIST_HEAD(&dev->links.suppliers); + INIT_LIST_HEAD(&dev->links.needs_suppliers); + INIT_LIST_HEAD(&dev->links.defer_hook); + dev->links.status = DL_DEV_NO_DRIVER; +} +EXPORT_SYMBOL_GPL(device_initialize); + +struct kobject *virtual_device_parent(struct device *dev) +{ + static struct kobject *virtual_dir = NULL; + + if (!virtual_dir) + virtual_dir = kobject_create_and_add("virtual", + &devices_kset->kobj); + + return virtual_dir; +} + +struct class_dir { + struct kobject kobj; + struct class *class; +}; + +#define to_class_dir(obj) container_of(obj, struct class_dir, kobj) + +static void class_dir_release(struct kobject *kobj) +{ + struct class_dir *dir = to_class_dir(kobj); + kfree(dir); +} + +static const +struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj) +{ + struct class_dir *dir = to_class_dir(kobj); + return dir->class->ns_type; +} + +static struct kobj_type class_dir_ktype = { + .release = class_dir_release, + .sysfs_ops = &kobj_sysfs_ops, + .child_ns_type = class_dir_child_ns_type +}; + +static struct kobject * +class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) +{ + struct class_dir *dir; + int retval; + + dir = kzalloc(sizeof(*dir), GFP_KERNEL); + if (!dir) + return ERR_PTR(-ENOMEM); + + dir->class = class; + kobject_init(&dir->kobj, &class_dir_ktype); + + dir->kobj.kset = &class->p->glue_dirs; + + retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); + if (retval < 0) { + kobject_put(&dir->kobj); + return ERR_PTR(retval); + } + return &dir->kobj; +} + +static DEFINE_MUTEX(gdp_mutex); + +static struct kobject *get_device_parent(struct device *dev, + struct device *parent) +{ + if (dev->class) { + struct kobject *kobj = NULL; + struct kobject *parent_kobj; + struct kobject *k; + +#ifdef CONFIG_BLOCK + /* block disks show up in /sys/block */ + if (sysfs_deprecated && dev->class == &block_class) { + if (parent && parent->class == &block_class) + return &parent->kobj; + return &block_class.p->subsys.kobj; + } +#endif + + /* + * If we have no parent, we live in "virtual". + * Class-devices with a non class-device as parent, live + * in a "glue" directory to prevent namespace collisions. 
+ */
+ if (parent == NULL)
+ parent_kobj = virtual_device_parent(dev);
+ else if (parent->class && !dev->class->ns_type)
+ return &parent->kobj;
+ else
+ parent_kobj = &parent->kobj;
+
+ mutex_lock(&gdp_mutex);
+
+ /* find our class-directory at the parent and reference it */
+ spin_lock(&dev->class->p->glue_dirs.list_lock);
+ list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
+ if (k->parent == parent_kobj) {
+ kobj = kobject_get(k);
+ break;
+ }
+ spin_unlock(&dev->class->p->glue_dirs.list_lock);
+ if (kobj) {
+ mutex_unlock(&gdp_mutex);
+ return kobj;
+ }
+
+ /* or create a new class-directory at the parent device */
+ k = class_dir_create_and_add(dev->class, parent_kobj);
+ /* do not emit an uevent for this simple "glue" directory */
+ mutex_unlock(&gdp_mutex);
+ return k;
+ }
+
+ /* subsystems can specify a default root directory for their devices */
+ if (!parent && dev->bus && dev->bus->dev_root)
+ return &dev->bus->dev_root->kobj;
+
+ if (parent)
+ return &parent->kobj;
+ return NULL;
+}
+
+static inline bool live_in_glue_dir(struct kobject *kobj,
+ struct device *dev)
+{
+ if (!kobj || !dev->class ||
+ kobj->kset != &dev->class->p->glue_dirs)
+ return false;
+ return true;
+}
+
+static inline struct kobject *get_glue_dir(struct device *dev)
+{
+ return dev->kobj.parent;
+}
+
+/*
+ * Make sure cleaning up the dir is the last step; we need to make
+ * sure the .release handler of the kobject is run while holding the
+ * global lock.
+ */
+static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+{
+ unsigned int ref;
+
+ /* see if we live in a "glue" directory */
+ if (!live_in_glue_dir(glue_dir, dev))
+ return;
+
+ mutex_lock(&gdp_mutex);
+ /*
+ * There is a race condition between removing glue directory
+ * and adding a new device under the glue directory.
+ *
+ * CPU1:                                         CPU2:
+ *
+ * device_add()
+ *   get_device_parent()
+ *     class_dir_create_and_add()
+ *       kobject_add_internal()
+ *         create_dir()    // create glue_dir
+ *
+ *                                               device_add()
+ *                                                 get_device_parent()
+ *                                                   kobject_get() // get glue_dir
+ *
+ * device_del()
+ *   cleanup_glue_dir()
+ *     kobject_del(glue_dir)
+ *
+ *                                               kobject_add()
+ *                                                 kobject_add_internal()
+ *                                                   create_dir() // in glue_dir
+ *                                                     sysfs_create_dir_ns()
+ *                                                       kernfs_create_dir_ns(sd)
+ *
+ *       sysfs_remove_dir() // glue_dir->sd=NULL
+ *       sysfs_put()        // free glue_dir->sd
+ *
+ *                                                       // sd is freed
+ *                                                       kernfs_new_node(sd)
+ *                                                         kernfs_get(glue_dir)
+ *                                                         kernfs_add_one()
+ *                                                           kernfs_put()
+ *
+ * Before CPU1 removes the last child device under the glue dir, if CPU2
+ * adds a new device under the glue dir, the glue_dir kobject reference
+ * count will be increased to 2 in kobject_get(k), and CPU2 will have
+ * called kernfs_create_dir_ns(). Meanwhile, CPU1 calls sysfs_remove_dir()
+ * and sysfs_put(), which results in glue_dir->sd being freed.
+ *
+ * Then CPU2 will see a stale "empty" but still potentially used
+ * glue dir around in kernfs_new_node().
+ *
+ * In order to avoid this happening, we also should make sure that the
+ * kernfs_node for glue_dir is released in CPU1 only when the refcount
+ * for the glue_dir kobj is 1.
+ */
+ ref = kref_read(&glue_dir->kref);
+ if (!kobject_has_children(glue_dir) && !--ref)
+ kobject_del(glue_dir);
+ kobject_put(glue_dir);
+ mutex_unlock(&gdp_mutex);
+}
+
+static int device_add_class_symlinks(struct device *dev)
+{
+ struct device_node *of_node = dev_of_node(dev);
+ int error;
+
+ if (of_node) {
+ error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
+ if (error)
+ dev_warn(dev, "Error %d creating of_node link\n", error);
+ /* An error here doesn't warrant bringing down the device */
+ }
+
+ if (!dev->class)
+ return 0;
+
+ error = sysfs_create_link(&dev->kobj,
+ &dev->class->p->subsys.kobj,
+ "subsystem");
+ if (error)
+ goto out_devnode;
+
+ if (dev->parent && device_is_not_partition(dev)) {
+ error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
+ "device");
+ if (error)
+ goto out_subsys;
+ }
+
+#ifdef CONFIG_BLOCK
+ /* /sys/block has directories and does not need symlinks */
+ if (sysfs_deprecated && dev->class == &block_class)
+ return 0;
+#endif
+
+ /* link in the class directory pointing to the device */
+ error = sysfs_create_link(&dev->class->p->subsys.kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_device;
+
+ return 0;
+
+out_device:
+ sysfs_remove_link(&dev->kobj, "device");
+
+out_subsys:
+ sysfs_remove_link(&dev->kobj, "subsystem");
+out_devnode:
+ sysfs_remove_link(&dev->kobj, "of_node");
+ return error;
+}
+
+static void device_remove_class_symlinks(struct device *dev)
+{
+ if (dev_of_node(dev))
+ sysfs_remove_link(&dev->kobj, "of_node");
+
+ if (!dev->class)
+ return;
+
+ if (dev->parent && device_is_not_partition(dev))
+ sysfs_remove_link(&dev->kobj, "device");
+ sysfs_remove_link(&dev->kobj, "subsystem");
+#ifdef CONFIG_BLOCK
+ if (sysfs_deprecated && dev->class == &block_class)
+ return;
+#endif
+ sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
+}
+
+/**
+ * dev_set_name - set a device name
+ * @dev: device
+ * @fmt: format string for the device's name
+ */
+int dev_set_name(struct device *dev, const char *fmt, ...)
+{
+ va_list vargs;
+ int err;
+
+ va_start(vargs, fmt);
+ err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+ va_end(vargs);
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_set_name);
+
+/**
+ * device_to_dev_kobj - select a /sys/dev/ directory for the device
+ * @dev: device
+ *
+ * By default we select char/ for new entries. Setting class->dev_kobj
+ * to NULL prevents an entry from being created. class->dev_kobj must
+ * be set (or cleared) before any devices are registered to the class,
+ * otherwise device_create_sys_dev_entry() and
+ * device_remove_sys_dev_entry() will disagree about the presence of
+ * the link.
+ */ +static struct kobject *device_to_dev_kobj(struct device *dev) +{ + struct kobject *kobj; + + if (dev->class) + kobj = dev->class->dev_kobj; + else + kobj = sysfs_dev_char_kobj; + + return kobj; +} + +static int device_create_sys_dev_entry(struct device *dev) +{ + struct kobject *kobj = device_to_dev_kobj(dev); + int error = 0; + char devt_str[15]; + + if (kobj) { + format_dev_t(devt_str, dev->devt); + error = sysfs_create_link(kobj, &dev->kobj, devt_str); + } + + return error; +} + +static void device_remove_sys_dev_entry(struct device *dev) +{ + struct kobject *kobj = device_to_dev_kobj(dev); + char devt_str[15]; + + if (kobj) { + format_dev_t(devt_str, dev->devt); + sysfs_remove_link(kobj, devt_str); + } +} + +static int device_private_init(struct device *dev) +{ + dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL); + if (!dev->p) + return -ENOMEM; + dev->p->device = dev; + klist_init(&dev->p->klist_children, klist_children_get, + klist_children_put); + INIT_LIST_HEAD(&dev->p->deferred_probe); + return 0; +} + +/** + * device_add - add device to device hierarchy. + * @dev: device. + * + * This is part 2 of device_register(), though may be called + * separately _iff_ device_initialize() has been called separately. + * + * This adds @dev to the kobject hierarchy via kobject_add(), adds it + * to the global and sibling lists for the device, then + * adds it to the other relevant subsystems of the driver model. + * + * Do not call this routine or device_register() more than once for + * any device structure. The driver model core is not designed to work + * with devices that get unregistered and then spring back to life. + * (Among other things, it's very hard to guarantee that all references + * to the previous incarnation of @dev have been dropped.) Allocate + * and register a fresh new struct device instead. + * + * NOTE: _Never_ directly free @dev after calling this function, even + * if it returned an error! Always use put_device() to give up your + * reference instead. + * + * Rule of thumb is: if device_add() succeeds, you should call + * device_del() when you want to get rid of it. If device_add() has + * *not* succeeded, use *only* put_device() to drop the reference + * count. + */ +int device_add(struct device *dev) +{ + struct device *parent; + struct kobject *kobj; + struct class_interface *class_intf; + int error = -EINVAL; + struct kobject *glue_dir = NULL; + + dev = get_device(dev); + if (!dev) + goto done; + + if (!dev->p) { + error = device_private_init(dev); + if (error) + goto done; + } + + /* + * for statically allocated devices, which should all be converted + * some day, we need to initialize the name. We prevent reading back + * the name, and force the use of dev_name() + */ + if (dev->init_name) { + dev_set_name(dev, "%s", dev->init_name); + dev->init_name = NULL; + } + + /* subsystems can specify simple device enumeration */ + if (!dev_name(dev) && dev->bus && dev->bus->dev_name) + dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); + + if (!dev_name(dev)) { + error = -EINVAL; + goto name_error; + } + + pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + + parent = get_device(dev->parent); + kobj = get_device_parent(dev, parent); + if (IS_ERR(kobj)) { + error = PTR_ERR(kobj); + goto parent_error; + } + if (kobj) + dev->kobj.parent = kobj; + + /* use parent numa_node */ + if (parent && (dev_to_node(dev) == NUMA_NO_NODE)) + set_dev_node(dev, dev_to_node(parent)); + + /* first, register with generic layer. 
*/ + /* we require the name to be set before, and pass NULL */ + error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); + if (error) { + glue_dir = get_glue_dir(dev); + goto Error; + } + + /* notify platform of device entry */ + error = device_platform_notify(dev, KOBJ_ADD); + if (error) + goto platform_error; + + error = device_create_file(dev, &dev_attr_uevent); + if (error) + goto attrError; + + error = device_add_class_symlinks(dev); + if (error) + goto SymlinkError; + error = device_add_attrs(dev); + if (error) + goto AttrsError; + error = bus_add_device(dev); + if (error) + goto BusError; + error = dpm_sysfs_add(dev); + if (error) + goto DPMError; + device_pm_add(dev); + + if (MAJOR(dev->devt)) { + error = device_create_file(dev, &dev_attr_dev); + if (error) + goto DevAttrError; + + error = device_create_sys_dev_entry(dev); + if (error) + goto SysEntryError; + + devtmpfs_create_node(dev); + } + + /* Notify clients of device addition. This call must come + * after dpm_sysfs_add() and before kobject_uevent(). + */ + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_ADD_DEVICE, dev); + + kobject_uevent(&dev->kobj, KOBJ_ADD); + + /* + * Check if any of the other devices (consumers) have been waiting for + * this device (supplier) to be added so that they can create a device + * link to it. + * + * This needs to happen after device_pm_add() because device_link_add() + * requires the supplier be registered before it's called. + * + * But this also needs to happen before bus_probe_device() to make sure + * waiting consumers can link to it before the driver is bound to the + * device and the driver sync_state callback is called for this device. + */ + if (dev->fwnode && !dev->fwnode->dev) { + dev->fwnode->dev = dev; + fw_devlink_link_device(dev); + } + + bus_probe_device(dev); + if (parent) + klist_add_tail(&dev->p->knode_parent, + &parent->p->klist_children); + + if (dev->class) { + mutex_lock(&dev->class->p->mutex); + /* tie the class to the device */ + klist_add_tail(&dev->p->knode_class, + &dev->class->p->klist_devices); + + /* notify any interfaces that the device is here */ + list_for_each_entry(class_intf, + &dev->class->p->interfaces, node) + if (class_intf->add_dev) + class_intf->add_dev(dev, class_intf); + mutex_unlock(&dev->class->p->mutex); + } +done: + put_device(dev); + return error; + SysEntryError: + if (MAJOR(dev->devt)) + device_remove_file(dev, &dev_attr_dev); + DevAttrError: + device_pm_remove(dev); + dpm_sysfs_remove(dev); + DPMError: + bus_remove_device(dev); + BusError: + device_remove_attrs(dev); + AttrsError: + device_remove_class_symlinks(dev); + SymlinkError: + device_remove_file(dev, &dev_attr_uevent); + attrError: + device_platform_notify(dev, KOBJ_REMOVE); +platform_error: + kobject_uevent(&dev->kobj, KOBJ_REMOVE); + glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); + Error: + cleanup_glue_dir(dev, glue_dir); +parent_error: + put_device(parent); +name_error: + kfree(dev->p); + dev->p = NULL; + goto done; +} +EXPORT_SYMBOL_GPL(device_add); + +/** + * device_register - register a device with the system. + * @dev: pointer to the device structure + * + * This happens in two clean steps - initialize the device + * and add it to the system. The two steps can be called + * separately, but this is the easiest and most common. + * I.e. you should only call the two helpers separately if + * have a clearly defined need to use and refcount the device + * before it is added to the hierarchy. 
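+ *
+ * A short sketch of the common one-step pattern (editorial illustration, not
+ * part of this patch; struct foo and foo_release() are made up):
+ *
+ *   foo->dev.release = foo_release;
+ *   dev_set_name(&foo->dev, "foo%u", foo->id);
+ *   err = device_register(&foo->dev);
+ *   if (err) {
+ *           put_device(&foo->dev);      // drops the ref, ->release frees foo
+ *           return err;
+ *   }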
+ * + * For more information, see the kerneldoc for device_initialize() + * and device_add(). + * + * NOTE: _Never_ directly free @dev after calling this function, even + * if it returned an error! Always use put_device() to give up the + * reference initialized in this function instead. + */ +int device_register(struct device *dev) +{ + device_initialize(dev); + return device_add(dev); +} +EXPORT_SYMBOL_GPL(device_register); + +/** + * get_device - increment reference count for device. + * @dev: device. + * + * This simply forwards the call to kobject_get(), though + * we do take care to provide for the case that we get a NULL + * pointer passed in. + */ +struct device *get_device(struct device *dev) +{ + return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL; +} +EXPORT_SYMBOL_GPL(get_device); + +/** + * put_device - decrement reference count. + * @dev: device in question. + */ +void put_device(struct device *dev) +{ + /* might_sleep(); */ + if (dev) + kobject_put(&dev->kobj); +} +EXPORT_SYMBOL_GPL(put_device); + +bool kill_device(struct device *dev) +{ + /* + * Require the device lock and set the "dead" flag to guarantee that + * the update behavior is consistent with the other bitfields near + * it and that we cannot have an asynchronous probe routine trying + * to run while we are tearing out the bus/class/sysfs from + * underneath the device. + */ + lockdep_assert_held(&dev->mutex); + + if (dev->p->dead) + return false; + dev->p->dead = true; + return true; +} +EXPORT_SYMBOL_GPL(kill_device); + +/** + * device_del - delete device from system. + * @dev: device. + * + * This is the first part of the device unregistration + * sequence. This removes the device from the lists we control + * from here, has it removed from the other driver model + * subsystems it was added to in device_add(), and removes it + * from the kobject hierarchy. + * + * NOTE: this should be called manually _iff_ device_add() was + * also called manually. + */ +void device_del(struct device *dev) +{ + struct device *parent = dev->parent; + struct kobject *glue_dir = NULL; + struct class_interface *class_intf; + unsigned int noio_flag; + + device_lock(dev); + kill_device(dev); + device_unlock(dev); + + if (dev->fwnode && dev->fwnode->dev == dev) + dev->fwnode->dev = NULL; + + /* Notify clients of device removal. This call must come + * before dpm_sysfs_remove(). 
+ */ + noio_flag = memalloc_noio_save(); + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DEL_DEVICE, dev); + + dpm_sysfs_remove(dev); + if (parent) + klist_del(&dev->p->knode_parent); + if (MAJOR(dev->devt)) { + devtmpfs_delete_node(dev); + device_remove_sys_dev_entry(dev); + device_remove_file(dev, &dev_attr_dev); + } + if (dev->class) { + device_remove_class_symlinks(dev); + + mutex_lock(&dev->class->p->mutex); + /* notify any interfaces that the device is now gone */ + list_for_each_entry(class_intf, + &dev->class->p->interfaces, node) + if (class_intf->remove_dev) + class_intf->remove_dev(dev, class_intf); + /* remove the device from the class list */ + klist_del(&dev->p->knode_class); + mutex_unlock(&dev->class->p->mutex); + } + device_remove_file(dev, &dev_attr_uevent); + device_remove_attrs(dev); + bus_remove_device(dev); + device_pm_remove(dev); + driver_deferred_probe_del(dev); + device_platform_notify(dev, KOBJ_REMOVE); + device_remove_properties(dev); + device_links_purge(dev); + + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_REMOVED_DEVICE, dev); + kobject_uevent(&dev->kobj, KOBJ_REMOVE); + glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); + cleanup_glue_dir(dev, glue_dir); + memalloc_noio_restore(noio_flag); + put_device(parent); +} +EXPORT_SYMBOL_GPL(device_del); + +/** + * device_unregister - unregister device from system. + * @dev: device going away. + * + * We do this in two parts, like we do device_register(). First, + * we remove it from all the subsystems with device_del(), then + * we decrement the reference count via put_device(). If that + * is the final reference count, the device will be cleaned up + * via device_release() above. Otherwise, the structure will + * stick around until the final reference to the device is dropped. + */ +void device_unregister(struct device *dev) +{ + pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + device_del(dev); + put_device(dev); +} +EXPORT_SYMBOL_GPL(device_unregister); + +static struct device *prev_device(struct klist_iter *i) +{ + struct klist_node *n = klist_prev(i); + struct device *dev = NULL; + struct device_private *p; + + if (n) { + p = to_device_private_parent(n); + dev = p->device; + } + return dev; +} + +static struct device *next_device(struct klist_iter *i) +{ + struct klist_node *n = klist_next(i); + struct device *dev = NULL; + struct device_private *p; + + if (n) { + p = to_device_private_parent(n); + dev = p->device; + } + return dev; +} + +/** + * device_get_devnode - path of device node file + * @dev: device + * @mode: returned file access mode + * @uid: returned file owner + * @gid: returned file group + * @tmp: possibly allocated string + * + * Return the relative path of a possible device node. + * Non-default names may need to allocate a memory to compose + * a name. This memory is returned in tmp and needs to be + * freed by the caller. 
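+ *
+ * Editorial example, not part of this patch: a class can supply the
+ * non-default name consumed here through its ->devnode() callback, e.g.
+ * (the foo names are made up):
+ *
+ *   static char *foo_devnode(struct device *dev, umode_t *mode)
+ *   {
+ *           if (mode)
+ *                   *mode = 0666;
+ *           return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
+ *   }
+ *
+ * The string returned by such a callback is what ends up in @tmp here and is
+ * freed by the caller.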
+ */ +const char *device_get_devnode(struct device *dev, + umode_t *mode, kuid_t *uid, kgid_t *gid, + const char **tmp) +{ + char *s; + + *tmp = NULL; + + /* the device type may provide a specific name */ + if (dev->type && dev->type->devnode) + *tmp = dev->type->devnode(dev, mode, uid, gid); + if (*tmp) + return *tmp; + + /* the class may provide a specific name */ + if (dev->class && dev->class->devnode) + *tmp = dev->class->devnode(dev, mode); + if (*tmp) + return *tmp; + + /* return name without allocation, tmp == NULL */ + if (strchr(dev_name(dev), '!') == NULL) + return dev_name(dev); + + /* replace '!' in the name with '/' */ + s = kstrdup(dev_name(dev), GFP_KERNEL); + if (!s) + return NULL; + strreplace(s, '!', '/'); + return *tmp = s; +} + +/** + * device_for_each_child - device child iterator. + * @parent: parent struct device. + * @fn: function to be called for each device. + * @data: data for the callback. + * + * Iterate over @parent's child devices, and call @fn for each, + * passing it @data. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + */ +int device_for_each_child(struct device *parent, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct klist_iter i; + struct device *child; + int error = 0; + + if (!parent->p) + return 0; + + klist_iter_init(&parent->p->klist_children, &i); + while (!error && (child = next_device(&i))) + error = fn(child, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(device_for_each_child); + +/** + * device_for_each_child_reverse - device child iterator in reversed order. + * @parent: parent struct device. + * @fn: function to be called for each device. + * @data: data for the callback. + * + * Iterate over @parent's child devices, and call @fn for each, + * passing it @data. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + */ +int device_for_each_child_reverse(struct device *parent, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct klist_iter i; + struct device *child; + int error = 0; + + if (!parent->p) + return 0; + + klist_iter_init(&parent->p->klist_children, &i); + while ((child = prev_device(&i)) && !error) + error = fn(child, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(device_for_each_child_reverse); + +/** + * device_find_child - device iterator for locating a particular device. + * @parent: parent struct device + * @match: Callback function to check device + * @data: Data to pass to match function + * + * This is similar to the device_for_each_child() function above, but it + * returns a reference to a device that is 'found' for later use, as + * determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero and a reference to the + * current device can be obtained, this function will return to the caller + * and not iterate over any more devices. + * + * NOTE: you will need to drop the reference with put_device() after use. 
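+ *
+ * Illustrative sketch (editorial, not part of this patch; to_foo() and the
+ * id variable are made up):
+ *
+ *   static int match_foo_id(struct device *dev, void *data)
+ *   {
+ *           return to_foo(dev)->id == *(unsigned int *)data;
+ *   }
+ *
+ *   child = device_find_child(parent, &id, match_foo_id);
+ *   if (child) {
+ *           // ... use child ...
+ *           put_device(child);
+ *   }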
+ */ +struct device *device_find_child(struct device *parent, void *data, + int (*match)(struct device *dev, void *data)) +{ + struct klist_iter i; + struct device *child; + + if (!parent) + return NULL; + + klist_iter_init(&parent->p->klist_children, &i); + while ((child = next_device(&i))) + if (match(child, data) && get_device(child)) + break; + klist_iter_exit(&i); + return child; +} +EXPORT_SYMBOL_GPL(device_find_child); + +/** + * device_find_child_by_name - device iterator for locating a child device. + * @parent: parent struct device + * @name: name of the child device + * + * This is similar to the device_find_child() function above, but it + * returns a reference to a device that has the name @name. + * + * NOTE: you will need to drop the reference with put_device() after use. + */ +struct device *device_find_child_by_name(struct device *parent, + const char *name) +{ + struct klist_iter i; + struct device *child; + + if (!parent) + return NULL; + + klist_iter_init(&parent->p->klist_children, &i); + while ((child = next_device(&i))) + if (sysfs_streq(dev_name(child), name) && get_device(child)) + break; + klist_iter_exit(&i); + return child; +} +EXPORT_SYMBOL_GPL(device_find_child_by_name); + +int __init devices_init(void) +{ + devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL); + if (!devices_kset) + return -ENOMEM; + dev_kobj = kobject_create_and_add("dev", NULL); + if (!dev_kobj) + goto dev_kobj_err; + sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj); + if (!sysfs_dev_block_kobj) + goto block_kobj_err; + sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj); + if (!sysfs_dev_char_kobj) + goto char_kobj_err; + + return 0; + + char_kobj_err: + kobject_put(sysfs_dev_block_kobj); + block_kobj_err: + kobject_put(dev_kobj); + dev_kobj_err: + kset_unregister(devices_kset); + return -ENOMEM; +} + +static int device_check_offline(struct device *dev, void *not_used) +{ + int ret; + + ret = device_for_each_child(dev, NULL, device_check_offline); + if (ret) + return ret; + + return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0; +} + +/** + * device_offline - Prepare the device for hot-removal. + * @dev: Device to be put offline. + * + * Execute the device bus type's .offline() callback, if present, to prepare + * the device for a subsequent hot-removal. If that succeeds, the device must + * not be used until either it is removed or its bus type's .online() callback + * is executed. + * + * Call under device_hotplug_lock. + */ +int device_offline(struct device *dev) +{ + int ret; + + if (dev->offline_disabled) + return -EPERM; + + ret = device_for_each_child(dev, NULL, device_check_offline); + if (ret) + return ret; + + device_lock(dev); + if (device_supports_offline(dev)) { + if (dev->offline) { + ret = 1; + } else { + ret = dev->bus->offline(dev); + if (!ret) { + kobject_uevent(&dev->kobj, KOBJ_OFFLINE); + dev->offline = true; + } + } + } + device_unlock(dev); + + return ret; +} + +/** + * device_online - Put the device back online after successful device_offline(). + * @dev: Device to be put back online. + * + * If device_offline() has been successfully executed for @dev, but the device + * has not been removed subsequently, execute its bus type's .online() callback + * to indicate that the device can be used again. + * + * Call under device_hotplug_lock. 
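+ *
+ * Expected calling pattern (editorial sketch, not part of this patch):
+ *
+ *   lock_device_hotplug();
+ *   ret = device_online(dev);
+ *   unlock_device_hotplug();
+ *
+ * A return value of 1 means the device was already online; negative values
+ * are errors from the bus type's .online() callback.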
+ */ +int device_online(struct device *dev) +{ + int ret = 0; + + device_lock(dev); + if (device_supports_offline(dev)) { + if (dev->offline) { + ret = dev->bus->online(dev); + if (!ret) { + kobject_uevent(&dev->kobj, KOBJ_ONLINE); + dev->offline = false; + } + } else { + ret = 1; + } + } + device_unlock(dev); + + return ret; +} + +struct root_device { + struct device dev; + struct module *owner; +}; + +static inline struct root_device *to_root_device(struct device *d) +{ + return container_of(d, struct root_device, dev); +} + +static void root_device_release(struct device *dev) +{ + kfree(to_root_device(dev)); +} + +/** + * __root_device_register - allocate and register a root device + * @name: root device name + * @owner: owner module of the root device, usually THIS_MODULE + * + * This function allocates a root device and registers it + * using device_register(). In order to free the returned + * device, use root_device_unregister(). + * + * Root devices are dummy devices which allow other devices + * to be grouped under /sys/devices. Use this function to + * allocate a root device and then use it as the parent of + * any device which should appear under /sys/devices/{name} + * + * The /sys/devices/{name} directory will also contain a + * 'module' symlink which points to the @owner directory + * in sysfs. + * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * + * Note: You probably want to use root_device_register(). + */ +struct device *__root_device_register(const char *name, struct module *owner) +{ + struct root_device *root; + int err = -ENOMEM; + + root = kzalloc(sizeof(struct root_device), GFP_KERNEL); + if (!root) + return ERR_PTR(err); + + err = dev_set_name(&root->dev, "%s", name); + if (err) { + kfree(root); + return ERR_PTR(err); + } + + root->dev.release = root_device_release; + + err = device_register(&root->dev); + if (err) { + put_device(&root->dev); + return ERR_PTR(err); + } + +#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */ + if (owner) { + struct module_kobject *mk = &owner->mkobj; + + err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module"); + if (err) { + device_unregister(&root->dev); + return ERR_PTR(err); + } + root->owner = owner; + } +#endif + + return &root->dev; +} +EXPORT_SYMBOL_GPL(__root_device_register); + +/** + * root_device_unregister - unregister and free a root device + * @dev: device going away + * + * This function unregisters and cleans up a device that was created by + * root_device_register(). 
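+ *
+ * Illustrative sketch (editorial, not part of this patch; the "foo" name is
+ * made up):
+ *
+ *   struct device *root;
+ *
+ *   root = root_device_register("foo");
+ *   if (IS_ERR(root))
+ *           return PTR_ERR(root);
+ *   // ... register child devices with .parent = root ...
+ *   root_device_unregister(root);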
+ */ +void root_device_unregister(struct device *dev) +{ + struct root_device *root = to_root_device(dev); + + if (root->owner) + sysfs_remove_link(&root->dev.kobj, "module"); + + device_unregister(dev); +} +EXPORT_SYMBOL_GPL(root_device_unregister); + + +static void device_create_release(struct device *dev) +{ + pr_debug("device: '%s': %s\n", dev_name(dev), __func__); + kfree(dev); +} + +static __printf(6, 0) struct device * +device_create_groups_vargs(struct class *class, struct device *parent, + dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, va_list args) +{ + struct device *dev = NULL; + int retval = -ENODEV; + + if (class == NULL || IS_ERR(class)) + goto error; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + retval = -ENOMEM; + goto error; + } + + device_initialize(dev); + dev->devt = devt; + dev->class = class; + dev->parent = parent; + dev->groups = groups; + dev->release = device_create_release; + dev_set_drvdata(dev, drvdata); + + retval = kobject_set_name_vargs(&dev->kobj, fmt, args); + if (retval) + goto error; + + retval = device_add(dev); + if (retval) + goto error; + + return dev; + +error: + put_device(dev); + return ERR_PTR(retval); +} + +/** + * device_create - creates a device and registers it with sysfs + * @class: pointer to the struct class that this device should be registered to + * @parent: pointer to the parent struct device of this new device, if any + * @devt: the dev_t for the char device to be added + * @drvdata: the data to be added to the device for callbacks + * @fmt: string for the device's name + * + * This function can be used by char device classes. A struct device + * will be created in sysfs, registered to the specified class. + * + * A "dev" file will be created, showing the dev_t for the device, if + * the dev_t is not 0,0. + * If a pointer to a parent struct device is passed in, the newly created + * struct device will be a child of that device in sysfs. + * The pointer to the struct device will be returned from the call. + * Any further sysfs files that might be required can be created using this + * pointer. + * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * + * Note: the struct class passed to this function must have previously + * been created with a call to class_create(). + */ +struct device *device_create(struct class *class, struct device *parent, + dev_t devt, void *drvdata, const char *fmt, ...) +{ + va_list vargs; + struct device *dev; + + va_start(vargs, fmt); + dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL, + fmt, vargs); + va_end(vargs); + return dev; +} +EXPORT_SYMBOL_GPL(device_create); + +/** + * device_create_with_groups - creates a device and registers it with sysfs + * @class: pointer to the struct class that this device should be registered to + * @parent: pointer to the parent struct device of this new device, if any + * @devt: the dev_t for the char device to be added + * @drvdata: the data to be added to the device for callbacks + * @groups: NULL-terminated list of attribute groups to be created + * @fmt: string for the device's name + * + * This function can be used by char device classes. A struct device + * will be created in sysfs, registered to the specified class. + * Additional attributes specified in the groups parameter will also + * be created automatically. + * + * A "dev" file will be created, showing the dev_t for the device, if + * the dev_t is not 0,0. 
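+ *
+ * For illustration only ("foo_class", "foo_major", "foo" and "foo_groups"
+ * are hypothetical; nothing in this file defines them):
+ *
+ *        dev = device_create_with_groups(foo_class, NULL, MKDEV(foo_major, 0),
+ *                                        foo, foo_groups, "foo%d", 0);
+ *        if (IS_ERR(dev))
+ *                return PTR_ERR(dev);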
+ * If a pointer to a parent struct device is passed in, the newly created + * struct device will be a child of that device in sysfs. + * The pointer to the struct device will be returned from the call. + * Any further sysfs files that might be required can be created using this + * pointer. + * + * Returns &struct device pointer on success, or ERR_PTR() on error. + * + * Note: the struct class passed to this function must have previously + * been created with a call to class_create(). + */ +struct device *device_create_with_groups(struct class *class, + struct device *parent, dev_t devt, + void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) +{ + va_list vargs; + struct device *dev; + + va_start(vargs, fmt); + dev = device_create_groups_vargs(class, parent, devt, drvdata, groups, + fmt, vargs); + va_end(vargs); + return dev; +} +EXPORT_SYMBOL_GPL(device_create_with_groups); + +/** + * device_destroy - removes a device that was created with device_create() + * @class: pointer to the struct class that this device was registered with + * @devt: the dev_t of the device that was previously registered + * + * This call unregisters and cleans up a device that was created with a + * call to device_create(). + */ +void device_destroy(struct class *class, dev_t devt) +{ + struct device *dev; + + dev = class_find_device_by_devt(class, devt); + if (dev) { + put_device(dev); + device_unregister(dev); + } +} +EXPORT_SYMBOL_GPL(device_destroy); + +/** + * device_rename - renames a device + * @dev: the pointer to the struct device to be renamed + * @new_name: the new name of the device + * + * It is the responsibility of the caller to provide mutual + * exclusion between two different calls of device_rename + * on the same device to ensure that new_name is valid and + * won't conflict with other devices. + * + * Note: Don't call this function. Currently, the networking layer calls this + * function, but that will change. The following text from Kay Sievers offers + * some insight: + * + * Renaming devices is racy at many levels, symlinks and other stuff are not + * replaced atomically, and you get a "move" uevent, but it's not easy to + * connect the event to the old and new device. Device nodes are not renamed at + * all, there isn't even support for that in the kernel now. + * + * In the meantime, during renaming, your target name might be taken by another + * driver, creating conflicts. Or the old name is taken directly after you + * renamed it -- then you get events for the same DEVPATH, before you even see + * the "move" event. It's just a mess, and nothing new should ever rely on + * kernel device renaming. Besides that, it's not even implemented now for + * other things than (driver-core wise very simple) network devices. + * + * We are currently about to change network renaming in udev to completely + * disallow renaming of devices in the same namespace as the kernel uses, + * because we can't solve the problems properly, that arise with swapping names + * of multiple interfaces without races. Means, renaming of eth[0-9]* will only + * be allowed to some other name than eth[0-9]*, for the aforementioned + * reasons. + * + * Make up a "real" name in the driver before you register anything, or add + * some other attributes for userspace to find the device, or use udev to add + * symlinks -- but never rename kernel devices later, it's a complete mess. We + * don't even want to get into that and try to implement the missing pieces in + * the core. 
We really have other pieces to fix in the driver core mess. :) + */ +int device_rename(struct device *dev, const char *new_name) +{ + struct kobject *kobj = &dev->kobj; + char *old_device_name = NULL; + int error; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + dev_dbg(dev, "renaming to %s\n", new_name); + + old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); + if (!old_device_name) { + error = -ENOMEM; + goto out; + } + + if (dev->class) { + error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj, + kobj, old_device_name, + new_name, kobject_namespace(kobj)); + if (error) + goto out; + } + + error = kobject_rename(kobj, new_name); + if (error) + goto out; + +out: + put_device(dev); + + kfree(old_device_name); + + return error; +} +EXPORT_SYMBOL_GPL(device_rename); + +static int device_move_class_links(struct device *dev, + struct device *old_parent, + struct device *new_parent) +{ + int error = 0; + + if (old_parent) + sysfs_remove_link(&dev->kobj, "device"); + if (new_parent) + error = sysfs_create_link(&dev->kobj, &new_parent->kobj, + "device"); + return error; +} + +/** + * device_move - moves a device to a new parent + * @dev: the pointer to the struct device to be moved + * @new_parent: the new parent of the device (can be NULL) + * @dpm_order: how to reorder the dpm_list + */ +int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order) +{ + int error; + struct device *old_parent; + struct kobject *new_parent_kobj; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + device_pm_lock(); + new_parent = get_device(new_parent); + new_parent_kobj = get_device_parent(dev, new_parent); + if (IS_ERR(new_parent_kobj)) { + error = PTR_ERR(new_parent_kobj); + put_device(new_parent); + goto out; + } + + pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev), + __func__, new_parent ? dev_name(new_parent) : "<NULL>"); + error = kobject_move(&dev->kobj, new_parent_kobj); + if (error) { + cleanup_glue_dir(dev, new_parent_kobj); + put_device(new_parent); + goto out; + } + old_parent = dev->parent; + dev->parent = new_parent; + if (old_parent) + klist_remove(&dev->p->knode_parent); + if (new_parent) { + klist_add_tail(&dev->p->knode_parent, + &new_parent->p->klist_children); + set_dev_node(dev, dev_to_node(new_parent)); + } + + if (dev->class) { + error = device_move_class_links(dev, old_parent, new_parent); + if (error) { + /* We ignore errors on cleanup since we're hosed anyway... 
*/ + device_move_class_links(dev, new_parent, old_parent); + if (!kobject_move(&dev->kobj, &old_parent->kobj)) { + if (new_parent) + klist_remove(&dev->p->knode_parent); + dev->parent = old_parent; + if (old_parent) { + klist_add_tail(&dev->p->knode_parent, + &old_parent->p->klist_children); + set_dev_node(dev, dev_to_node(old_parent)); + } + } + cleanup_glue_dir(dev, new_parent_kobj); + put_device(new_parent); + goto out; + } + } + switch (dpm_order) { + case DPM_ORDER_NONE: + break; + case DPM_ORDER_DEV_AFTER_PARENT: + device_pm_move_after(dev, new_parent); + devices_kset_move_after(dev, new_parent); + break; + case DPM_ORDER_PARENT_BEFORE_DEV: + device_pm_move_before(new_parent, dev); + devices_kset_move_before(new_parent, dev); + break; + case DPM_ORDER_DEV_LAST: + device_pm_move_last(dev); + devices_kset_move_last(dev); + break; + } + + put_device(old_parent); +out: + device_pm_unlock(); + put_device(dev); + return error; +} +EXPORT_SYMBOL_GPL(device_move); + +static int device_attrs_change_owner(struct device *dev, kuid_t kuid, + kgid_t kgid) +{ + struct kobject *kobj = &dev->kobj; + struct class *class = dev->class; + const struct device_type *type = dev->type; + int error; + + if (class) { + /* + * Change the device groups of the device class for @dev to + * @kuid/@kgid. + */ + error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid, + kgid); + if (error) + return error; + } + + if (type) { + /* + * Change the device groups of the device type for @dev to + * @kuid/@kgid. + */ + error = sysfs_groups_change_owner(kobj, type->groups, kuid, + kgid); + if (error) + return error; + } + + /* Change the device groups of @dev to @kuid/@kgid. */ + error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid); + if (error) + return error; + + if (device_supports_offline(dev) && !dev->offline_disabled) { + /* Change online device attributes of @dev to @kuid/@kgid. */ + error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name, + kuid, kgid); + if (error) + return error; + } + + return 0; +} + +/** + * device_change_owner - change the owner of an existing device. + * @dev: device. + * @kuid: new owner's kuid + * @kgid: new owner's kgid + * + * This changes the owner of @dev and its corresponding sysfs entries to + * @kuid/@kgid. This function closely mirrors how @dev was added via driver + * core. + * + * Returns 0 on success or error code on failure. + */ +int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) +{ + int error; + struct kobject *kobj = &dev->kobj; + + dev = get_device(dev); + if (!dev) + return -EINVAL; + + /* + * Change the kobject and the default attributes and groups of the + * ktype associated with it to @kuid/@kgid. + */ + error = sysfs_change_owner(kobj, kuid, kgid); + if (error) + goto out; + + /* + * Change the uevent file for @dev to the new owner. The uevent file + * was created in a separate step when @dev got added and we mirror + * that step here. + */ + error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid, + kgid); + if (error) + goto out; + + /* + * Change the device groups, the device groups associated with the + * device class, and the groups associated with the device type of @dev + * to @kuid/@kgid. 
+ */ + error = device_attrs_change_owner(dev, kuid, kgid); + if (error) + goto out; + + error = dpm_sysfs_change_owner(dev, kuid, kgid); + if (error) + goto out; + +#ifdef CONFIG_BLOCK + if (sysfs_deprecated && dev->class == &block_class) + goto out; +#endif + + /* + * Change the owner of the symlink located in the class directory of + * the device class associated with @dev which points to the actual + * directory entry for @dev to @kuid/@kgid. This ensures that the + * symlink shows the same permissions as its target. + */ + error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj, + dev_name(dev), kuid, kgid); + if (error) + goto out; + +out: + put_device(dev); + return error; +} +EXPORT_SYMBOL_GPL(device_change_owner); + +/** + * device_shutdown - call ->shutdown() on each device to shutdown. + */ +void device_shutdown(void) +{ + struct device *dev, *parent; + + wait_for_device_probe(); + device_block_probing(); + + cpufreq_suspend(); + + spin_lock(&devices_kset->list_lock); + /* + * Walk the devices list backward, shutting down each in turn. + * Beware that device unplug events may also start pulling + * devices offline, even as the system is shutting down. + */ + while (!list_empty(&devices_kset->list)) { + dev = list_entry(devices_kset->list.prev, struct device, + kobj.entry); + + /* + * hold reference count of device's parent to + * prevent it from being freed because parent's + * lock is to be held + */ + parent = get_device(dev->parent); + get_device(dev); + /* + * Make sure the device is off the kset list, in the + * event that dev->*->shutdown() doesn't remove it. + */ + list_del_init(&dev->kobj.entry); + spin_unlock(&devices_kset->list_lock); + + /* hold lock to avoid race with probe/release */ + if (parent) + device_lock(parent); + device_lock(dev); + + /* Don't allow any more runtime suspends */ + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); + + if (dev->class && dev->class->shutdown_pre) { + if (initcall_debug) + dev_info(dev, "shutdown_pre\n"); + dev->class->shutdown_pre(dev); + } + if (dev->bus && dev->bus->shutdown) { + if (initcall_debug) + dev_info(dev, "shutdown\n"); + dev->bus->shutdown(dev); + } else if (dev->driver && dev->driver->shutdown) { + if (initcall_debug) + dev_info(dev, "shutdown\n"); + dev->driver->shutdown(dev); + } + + device_unlock(dev); + if (parent) + device_unlock(parent); + + put_device(dev); + put_device(parent); + + spin_lock(&devices_kset->list_lock); + } + spin_unlock(&devices_kset->list_lock); +} + +/* + * Device logging functions + */ + +#ifdef CONFIG_PRINTK +static void +set_dev_info(const struct device *dev, struct dev_printk_info *dev_info) +{ + const char *subsys; + + memset(dev_info, 0, sizeof(*dev_info)); + + if (dev->class) + subsys = dev->class->name; + else if (dev->bus) + subsys = dev->bus->name; + else + return; + + strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem)); + + /* + * Add device identifier DEVICE=: + * b12:8 block dev_t + * c127:3 char dev_t + * n8 netdev ifindex + * +sound:card0 subsystem:devname + */ + if (MAJOR(dev->devt)) { + char c; + + if (strcmp(subsys, "block") == 0) + c = 'b'; + else + c = 'c'; + + snprintf(dev_info->device, sizeof(dev_info->device), + "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt)); + } else if (strcmp(subsys, "net") == 0) { + struct net_device *net = to_net_dev(dev); + + snprintf(dev_info->device, sizeof(dev_info->device), + "n%u", net->ifindex); + } else { + snprintf(dev_info->device, sizeof(dev_info->device), + "+%s:%s", subsys, dev_name(dev)); + } 
+} + +int dev_vprintk_emit(int level, const struct device *dev, + const char *fmt, va_list args) +{ + struct dev_printk_info dev_info; + + set_dev_info(dev, &dev_info); + + return vprintk_emit(0, level, &dev_info, fmt, args); +} +EXPORT_SYMBOL(dev_vprintk_emit); + +int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...) +{ + va_list args; + int r; + + va_start(args, fmt); + + r = dev_vprintk_emit(level, dev, fmt, args); + + va_end(args); + + return r; +} +EXPORT_SYMBOL(dev_printk_emit); + +static void __dev_printk(const char *level, const struct device *dev, + struct va_format *vaf) +{ + if (dev) + dev_printk_emit(level[1] - '0', dev, "%s %s: %pV", + dev_driver_string(dev), dev_name(dev), vaf); + else + printk("%s(NULL device *): %pV", level, vaf); +} + +void dev_printk(const char *level, const struct device *dev, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + __dev_printk(level, dev, &vaf); + + va_end(args); +} +EXPORT_SYMBOL(dev_printk); + +#define define_dev_printk_level(func, kern_level) \ +void func(const struct device *dev, const char *fmt, ...) \ +{ \ + struct va_format vaf; \ + va_list args; \ + \ + va_start(args, fmt); \ + \ + vaf.fmt = fmt; \ + vaf.va = &args; \ + \ + __dev_printk(kern_level, dev, &vaf); \ + \ + va_end(args); \ +} \ +EXPORT_SYMBOL(func); + +define_dev_printk_level(_dev_emerg, KERN_EMERG); +define_dev_printk_level(_dev_alert, KERN_ALERT); +define_dev_printk_level(_dev_crit, KERN_CRIT); +define_dev_printk_level(_dev_err, KERN_ERR); +define_dev_printk_level(_dev_warn, KERN_WARNING); +define_dev_printk_level(_dev_notice, KERN_NOTICE); +define_dev_printk_level(_dev_info, KERN_INFO); + +#endif + +/** + * dev_err_probe - probe error check and log helper + * @dev: the pointer to the struct device + * @err: error value to test + * @fmt: printf-style format string + * @...: arguments as specified in the format string + * + * This helper implements common pattern present in probe functions for error + * checking: print debug or error message depending if the error value is + * -EPROBE_DEFER and propagate error upwards. + * In case of -EPROBE_DEFER it sets also defer probe reason, which can be + * checked later by reading devices_deferred debugfs attribute. + * It replaces code sequence:: + * + * if (err != -EPROBE_DEFER) + * dev_err(dev, ...); + * else + * dev_dbg(dev, ...); + * return err; + * + * with:: + * + * return dev_err_probe(dev, err, ...); + * + * Returns @err. + * + */ +int dev_err_probe(const struct device *dev, int err, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + if (err != -EPROBE_DEFER) { + dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf); + } else { + device_set_deferred_probe_reason(dev, &vaf); + dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf); + } + + va_end(args); + + return err; +} +EXPORT_SYMBOL_GPL(dev_err_probe); + +static inline bool fwnode_is_primary(struct fwnode_handle *fwnode) +{ + return fwnode && !IS_ERR(fwnode->secondary); +} + +/** + * set_primary_fwnode - Change the primary firmware node of a given device. + * @dev: Device to handle. + * @fwnode: New primary firmware node of the device. + * + * Set the device's firmware node pointer to @fwnode, but if a secondary + * firmware node of the device is present, preserve it. 
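+ *
+ * Illustrative sketch (not from this file; "fwnode" is whatever firmware
+ * description the caller wants to attach):
+ *
+ *        set_primary_fwnode(dev, fwnode);  // attach the firmware description
+ *        ...
+ *        set_primary_fwnode(dev, NULL);    // detach it, keeping any secondary node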
+ */ +void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode) +{ + struct device *parent = dev->parent; + struct fwnode_handle *fn = dev->fwnode; + + if (fwnode) { + if (fwnode_is_primary(fn)) + fn = fn->secondary; + + if (fn) { + WARN_ON(fwnode->secondary); + fwnode->secondary = fn; + } + dev->fwnode = fwnode; + } else { + if (fwnode_is_primary(fn)) { + dev->fwnode = fn->secondary; + if (!(parent && fn == parent->fwnode)) + fn->secondary = NULL; + } else { + dev->fwnode = NULL; + } + } +} +EXPORT_SYMBOL_GPL(set_primary_fwnode); + +/** + * set_secondary_fwnode - Change the secondary firmware node of a given device. + * @dev: Device to handle. + * @fwnode: New secondary firmware node of the device. + * + * If a primary firmware node of the device is present, set its secondary + * pointer to @fwnode. Otherwise, set the device's firmware node pointer to + * @fwnode. + */ +void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode) +{ + if (fwnode) + fwnode->secondary = ERR_PTR(-ENODEV); + + if (fwnode_is_primary(dev->fwnode)) + dev->fwnode->secondary = fwnode; + else + dev->fwnode = fwnode; +} +EXPORT_SYMBOL_GPL(set_secondary_fwnode); + +/** + * device_set_of_node_from_dev - reuse device-tree node of another device + * @dev: device whose device-tree node is being set + * @dev2: device whose device-tree node is being reused + * + * Takes another reference to the new device-tree node after first dropping + * any reference held to the old node. + */ +void device_set_of_node_from_dev(struct device *dev, const struct device *dev2) +{ + of_node_put(dev->of_node); + dev->of_node = of_node_get(dev2->of_node); + dev->of_node_reused = true; +} +EXPORT_SYMBOL_GPL(device_set_of_node_from_dev); + +void device_set_node(struct device *dev, struct fwnode_handle *fwnode) +{ + dev->fwnode = fwnode; + dev->of_node = to_of_node(fwnode); +} +EXPORT_SYMBOL_GPL(device_set_node); + +int device_match_name(struct device *dev, const void *name) +{ + return sysfs_streq(dev_name(dev), name); +} +EXPORT_SYMBOL_GPL(device_match_name); + +int device_match_of_node(struct device *dev, const void *np) +{ + return dev->of_node == np; +} +EXPORT_SYMBOL_GPL(device_match_of_node); + +int device_match_fwnode(struct device *dev, const void *fwnode) +{ + return dev_fwnode(dev) == fwnode; +} +EXPORT_SYMBOL_GPL(device_match_fwnode); + +int device_match_devt(struct device *dev, const void *pdevt) +{ + return dev->devt == *(dev_t *)pdevt; +} +EXPORT_SYMBOL_GPL(device_match_devt); + +int device_match_acpi_dev(struct device *dev, const void *adev) +{ + return ACPI_COMPANION(dev) == adev; +} +EXPORT_SYMBOL(device_match_acpi_dev); + +int device_match_any(struct device *dev, const void *unused) +{ + return 1; +} +EXPORT_SYMBOL_GPL(device_match_any); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c new file mode 100644 index 000000000..2db1e0e8c --- /dev/null +++ b/drivers/base/cpu.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CPU subsystem support + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/cpu.h> +#include <linux/topology.h> +#include <linux/device.h> +#include <linux/node.h> +#include <linux/gfp.h> +#include <linux/slab.h> +#include <linux/percpu.h> +#include <linux/acpi.h> +#include <linux/of.h> +#include <linux/cpufeature.h> +#include <linux/tick.h> +#include <linux/pm_qos.h> +#include <linux/sched/isolation.h> + +#include "base.h" + +static DEFINE_PER_CPU(struct device *, cpu_sys_devices); + 
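The device_match_*() helpers that close out core.c above are meant to be passed as the match callback of the bus, class and driver find routines. A minimal sketch of that pairing, assuming for illustration a platform device described by a device-tree node np (essentially how the *_by_of_node convenience wrappers are built, though the snippet itself is not part of this patch):

        struct device *dev;

        dev = bus_find_device(&platform_bus_type, NULL, np,
                              device_match_of_node);
        if (dev) {
                /* ... use the device ... */
                put_device(dev);        /* bus_find_device() takes a reference */
        }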
+static int cpu_subsys_match(struct device *dev, struct device_driver *drv) +{ + /* ACPI style match is the only one that may succeed. */ + if (acpi_driver_match_device(dev, drv)) + return 1; + + return 0; +} + +#ifdef CONFIG_HOTPLUG_CPU +static void change_cpu_under_node(struct cpu *cpu, + unsigned int from_nid, unsigned int to_nid) +{ + int cpuid = cpu->dev.id; + unregister_cpu_under_node(cpuid, from_nid); + register_cpu_under_node(cpuid, to_nid); + cpu->node_id = to_nid; +} + +static int cpu_subsys_online(struct device *dev) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + int cpuid = dev->id; + int from_nid, to_nid; + int ret; + + from_nid = cpu_to_node(cpuid); + if (from_nid == NUMA_NO_NODE) + return -ENODEV; + + ret = cpu_device_up(dev); + /* + * When hot adding memory to memoryless node and enabling a cpu + * on the node, node number of the cpu may internally change. + */ + to_nid = cpu_to_node(cpuid); + if (from_nid != to_nid) + change_cpu_under_node(cpu, from_nid, to_nid); + + return ret; +} + +static int cpu_subsys_offline(struct device *dev) +{ + return cpu_device_down(dev); +} + +void unregister_cpu(struct cpu *cpu) +{ + int logical_cpu = cpu->dev.id; + + unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu)); + + device_unregister(&cpu->dev); + per_cpu(cpu_sys_devices, logical_cpu) = NULL; + return; +} + +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE +static ssize_t cpu_probe_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + ssize_t cnt; + int ret; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + cnt = arch_cpu_probe(buf, count); + + unlock_device_hotplug(); + return cnt; +} + +static ssize_t cpu_release_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + ssize_t cnt; + int ret; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + cnt = arch_cpu_release(buf, count); + + unlock_device_hotplug(); + return cnt; +} + +static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); +static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); +#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ +#endif /* CONFIG_HOTPLUG_CPU */ + +struct bus_type cpu_subsys = { + .name = "cpu", + .dev_name = "cpu", + .match = cpu_subsys_match, +#ifdef CONFIG_HOTPLUG_CPU + .online = cpu_subsys_online, + .offline = cpu_subsys_offline, +#endif +}; +EXPORT_SYMBOL_GPL(cpu_subsys); + +#ifdef CONFIG_KEXEC +#include <linux/kexec.h> + +static ssize_t crash_notes_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cpu *cpu = container_of(dev, struct cpu, dev); + unsigned long long addr; + int cpunum; + + cpunum = cpu->dev.id; + + /* + * Might be reading other cpu's data based on which cpu read thread + * has been scheduled. But cpu data (memory) is allocated once during + * boot up and this data does not change there after. Hence this + * operation should be safe. No locking required. 
+ */ + addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); + + return sysfs_emit(buf, "%llx\n", addr); +} +static DEVICE_ATTR_ADMIN_RO(crash_notes); + +static ssize_t crash_notes_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t)); +} +static DEVICE_ATTR_ADMIN_RO(crash_notes_size); + +static struct attribute *crash_note_cpu_attrs[] = { + &dev_attr_crash_notes.attr, + &dev_attr_crash_notes_size.attr, + NULL +}; + +static struct attribute_group crash_note_cpu_attr_group = { + .attrs = crash_note_cpu_attrs, +}; +#endif + +static const struct attribute_group *common_cpu_attr_groups[] = { +#ifdef CONFIG_KEXEC + &crash_note_cpu_attr_group, +#endif + NULL +}; + +static const struct attribute_group *hotplugable_cpu_attr_groups[] = { +#ifdef CONFIG_KEXEC + &crash_note_cpu_attr_group, +#endif + NULL +}; + +/* + * Print cpu online, possible, present, and system maps + */ + +struct cpu_attr { + struct device_attribute attr; + const struct cpumask *const map; +}; + +static ssize_t show_cpus_attr(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); + + return cpumap_print_to_pagebuf(true, buf, ca->map); +} + +#define _CPU_ATTR(name, map) \ + { __ATTR(name, 0444, show_cpus_attr, NULL), map } + +/* Keep in sync with cpu_subsys_attrs */ +static struct cpu_attr cpu_attrs[] = { + _CPU_ATTR(online, &__cpu_online_mask), + _CPU_ATTR(possible, &__cpu_possible_mask), + _CPU_ATTR(present, &__cpu_present_mask), +}; + +/* + * Print values for NR_CPUS and offlined cpus + */ +static ssize_t print_cpus_kernel_max(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", NR_CPUS - 1); +} +static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); + +/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ +unsigned int total_cpus; + +static ssize_t print_cpus_offline(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int len = 0; + cpumask_var_t offline; + + /* display offline cpus < nr_cpu_ids */ + if (!alloc_cpumask_var(&offline, GFP_KERNEL)) + return -ENOMEM; + cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask); + len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline)); + free_cpumask_var(offline); + + /* display offline cpus >= nr_cpu_ids */ + if (total_cpus && nr_cpu_ids < total_cpus) { + len += sysfs_emit_at(buf, len, ","); + + if (nr_cpu_ids == total_cpus-1) + len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids); + else + len += sysfs_emit_at(buf, len, "%u-%d", + nr_cpu_ids, total_cpus - 1); + } + + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} +static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); + +static ssize_t print_cpus_isolated(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int len; + cpumask_var_t isolated; + + if (!alloc_cpumask_var(&isolated, GFP_KERNEL)) + return -ENOMEM; + + cpumask_andnot(isolated, cpu_possible_mask, + housekeeping_cpumask(HK_FLAG_DOMAIN)); + len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated)); + + free_cpumask_var(isolated); + + return len; +} +static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL); + +#ifdef CONFIG_NO_HZ_FULL +static ssize_t print_cpus_nohz_full(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask)); +} +static DEVICE_ATTR(nohz_full, 0444, 
print_cpus_nohz_full, NULL); +#endif + +static void cpu_device_release(struct device *dev) +{ + /* + * This is an empty function to prevent the driver core from spitting a + * warning at us. Yes, I know this is directly opposite of what the + * documentation for the driver core and kobjects say, and the author + * of this code has already been publically ridiculed for doing + * something as foolish as this. However, at this point in time, it is + * the only way to handle the issue of statically allocated cpu + * devices. The different architectures will have their cpu device + * code reworked to properly handle this in the near future, so this + * function will then be changed to correctly free up the memory held + * by the cpu device. + * + * Never copy this way of doing things, or you too will be made fun of + * on the linux-kernel list, you have been warned. + */ +} + +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +static ssize_t print_cpu_modalias(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int len = 0; + u32 i; + + len += sysfs_emit_at(buf, len, + "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:", + CPU_FEATURE_TYPEVAL); + + for (i = 0; i < MAX_CPU_FEATURES; i++) + if (cpu_have_feature(i)) { + if (len + sizeof(",XXXX\n") >= PAGE_SIZE) { + WARN(1, "CPU features overflow page\n"); + break; + } + len += sysfs_emit_at(buf, len, ",%04X", i); + } + len += sysfs_emit_at(buf, len, "\n"); + return len; +} + +static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (buf) { + print_cpu_modalias(NULL, NULL, buf); + add_uevent_var(env, "MODALIAS=%s", buf); + kfree(buf); + } + return 0; +} +#endif + +/* + * register_cpu - Setup a sysfs device for a CPU. + * @cpu - cpu->hotpluggable field set to 1 will generate a control file in + * sysfs for this CPU. + * @num - CPU number to use when creating the device. + * + * Initialize and register the CPU device. 
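+ *
+ * Typical use, sketched for illustration (not code from this file; the
+ * "boot_cpus" per-CPU array is an assumed architecture-side definition):
+ *
+ *        int i;
+ *
+ *        for_each_present_cpu(i) {
+ *                struct cpu *c = &per_cpu(boot_cpus, i);
+ *
+ *                c->hotpluggable = 1;
+ *                register_cpu(c, i);
+ *        }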
+ */ +int register_cpu(struct cpu *cpu, int num) +{ + int error; + + cpu->node_id = cpu_to_node(num); + memset(&cpu->dev, 0x00, sizeof(struct device)); + cpu->dev.id = num; + cpu->dev.bus = &cpu_subsys; + cpu->dev.release = cpu_device_release; + cpu->dev.offline_disabled = !cpu->hotpluggable; + cpu->dev.offline = !cpu_online(num); + cpu->dev.of_node = of_get_cpu_node(num, NULL); +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE + cpu->dev.bus->uevent = cpu_uevent; +#endif + cpu->dev.groups = common_cpu_attr_groups; + if (cpu->hotpluggable) + cpu->dev.groups = hotplugable_cpu_attr_groups; + error = device_register(&cpu->dev); + if (error) { + put_device(&cpu->dev); + return error; + } + + per_cpu(cpu_sys_devices, num) = &cpu->dev; + register_cpu_under_node(num, cpu_to_node(num)); + dev_pm_qos_expose_latency_limit(&cpu->dev, + PM_QOS_RESUME_LATENCY_NO_CONSTRAINT); + + return 0; +} + +struct device *get_cpu_device(unsigned cpu) +{ + if (cpu < nr_cpu_ids && cpu_possible(cpu)) + return per_cpu(cpu_sys_devices, cpu); + else + return NULL; +} +EXPORT_SYMBOL_GPL(get_cpu_device); + +static void device_create_release(struct device *dev) +{ + kfree(dev); +} + +__printf(4, 0) +static struct device * +__cpu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, va_list args) +{ + struct device *dev = NULL; + int retval = -ENODEV; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + retval = -ENOMEM; + goto error; + } + + device_initialize(dev); + dev->parent = parent; + dev->groups = groups; + dev->release = device_create_release; + device_set_pm_not_required(dev); + dev_set_drvdata(dev, drvdata); + + retval = kobject_set_name_vargs(&dev->kobj, fmt, args); + if (retval) + goto error; + + retval = device_add(dev); + if (retval) + goto error; + + return dev; + +error: + put_device(dev); + return ERR_PTR(retval); +} + +struct device *cpu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) 
+{ + va_list vargs; + struct device *dev; + + va_start(vargs, fmt); + dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs); + va_end(vargs); + return dev; +} +EXPORT_SYMBOL_GPL(cpu_device_create); + +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE +static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL); +#endif + +static struct attribute *cpu_root_attrs[] = { +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + &dev_attr_probe.attr, + &dev_attr_release.attr, +#endif + &cpu_attrs[0].attr.attr, + &cpu_attrs[1].attr.attr, + &cpu_attrs[2].attr.attr, + &dev_attr_kernel_max.attr, + &dev_attr_offline.attr, + &dev_attr_isolated.attr, +#ifdef CONFIG_NO_HZ_FULL + &dev_attr_nohz_full.attr, +#endif +#ifdef CONFIG_GENERIC_CPU_AUTOPROBE + &dev_attr_modalias.attr, +#endif + NULL +}; + +static struct attribute_group cpu_root_attr_group = { + .attrs = cpu_root_attrs, +}; + +static const struct attribute_group *cpu_root_attr_groups[] = { + &cpu_root_attr_group, + NULL, +}; + +bool cpu_is_hotpluggable(unsigned cpu) +{ + struct device *dev = get_cpu_device(cpu); + return dev && container_of(dev, struct cpu, dev)->hotpluggable + && tick_nohz_cpu_hotpluggable(cpu); +} +EXPORT_SYMBOL_GPL(cpu_is_hotpluggable); + +#ifdef CONFIG_GENERIC_CPU_DEVICES +static DEFINE_PER_CPU(struct cpu, cpu_devices); +#endif + +static void __init cpu_dev_register_generic(void) +{ +#ifdef CONFIG_GENERIC_CPU_DEVICES + int i; + + for_each_possible_cpu(i) { + if (register_cpu(&per_cpu(cpu_devices, i), i)) + panic("Failed to register CPU device"); + } +#endif +} + +#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES + +ssize_t __weak cpu_show_meltdown(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v1(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spectre_v2(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_mds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_tsx_async_abort(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_itlb_multihit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_srbds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_retbleed(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + +ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + 
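The cpu_show_*() handlers above are deliberately __weak: an architecture that knows its actual mitigation state overrides the symbol, and the stronger definition wins at link time. A hedged sketch of such an override, with my_arch_has_kpti() standing in for real architecture-specific logic (it is invented for this illustration):

        ssize_t cpu_show_meltdown(struct device *dev,
                                  struct device_attribute *attr, char *buf)
        {
                /* my_arch_has_kpti() is a placeholder for arch-specific state */
                if (my_arch_has_kpti())
                        return sysfs_emit(buf, "Mitigation: PTI\n");

                return sysfs_emit(buf, "Vulnerable\n");
        }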
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); +static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); +static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); +static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); +static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); +static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); +static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); +static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); +static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); +static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); +static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); + +static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, + &dev_attr_spectre_v1.attr, + &dev_attr_spectre_v2.attr, + &dev_attr_spec_store_bypass.attr, + &dev_attr_l1tf.attr, + &dev_attr_mds.attr, + &dev_attr_tsx_async_abort.attr, + &dev_attr_itlb_multihit.attr, + &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, + &dev_attr_retbleed.attr, + &dev_attr_gather_data_sampling.attr, + &dev_attr_spec_rstack_overflow.attr, + NULL +}; + +static const struct attribute_group cpu_root_vulnerabilities_group = { + .name = "vulnerabilities", + .attrs = cpu_root_vulnerabilities_attrs, +}; + +static void __init cpu_register_vulnerabilities(void) +{ + if (sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpu_root_vulnerabilities_group)) + pr_err("Unable to register CPU vulnerabilities\n"); +} + +#else +static inline void cpu_register_vulnerabilities(void) { } +#endif + +void __init cpu_dev_init(void) +{ + if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) + panic("Failed to register CPU subsystem"); + + cpu_dev_register_generic(); + cpu_register_vulnerabilities(); +} diff --git a/drivers/base/dd.c b/drivers/base/dd.c new file mode 100644 index 000000000..1e8318acf --- /dev/null +++ b/drivers/base/dd.c @@ -0,0 +1,1287 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/dd.c - The core device/driver interactions. + * + * This file contains the (sometimes tricky) code that controls the + * interactions between devices and drivers, which primarily includes + * driver binding and unbinding. + * + * All of this code used to exist in drivers/base/bus.c, but was + * relocated to here in the name of compartmentalization (since it wasn't + * strictly code just for the 'struct bus_type'. + * + * Copyright (c) 2002-5 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2007-2009 Novell Inc. + */ + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/dma-map-ops.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/async.h> +#include <linux/pm_runtime.h> +#include <linux/pinctrl/devinfo.h> +#include <linux/slab.h> + +#include "base.h" +#include "power/power.h" + +/* + * Deferred Probe infrastructure. + * + * Sometimes driver probe order matters, but the kernel doesn't always have + * dependency information which means some drivers will get probed before a + * resource it depends on is available. 
For example, an SDHCI driver may
+ * first need a GPIO line from an i2c GPIO controller before it can be
+ * initialized. If a required resource is not available yet, a driver can
+ * request probing to be deferred by returning -EPROBE_DEFER from its
+ * probe hook.
+ *
+ * Deferred probe maintains two lists of devices, a pending list and an active
+ * list. A driver returning -EPROBE_DEFER causes the device to be added to the
+ * pending list. A successful driver probe will trigger moving all devices
+ * from the pending to the active list so that the workqueue will eventually
+ * retry them.
+ *
+ * The deferred_probe_mutex must be held any time the deferred_probe_*_list
+ * of the (struct device*)->p->deferred_probe pointers are manipulated.
+ */
+static DEFINE_MUTEX(deferred_probe_mutex);
+static LIST_HEAD(deferred_probe_pending_list);
+static LIST_HEAD(deferred_probe_active_list);
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static struct dentry *deferred_devices;
+static bool initcalls_done;
+
+/* Save the async probe drivers' name from kernel cmdline */
+#define ASYNC_DRV_NAMES_MAX_LEN 256
+static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+
+/*
+ * In some cases, like suspend to RAM or hibernation, it might be reasonable
+ * to prohibit probing of devices as it could be unsafe.
+ * Once defer_all_probes is true, all driver probes will be forcibly deferred.
+ */
+static bool defer_all_probes;
+
+/*
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+ */
+static void deferred_probe_work_func(struct work_struct *work)
+{
+        struct device *dev;
+        struct device_private *private;
+        /*
+         * This block processes every device in the deferred 'active' list.
+         * Each device is removed from the active list and passed to
+         * bus_probe_device() to re-attempt the probe. The loop continues
+         * until every device in the active list is removed and retried.
+         *
+         * Note: Once the device is removed from the list and the mutex is
+         * released, it is possible for the device to get freed by another
+         * thread and cause an illegal pointer dereference. This code uses
+         * get/put_device() to ensure the device structure cannot disappear
+         * from under our feet.
+         */
+        mutex_lock(&deferred_probe_mutex);
+        while (!list_empty(&deferred_probe_active_list)) {
+                private = list_first_entry(&deferred_probe_active_list,
+                                        typeof(*dev->p), deferred_probe);
+                dev = private->device;
+                list_del_init(&private->deferred_probe);
+
+                get_device(dev);
+
+                kfree(dev->p->deferred_probe_reason);
+                dev->p->deferred_probe_reason = NULL;
+
+                /*
+                 * Drop the mutex while probing each device; the probe path may
+                 * manipulate the deferred list
+                 */
+                mutex_unlock(&deferred_probe_mutex);
+
+                /*
+                 * Force the device to the end of the dpm_list since
+                 * the PM code assumes that the order we add things to
+                 * the list is a good order for suspend but deferred
+                 * probe makes that very unsafe.
+                 */
+                device_pm_move_to_tail(dev);
+
+                dev_dbg(dev, "Retrying from deferred list\n");
+                bus_probe_device(dev);
+                mutex_lock(&deferred_probe_mutex);
+
+                put_device(dev);
+        }
+        mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+
+void driver_deferred_probe_add(struct device *dev)
+{
+        mutex_lock(&deferred_probe_mutex);
+        if (list_empty(&dev->p->deferred_probe)) {
+                dev_dbg(dev, "Added to deferred list\n");
+                list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
+        }
+        mutex_unlock(&deferred_probe_mutex);
+}
+
+void driver_deferred_probe_del(struct device *dev)
+{
+        mutex_lock(&deferred_probe_mutex);
+        if (!list_empty(&dev->p->deferred_probe)) {
+                dev_dbg(dev, "Removed from deferred list\n");
+                list_del_init(&dev->p->deferred_probe);
+                kfree(dev->p->deferred_probe_reason);
+                dev->p->deferred_probe_reason = NULL;
+        }
+        mutex_unlock(&deferred_probe_mutex);
+}
+
+static bool driver_deferred_probe_enable = false;
+/**
+ * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
+ *
+ * This function moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them. It
+ * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
+ */
+static void driver_deferred_probe_trigger(void)
+{
+        if (!driver_deferred_probe_enable)
+                return;
+
+        /*
+         * A successful probe means that all the devices in the pending list
+         * should be triggered to be reprobed. Move all the deferred devices
+         * into the active list so they can be retried by the workqueue
+         */
+        mutex_lock(&deferred_probe_mutex);
+        atomic_inc(&deferred_trigger_count);
+        list_splice_tail_init(&deferred_probe_pending_list,
+                              &deferred_probe_active_list);
+        mutex_unlock(&deferred_probe_mutex);
+
+        /*
+         * Kick the re-probe thread. It may already be scheduled, but it is
+         * safe to kick it again.
+         */
+        schedule_work(&deferred_probe_work);
+}
+
+/**
+ * device_block_probing() - Block/defer device's probes
+ *
+ * It will disable probing of devices and defer their probes instead.
+ */
+void device_block_probing(void)
+{
+        defer_all_probes = true;
+        /* sync with probes to avoid races. */
+        wait_for_device_probe();
+}
+
+/**
+ * device_unblock_probing() - Unblock/enable device's probes
+ *
+ * It will restore normal behavior and trigger re-probing of deferred
+ * devices.
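+ *
+ * Sketch of the intended pairing (illustration only, not a literal excerpt
+ * from any caller):
+ *
+ *        device_block_probing();           // no new probes from here on
+ *        // ... do the work during which probing would be unsafe ...
+ *        device_unblock_probing();         // re-enable and retry deferred probes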
+ */ +void device_unblock_probing(void) +{ + defer_all_probes = false; + driver_deferred_probe_trigger(); +} + +/** + * device_set_deferred_probe_reason() - Set defer probe reason message for device + * @dev: the pointer to the struct device + * @vaf: the pointer to va_format structure with message + */ +void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf) +{ + const char *drv = dev_driver_string(dev); + + mutex_lock(&deferred_probe_mutex); + + kfree(dev->p->deferred_probe_reason); + dev->p->deferred_probe_reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf); + + mutex_unlock(&deferred_probe_mutex); +} + +/* + * deferred_devs_show() - Show the devices in the deferred probe pending list. + */ +static int deferred_devs_show(struct seq_file *s, void *data) +{ + struct device_private *curr; + + mutex_lock(&deferred_probe_mutex); + + list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe) + seq_printf(s, "%s\t%s", dev_name(curr->device), + curr->device->p->deferred_probe_reason ?: "\n"); + + mutex_unlock(&deferred_probe_mutex); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(deferred_devs); + +int driver_deferred_probe_timeout; +EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout); + +static int __init deferred_probe_timeout_setup(char *str) +{ + int timeout; + + if (!kstrtoint(str, 10, &timeout)) + driver_deferred_probe_timeout = timeout; + return 1; +} +__setup("deferred_probe_timeout=", deferred_probe_timeout_setup); + +/** + * driver_deferred_probe_check_state() - Check deferred probe state + * @dev: device to check + * + * Return: + * -ENODEV if initcalls have completed and modules are disabled. + * -ETIMEDOUT if the deferred probe timeout was set and has expired + * and modules are enabled. + * -EPROBE_DEFER in other cases. + * + * Drivers or subsystems can opt-in to calling this function instead of directly + * returning -EPROBE_DEFER. + */ +int driver_deferred_probe_check_state(struct device *dev) +{ + if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) { + dev_warn(dev, "ignoring dependency for device, assuming no driver\n"); + return -ENODEV; + } + + if (!driver_deferred_probe_timeout && initcalls_done) { + dev_warn(dev, "deferred probe timeout, ignoring dependency\n"); + return -ETIMEDOUT; + } + + return -EPROBE_DEFER; +} + +static void deferred_probe_timeout_work_func(struct work_struct *work) +{ + struct device_private *p; + + driver_deferred_probe_timeout = 0; + driver_deferred_probe_trigger(); + flush_work(&deferred_probe_work); + + mutex_lock(&deferred_probe_mutex); + list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe) + dev_info(p->device, "deferred probe pending\n"); + mutex_unlock(&deferred_probe_mutex); +} +static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func); + +/** + * deferred_probe_initcall() - Enable probing of deferred devices + * + * We don't want to get in the way when the bulk of drivers are getting probed. + * Instead, this initcall makes sure that deferred probing is delayed until + * late_initcall time. 
+ */ +static int deferred_probe_initcall(void) +{ + deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL, + NULL, &deferred_devs_fops); + + driver_deferred_probe_enable = true; + driver_deferred_probe_trigger(); + /* Sort as many dependencies as possible before exiting initcalls */ + flush_work(&deferred_probe_work); + initcalls_done = true; + + /* + * Trigger deferred probe again, this time we won't defer anything + * that is optional + */ + driver_deferred_probe_trigger(); + flush_work(&deferred_probe_work); + + if (driver_deferred_probe_timeout > 0) { + schedule_delayed_work(&deferred_probe_timeout_work, + driver_deferred_probe_timeout * HZ); + } + return 0; +} +late_initcall(deferred_probe_initcall); + +static void __exit deferred_probe_exit(void) +{ + debugfs_remove_recursive(deferred_devices); +} +__exitcall(deferred_probe_exit); + +/** + * device_is_bound() - Check if device is bound to a driver + * @dev: device to check + * + * Returns true if passed device has already finished probing successfully + * against a driver. + * + * This function must be called with the device lock held. + */ +bool device_is_bound(struct device *dev) +{ + return dev->p && klist_node_attached(&dev->p->knode_driver); +} + +static void driver_bound(struct device *dev) +{ + if (device_is_bound(dev)) { + pr_warn("%s: device %s already bound\n", + __func__, kobject_name(&dev->kobj)); + return; + } + + pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name, + __func__, dev_name(dev)); + + klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices); + device_links_driver_bound(dev); + + device_pm_check_callbacks(dev); + + /* + * Make sure the device is no longer in one of the deferred lists and + * kick off retrying all pending devices + */ + driver_deferred_probe_del(dev); + driver_deferred_probe_trigger(); + + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_BOUND_DRIVER, dev); + + kobject_uevent(&dev->kobj, KOBJ_BIND); +} + +static ssize_t coredump_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + device_lock(dev); + dev->driver->coredump(dev); + device_unlock(dev); + + return count; +} +static DEVICE_ATTR_WO(coredump); + +static int driver_sysfs_add(struct device *dev) +{ + int ret; + + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_BIND_DRIVER, dev); + + ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj, + kobject_name(&dev->kobj)); + if (ret) + goto fail; + + ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj, + "driver"); + if (ret) + goto rm_dev; + + if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump || + !device_create_file(dev, &dev_attr_coredump)) + return 0; + + sysfs_remove_link(&dev->kobj, "driver"); + +rm_dev: + sysfs_remove_link(&dev->driver->p->kobj, + kobject_name(&dev->kobj)); + +fail: + return ret; +} + +static void driver_sysfs_remove(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv) { + if (drv->coredump) + device_remove_file(dev, &dev_attr_coredump); + sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj)); + sysfs_remove_link(&dev->kobj, "driver"); + } +} + +/** + * device_bind_driver - bind a driver to one device. + * @dev: device. + * + * Allow manual attachment of a driver to a device. + * Caller must have already set @dev->driver. + * + * Note that this does not modify the bus reference count. + * Please verify that is accounted for before calling this. 
+ * (It is ok to call with no other effort from a driver's probe() method.) + * + * This function must be called with the device lock held. + */ +int device_bind_driver(struct device *dev) +{ + int ret; + + ret = driver_sysfs_add(dev); + if (!ret) + driver_bound(dev); + else if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DRIVER_NOT_BOUND, dev); + return ret; +} +EXPORT_SYMBOL_GPL(device_bind_driver); + +static atomic_t probe_count = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); + +static void driver_deferred_probe_add_trigger(struct device *dev, + int local_trigger_count) +{ + driver_deferred_probe_add(dev); + /* Did a trigger occur while probing? Need to re-trigger if yes */ + if (local_trigger_count != atomic_read(&deferred_trigger_count)) + driver_deferred_probe_trigger(); +} + +static ssize_t state_synced_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + bool val; + + device_lock(dev); + val = dev->state_synced; + device_unlock(dev); + + return sysfs_emit(buf, "%u\n", val); +} +static DEVICE_ATTR_RO(state_synced); + +static int really_probe(struct device *dev, struct device_driver *drv) +{ + int ret = -EPROBE_DEFER; + int local_trigger_count = atomic_read(&deferred_trigger_count); + bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) && + !drv->suppress_bind_attrs; + + if (defer_all_probes) { + /* + * Value of defer_all_probes can be set only by + * device_block_probing() which, in turn, will call + * wait_for_device_probe() right after that to avoid any races. + */ + dev_dbg(dev, "Driver %s force probe deferral\n", drv->name); + driver_deferred_probe_add(dev); + return ret; + } + + ret = device_links_check_suppliers(dev); + if (ret == -EPROBE_DEFER) + driver_deferred_probe_add_trigger(dev, local_trigger_count); + if (ret) + return ret; + + atomic_inc(&probe_count); + pr_debug("bus: '%s': %s: probing driver %s with device %s\n", + drv->bus->name, __func__, drv->name, dev_name(dev)); + if (!list_empty(&dev->devres_head)) { + dev_crit(dev, "Resources present before probing\n"); + ret = -EBUSY; + goto done; + } + +re_probe: + dev->driver = drv; + + /* If using pinctrl, bind pins now before probing */ + ret = pinctrl_bind_pins(dev); + if (ret) + goto pinctrl_bind_failed; + + if (dev->bus->dma_configure) { + ret = dev->bus->dma_configure(dev); + if (ret) + goto probe_failed; + } + + ret = driver_sysfs_add(dev); + if (ret) { + pr_err("%s: driver_sysfs_add(%s) failed\n", + __func__, dev_name(dev)); + goto probe_failed; + } + + if (dev->pm_domain && dev->pm_domain->activate) { + ret = dev->pm_domain->activate(dev); + if (ret) + goto probe_failed; + } + + if (dev->bus->probe) { + ret = dev->bus->probe(dev); + if (ret) + goto probe_failed; + } else if (drv->probe) { + ret = drv->probe(dev); + if (ret) + goto probe_failed; + } + + ret = device_add_groups(dev, drv->dev_groups); + if (ret) { + dev_err(dev, "device_add_groups() failed\n"); + goto dev_groups_failed; + } + + if (dev_has_sync_state(dev)) { + ret = device_create_file(dev, &dev_attr_state_synced); + if (ret) { + dev_err(dev, "state_synced sysfs add failed\n"); + goto dev_sysfs_state_synced_failed; + } + } + + if (test_remove) { + test_remove = false; + + device_remove_file(dev, &dev_attr_state_synced); + device_remove_groups(dev, drv->dev_groups); + + if (dev->bus->remove) + dev->bus->remove(dev); + else if (drv->remove) + drv->remove(dev); + + devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + 
dev->dma_range_map = NULL; + driver_sysfs_remove(dev); + dev->driver = NULL; + dev_set_drvdata(dev, NULL); + if (dev->pm_domain && dev->pm_domain->dismiss) + dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); + + goto re_probe; + } + + pinctrl_init_done(dev); + + if (dev->pm_domain && dev->pm_domain->sync) + dev->pm_domain->sync(dev); + + driver_bound(dev); + ret = 1; + pr_debug("bus: '%s': %s: bound device %s to driver %s\n", + drv->bus->name, __func__, dev_name(dev), drv->name); + goto done; + +dev_sysfs_state_synced_failed: + device_remove_groups(dev, drv->dev_groups); +dev_groups_failed: + if (dev->bus->remove) + dev->bus->remove(dev); + else if (drv->remove) + drv->remove(dev); +probe_failed: + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_DRIVER_NOT_BOUND, dev); +pinctrl_bind_failed: + device_links_no_driver(dev); + devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; + driver_sysfs_remove(dev); + dev->driver = NULL; + dev_set_drvdata(dev, NULL); + if (dev->pm_domain && dev->pm_domain->dismiss) + dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); + dev_pm_set_driver_flags(dev, 0); + + switch (ret) { + case -EPROBE_DEFER: + /* Driver requested deferred probing */ + dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name); + driver_deferred_probe_add_trigger(dev, local_trigger_count); + break; + case -ENODEV: + case -ENXIO: + pr_debug("%s: probe of %s rejects match %d\n", + drv->name, dev_name(dev), ret); + break; + default: + /* driver matched but the probe failed */ + pr_warn("%s: probe of %s failed with error %d\n", + drv->name, dev_name(dev), ret); + } + /* + * Ignore errors returned by ->probe so that the next driver can try + * its luck. + */ + ret = 0; +done: + atomic_dec(&probe_count); + wake_up_all(&probe_waitqueue); + return ret; +} + +/* + * For initcall_debug, show the driver probe time. + */ +static int really_probe_debug(struct device *dev, struct device_driver *drv) +{ + ktime_t calltime, rettime; + int ret; + + calltime = ktime_get(); + ret = really_probe(dev, drv); + rettime = ktime_get(); + /* + * Don't change this to pr_debug() because that requires + * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the + * kernel commandline to print this all the time at the debug level. + */ + printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n", + dev_name(dev), ret, ktime_us_delta(rettime, calltime)); + return ret; +} + +/** + * driver_probe_done + * Determine if the probe sequence is finished or not. + * + * Should somehow figure out how to use a semaphore, not an atomic variable... + */ +int driver_probe_done(void) +{ + int local_probe_count = atomic_read(&probe_count); + + pr_debug("%s: probe_count = %d\n", __func__, local_probe_count); + if (local_probe_count) + return -EBUSY; + return 0; +} + +/** + * wait_for_device_probe + * Wait for device probing to be completed. 
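 *
 * Editor's note (illustrative sketch, not part of this patch): code that
 * must not run until every currently known device has settled can simply
 * call this from a late initcall, e.g.
 *
 *	static int __init my_late_setup(void)	// hypothetical helper
 *	{
 *		wait_for_device_probe();
 *		return 0;	// all outstanding probes have finished
 *	}
 *	late_initcall(my_late_setup);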
+ */ +void wait_for_device_probe(void) +{ + /* wait for the deferred probe workqueue to finish */ + flush_work(&deferred_probe_work); + + /* wait for the known devices to complete their probing */ + wait_event(probe_waitqueue, atomic_read(&probe_count) == 0); + async_synchronize_full(); +} +EXPORT_SYMBOL_GPL(wait_for_device_probe); + +/** + * driver_probe_device - attempt to bind device & driver together + * @drv: driver to bind a device to + * @dev: device to try to bind to the driver + * + * This function returns -ENODEV if the device is not registered, + * 1 if the device is bound successfully and 0 otherwise. + * + * This function must be called with @dev lock held. When called for a + * USB interface, @dev->parent lock must be held as well. + * + * If the device has a parent, runtime-resume the parent before driver probing. + */ +int driver_probe_device(struct device_driver *drv, struct device *dev) +{ + int ret = 0; + + if (!device_is_registered(dev)) + return -ENODEV; + + pr_debug("bus: '%s': %s: matched device %s with driver %s\n", + drv->bus->name, __func__, dev_name(dev), drv->name); + + pm_runtime_get_suppliers(dev); + if (dev->parent) + pm_runtime_get_sync(dev->parent); + + pm_runtime_barrier(dev); + if (initcall_debug) + ret = really_probe_debug(dev, drv); + else + ret = really_probe(dev, drv); + pm_request_idle(dev); + + if (dev->parent) + pm_runtime_put(dev->parent); + + pm_runtime_put_suppliers(dev); + return ret; +} + +static inline bool cmdline_requested_async_probing(const char *drv_name) +{ + return parse_option_str(async_probe_drv_names, drv_name); +} + +/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */ +static int __init save_async_options(char *buf) +{ + if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN) + pr_warn("Too long list of driver names for 'driver_async_probe'!\n"); + + strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); + return 1; +} +__setup("driver_async_probe=", save_async_options); + +bool driver_allows_async_probing(struct device_driver *drv) +{ + switch (drv->probe_type) { + case PROBE_PREFER_ASYNCHRONOUS: + return true; + + case PROBE_FORCE_SYNCHRONOUS: + return false; + + default: + if (cmdline_requested_async_probing(drv->name)) + return true; + + if (module_requested_async_probing(drv->owner)) + return true; + + return false; + } +} + +struct device_attach_data { + struct device *dev; + + /* + * Indicates whether we are are considering asynchronous probing or + * not. Only initial binding after device or driver registration + * (including deferral processing) may be done asynchronously, the + * rest is always synchronous, as we expect it is being done by + * request from userspace. + */ + bool check_async; + + /* + * Indicates if we are binding synchronous or asynchronous drivers. + * When asynchronous probing is enabled we'll execute 2 passes + * over drivers: first pass doing synchronous probing and second + * doing asynchronous probing (if synchronous did not succeed - + * most likely because there was no driver requiring synchronous + * probing - and we found asynchronous driver during first pass). + * The 2 passes are done because we can't shoot asynchronous + * probe for given device and driver from bus_for_each_drv() since + * driver pointer is not guaranteed to stay valid once + * bus_for_each_drv() iterates to the next driver on the bus. + */ + bool want_async; + + /* + * We'll set have_async to 'true' if, while scanning for matching + * driver, we'll encounter one that requests asynchronous probing. 
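 *
 * (Editor's note: a driver opts in to asynchronous probing by setting
 * .probe_type = PROBE_PREFER_ASYNCHRONOUS in its struct device_driver;
 * an administrator can also request it per driver via the
 * driver_async_probe= command-line option handled above.)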
+ */ + bool have_async; +}; + +static int __device_attach_driver(struct device_driver *drv, void *_data) +{ + struct device_attach_data *data = _data; + struct device *dev = data->dev; + bool async_allowed; + int ret; + + ret = driver_match_device(drv, dev); + if (ret == 0) { + /* no match */ + return 0; + } else if (ret == -EPROBE_DEFER) { + dev_dbg(dev, "Device match requests probe deferral\n"); + driver_deferred_probe_add(dev); + /* + * Device can't match with a driver right now, so don't attempt + * to match or bind with other drivers on the bus. + */ + return ret; + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d\n", ret); + return ret; + } /* ret > 0 means positive match */ + + async_allowed = driver_allows_async_probing(drv); + + if (async_allowed) + data->have_async = true; + + if (data->check_async && async_allowed != data->want_async) + return 0; + + return driver_probe_device(drv, dev); +} + +static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) +{ + struct device *dev = _dev; + struct device_attach_data data = { + .dev = dev, + .check_async = true, + .want_async = true, + }; + + device_lock(dev); + + /* + * Check if device has already been removed or claimed. This may + * happen with driver loading, device discovery/registration, + * and deferred probe processing happens all at once with + * multiple threads. + */ + if (dev->p->dead || dev->driver) + goto out_unlock; + + if (dev->parent) + pm_runtime_get_sync(dev->parent); + + bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver); + dev_dbg(dev, "async probe completed\n"); + + pm_request_idle(dev); + + if (dev->parent) + pm_runtime_put(dev->parent); +out_unlock: + device_unlock(dev); + + put_device(dev); +} + +static int __device_attach(struct device *dev, bool allow_async) +{ + int ret = 0; + bool async = false; + + device_lock(dev); + if (dev->p->dead) { + goto out_unlock; + } else if (dev->driver) { + if (device_is_bound(dev)) { + ret = 1; + goto out_unlock; + } + ret = device_bind_driver(dev); + if (ret == 0) + ret = 1; + else { + dev->driver = NULL; + ret = 0; + } + } else { + struct device_attach_data data = { + .dev = dev, + .check_async = allow_async, + .want_async = false, + }; + + if (dev->parent) + pm_runtime_get_sync(dev->parent); + + ret = bus_for_each_drv(dev->bus, NULL, &data, + __device_attach_driver); + if (!ret && allow_async && data.have_async) { + /* + * If we could not find appropriate driver + * synchronously and we are allowed to do + * async probes and there are drivers that + * want to probe asynchronously, we'll + * try them. + */ + dev_dbg(dev, "scheduling asynchronous probe\n"); + get_device(dev); + async = true; + } else { + pm_request_idle(dev); + } + + if (dev->parent) + pm_runtime_put(dev->parent); + } +out_unlock: + device_unlock(dev); + if (async) + async_schedule_dev(__device_attach_async_helper, dev); + return ret; +} + +/** + * device_attach - try to attach device to a driver. + * @dev: device. + * + * Walk the list of drivers that the bus has and call + * driver_probe_device() for each pair. If a compatible + * pair is found, break out and return. + * + * Returns 1 if the device was bound to a driver; + * 0 if no matching driver was found; + * -ENODEV if the device is not registered. + * + * When called for a USB interface, @dev->parent lock must be held. 
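 *
 * Editor's usage sketch (not part of this patch; the surrounding error
 * handling is illustrative only):
 *
 *	ret = device_attach(dev);
 *	if (ret < 0)
 *		dev_err(dev, "device is not registered: %d\n", ret);
 *	else if (ret == 0)
 *		dev_dbg(dev, "no matching driver found\n");
 *	else
 *		dev_dbg(dev, "bound to driver %s\n", dev->driver->name);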
+ */ +int device_attach(struct device *dev) +{ + return __device_attach(dev, false); +} +EXPORT_SYMBOL_GPL(device_attach); + +void device_initial_probe(struct device *dev) +{ + __device_attach(dev, true); +} + +/* + * __device_driver_lock - acquire locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will take the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a USB + * interface, @parent lock will be held as well. + */ +static void __device_driver_lock(struct device *dev, struct device *parent) +{ + if (parent && dev->bus->need_parent_lock) + device_lock(parent); + device_lock(dev); +} + +/* + * __device_driver_unlock - release locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will release the required locks for manipulating dev->drv. + * Normally this will just be the the @dev lock, but when called for a + * USB interface, @parent lock will be released as well. + */ +static void __device_driver_unlock(struct device *dev, struct device *parent) +{ + device_unlock(dev); + if (parent && dev->bus->need_parent_lock) + device_unlock(parent); +} + +/** + * device_driver_attach - attach a specific driver to a specific device + * @drv: Driver to attach + * @dev: Device to attach it to + * + * Manually attach driver to a device. Will acquire both @dev lock and + * @dev->parent lock if needed. + */ +int device_driver_attach(struct device_driver *drv, struct device *dev) +{ + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + return ret; +} + +static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) +{ + struct device *dev = _dev; + struct device_driver *drv; + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + drv = dev->p->async_driver; + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret); + + put_device(dev); +} + +static int __driver_attach(struct device *dev, void *data) +{ + struct device_driver *drv = data; + bool async = false; + int ret; + + /* + * Lock device and try to bind to it. We drop the error + * here and always return 0, because we need to keep trying + * to bind to devices and some drivers will return an error + * simply if it didn't support the device. + * + * driver_probe_device() will spit a warning if there + * is an error. + */ + + ret = driver_match_device(drv, dev); + if (ret == 0) { + /* no match */ + return 0; + } else if (ret == -EPROBE_DEFER) { + dev_dbg(dev, "Device match requests probe deferral\n"); + driver_deferred_probe_add(dev); + /* + * Driver could not match with device, but may match with + * another device on the bus. 
+ */ + return 0; + } else if (ret < 0) { + dev_dbg(dev, "Bus failed to match device: %d\n", ret); + /* + * Driver could not match with device, but may match with + * another device on the bus. + */ + return 0; + } /* ret > 0 means positive match */ + + if (driver_allows_async_probing(drv)) { + /* + * Instead of probing the device synchronously we will + * probe it asynchronously to allow for more parallelism. + * + * We only take the device lock here in order to guarantee + * that the dev->driver and async_driver fields are protected + */ + dev_dbg(dev, "probing driver %s asynchronously\n", drv->name); + device_lock(dev); + if (!dev->driver) { + get_device(dev); + dev->p->async_driver = drv; + async = true; + } + device_unlock(dev); + if (async) + async_schedule_dev(__driver_attach_async_helper, dev); + return 0; + } + + device_driver_attach(drv, dev); + + return 0; +} + +/** + * driver_attach - try to bind driver to devices. + * @drv: driver. + * + * Walk the list of devices that the bus has on it and try to + * match the driver with each one. If driver_probe_device() + * returns 0 and the @dev->driver is set, we've found a + * compatible pair. + */ +int driver_attach(struct device_driver *drv) +{ + return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach); +} +EXPORT_SYMBOL_GPL(driver_attach); + +/* + * __device_release_driver() must be called with @dev lock held. + * When called for a USB interface, @dev->parent lock must be held as well. + */ +static void __device_release_driver(struct device *dev, struct device *parent) +{ + struct device_driver *drv; + + drv = dev->driver; + if (drv) { + pm_runtime_get_sync(dev); + + while (device_links_busy(dev)) { + __device_driver_unlock(dev, parent); + + device_links_unbind_consumers(dev); + + __device_driver_lock(dev, parent); + /* + * A concurrent invocation of the same function might + * have released the driver successfully while this one + * was waiting, so check for that. + */ + if (dev->driver != drv) { + pm_runtime_put(dev); + return; + } + } + + driver_sysfs_remove(dev); + + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_UNBIND_DRIVER, + dev); + + pm_runtime_put_sync(dev); + + device_remove_file(dev, &dev_attr_state_synced); + device_remove_groups(dev, drv->dev_groups); + + if (dev->bus && dev->bus->remove) + dev->bus->remove(dev); + else if (drv->remove) + drv->remove(dev); + + devres_release_all(dev); + arch_teardown_dma_ops(dev); + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; + dev->driver = NULL; + dev_set_drvdata(dev, NULL); + if (dev->pm_domain && dev->pm_domain->dismiss) + dev->pm_domain->dismiss(dev); + pm_runtime_reinit(dev); + dev_pm_set_driver_flags(dev, 0); + + device_links_driver_cleanup(dev); + + klist_remove(&dev->p->knode_driver); + device_pm_check_callbacks(dev); + if (dev->bus) + blocking_notifier_call_chain(&dev->bus->p->bus_notifier, + BUS_NOTIFY_UNBOUND_DRIVER, + dev); + + kobject_uevent(&dev->kobj, KOBJ_UNBIND); + } +} + +void device_release_driver_internal(struct device *dev, + struct device_driver *drv, + struct device *parent) +{ + __device_driver_lock(dev, parent); + + if (!drv || drv == dev->driver) + __device_release_driver(dev, parent); + + __device_driver_unlock(dev, parent); +} + +/** + * device_release_driver - manually detach device from driver. + * @dev: device. + * + * Manually detach device from driver. + * When called for a USB interface, @dev->parent lock must be held. 
+ * + * If this function is to be called with @dev->parent lock held, ensure that + * the device's consumers are unbound in advance or that their locks can be + * acquired under the @dev->parent lock. + */ +void device_release_driver(struct device *dev) +{ + /* + * If anyone calls device_release_driver() recursively from + * within their ->remove callback for the same device, they + * will deadlock right here. + */ + device_release_driver_internal(dev, NULL, NULL); +} +EXPORT_SYMBOL_GPL(device_release_driver); + +/** + * device_driver_detach - detach driver from a specific device + * @dev: device to detach driver from + * + * Detach driver from device. Will acquire both @dev lock and @dev->parent + * lock if needed. + */ +void device_driver_detach(struct device *dev) +{ + device_release_driver_internal(dev, NULL, dev->parent); +} + +/** + * driver_detach - detach driver from all devices it controls. + * @drv: driver. + */ +void driver_detach(struct device_driver *drv) +{ + struct device_private *dev_prv; + struct device *dev; + + if (driver_allows_async_probing(drv)) + async_synchronize_full(); + + for (;;) { + spin_lock(&drv->p->klist_devices.k_lock); + if (list_empty(&drv->p->klist_devices.k_list)) { + spin_unlock(&drv->p->klist_devices.k_lock); + break; + } + dev_prv = list_last_entry(&drv->p->klist_devices.k_list, + struct device_private, + knode_driver.n_node); + dev = dev_prv->device; + get_device(dev); + spin_unlock(&drv->p->klist_devices.k_lock); + device_release_driver_internal(dev, drv, dev->parent); + put_device(dev); + } +} diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c new file mode 100644 index 000000000..4d725d1fd --- /dev/null +++ b/drivers/base/devcoredump.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(c) 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * Author: Johannes Berg <johannes@sipsolutions.net> + */ +#include <linux/module.h> +#include <linux/device.h> +#include <linux/devcoredump.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/workqueue.h> + +static struct class devcd_class; + +/* global disable flag, for security purposes */ +static bool devcd_disabled; + +/* if data isn't read by userspace after 5 minutes then delete it */ +#define DEVCD_TIMEOUT (HZ * 60 * 5) + +struct devcd_entry { + struct device devcd_dev; + void *data; + size_t datalen; + /* + * Here, mutex is required to serialize the calls to del_wk work between + * user/kernel space which happens when devcd is added with device_add() + * and that sends uevent to user space. User space reads the uevents, + * and calls to devcd_data_write() which try to modify the work which is + * not even initialized/queued from devcoredump. + * + * + * + * cpu0(X) cpu1(Y) + * + * dev_coredump() uevent sent to user space + * device_add() ======================> user space process Y reads the + * uevents writes to devcd fd + * which results into writes to + * + * devcd_data_write() + * mod_delayed_work() + * try_to_grab_pending() + * del_timer() + * debug_assert_init() + * INIT_DELAYED_WORK() + * schedule_delayed_work() + * + * + * Also, mutex alone would not be enough to avoid scheduling of + * del_wk work after it get flush from a call to devcd_free() + * mentioned as below. 
+ * + * disabled_store() + * devcd_free() + * mutex_lock() devcd_data_write() + * flush_delayed_work() + * mutex_unlock() + * mutex_lock() + * mod_delayed_work() + * mutex_unlock() + * So, delete_work flag is required. + */ + struct mutex mutex; + bool delete_work; + struct module *owner; + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen); + void (*free)(void *data); + struct delayed_work del_wk; + struct device *failing_dev; +}; + +static struct devcd_entry *dev_to_devcd(struct device *dev) +{ + return container_of(dev, struct devcd_entry, devcd_dev); +} + +static void devcd_dev_release(struct device *dev) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + devcd->free(devcd->data); + module_put(devcd->owner); + + /* + * this seems racy, but I don't see a notifier or such on + * a struct device to know when it goes away? + */ + if (devcd->failing_dev->kobj.sd) + sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj, + "devcoredump"); + + put_device(devcd->failing_dev); + kfree(devcd); +} + +static void devcd_del(struct work_struct *wk) +{ + struct devcd_entry *devcd; + + devcd = container_of(wk, struct devcd_entry, del_wk.work); + + device_del(&devcd->devcd_dev); + put_device(&devcd->devcd_dev); +} + +static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct devcd_entry *devcd = dev_to_devcd(dev); + + return devcd->read(buffer, offset, count, devcd->data, devcd->datalen); +} + +static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct devcd_entry *devcd = dev_to_devcd(dev); + + mutex_lock(&devcd->mutex); + if (!devcd->delete_work) { + devcd->delete_work = true; + mod_delayed_work(system_wq, &devcd->del_wk, 0); + } + mutex_unlock(&devcd->mutex); + + return count; +} + +static struct bin_attribute devcd_attr_data = { + .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, }, + .size = 0, + .read = devcd_data_read, + .write = devcd_data_write, +}; + +static struct bin_attribute *devcd_dev_bin_attrs[] = { + &devcd_attr_data, NULL, +}; + +static const struct attribute_group devcd_dev_group = { + .bin_attrs = devcd_dev_bin_attrs, +}; + +static const struct attribute_group *devcd_dev_groups[] = { + &devcd_dev_group, NULL, +}; + +static int devcd_free(struct device *dev, void *data) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + mutex_lock(&devcd->mutex); + if (!devcd->delete_work) + devcd->delete_work = true; + + flush_delayed_work(&devcd->del_wk); + mutex_unlock(&devcd->mutex); + return 0; +} + +static ssize_t disabled_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", devcd_disabled); +} + +/* + * + * disabled_store() worker() + * class_for_each_device(&devcd_class, + * NULL, NULL, devcd_free) + * ... + * ... 
+ * while ((dev = class_dev_iter_next(&iter)) + * devcd_del() + * device_del() + * put_device() <- last reference + * error = fn(dev, data) devcd_dev_release() + * devcd_free(dev, data) kfree(devcd) + * mutex_lock(&devcd->mutex); + * + * + * In the above diagram, It looks like disabled_store() would be racing with parallely + * running devcd_del() and result in memory abort while acquiring devcd->mutex which + * is called after kfree of devcd memory after dropping its last reference with + * put_device(). However, this will not happens as fn(dev, data) runs + * with its own reference to device via klist_node so it is not its last reference. + * so, above situation would not occur. + */ + +static ssize_t disabled_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + long tmp = simple_strtol(buf, NULL, 10); + + /* + * This essentially makes the attribute write-once, since you can't + * go back to not having it disabled. This is intentional, it serves + * as a system lockdown feature. + */ + if (tmp != 1) + return -EINVAL; + + devcd_disabled = true; + + class_for_each_device(&devcd_class, NULL, NULL, devcd_free); + + return count; +} +static CLASS_ATTR_RW(disabled); + +static struct attribute *devcd_class_attrs[] = { + &class_attr_disabled.attr, + NULL, +}; +ATTRIBUTE_GROUPS(devcd_class); + +static struct class devcd_class = { + .name = "devcoredump", + .owner = THIS_MODULE, + .dev_release = devcd_dev_release, + .dev_groups = devcd_dev_groups, + .class_groups = devcd_class_groups, +}; + +static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen) +{ + return memory_read_from_buffer(buffer, count, &offset, data, datalen); +} + +static void devcd_freev(void *data) +{ + vfree(data); +} + +/** + * dev_coredumpv - create device coredump with vmalloc data + * @dev: the struct device for the crashed device + * @data: vmalloc data containing the device coredump + * @datalen: length of the data + * @gfp: allocation flags + * + * This function takes ownership of the vmalloc'ed data and will free + * it when it is no longer used. See dev_coredumpm() for more information. + */ +void dev_coredumpv(struct device *dev, void *data, size_t datalen, + gfp_t gfp) +{ + dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev); +} +EXPORT_SYMBOL_GPL(dev_coredumpv); + +static int devcd_match_failing(struct device *dev, const void *failing) +{ + struct devcd_entry *devcd = dev_to_devcd(dev); + + return devcd->failing_dev == failing; +} + +/** + * devcd_free_sgtable - free all the memory of the given scatterlist table + * (i.e. 
both pages and scatterlist instances) + * NOTE: if two tables allocated with devcd_alloc_sgtable and then chained + * using the sg_chain function then that function should be called only once + * on the chained table + * @table: pointer to sg_table to free + */ +static void devcd_free_sgtable(void *data) +{ + _devcd_free_sgtable(data); +} + +/** + * devcd_read_from_table - copy data from sg_table to a given buffer + * and return the number of bytes read + * @buffer: the buffer to copy the data to it + * @buf_len: the length of the buffer + * @data: the scatterlist table to copy from + * @offset: start copy from @offset@ bytes from the head of the data + * in the given scatterlist + * @data_len: the length of the data in the sg_table + */ +static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, + size_t buf_len, void *data, + size_t data_len) +{ + struct scatterlist *table = data; + + if (offset > data_len) + return -EINVAL; + + if (offset + buf_len > data_len) + buf_len = data_len - offset; + return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len, + offset); +} + +/** + * dev_coredumpm - create device coredump with read/free methods + * @dev: the struct device for the crashed device + * @owner: the module that contains the read/free functions, use %THIS_MODULE + * @data: data cookie for the @read/@free functions + * @datalen: length of the data + * @gfp: allocation flags + * @read: function to read from the given buffer + * @free: function to free the given buffer + * + * Creates a new device coredump for the given device. If a previous one hasn't + * been read yet, the new coredump is discarded. The data lifetime is determined + * by the device coredump framework and when it is no longer needed the @free + * function will be called to free the data. 
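 *
 * Editor's sketch of a typical caller (not part of this patch; my_read,
 * my_free, my_dump and my_dump_len are hypothetical driver-side names):
 *
 *	static ssize_t my_read(char *buf, loff_t off, size_t count,
 *			       void *data, size_t datalen)
 *	{
 *		return memory_read_from_buffer(buf, count, &off, data, datalen);
 *	}
 *
 *	static void my_free(void *data)
 *	{
 *		kfree(data);
 *	}
 *
 *	dev_coredumpm(dev, THIS_MODULE, my_dump, my_dump_len, GFP_KERNEL,
 *		      my_read, my_free);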
+ */ +void dev_coredumpm(struct device *dev, struct module *owner, + void *data, size_t datalen, gfp_t gfp, + ssize_t (*read)(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen), + void (*free)(void *data)) +{ + static atomic_t devcd_count = ATOMIC_INIT(0); + struct devcd_entry *devcd; + struct device *existing; + + if (devcd_disabled) + goto free; + + existing = class_find_device(&devcd_class, NULL, dev, + devcd_match_failing); + if (existing) { + put_device(existing); + goto free; + } + + if (!try_module_get(owner)) + goto free; + + devcd = kzalloc(sizeof(*devcd), gfp); + if (!devcd) + goto put_module; + + devcd->owner = owner; + devcd->data = data; + devcd->datalen = datalen; + devcd->read = read; + devcd->free = free; + devcd->failing_dev = get_device(dev); + devcd->delete_work = false; + + mutex_init(&devcd->mutex); + device_initialize(&devcd->devcd_dev); + + dev_set_name(&devcd->devcd_dev, "devcd%d", + atomic_inc_return(&devcd_count)); + devcd->devcd_dev.class = &devcd_class; + + mutex_lock(&devcd->mutex); + dev_set_uevent_suppress(&devcd->devcd_dev, true); + if (device_add(&devcd->devcd_dev)) + goto put_device; + + if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj, + "failing_device")) + /* nothing - symlink will be missing */; + + if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj, + "devcoredump")) + /* nothing - symlink will be missing */; + + dev_set_uevent_suppress(&devcd->devcd_dev, false); + kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); + INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); + schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); + mutex_unlock(&devcd->mutex); + return; + put_device: + put_device(&devcd->devcd_dev); + mutex_unlock(&devcd->mutex); + put_module: + module_put(owner); + free: + free(data); +} +EXPORT_SYMBOL_GPL(dev_coredumpm); + +/** + * dev_coredumpsg - create device coredump that uses scatterlist as data + * parameter + * @dev: the struct device for the crashed device + * @table: the dump data + * @datalen: length of the data + * @gfp: allocation flags + * + * Creates a new device coredump for the given device. If a previous one hasn't + * been read yet, the new coredump is discarded. The data lifetime is determined + * by the device coredump framework and when it is no longer needed + * it will free the data. 
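 *
 * Editor's note (illustrative, not part of this patch): ownership of the
 * scatterlist passes to the devcoredump core, which frees it once the dump
 * has been read or has timed out, so a caller only hands it off, e.g.
 *
 *	dev_coredumpsg(dev, sgt->sgl, dump_len, GFP_KERNEL);
 *
 * where sgt and dump_len are hypothetical driver-side variables.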
+ */ +void dev_coredumpsg(struct device *dev, struct scatterlist *table, + size_t datalen, gfp_t gfp) +{ + dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable, + devcd_free_sgtable); +} +EXPORT_SYMBOL_GPL(dev_coredumpsg); + +static int __init devcoredump_init(void) +{ + return class_register(&devcd_class); +} +__initcall(devcoredump_init); + +static void __exit devcoredump_exit(void) +{ + class_for_each_device(&devcd_class, NULL, NULL, devcd_free); + class_unregister(&devcd_class); +} +__exitcall(devcoredump_exit); diff --git a/drivers/base/devres.c b/drivers/base/devres.c new file mode 100644 index 000000000..586e9a75c --- /dev/null +++ b/drivers/base/devres.c @@ -0,0 +1,1233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/devres.c - device resource management + * + * Copyright (c) 2006 SUSE Linux Products GmbH + * Copyright (c) 2006 Tejun Heo <teheo@suse.de> + */ + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/percpu.h> + +#include <asm/sections.h> + +#include "base.h" + +struct devres_node { + struct list_head entry; + dr_release_t release; +#ifdef CONFIG_DEBUG_DEVRES + const char *name; + size_t size; +#endif +}; + +struct devres { + struct devres_node node; + /* + * Some archs want to perform DMA into kmalloc caches + * and need a guaranteed alignment larger than + * the alignment of a 64-bit integer. + * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same + * buffer alignment as if it was allocated by plain kmalloc(). + */ + u8 __aligned(ARCH_KMALLOC_MINALIGN) data[]; +}; + +struct devres_group { + struct devres_node node[2]; + void *id; + int color; + /* -- 8 pointers */ +}; + +#ifdef CONFIG_DEBUG_DEVRES +static int log_devres = 0; +module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR); + +static void set_node_dbginfo(struct devres_node *node, const char *name, + size_t size) +{ + node->name = name; + node->size = size; +} + +static void devres_log(struct device *dev, struct devres_node *node, + const char *op) +{ + if (unlikely(log_devres)) + dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n", + op, node, node->name, (unsigned long)node->size); +} +#else /* CONFIG_DEBUG_DEVRES */ +#define set_node_dbginfo(node, n, s) do {} while (0) +#define devres_log(dev, node, op) do {} while (0) +#endif /* CONFIG_DEBUG_DEVRES */ + +/* + * Release functions for devres group. These callbacks are used only + * for identification. + */ +static void group_open_release(struct device *dev, void *res) +{ + /* noop */ +} + +static void group_close_release(struct device *dev, void *res) +{ + /* noop */ +} + +static struct devres_group * node_to_group(struct devres_node *node) +{ + if (node->release == &group_open_release) + return container_of(node, struct devres_group, node[0]); + if (node->release == &group_close_release) + return container_of(node, struct devres_group, node[1]); + return NULL; +} + +static bool check_dr_size(size_t size, size_t *tot_size) +{ + /* We must catch any near-SIZE_MAX cases that could overflow. 
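	(Editor's note: check_add_overflow() returns true when the addition
	overflows, so a near-SIZE_MAX @size makes this helper report failure
	instead of silently wrapping to a short allocation.)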
*/ + if (unlikely(check_add_overflow(sizeof(struct devres), + size, tot_size))) + return false; + + return true; +} + +static __always_inline struct devres * alloc_dr(dr_release_t release, + size_t size, gfp_t gfp, int nid) +{ + size_t tot_size; + struct devres *dr; + + if (!check_dr_size(size, &tot_size)) + return NULL; + + dr = kmalloc_node_track_caller(tot_size, gfp, nid); + if (unlikely(!dr)) + return NULL; + + memset(dr, 0, offsetof(struct devres, data)); + + INIT_LIST_HEAD(&dr->node.entry); + dr->node.release = release; + return dr; +} + +static void add_dr(struct device *dev, struct devres_node *node) +{ + devres_log(dev, node, "ADD"); + BUG_ON(!list_empty(&node->entry)); + list_add_tail(&node->entry, &dev->devres_head); +} + +static void replace_dr(struct device *dev, + struct devres_node *old, struct devres_node *new) +{ + devres_log(dev, old, "REPLACE"); + BUG_ON(!list_empty(&new->entry)); + list_replace(&old->entry, &new->entry); +} + +#ifdef CONFIG_DEBUG_DEVRES +void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid, + const char *name) +{ + struct devres *dr; + + dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid); + if (unlikely(!dr)) + return NULL; + set_node_dbginfo(&dr->node, name, size); + return dr->data; +} +EXPORT_SYMBOL_GPL(__devres_alloc_node); +#else +/** + * devres_alloc - Allocate device resource data + * @release: Release function devres will be associated with + * @size: Allocation size + * @gfp: Allocation flags + * @nid: NUMA node + * + * Allocate devres of @size bytes. The allocated area is zeroed, then + * associated with @release. The returned pointer can be passed to + * other devres_*() functions. + * + * RETURNS: + * Pointer to allocated devres on success, NULL on failure. + */ +void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid) +{ + struct devres *dr; + + dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid); + if (unlikely(!dr)) + return NULL; + return dr->data; +} +EXPORT_SYMBOL_GPL(devres_alloc_node); +#endif + +/** + * devres_for_each_res - Resource iterator + * @dev: Device to iterate resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * @fn: Function to be called for each matched resource. + * @data: Data for @fn, the 3rd parameter of @fn + * + * Call @fn for each devres of @dev which is associated with @release + * and for which @match returns 1. + * + * RETURNS: + * void + */ +void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data) +{ + struct devres_node *node; + struct devres_node *tmp; + unsigned long flags; + + if (!fn) + return; + + spin_lock_irqsave(&dev->devres_lock, flags); + list_for_each_entry_safe_reverse(node, tmp, + &dev->devres_head, entry) { + struct devres *dr = container_of(node, struct devres, node); + + if (node->release != release) + continue; + if (match && !match(dev, dr->data, match_data)) + continue; + fn(dev, dr->data, data); + } + spin_unlock_irqrestore(&dev->devres_lock, flags); +} +EXPORT_SYMBOL_GPL(devres_for_each_res); + +/** + * devres_free - Free device resource data + * @res: Pointer to devres data to free + * + * Free devres created with devres_alloc(). 
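 *
 * Editor's sketch of the usual pairing (not part of this patch; my_release
 * and the error condition are illustrative): a devres that never reached
 * devres_add() must be freed by hand, e.g.
 *
 *	ptr = devres_alloc(my_release, sizeof(*ptr), GFP_KERNEL);
 *	if (!ptr)
 *		return -ENOMEM;
 *	if (setup_failed) {
 *		devres_free(ptr);	// was never registered
 *		return -EIO;
 *	}
 *	devres_add(dev, ptr);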
+ */ +void devres_free(void *res) +{ + if (res) { + struct devres *dr = container_of(res, struct devres, data); + + BUG_ON(!list_empty(&dr->node.entry)); + kfree(dr); + } +} +EXPORT_SYMBOL_GPL(devres_free); + +/** + * devres_add - Register device resource + * @dev: Device to add resource to + * @res: Resource to register + * + * Register devres @res to @dev. @res should have been allocated + * using devres_alloc(). On driver detach, the associated release + * function will be invoked and devres will be freed automatically. + */ +void devres_add(struct device *dev, void *res) +{ + struct devres *dr = container_of(res, struct devres, data); + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + add_dr(dev, &dr->node); + spin_unlock_irqrestore(&dev->devres_lock, flags); +} +EXPORT_SYMBOL_GPL(devres_add); + +static struct devres *find_dr(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + struct devres_node *node; + + list_for_each_entry_reverse(node, &dev->devres_head, entry) { + struct devres *dr = container_of(node, struct devres, node); + + if (node->release != release) + continue; + if (match && !match(dev, dr->data, match_data)) + continue; + return dr; + } + + return NULL; +} + +/** + * devres_find - Find device resource + * @dev: Device to lookup resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev which is associated with @release + * and for which @match returns 1. If @match is NULL, it's considered + * to match all. + * + * RETURNS: + * Pointer to found devres, NULL if not found. + */ +void * devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + struct devres *dr; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + dr = find_dr(dev, release, match, match_data); + spin_unlock_irqrestore(&dev->devres_lock, flags); + + if (dr) + return dr->data; + return NULL; +} +EXPORT_SYMBOL_GPL(devres_find); + +/** + * devres_get - Find devres, if non-existent, add one atomically + * @dev: Device to lookup or add devres for + * @new_res: Pointer to new initialized devres to add if not found + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev which has the same release function + * as @new_res and for which @match return 1. If found, @new_res is + * freed; otherwise, @new_res is added atomically. + * + * RETURNS: + * Pointer to found or added devres. + */ +void * devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data) +{ + struct devres *new_dr = container_of(new_res, struct devres, data); + struct devres *dr; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + dr = find_dr(dev, new_dr->node.release, match, match_data); + if (!dr) { + add_dr(dev, &new_dr->node); + dr = new_dr; + new_res = NULL; + } + spin_unlock_irqrestore(&dev->devres_lock, flags); + devres_free(new_res); + + return dr->data; +} +EXPORT_SYMBOL_GPL(devres_get); + +/** + * devres_remove - Find a device resource and remove it + * @dev: Device to find resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev associated with @release and for + * which @match returns 1. 
If @match is NULL, it's considered to + * match all. If found, the resource is removed atomically and + * returned. + * + * RETURNS: + * Pointer to removed devres on success, NULL if not found. + */ +void * devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + struct devres *dr; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + dr = find_dr(dev, release, match, match_data); + if (dr) { + list_del_init(&dr->node.entry); + devres_log(dev, &dr->node, "REM"); + } + spin_unlock_irqrestore(&dev->devres_lock, flags); + + if (dr) + return dr->data; + return NULL; +} +EXPORT_SYMBOL_GPL(devres_remove); + +/** + * devres_destroy - Find a device resource and destroy it + * @dev: Device to find resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev associated with @release and for + * which @match returns 1. If @match is NULL, it's considered to + * match all. If found, the resource is removed atomically and freed. + * + * Note that the release function for the resource will not be called, + * only the devres-allocated data will be freed. The caller becomes + * responsible for freeing any other data. + * + * RETURNS: + * 0 if devres is found and freed, -ENOENT if not found. + */ +int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + void *res; + + res = devres_remove(dev, release, match, match_data); + if (unlikely(!res)) + return -ENOENT; + + devres_free(res); + return 0; +} +EXPORT_SYMBOL_GPL(devres_destroy); + + +/** + * devres_release - Find a device resource and destroy it, calling release + * @dev: Device to find resource from + * @release: Look for resources associated with this release function + * @match: Match function (optional) + * @match_data: Data for the match function + * + * Find the latest devres of @dev associated with @release and for + * which @match returns 1. If @match is NULL, it's considered to + * match all. If found, the resource is removed atomically, the + * release function called and the resource freed. + * + * RETURNS: + * 0 if devres is found and freed, -ENOENT if not found. + */ +int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data) +{ + void *res; + + res = devres_remove(dev, release, match, match_data); + if (unlikely(!res)) + return -ENOENT; + + (*release)(dev, res); + devres_free(res); + return 0; +} +EXPORT_SYMBOL_GPL(devres_release); + +static int remove_nodes(struct device *dev, + struct list_head *first, struct list_head *end, + struct list_head *todo) +{ + int cnt = 0, nr_groups = 0; + struct list_head *cur; + + /* First pass - move normal devres entries to @todo and clear + * devres_group colors. + */ + cur = first; + while (cur != end) { + struct devres_node *node; + struct devres_group *grp; + + node = list_entry(cur, struct devres_node, entry); + cur = cur->next; + + grp = node_to_group(node); + if (grp) { + /* clear color of group markers in the first pass */ + grp->color = 0; + nr_groups++; + } else { + /* regular devres entry */ + if (&node->entry == first) + first = first->next; + list_move_tail(&node->entry, todo); + cnt++; + } + } + + if (!nr_groups) + return cnt; + + /* Second pass - Scan groups and color them. A group gets + * color value of two iff the group is wholly contained in + * [cur, end). 
That is, for a closed group, both opening and + * closing markers should be in the range, while just the + * opening marker is enough for an open group. + */ + cur = first; + while (cur != end) { + struct devres_node *node; + struct devres_group *grp; + + node = list_entry(cur, struct devres_node, entry); + cur = cur->next; + + grp = node_to_group(node); + BUG_ON(!grp || list_empty(&grp->node[0].entry)); + + grp->color++; + if (list_empty(&grp->node[1].entry)) + grp->color++; + + BUG_ON(grp->color <= 0 || grp->color > 2); + if (grp->color == 2) { + /* No need to update cur or end. The removed + * nodes are always before both. + */ + list_move_tail(&grp->node[0].entry, todo); + list_del_init(&grp->node[1].entry); + } + } + + return cnt; +} + +static int release_nodes(struct device *dev, struct list_head *first, + struct list_head *end, unsigned long flags) + __releases(&dev->devres_lock) +{ + LIST_HEAD(todo); + int cnt; + struct devres *dr, *tmp; + + cnt = remove_nodes(dev, first, end, &todo); + + spin_unlock_irqrestore(&dev->devres_lock, flags); + + /* Release. Note that both devres and devres_group are + * handled as devres in the following loop. This is safe. + */ + list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) { + devres_log(dev, &dr->node, "REL"); + dr->node.release(dev, dr->data); + kfree(dr); + } + + return cnt; +} + +/** + * devres_release_all - Release all managed resources + * @dev: Device to release resources for + * + * Release all resources associated with @dev. This function is + * called on driver detach. + */ +int devres_release_all(struct device *dev) +{ + unsigned long flags; + + /* Looks like an uninitialized device structure */ + if (WARN_ON(dev->devres_head.next == NULL)) + return -ENODEV; + spin_lock_irqsave(&dev->devres_lock, flags); + return release_nodes(dev, dev->devres_head.next, &dev->devres_head, + flags); +} + +/** + * devres_open_group - Open a new devres group + * @dev: Device to open devres group for + * @id: Separator ID + * @gfp: Allocation flags + * + * Open a new devres group for @dev with @id. For @id, using a + * pointer to an object which won't be used for another group is + * recommended. If @id is NULL, address-wise unique ID is created. + * + * RETURNS: + * ID of the new group, NULL on failure. + */ +void * devres_open_group(struct device *dev, void *id, gfp_t gfp) +{ + struct devres_group *grp; + unsigned long flags; + + grp = kmalloc(sizeof(*grp), gfp); + if (unlikely(!grp)) + return NULL; + + grp->node[0].release = &group_open_release; + grp->node[1].release = &group_close_release; + INIT_LIST_HEAD(&grp->node[0].entry); + INIT_LIST_HEAD(&grp->node[1].entry); + set_node_dbginfo(&grp->node[0], "grp<", 0); + set_node_dbginfo(&grp->node[1], "grp>", 0); + grp->id = grp; + if (id) + grp->id = id; + + spin_lock_irqsave(&dev->devres_lock, flags); + add_dr(dev, &grp->node[0]); + spin_unlock_irqrestore(&dev->devres_lock, flags); + return grp->id; +} +EXPORT_SYMBOL_GPL(devres_open_group); + +/* Find devres group with ID @id. If @id is NULL, look for the latest. 
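 *
 * Editor's usage sketch for the group API built on this helper (not part
 * of this patch; do_optional_setup() is hypothetical):
 *
 *	id = devres_open_group(dev, NULL, GFP_KERNEL);
 *	if (!id)
 *		return -ENOMEM;
 *	err = do_optional_setup(dev);
 *	if (err)
 *		devres_release_group(dev, id);	// undo only this group
 *	else
 *		devres_close_group(dev, id);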
*/ +static struct devres_group * find_group(struct device *dev, void *id) +{ + struct devres_node *node; + + list_for_each_entry_reverse(node, &dev->devres_head, entry) { + struct devres_group *grp; + + if (node->release != &group_open_release) + continue; + + grp = container_of(node, struct devres_group, node[0]); + + if (id) { + if (grp->id == id) + return grp; + } else if (list_empty(&grp->node[1].entry)) + return grp; + } + + return NULL; +} + +/** + * devres_close_group - Close a devres group + * @dev: Device to close devres group for + * @id: ID of target group, can be NULL + * + * Close the group identified by @id. If @id is NULL, the latest open + * group is selected. + */ +void devres_close_group(struct device *dev, void *id) +{ + struct devres_group *grp; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + + grp = find_group(dev, id); + if (grp) + add_dr(dev, &grp->node[1]); + else + WARN_ON(1); + + spin_unlock_irqrestore(&dev->devres_lock, flags); +} +EXPORT_SYMBOL_GPL(devres_close_group); + +/** + * devres_remove_group - Remove a devres group + * @dev: Device to remove group for + * @id: ID of target group, can be NULL + * + * Remove the group identified by @id. If @id is NULL, the latest + * open group is selected. Note that removing a group doesn't affect + * any other resources. + */ +void devres_remove_group(struct device *dev, void *id) +{ + struct devres_group *grp; + unsigned long flags; + + spin_lock_irqsave(&dev->devres_lock, flags); + + grp = find_group(dev, id); + if (grp) { + list_del_init(&grp->node[0].entry); + list_del_init(&grp->node[1].entry); + devres_log(dev, &grp->node[0], "REM"); + } else + WARN_ON(1); + + spin_unlock_irqrestore(&dev->devres_lock, flags); + + kfree(grp); +} +EXPORT_SYMBOL_GPL(devres_remove_group); + +/** + * devres_release_group - Release resources in a devres group + * @dev: Device to release group for + * @id: ID of target group, can be NULL + * + * Release all resources in the group identified by @id. If @id is + * NULL, the latest open group is selected. The selected group and + * groups properly nested inside the selected group are removed. + * + * RETURNS: + * The number of released non-group resources. + */ +int devres_release_group(struct device *dev, void *id) +{ + struct devres_group *grp; + unsigned long flags; + int cnt = 0; + + spin_lock_irqsave(&dev->devres_lock, flags); + + grp = find_group(dev, id); + if (grp) { + struct list_head *first = &grp->node[0].entry; + struct list_head *end = &dev->devres_head; + + if (!list_empty(&grp->node[1].entry)) + end = grp->node[1].entry.next; + + cnt = release_nodes(dev, first, end, flags); + } else { + WARN_ON(1); + spin_unlock_irqrestore(&dev->devres_lock, flags); + } + + return cnt; +} +EXPORT_SYMBOL_GPL(devres_release_group); + +/* + * Custom devres actions allow inserting a simple function call + * into the teadown sequence. 
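 *
 * Editor's sketch (not part of this patch; my_regulator_disable and rdev
 * are hypothetical): a probe routine can register an arbitrary undo step
 * so it runs with the other managed releases on unbind, e.g.
 *
 *	err = devm_add_action(dev, my_regulator_disable, rdev);
 *	if (err) {
 *		my_regulator_disable(rdev);	// run the undo by hand
 *		return err;
 *	}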
+ */ + +struct action_devres { + void *data; + void (*action)(void *); +}; + +static int devm_action_match(struct device *dev, void *res, void *p) +{ + struct action_devres *devres = res; + struct action_devres *target = p; + + return devres->action == target->action && + devres->data == target->data; +} + +static void devm_action_release(struct device *dev, void *res) +{ + struct action_devres *devres = res; + + devres->action(devres->data); +} + +/** + * devm_add_action() - add a custom action to list of managed resources + * @dev: Device that owns the action + * @action: Function that should be called + * @data: Pointer to data passed to @action implementation + * + * This adds a custom action to the list of managed resources so that + * it gets executed as part of standard resource unwinding. + */ +int devm_add_action(struct device *dev, void (*action)(void *), void *data) +{ + struct action_devres *devres; + + devres = devres_alloc(devm_action_release, + sizeof(struct action_devres), GFP_KERNEL); + if (!devres) + return -ENOMEM; + + devres->data = data; + devres->action = action; + + devres_add(dev, devres); + return 0; +} +EXPORT_SYMBOL_GPL(devm_add_action); + +/** + * devm_remove_action() - removes previously added custom action + * @dev: Device that owns the action + * @action: Function implementing the action + * @data: Pointer to data passed to @action implementation + * + * Removes instance of @action previously added by devm_add_action(). + * Both action and data should match one of the existing entries. + */ +void devm_remove_action(struct device *dev, void (*action)(void *), void *data) +{ + struct action_devres devres = { + .data = data, + .action = action, + }; + + WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match, + &devres)); +} +EXPORT_SYMBOL_GPL(devm_remove_action); + +/** + * devm_release_action() - release previously added custom action + * @dev: Device that owns the action + * @action: Function implementing the action + * @data: Pointer to data passed to @action implementation + * + * Releases and removes instance of @action previously added by + * devm_add_action(). Both action and data should match one of the + * existing entries. + */ +void devm_release_action(struct device *dev, void (*action)(void *), void *data) +{ + struct action_devres devres = { + .data = data, + .action = action, + }; + + WARN_ON(devres_release(dev, devm_action_release, devm_action_match, + &devres)); + +} +EXPORT_SYMBOL_GPL(devm_release_action); + +/* + * Managed kmalloc/kfree + */ +static void devm_kmalloc_release(struct device *dev, void *res) +{ + /* noop */ +} + +static int devm_kmalloc_match(struct device *dev, void *res, void *data) +{ + return res == data; +} + +/** + * devm_kmalloc - Resource-managed kmalloc + * @dev: Device to allocate memory for + * @size: Allocation size + * @gfp: Allocation gfp flags + * + * Managed kmalloc. Memory allocated with this function is + * automatically freed on driver detach. Like all other devres + * resources, guaranteed alignment is unsigned long long. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. 
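 *
 * Editor's usage sketch (not part of this patch; struct my_priv is
 * hypothetical): driver state is commonly allocated once in probe and
 * never freed explicitly, e.g.
 *
 *	struct my_priv *priv;
 *
 *	priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
 *	if (!priv)
 *		return -ENOMEM;
 *	dev_set_drvdata(dev, priv);
 *	// released automatically by devres_release_all() on unbind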
+ */ +void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) +{ + struct devres *dr; + + if (unlikely(!size)) + return ZERO_SIZE_PTR; + + /* use raw alloc_dr for kmalloc caller tracing */ + dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev)); + if (unlikely(!dr)) + return NULL; + + /* + * This is named devm_kzalloc_release for historical reasons + * The initial implementation did not support kmalloc, only kzalloc + */ + set_node_dbginfo(&dr->node, "devm_kzalloc_release", size); + devres_add(dev, dr->data); + return dr->data; +} +EXPORT_SYMBOL_GPL(devm_kmalloc); + +/** + * devm_krealloc - Resource-managed krealloc() + * @dev: Device to re-allocate memory for + * @ptr: Pointer to the memory chunk to re-allocate + * @new_size: New allocation size + * @gfp: Allocation gfp flags + * + * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc(). + * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR, + * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the + * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't + * change the order in which the release callback for the re-alloc'ed devres + * will be called (except when falling back to devm_kmalloc() or when freeing + * resources when new_size is zero). The contents of the memory are preserved + * up to the lesser of new and old sizes. + */ +void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp) +{ + size_t total_new_size, total_old_size; + struct devres *old_dr, *new_dr; + unsigned long flags; + + if (unlikely(!new_size)) { + devm_kfree(dev, ptr); + return ZERO_SIZE_PTR; + } + + if (unlikely(ZERO_OR_NULL_PTR(ptr))) + return devm_kmalloc(dev, new_size, gfp); + + if (WARN_ON(is_kernel_rodata((unsigned long)ptr))) + /* + * We cannot reliably realloc a const string returned by + * devm_kstrdup_const(). + */ + return NULL; + + if (!check_dr_size(new_size, &total_new_size)) + return NULL; + + total_old_size = ksize(container_of(ptr, struct devres, data)); + if (total_old_size == 0) { + WARN(1, "Pointer doesn't point to dynamically allocated memory."); + return NULL; + } + + /* + * If new size is smaller or equal to the actual number of bytes + * allocated previously - just return the same pointer. + */ + if (total_new_size <= total_old_size) + return ptr; + + /* + * Otherwise: allocate new, larger chunk. We need to allocate before + * taking the lock as most probably the caller uses GFP_KERNEL. + */ + new_dr = alloc_dr(devm_kmalloc_release, + total_new_size, gfp, dev_to_node(dev)); + if (!new_dr) + return NULL; + + /* + * The spinlock protects the linked list against concurrent + * modifications but not the resource itself. + */ + spin_lock_irqsave(&dev->devres_lock, flags); + + old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr); + if (!old_dr) { + spin_unlock_irqrestore(&dev->devres_lock, flags); + kfree(new_dr); + WARN(1, "Memory chunk not managed or managed by a different device."); + return NULL; + } + + replace_dr(dev, &old_dr->node, &new_dr->node); + + spin_unlock_irqrestore(&dev->devres_lock, flags); + + /* + * We can copy the memory contents after releasing the lock as we're + * no longer modyfing the list links. + */ + memcpy(new_dr->data, old_dr->data, + total_old_size - offsetof(struct devres, data)); + /* + * Same for releasing the old devres - it's now been removed from the + * list. This is also the reason why we must not use devm_kfree() - the + * links are no longer valid. 
+ */ + kfree(old_dr); + + return new_dr->data; +} +EXPORT_SYMBOL_GPL(devm_krealloc); + +/** + * devm_kstrdup - Allocate resource managed space and + * copy an existing string into that. + * @dev: Device to allocate memory for + * @s: the string to duplicate + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) +{ + size_t size; + char *buf; + + if (!s) + return NULL; + + size = strlen(s) + 1; + buf = devm_kmalloc(dev, size, gfp); + if (buf) + memcpy(buf, s, size); + return buf; +} +EXPORT_SYMBOL_GPL(devm_kstrdup); + +/** + * devm_kstrdup_const - resource managed conditional string duplication + * @dev: device for which to duplicate the string + * @s: the string to duplicate + * @gfp: the GFP mask used in the kmalloc() call when allocating memory + * + * Strings allocated by devm_kstrdup_const will be automatically freed when + * the associated device is detached. + * + * RETURNS: + * Source string if it is in .rodata section otherwise it falls back to + * devm_kstrdup. + */ +const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp) +{ + if (is_kernel_rodata((unsigned long)s)) + return s; + + return devm_kstrdup(dev, s, gfp); +} +EXPORT_SYMBOL_GPL(devm_kstrdup_const); + +/** + * devm_kvasprintf - Allocate resource managed space and format a string + * into that. + * @dev: Device to allocate memory for + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * @fmt: The printf()-style format string + * @ap: Arguments for the format string + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap) +{ + unsigned int len; + char *p; + va_list aq; + + va_copy(aq, ap); + len = vsnprintf(NULL, 0, fmt, aq); + va_end(aq); + + p = devm_kmalloc(dev, len+1, gfp); + if (!p) + return NULL; + + vsnprintf(p, len+1, fmt, ap); + + return p; +} +EXPORT_SYMBOL(devm_kvasprintf); + +/** + * devm_kasprintf - Allocate resource managed space and format a string + * into that. + * @dev: Device to allocate memory for + * @gfp: the GFP mask used in the devm_kmalloc() call when + * allocating memory + * @fmt: The printf()-style format string + * @...: Arguments for the format string + * RETURNS: + * Pointer to allocated string on success, NULL on failure. + */ +char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) +{ + va_list ap; + char *p; + + va_start(ap, fmt); + p = devm_kvasprintf(dev, gfp, fmt, ap); + va_end(ap); + + return p; +} +EXPORT_SYMBOL_GPL(devm_kasprintf); + +/** + * devm_kfree - Resource-managed kfree + * @dev: Device this memory belongs to + * @p: Memory to free + * + * Free memory allocated with devm_kmalloc(). + */ +void devm_kfree(struct device *dev, const void *p) +{ + int rc; + + /* + * Special cases: pointer to a string in .rodata returned by + * devm_kstrdup_const() or NULL/ZERO ptr. 
+ */ + if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p))) + return; + + rc = devres_destroy(dev, devm_kmalloc_release, + devm_kmalloc_match, (void *)p); + WARN_ON(rc); +} +EXPORT_SYMBOL_GPL(devm_kfree); + +/** + * devm_kmemdup - Resource-managed kmemdup + * @dev: Device this memory belongs to + * @src: Memory region to duplicate + * @len: Memory region length + * @gfp: GFP mask to use + * + * Duplicate region of a memory using resource managed kmalloc + */ +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) +{ + void *p; + + p = devm_kmalloc(dev, len, gfp); + if (p) + memcpy(p, src, len); + + return p; +} +EXPORT_SYMBOL_GPL(devm_kmemdup); + +struct pages_devres { + unsigned long addr; + unsigned int order; +}; + +static int devm_pages_match(struct device *dev, void *res, void *p) +{ + struct pages_devres *devres = res; + struct pages_devres *target = p; + + return devres->addr == target->addr; +} + +static void devm_pages_release(struct device *dev, void *res) +{ + struct pages_devres *devres = res; + + free_pages(devres->addr, devres->order); +} + +/** + * devm_get_free_pages - Resource-managed __get_free_pages + * @dev: Device to allocate memory for + * @gfp_mask: Allocation gfp flags + * @order: Allocation size is (1 << order) pages + * + * Managed get_free_pages. Memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Address of allocated memory on success, 0 on failure. + */ + +unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order) +{ + struct pages_devres *devres; + unsigned long addr; + + addr = __get_free_pages(gfp_mask, order); + + if (unlikely(!addr)) + return 0; + + devres = devres_alloc(devm_pages_release, + sizeof(struct pages_devres), GFP_KERNEL); + if (unlikely(!devres)) { + free_pages(addr, order); + return 0; + } + + devres->addr = addr; + devres->order = order; + + devres_add(dev, devres); + return addr; +} +EXPORT_SYMBOL_GPL(devm_get_free_pages); + +/** + * devm_free_pages - Resource-managed free_pages + * @dev: Device this memory belongs to + * @addr: Memory to free + * + * Free memory allocated with devm_get_free_pages(). Unlike free_pages, + * there is no need to supply the @order. + */ +void devm_free_pages(struct device *dev, unsigned long addr) +{ + struct pages_devres devres = { .addr = addr }; + + WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, + &devres)); +} +EXPORT_SYMBOL_GPL(devm_free_pages); + +static void devm_percpu_release(struct device *dev, void *pdata) +{ + void __percpu *p; + + p = *(void __percpu **)pdata; + free_percpu(p); +} + +static int devm_percpu_match(struct device *dev, void *data, void *p) +{ + struct devres *devr = container_of(data, struct devres, data); + + return *(void **)devr->data == p; +} + +/** + * __devm_alloc_percpu - Resource-managed alloc_percpu + * @dev: Device to allocate per-cpu memory for + * @size: Size of per-cpu memory to allocate + * @align: Alignment of per-cpu memory to allocate + * + * Managed alloc_percpu. Per-cpu memory allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated memory on success, NULL on failure. 
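+ *
+ * Hypothetical usage sketch (example type and variable names only);
+ * drivers normally go through the devm_alloc_percpu() wrapper macro
+ * rather than calling this helper directly:
+ *
+ *	struct my_stats __percpu *stats;
+ *
+ *	stats = devm_alloc_percpu(dev, struct my_stats);
+ *	if (!stats)
+ *		return -ENOMEM;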
+ */ +void __percpu *__devm_alloc_percpu(struct device *dev, size_t size, + size_t align) +{ + void *p; + void __percpu *pcpu; + + pcpu = __alloc_percpu(size, align); + if (!pcpu) + return NULL; + + p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL); + if (!p) { + free_percpu(pcpu); + return NULL; + } + + *(void __percpu **)p = pcpu; + + devres_add(dev, p); + + return pcpu; +} +EXPORT_SYMBOL_GPL(__devm_alloc_percpu); + +/** + * devm_free_percpu - Resource-managed free_percpu + * @dev: Device this memory belongs to + * @pdata: Per-cpu memory to free + * + * Free memory allocated with devm_alloc_percpu(). + */ +void devm_free_percpu(struct device *dev, void __percpu *pdata) +{ + WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match, + (void *)pdata)); +} +EXPORT_SYMBOL_GPL(devm_free_percpu); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c new file mode 100644 index 000000000..b5cbaa61c --- /dev/null +++ b/drivers/base/devtmpfs.c @@ -0,0 +1,483 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * devtmpfs - kernel-maintained tmpfs-based /dev + * + * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org> + * + * During bootup, before any driver core device is registered, + * devtmpfs, a tmpfs-based filesystem is created. Every driver-core + * device which requests a device node, will add a node in this + * filesystem. + * By default, all devices are named after the name of the device, + * owned by root and have a default mode of 0600. Subsystems can + * overwrite the default setting if needed. + */ + +#include <linux/kernel.h> +#include <linux/syscalls.h> +#include <linux/mount.h> +#include <linux/device.h> +#include <linux/genhd.h> +#include <linux/namei.h> +#include <linux/fs.h> +#include <linux/shmem_fs.h> +#include <linux/ramfs.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/init_syscalls.h> +#include <uapi/linux/mount.h> +#include "base.h" + +static struct task_struct *thread; + +static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT); + +static DEFINE_SPINLOCK(req_lock); + +static struct req { + struct req *next; + struct completion done; + int err; + const char *name; + umode_t mode; /* 0 => delete */ + kuid_t uid; + kgid_t gid; + struct device *dev; +} *requests; + +static int __init mount_param(char *str) +{ + mount_dev = simple_strtoul(str, NULL, 0); + return 1; +} +__setup("devtmpfs.mount=", mount_param); + +static struct vfsmount *mnt; + +static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + struct super_block *s = mnt->mnt_sb; + int err; + + atomic_inc(&s->s_active); + down_write(&s->s_umount); + err = reconfigure_single(s, flags, data); + if (err < 0) { + deactivate_locked_super(s); + return ERR_PTR(err); + } + return dget(s->s_root); +} + +static struct file_system_type internal_fs_type = { + .name = "devtmpfs", +#ifdef CONFIG_TMPFS + .init_fs_context = shmem_init_fs_context, + .parameters = shmem_fs_parameters, +#else + .init_fs_context = ramfs_init_fs_context, + .parameters = ramfs_fs_parameters, +#endif + .kill_sb = kill_litter_super, +}; + +static struct file_system_type dev_fs_type = { + .name = "devtmpfs", + .mount = public_dev_mount, +}; + +#ifdef CONFIG_BLOCK +static inline int is_blockdev(struct device *dev) +{ + return dev->class == &block_class; +} +#else +static inline int is_blockdev(struct device *dev) { return 0; } +#endif + +static int devtmpfs_submit_req(struct req *req, const char *tmp) +{ + 
init_completion(&req->done); + + spin_lock(&req_lock); + req->next = requests; + requests = req; + spin_unlock(&req_lock); + + wake_up_process(thread); + wait_for_completion(&req->done); + + kfree(tmp); + + return req->err; +} + +int devtmpfs_create_node(struct device *dev) +{ + const char *tmp = NULL; + struct req req; + + if (!thread) + return 0; + + req.mode = 0; + req.uid = GLOBAL_ROOT_UID; + req.gid = GLOBAL_ROOT_GID; + req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp); + if (!req.name) + return -ENOMEM; + + if (req.mode == 0) + req.mode = 0600; + if (is_blockdev(dev)) + req.mode |= S_IFBLK; + else + req.mode |= S_IFCHR; + + req.dev = dev; + + return devtmpfs_submit_req(&req, tmp); +} + +int devtmpfs_delete_node(struct device *dev) +{ + const char *tmp = NULL; + struct req req; + + if (!thread) + return 0; + + req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp); + if (!req.name) + return -ENOMEM; + + req.mode = 0; + req.dev = dev; + + return devtmpfs_submit_req(&req, tmp); +} + +static int dev_mkdir(const char *name, umode_t mode) +{ + struct dentry *dentry; + struct path path; + int err; + + dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + err = vfs_mkdir(d_inode(path.dentry), dentry, mode); + if (!err) + /* mark as kernel-created inode */ + d_inode(dentry)->i_private = &thread; + done_path_create(&path, dentry); + return err; +} + +static int create_path(const char *nodepath) +{ + char *path; + char *s; + int err = 0; + + /* parent directories do not exist, create them */ + path = kstrdup(nodepath, GFP_KERNEL); + if (!path) + return -ENOMEM; + + s = path; + for (;;) { + s = strchr(s, '/'); + if (!s) + break; + s[0] = '\0'; + err = dev_mkdir(path, 0755); + if (err && err != -EEXIST) + break; + s[0] = '/'; + s++; + } + kfree(path); + return err; +} + +static int handle_create(const char *nodename, umode_t mode, kuid_t uid, + kgid_t gid, struct device *dev) +{ + struct dentry *dentry; + struct path path; + int err; + + dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); + if (dentry == ERR_PTR(-ENOENT)) { + create_path(nodename); + dentry = kern_path_create(AT_FDCWD, nodename, &path, 0); + } + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt); + if (!err) { + struct iattr newattrs; + + newattrs.ia_mode = mode; + newattrs.ia_uid = uid; + newattrs.ia_gid = gid; + newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID; + inode_lock(d_inode(dentry)); + notify_change(dentry, &newattrs, NULL); + inode_unlock(d_inode(dentry)); + + /* mark as kernel-created inode */ + d_inode(dentry)->i_private = &thread; + } + done_path_create(&path, dentry); + return err; +} + +static int dev_rmdir(const char *name) +{ + struct path parent; + struct dentry *dentry; + int err; + + dentry = kern_path_locked(name, &parent); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + if (d_really_is_positive(dentry)) { + if (d_inode(dentry)->i_private == &thread) + err = vfs_rmdir(d_inode(parent.dentry), dentry); + else + err = -EPERM; + } else { + err = -ENOENT; + } + dput(dentry); + inode_unlock(d_inode(parent.dentry)); + path_put(&parent); + return err; +} + +static int delete_path(const char *nodepath) +{ + char *path; + int err = 0; + + path = kstrdup(nodepath, GFP_KERNEL); + if (!path) + return -ENOMEM; + + for (;;) { + char *base; + + base = strrchr(path, '/'); + if (!base) + break; + base[0] = '\0'; + err = dev_rmdir(path); + if (err) + break; + } + + 
kfree(path); + return err; +} + +static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat) +{ + /* did we create it */ + if (inode->i_private != &thread) + return 0; + + /* does the dev_t match */ + if (is_blockdev(dev)) { + if (!S_ISBLK(stat->mode)) + return 0; + } else { + if (!S_ISCHR(stat->mode)) + return 0; + } + if (stat->rdev != dev->devt) + return 0; + + /* ours */ + return 1; +} + +static int handle_remove(const char *nodename, struct device *dev) +{ + struct path parent; + struct dentry *dentry; + int deleted = 0; + int err; + + dentry = kern_path_locked(nodename, &parent); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (d_really_is_positive(dentry)) { + struct kstat stat; + struct path p = {.mnt = parent.mnt, .dentry = dentry}; + err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE, + AT_STATX_SYNC_AS_STAT); + if (!err && dev_mynode(dev, d_inode(dentry), &stat)) { + struct iattr newattrs; + /* + * before unlinking this node, reset permissions + * of possible references like hardlinks + */ + newattrs.ia_uid = GLOBAL_ROOT_UID; + newattrs.ia_gid = GLOBAL_ROOT_GID; + newattrs.ia_mode = stat.mode & ~0777; + newattrs.ia_valid = + ATTR_UID|ATTR_GID|ATTR_MODE; + inode_lock(d_inode(dentry)); + notify_change(dentry, &newattrs, NULL); + inode_unlock(d_inode(dentry)); + err = vfs_unlink(d_inode(parent.dentry), dentry, NULL); + if (!err || err == -ENOENT) + deleted = 1; + } + } else { + err = -ENOENT; + } + dput(dentry); + inode_unlock(d_inode(parent.dentry)); + + path_put(&parent); + if (deleted && strchr(nodename, '/')) + delete_path(nodename); + return err; +} + +/* + * If configured, or requested by the commandline, devtmpfs will be + * auto-mounted after the kernel mounted the root filesystem. + */ +int __init devtmpfs_mount(void) +{ + int err; + + if (!mount_dev) + return 0; + + if (!thread) + return 0; + + err = init_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL); + if (err) + printk(KERN_INFO "devtmpfs: error mounting %i\n", err); + else + printk(KERN_INFO "devtmpfs: mounted\n"); + return err; +} + +static DECLARE_COMPLETION(setup_done); + +static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid, + struct device *dev) +{ + if (mode) + return handle_create(name, mode, uid, gid, dev); + else + return handle_remove(name, dev); +} + +static void __noreturn devtmpfs_work_loop(void) +{ + while (1) { + spin_lock(&req_lock); + while (requests) { + struct req *req = requests; + requests = NULL; + spin_unlock(&req_lock); + while (req) { + struct req *next = req->next; + req->err = handle(req->name, req->mode, + req->uid, req->gid, req->dev); + complete(&req->done); + req = next; + } + spin_lock(&req_lock); + } + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock(&req_lock); + schedule(); + } +} + +static int __init devtmpfs_setup(void *p) +{ + int err; + + err = ksys_unshare(CLONE_NEWNS); + if (err) + goto out; + err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL); + if (err) + goto out; + init_chdir("/.."); /* will traverse into overmounted root */ + init_chroot("."); +out: + *(int *)p = err; + return err; +} + +/* + * The __ref is because devtmpfs_setup needs to be __init for the routines it + * calls. That call is done while devtmpfs_init, which is marked __init, + * synchronously waits for it to complete. 
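+ *
+ * The resulting flow: devtmpfs_init() starts this kthread,
+ * devtmpfs_setup() gives it a private mount namespace with the devtmpfs
+ * instance as its root, and devtmpfs_work_loop() then services
+ * create/remove requests indefinitely.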
+ */ +static int __ref devtmpfsd(void *p) +{ + int err = devtmpfs_setup(p); + + complete(&setup_done); + if (err) + return err; + devtmpfs_work_loop(); + return 0; +} + +/* + * Create devtmpfs instance, driver-core devices will add their device + * nodes here. + */ +int __init devtmpfs_init(void) +{ + char opts[] = "mode=0755"; + int err; + + mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts); + if (IS_ERR(mnt)) { + printk(KERN_ERR "devtmpfs: unable to create devtmpfs %ld\n", + PTR_ERR(mnt)); + return PTR_ERR(mnt); + } + err = register_filesystem(&dev_fs_type); + if (err) { + printk(KERN_ERR "devtmpfs: unable to register devtmpfs " + "type %i\n", err); + return err; + } + + thread = kthread_run(devtmpfsd, &err, "kdevtmpfs"); + if (!IS_ERR(thread)) { + wait_for_completion(&setup_done); + } else { + err = PTR_ERR(thread); + thread = NULL; + } + + if (err) { + printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); + unregister_filesystem(&dev_fs_type); + return err; + } + + printk(KERN_INFO "devtmpfs: initialized\n"); + return 0; +} diff --git a/drivers/base/driver.c b/drivers/base/driver.c new file mode 100644 index 000000000..1b9d47b10 --- /dev/null +++ b/drivers/base/driver.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * driver.c - centralized device driver management + * + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2007 Novell Inc. + */ + +#include <linux/device/driver.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include "base.h" + +static struct device *next_device(struct klist_iter *i) +{ + struct klist_node *n = klist_next(i); + struct device *dev = NULL; + struct device_private *dev_prv; + + if (n) { + dev_prv = to_device_private_driver(n); + dev = dev_prv->device; + } + return dev; +} + +/** + * driver_set_override() - Helper to set or clear driver override. + * @dev: Device to change + * @override: Address of string to change (e.g. &device->driver_override); + * The contents will be freed and hold newly allocated override. + * @s: NUL-terminated string, new driver name to force a match, pass empty + * string to clear it ("" or "\n", where the latter is only for sysfs + * interface). + * @len: length of @s + * + * Helper to set or clear driver override in a device, intended for the cases + * when the driver_override field is allocated by driver/bus code. + * + * Returns: 0 on success or a negative error code on failure. + */ +int driver_set_override(struct device *dev, const char **override, + const char *s, size_t len) +{ + const char *new, *old; + char *cp; + + if (!override || !s) + return -EINVAL; + + /* + * The stored value will be used in sysfs show callback (sysfs_emit()), + * which has a length limit of PAGE_SIZE and adds a trailing newline. + * Thus we can store one character less to avoid truncation during sysfs + * show. 
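+	 *
+	 * Sketch of a typical caller (pdev stands in for whatever
+	 * bus-specific structure owns the driver_override field, e.g. a
+	 * platform device; illustrative only):
+	 *
+	 *	ret = driver_set_override(dev, &pdev->driver_override,
+	 *				  buf, count);
+	 *	if (ret)
+	 *		return ret;
+	 *	return count;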
+ */ + if (len >= (PAGE_SIZE - 1)) + return -EINVAL; + + if (!len) { + /* Empty string passed - clear override */ + device_lock(dev); + old = *override; + *override = NULL; + device_unlock(dev); + kfree(old); + + return 0; + } + + cp = strnchr(s, len, '\n'); + if (cp) + len = cp - s; + + new = kstrndup(s, len, GFP_KERNEL); + if (!new) + return -ENOMEM; + + device_lock(dev); + old = *override; + if (cp != s) { + *override = new; + } else { + /* "\n" passed - clear override */ + kfree(new); + *override = NULL; + } + device_unlock(dev); + + kfree(old); + + return 0; +} +EXPORT_SYMBOL_GPL(driver_set_override); + +/** + * driver_for_each_device - Iterator for devices bound to a driver. + * @drv: Driver we're iterating. + * @start: Device to begin with + * @data: Data to pass to the callback. + * @fn: Function to call for each device. + * + * Iterate over the @drv's list of devices calling @fn for each one. + */ +int driver_for_each_device(struct device_driver *drv, struct device *start, + void *data, int (*fn)(struct device *, void *)) +{ + struct klist_iter i; + struct device *dev; + int error = 0; + + if (!drv) + return -EINVAL; + + klist_iter_init_node(&drv->p->klist_devices, &i, + start ? &start->p->knode_driver : NULL); + while (!error && (dev = next_device(&i))) + error = fn(dev, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(driver_for_each_device); + +/** + * driver_find_device - device iterator for locating a particular device. + * @drv: The device's driver + * @start: Device to begin with + * @data: Data to pass to match function + * @match: Callback function to check device + * + * This is similar to the driver_for_each_device() function above, but + * it returns a reference to a device that is 'found' for later use, as + * determined by the @match callback. + * + * The callback should return 0 if the device doesn't match and non-zero + * if it does. If the callback returns non-zero, this function will + * return to the caller and not iterate over any more devices. + */ +struct device *driver_find_device(struct device_driver *drv, + struct device *start, const void *data, + int (*match)(struct device *dev, const void *data)) +{ + struct klist_iter i; + struct device *dev; + + if (!drv || !drv->p) + return NULL; + + klist_iter_init_node(&drv->p->klist_devices, &i, + (start ? &start->p->knode_driver : NULL)); + while ((dev = next_device(&i))) + if (match(dev, data) && get_device(dev)) + break; + klist_iter_exit(&i); + return dev; +} +EXPORT_SYMBOL_GPL(driver_find_device); + +/** + * driver_create_file - create sysfs file for driver. + * @drv: driver. + * @attr: driver attribute descriptor. + */ +int driver_create_file(struct device_driver *drv, + const struct driver_attribute *attr) +{ + int error; + + if (drv) + error = sysfs_create_file(&drv->p->kobj, &attr->attr); + else + error = -EINVAL; + return error; +} +EXPORT_SYMBOL_GPL(driver_create_file); + +/** + * driver_remove_file - remove sysfs file for driver. + * @drv: driver. + * @attr: driver attribute descriptor. 
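+ *
+ * Illustrative pairing with driver_create_file(), assuming a
+ * DRIVER_ATTR_RO(version) attribute backed by a version_show()
+ * callback (example names only):
+ *
+ *	driver_create_file(&my_driver, &driver_attr_version);
+ *	...
+ *	driver_remove_file(&my_driver, &driver_attr_version);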
+ */ +void driver_remove_file(struct device_driver *drv, + const struct driver_attribute *attr) +{ + if (drv) + sysfs_remove_file(&drv->p->kobj, &attr->attr); +} +EXPORT_SYMBOL_GPL(driver_remove_file); + +int driver_add_groups(struct device_driver *drv, + const struct attribute_group **groups) +{ + return sysfs_create_groups(&drv->p->kobj, groups); +} + +void driver_remove_groups(struct device_driver *drv, + const struct attribute_group **groups) +{ + sysfs_remove_groups(&drv->p->kobj, groups); +} + +/** + * driver_register - register driver with bus + * @drv: driver to register + * + * We pass off most of the work to the bus_add_driver() call, + * since most of the things we have to do deal with the bus + * structures. + */ +int driver_register(struct device_driver *drv) +{ + int ret; + struct device_driver *other; + + if (!drv->bus->p) { + pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n", + drv->name, drv->bus->name); + return -EINVAL; + } + + if ((drv->bus->probe && drv->probe) || + (drv->bus->remove && drv->remove) || + (drv->bus->shutdown && drv->shutdown)) + pr_warn("Driver '%s' needs updating - please use " + "bus_type methods\n", drv->name); + + other = driver_find(drv->name, drv->bus); + if (other) { + pr_err("Error: Driver '%s' is already registered, " + "aborting...\n", drv->name); + return -EBUSY; + } + + ret = bus_add_driver(drv); + if (ret) + return ret; + ret = driver_add_groups(drv, drv->groups); + if (ret) { + bus_remove_driver(drv); + return ret; + } + kobject_uevent(&drv->p->kobj, KOBJ_ADD); + + return ret; +} +EXPORT_SYMBOL_GPL(driver_register); + +/** + * driver_unregister - remove driver from system. + * @drv: driver. + * + * Again, we pass off most of the work to the bus-level call. + */ +void driver_unregister(struct device_driver *drv) +{ + if (!drv || !drv->p) { + WARN(1, "Unexpected driver unregister!\n"); + return; + } + driver_remove_groups(drv, drv->groups); + bus_remove_driver(drv); +} +EXPORT_SYMBOL_GPL(driver_unregister); + +/** + * driver_find - locate driver on a bus by its name. + * @name: name of the driver. + * @bus: bus to scan for the driver. + * + * Call kset_find_obj() to iterate over list of drivers on + * a bus to find driver by name. Return driver if found. + * + * This routine provides no locking to prevent the driver it returns + * from being unregistered or unloaded while the caller is using it. + * The caller is responsible for preventing this. + */ +struct device_driver *driver_find(const char *name, struct bus_type *bus) +{ + struct kobject *k = kset_find_obj(bus->p->drivers_kset, name); + struct driver_private *priv; + + if (k) { + /* Drop reference added by kset_find_obj() */ + kobject_put(k); + priv = to_driver(k); + return priv->driver; + } + return NULL; +} +EXPORT_SYMBOL_GPL(driver_find); diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c new file mode 100644 index 000000000..8dff940e0 --- /dev/null +++ b/drivers/base/firmware.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * firmware.c - firmware subsystem hoohaw. + * + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (c) 2007 Novell Inc. 
+ */ +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> + +#include "base.h" + +struct kobject *firmware_kobj; +EXPORT_SYMBOL_GPL(firmware_kobj); + +int __init firmware_init(void) +{ + firmware_kobj = kobject_create_and_add("firmware", NULL); + if (!firmware_kobj) + return -ENOMEM; + return 0; +} diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig new file mode 100644 index 000000000..5b24f3959 --- /dev/null +++ b/drivers/base/firmware_loader/Kconfig @@ -0,0 +1,185 @@ +# SPDX-License-Identifier: GPL-2.0 +menu "Firmware loader" + +config FW_LOADER + tristate "Firmware loading facility" if EXPERT + default y + help + This enables the firmware loading facility in the kernel. The kernel + will first look for built-in firmware, if it has any. Next, it will + look for the requested firmware in a series of filesystem paths: + + o firmware_class path module parameter or kernel boot param + o /lib/firmware/updates/UTS_RELEASE + o /lib/firmware/updates + o /lib/firmware/UTS_RELEASE + o /lib/firmware + + Enabling this feature only increases your kernel image by about + 828 bytes, enable this option unless you are certain you don't + need firmware. + + You typically want this built-in (=y) but you can also enable this + as a module, in which case the firmware_class module will be built. + You also want to be sure to enable this built-in if you are going to + enable built-in firmware (CONFIG_EXTRA_FIRMWARE). + +if FW_LOADER + +config FW_LOADER_PAGED_BUF + bool + +config EXTRA_FIRMWARE + string "Build named firmware blobs into the kernel binary" + help + Device drivers which require firmware can typically deal with + having the kernel load firmware from the various supported + /lib/firmware/ paths. This option enables you to build into the + kernel firmware files. Built-in firmware searches are preceded + over firmware lookups using your filesystem over the supported + /lib/firmware paths documented on CONFIG_FW_LOADER. + + This may be useful for testing or if the firmware is required early on + in boot and cannot rely on the firmware being placed in an initrd or + initramfs. + + This option is a string and takes the (space-separated) names of the + firmware files -- the same names that appear in MODULE_FIRMWARE() + and request_firmware() in the source. These files should exist under + the directory specified by the EXTRA_FIRMWARE_DIR option, which is + /lib/firmware by default. + + For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy + the usb8388.bin file into /lib/firmware, and build the kernel. Then + any request_firmware("usb8388.bin") will be satisfied internally + inside the kernel without ever looking at your filesystem at runtime. + + WARNING: If you include additional firmware files into your binary + kernel image that are not available under the terms of the GPL, + then it may be a violation of the GPL to distribute the resulting + image since it combines both GPL and non-GPL work. You should + consult a lawyer of your own before distributing such an image. + +config EXTRA_FIRMWARE_DIR + string "Firmware blobs root directory" + depends on EXTRA_FIRMWARE != "" + default "/lib/firmware" + help + This option controls the directory in which the kernel build system + looks for the firmware files listed in the EXTRA_FIRMWARE option. 
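+
+# Example .config fragment (illustrative; the second blob name is made up):
+#
+#   CONFIG_EXTRA_FIRMWARE="usb8388.bin some-vendor/wlan.bin"
+#   CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"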
+ +config FW_LOADER_USER_HELPER + bool "Enable the firmware sysfs fallback mechanism" + select FW_LOADER_PAGED_BUF + help + This option enables a sysfs loading facility to enable firmware + loading to the kernel through userspace as a fallback mechanism + if and only if the kernel's direct filesystem lookup for the + firmware failed using the different /lib/firmware/ paths, or the + path specified in the firmware_class path module parameter, or the + firmware_class path kernel boot parameter if the firmware_class is + built-in. For details on how to work with the sysfs fallback mechanism + refer to Documentation/driver-api/firmware/fallback-mechanisms.rst. + + The direct filesystem lookup for firmware is always used first now. + + If the kernel's direct filesystem lookup for firmware fails to find + the requested firmware a sysfs fallback loading facility is made + available and userspace is informed about this through uevents. + The uevent can be suppressed if the driver explicitly requested it, + this is known as the driver using the custom fallback mechanism. + If the custom fallback mechanism is used userspace must always + acknowledge failure to find firmware as the timeout for the fallback + mechanism is disabled, and failed requests will linger forever. + + This used to be the default firmware loading facility, and udev used + to listen for uvents to load firmware for the kernel. The firmware + loading facility functionality in udev has been removed, as such it + can no longer be relied upon as a fallback mechanism. Linux no longer + relies on or uses a fallback mechanism in userspace. If you need to + rely on one refer to the permissively licensed firmwared: + + https://github.com/teg/firmwared + + Since this was the default firmware loading facility at one point, + old userspace may exist which relies upon it, and as such this + mechanism can never be removed from the kernel. + + You should only enable this functionality if you are certain you + require a fallback mechanism and have a userspace mechanism ready to + load firmware in case it is not found. One main reason for this may + be if you have drivers which require firmware built-in and for + whatever reason cannot place the required firmware in initramfs. + Another reason kernels may have this feature enabled is to support a + driver which explicitly relies on this fallback mechanism. Only two + drivers need this today: + + o CONFIG_LEDS_LP55XX_COMMON + o CONFIG_DELL_RBU + + Outside of supporting the above drivers, another reason for needing + this may be that your firmware resides outside of the paths the kernel + looks for and cannot possibly be specified using the firmware_class + path module parameter or kernel firmware_class path boot parameter + if firmware_class is built-in. + + A modern use case may be to temporarily mount a custom partition + during provisioning which is only accessible to userspace, and then + to use it to look for and fetch the required firmware. Such type of + driver functionality may not even ever be desirable upstream by + vendors, and as such is only required to be supported as an interface + for provisioning. Since udev's firmware loading facility has been + removed you can use firmwared or a fork of it to customize how you + want to load firmware based on uevents issued. + + Enabling this option will increase your kernel image size by about + 13436 bytes. 
+ + If you are unsure about this, say N here, unless you are Linux + distribution and need to support the above two drivers, or you are + certain you need to support some really custom firmware loading + facility in userspace. + +config FW_LOADER_USER_HELPER_FALLBACK + bool "Force the firmware sysfs fallback mechanism when possible" + depends on FW_LOADER_USER_HELPER + help + Enabling this option forces a sysfs userspace fallback mechanism + to be used for all firmware requests which explicitly do not disable a + a fallback mechanism. Firmware calls which do prohibit a fallback + mechanism is request_firmware_direct(). This option is kept for + backward compatibility purposes given this precise mechanism can also + be enabled by setting the proc sysctl value to true: + + /proc/sys/kernel/firmware_config/force_sysfs_fallback + + If you are unsure about this, say N here. + +config FW_LOADER_COMPRESS + bool "Enable compressed firmware support" + select FW_LOADER_PAGED_BUF + select XZ_DEC + help + This option enables the support for loading compressed firmware + files. The caller of firmware API receives the decompressed file + content. The compressed file is loaded as a fallback, only after + loading the raw file failed at first. + + Currently only XZ-compressed files are supported, and they have to + be compressed with either none or crc32 integrity check type (pass + "-C crc32" option to xz command). + +config FW_CACHE + bool "Enable firmware caching during suspend" + depends on PM_SLEEP + default y if PM_SLEEP + help + Because firmware caching generates uevent messages that are sent + over a netlink socket, it can prevent suspend on many platforms. + It is also not always useful, so on such platforms we have the + option. + + If unsure, say Y. + +endif # FW_LOADER +endmenu diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile new file mode 100644 index 000000000..e87843408 --- /dev/null +++ b/drivers/base/firmware_loader/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for the Linux firmware loader + +obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o +obj-$(CONFIG_FW_LOADER) += firmware_class.o +firmware_class-objs := main.o +firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o +firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o + +obj-y += builtin/ diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore new file mode 100644 index 000000000..166f76b43 --- /dev/null +++ b/drivers/base/firmware_loader/builtin/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +*.gen.S diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile new file mode 100644 index 000000000..5fa7ce374 --- /dev/null +++ b/drivers/base/firmware_loader/builtin/Makefile @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: GPL-2.0 + +# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a +# leading /, it's relative to $(srctree). 
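+# For example (illustrative values): CONFIG_EXTRA_FIRMWARE_DIR="firmware"
+# becomes $(srctree)/firmware, while an absolute "/lib/firmware" is used
+# unchanged.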
+fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR)) +fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir)) + +obj-y := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE))) + +FWNAME = $(patsubst $(obj)/%.gen.S,%,$@) +comma := , +FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME))))) +ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long) +ASM_ALIGN = $(if $(CONFIG_64BIT),3,2) +PROGBITS = $(if $(CONFIG_ARM),%,@)progbits + +filechk_fwbin = \ + echo "/* Generated by $(src)/Makefile */" ;\ + echo " .section .rodata" ;\ + echo " .p2align 4" ;\ + echo "_fw_$(FWSTR)_bin:" ;\ + echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\ + echo "_fw_end:" ;\ + echo " .section .rodata.str,\"aMS\",$(PROGBITS),1" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo "_fw_$(FWSTR)_name:" ;\ + echo " .string \"$(FWNAME)\"" ;\ + echo " .section .builtin_fw,\"a\",$(PROGBITS)" ;\ + echo " .p2align $(ASM_ALIGN)" ;\ + echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\ + echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\ + echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin" + +$(obj)/%.gen.S: FORCE + $(call filechk,fwbin) + +# The .o files depend on the binaries directly; the .S files don't. +$(addprefix $(obj)/, $(obj-y)): $(obj)/%.gen.o: $(fwdir)/% + +targets := $(patsubst $(obj)/%,%, \ + $(shell find $(obj) -name \*.gen.S 2>/dev/null)) diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c new file mode 100644 index 000000000..0fdd18ce2 --- /dev/null +++ b/drivers/base/firmware_loader/fallback.c @@ -0,0 +1,660 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/types.h> +#include <linux/kconfig.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/security.h> +#include <linux/highmem.h> +#include <linux/umh.h> +#include <linux/sysctl.h> +#include <linux/vmalloc.h> +#include <linux/module.h> + +#include "fallback.h" +#include "firmware.h" + +/* + * firmware fallback mechanism + */ + +MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE); + +extern struct firmware_fallback_config fw_fallback_config; + +/* These getters are vetted to use int properly */ +static inline int __firmware_loading_timeout(void) +{ + return fw_fallback_config.loading_timeout; +} + +/* These setters are vetted to use int properly */ +static void __fw_fallback_set_timeout(int timeout) +{ + fw_fallback_config.loading_timeout = timeout; +} + +/* + * use small loading timeout for caching devices' firmware because all these + * firmware images have been loaded successfully at lease once, also system is + * ready for completing firmware loading now. The maximum size of firmware in + * current distributions is about 2M bytes, so 10 secs should be enough. + */ +void fw_fallback_set_cache_timeout(void) +{ + fw_fallback_config.old_timeout = __firmware_loading_timeout(); + __fw_fallback_set_timeout(10); +} + +/* Restores the timeout to the value last configured during normal operation */ +void fw_fallback_set_default_timeout(void) +{ + __fw_fallback_set_timeout(fw_fallback_config.old_timeout); +} + +static long firmware_loading_timeout(void) +{ + return __firmware_loading_timeout() > 0 ? 
+ __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET; +} + +static inline bool fw_sysfs_done(struct fw_priv *fw_priv) +{ + return __fw_state_check(fw_priv, FW_STATUS_DONE); +} + +static inline bool fw_sysfs_loading(struct fw_priv *fw_priv) +{ + return __fw_state_check(fw_priv, FW_STATUS_LOADING); +} + +static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout) +{ + return __fw_state_wait_common(fw_priv, timeout); +} + +struct fw_sysfs { + bool nowait; + struct device dev; + struct fw_priv *fw_priv; + struct firmware *fw; +}; + +static struct fw_sysfs *to_fw_sysfs(struct device *dev) +{ + return container_of(dev, struct fw_sysfs, dev); +} + +static void __fw_load_abort(struct fw_priv *fw_priv) +{ + /* + * There is a small window in which user can write to 'loading' + * between loading done/aborted and disappearance of 'loading' + */ + if (fw_state_is_aborted(fw_priv) || fw_sysfs_done(fw_priv)) + return; + + fw_state_aborted(fw_priv); +} + +static void fw_load_abort(struct fw_sysfs *fw_sysfs) +{ + struct fw_priv *fw_priv = fw_sysfs->fw_priv; + + __fw_load_abort(fw_priv); +} + +static LIST_HEAD(pending_fw_head); + +void kill_pending_fw_fallback_reqs(bool only_kill_custom) +{ + struct fw_priv *fw_priv; + struct fw_priv *next; + + mutex_lock(&fw_lock); + list_for_each_entry_safe(fw_priv, next, &pending_fw_head, + pending_list) { + if (!fw_priv->need_uevent || !only_kill_custom) + __fw_load_abort(fw_priv); + } + mutex_unlock(&fw_lock); +} + +static ssize_t timeout_show(struct class *class, struct class_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", __firmware_loading_timeout()); +} + +/** + * firmware_timeout_store() - set number of seconds to wait for firmware + * @class: device class pointer + * @attr: device attribute pointer + * @buf: buffer to scan for timeout value + * @count: number of bytes in @buf + * + * Sets the number of seconds to wait for the firmware. Once + * this expires an error will be returned to the driver and no + * firmware will be provided. + * + * Note: zero means 'wait forever'. 
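+ *
+ * For example, to give userspace two minutes per request (sketch of the
+ * corresponding sysfs write):
+ *
+ *	echo 120 > /sys/class/firmware/timeout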
+ **/ +static ssize_t timeout_store(struct class *class, struct class_attribute *attr, + const char *buf, size_t count) +{ + int tmp_loading_timeout = simple_strtol(buf, NULL, 10); + + if (tmp_loading_timeout < 0) + tmp_loading_timeout = 0; + + __fw_fallback_set_timeout(tmp_loading_timeout); + + return count; +} +static CLASS_ATTR_RW(timeout); + +static struct attribute *firmware_class_attrs[] = { + &class_attr_timeout.attr, + NULL, +}; +ATTRIBUTE_GROUPS(firmware_class); + +static void fw_dev_release(struct device *dev) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + + kfree(fw_sysfs); +} + +static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env) +{ + if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name)) + return -ENOMEM; + if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout())) + return -ENOMEM; + if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait)) + return -ENOMEM; + + return 0; +} + +static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + int err = 0; + + mutex_lock(&fw_lock); + if (fw_sysfs->fw_priv) + err = do_firmware_uevent(fw_sysfs, env); + mutex_unlock(&fw_lock); + return err; +} + +static struct class firmware_class = { + .name = "firmware", + .class_groups = firmware_class_groups, + .dev_uevent = firmware_uevent, + .dev_release = fw_dev_release, +}; + +int register_sysfs_loader(void) +{ + return class_register(&firmware_class); +} + +void unregister_sysfs_loader(void) +{ + class_unregister(&firmware_class); +} + +static ssize_t firmware_loading_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + int loading = 0; + + mutex_lock(&fw_lock); + if (fw_sysfs->fw_priv) + loading = fw_sysfs_loading(fw_sysfs->fw_priv); + mutex_unlock(&fw_lock); + + return sysfs_emit(buf, "%d\n", loading); +} + +/** + * firmware_loading_store() - set value in the 'loading' control file + * @dev: device pointer + * @attr: device attribute pointer + * @buf: buffer to scan for loading control value + * @count: number of bytes in @buf + * + * The relevant values are: + * + * 1: Start a load, discarding any previous partial load. + * 0: Conclude the load and hand the data to the driver code. + * -1: Conclude the load with an error and discard any written data. + **/ +static ssize_t firmware_loading_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + struct fw_priv *fw_priv; + ssize_t written = count; + int loading = simple_strtol(buf, NULL, 10); + + mutex_lock(&fw_lock); + fw_priv = fw_sysfs->fw_priv; + if (fw_state_is_aborted(fw_priv)) + goto out; + + switch (loading) { + case 1: + /* discarding any previous partial load */ + if (!fw_sysfs_done(fw_priv)) { + fw_free_paged_buf(fw_priv); + fw_state_start(fw_priv); + } + break; + case 0: + if (fw_sysfs_loading(fw_priv)) { + int rc; + + /* + * Several loading requests may be pending on + * one same firmware buf, so let all requests + * see the mapped 'buf->data' once the loading + * is completed. + * */ + rc = fw_map_paged_buf(fw_priv); + if (rc) + dev_err(dev, "%s: map pages failed\n", + __func__); + else + rc = security_kernel_post_load_data(fw_priv->data, + fw_priv->size, + LOADING_FIRMWARE, "blob"); + + /* + * Same logic as fw_load_abort, only the DONE bit + * is ignored and we set ABORT only on failure. 
+ */ + if (rc) { + fw_state_aborted(fw_priv); + written = rc; + } else { + fw_state_done(fw_priv); + } + break; + } + fallthrough; + default: + dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); + fallthrough; + case -1: + fw_load_abort(fw_sysfs); + break; + } +out: + mutex_unlock(&fw_lock); + return written; +} + +static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store); + +static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer, + loff_t offset, size_t count, bool read) +{ + if (read) + memcpy(buffer, fw_priv->data + offset, count); + else + memcpy(fw_priv->data + offset, buffer, count); +} + +static void firmware_rw(struct fw_priv *fw_priv, char *buffer, + loff_t offset, size_t count, bool read) +{ + while (count) { + void *page_data; + int page_nr = offset >> PAGE_SHIFT; + int page_ofs = offset & (PAGE_SIZE-1); + int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); + + page_data = kmap(fw_priv->pages[page_nr]); + + if (read) + memcpy(buffer, page_data + page_ofs, page_cnt); + else + memcpy(page_data + page_ofs, buffer, page_cnt); + + kunmap(fw_priv->pages[page_nr]); + buffer += page_cnt; + offset += page_cnt; + count -= page_cnt; + } +} + +static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + struct fw_priv *fw_priv; + ssize_t ret_count; + + mutex_lock(&fw_lock); + fw_priv = fw_sysfs->fw_priv; + if (!fw_priv || fw_sysfs_done(fw_priv)) { + ret_count = -ENODEV; + goto out; + } + if (offset > fw_priv->size) { + ret_count = 0; + goto out; + } + if (count > fw_priv->size - offset) + count = fw_priv->size - offset; + + ret_count = count; + + if (fw_priv->data) + firmware_rw_data(fw_priv, buffer, offset, count, true); + else + firmware_rw(fw_priv, buffer, offset, count, true); + +out: + mutex_unlock(&fw_lock); + return ret_count; +} + +static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size) +{ + int err; + + err = fw_grow_paged_buf(fw_sysfs->fw_priv, + PAGE_ALIGN(min_size) >> PAGE_SHIFT); + if (err) + fw_load_abort(fw_sysfs); + return err; +} + +/** + * firmware_data_write() - write method for firmware + * @filp: open sysfs file + * @kobj: kobject for the device + * @bin_attr: bin_attr structure + * @buffer: buffer being written + * @offset: buffer offset for write in total data store area + * @count: buffer size + * + * Data written to the 'data' attribute will be later handed to + * the driver as a firmware image. 
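+ *
+ * Minimal sketch of the userspace side, with $DEVPATH taken from the
+ * firmware uevent:
+ *
+ *	echo 1 > /sys/$DEVPATH/loading
+ *	cat firmware.bin > /sys/$DEVPATH/data
+ *	echo 0 > /sys/$DEVPATH/loading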
+ **/ +static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buffer, loff_t offset, size_t count) +{ + struct device *dev = kobj_to_dev(kobj); + struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev); + struct fw_priv *fw_priv; + ssize_t retval; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + mutex_lock(&fw_lock); + fw_priv = fw_sysfs->fw_priv; + if (!fw_priv || fw_sysfs_done(fw_priv)) { + retval = -ENODEV; + goto out; + } + + if (fw_priv->data) { + if (offset + count > fw_priv->allocated_size) { + retval = -ENOMEM; + goto out; + } + firmware_rw_data(fw_priv, buffer, offset, count, false); + retval = count; + } else { + retval = fw_realloc_pages(fw_sysfs, offset + count); + if (retval) + goto out; + + retval = count; + firmware_rw(fw_priv, buffer, offset, count, false); + } + + fw_priv->size = max_t(size_t, offset + count, fw_priv->size); +out: + mutex_unlock(&fw_lock); + return retval; +} + +static struct bin_attribute firmware_attr_data = { + .attr = { .name = "data", .mode = 0644 }, + .size = 0, + .read = firmware_data_read, + .write = firmware_data_write, +}; + +static struct attribute *fw_dev_attrs[] = { + &dev_attr_loading.attr, + NULL +}; + +static struct bin_attribute *fw_dev_bin_attrs[] = { + &firmware_attr_data, + NULL +}; + +static const struct attribute_group fw_dev_attr_group = { + .attrs = fw_dev_attrs, + .bin_attrs = fw_dev_bin_attrs, +}; + +static const struct attribute_group *fw_dev_attr_groups[] = { + &fw_dev_attr_group, + NULL +}; + +static struct fw_sysfs * +fw_create_instance(struct firmware *firmware, const char *fw_name, + struct device *device, u32 opt_flags) +{ + struct fw_sysfs *fw_sysfs; + struct device *f_dev; + + fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL); + if (!fw_sysfs) { + fw_sysfs = ERR_PTR(-ENOMEM); + goto exit; + } + + fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT); + fw_sysfs->fw = firmware; + f_dev = &fw_sysfs->dev; + + device_initialize(f_dev); + dev_set_name(f_dev, "%s", fw_name); + f_dev->parent = device; + f_dev->class = &firmware_class; + f_dev->groups = fw_dev_attr_groups; +exit: + return fw_sysfs; +} + +/** + * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism + * @fw_sysfs: firmware sysfs information for the firmware to load + * @timeout: timeout to wait for the load + * + * In charge of constructing a sysfs fallback interface for firmware loading. 
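+ *
+ * Note that when the request was made without FW_OPT_UEVENT (the
+ * "custom" fallback case) the timeout below is raised to
+ * MAX_JIFFY_OFFSET, so such a request effectively waits until userspace
+ * writes to the loading file or the request is aborted; the Kconfig help
+ * for FW_LOADER_USER_HELPER warns about exactly this.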
+ **/ +static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout) +{ + int retval = 0; + struct device *f_dev = &fw_sysfs->dev; + struct fw_priv *fw_priv = fw_sysfs->fw_priv; + + /* fall back on userspace loading */ + if (!fw_priv->data) + fw_priv->is_paged_buf = true; + + dev_set_uevent_suppress(f_dev, true); + + retval = device_add(f_dev); + if (retval) { + dev_err(f_dev, "%s: device_register failed\n", __func__); + goto err_put_dev; + } + + mutex_lock(&fw_lock); + if (fw_state_is_aborted(fw_priv)) { + mutex_unlock(&fw_lock); + retval = -EINTR; + goto out; + } + list_add(&fw_priv->pending_list, &pending_fw_head); + mutex_unlock(&fw_lock); + + if (fw_priv->opt_flags & FW_OPT_UEVENT) { + fw_priv->need_uevent = true; + dev_set_uevent_suppress(f_dev, false); + dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name); + kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD); + } else { + timeout = MAX_JIFFY_OFFSET; + } + + retval = fw_sysfs_wait_timeout(fw_priv, timeout); + if (retval < 0 && retval != -ENOENT) { + mutex_lock(&fw_lock); + fw_load_abort(fw_sysfs); + mutex_unlock(&fw_lock); + } + + if (fw_state_is_aborted(fw_priv)) { + if (retval == -ERESTARTSYS) + retval = -EINTR; + } else if (fw_priv->is_paged_buf && !fw_priv->data) + retval = -ENOMEM; + +out: + device_del(f_dev); +err_put_dev: + put_device(f_dev); + return retval; +} + +static int fw_load_from_user_helper(struct firmware *firmware, + const char *name, struct device *device, + u32 opt_flags) +{ + struct fw_sysfs *fw_sysfs; + long timeout; + int ret; + + timeout = firmware_loading_timeout(); + if (opt_flags & FW_OPT_NOWAIT) { + timeout = usermodehelper_read_lock_wait(timeout); + if (!timeout) { + dev_dbg(device, "firmware: %s loading timed out\n", + name); + return -EBUSY; + } + } else { + ret = usermodehelper_read_trylock(); + if (WARN_ON(ret)) { + dev_err(device, "firmware: %s will not be loaded\n", + name); + return ret; + } + } + + fw_sysfs = fw_create_instance(firmware, name, device, opt_flags); + if (IS_ERR(fw_sysfs)) { + ret = PTR_ERR(fw_sysfs); + goto out_unlock; + } + + fw_sysfs->fw_priv = firmware->priv; + ret = fw_load_sysfs_fallback(fw_sysfs, timeout); + + if (!ret) + ret = assign_fw(firmware, device); + +out_unlock: + usermodehelper_read_unlock(); + + return ret; +} + +static bool fw_force_sysfs_fallback(u32 opt_flags) +{ + if (fw_fallback_config.force_sysfs_fallback) + return true; + if (!(opt_flags & FW_OPT_USERHELPER)) + return false; + return true; +} + +static bool fw_run_sysfs_fallback(u32 opt_flags) +{ + int ret; + + if (fw_fallback_config.ignore_sysfs_fallback) { + pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n"); + return false; + } + + if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS)) + return false; + + /* Also permit LSMs and IMA to fail firmware sysfs fallback */ + ret = security_kernel_load_data(LOADING_FIRMWARE, true); + if (ret < 0) + return false; + + return fw_force_sysfs_fallback(opt_flags); +} + +/** + * firmware_fallback_sysfs() - use the fallback mechanism to find firmware + * @fw: pointer to firmware image + * @name: name of firmware file to look for + * @device: device for which firmware is being loaded + * @opt_flags: options to control firmware loading behaviour, as defined by + * &enum fw_opt + * @ret: return value from direct lookup which triggered the fallback mechanism + * + * This function is called if direct lookup for the firmware failed, it enables + * a fallback mechanism through userspace by exposing a sysfs loading + * interface. 
Userspace is in charge of loading the firmware through the sysfs + * loading interface. This sysfs fallback mechanism may be disabled completely + * on a system by setting the proc sysctl value ignore_sysfs_fallback to true. + * If this is false we check if the internal API caller set the + * @FW_OPT_NOFALLBACK_SYSFS flag, if so it would also disable the fallback + * mechanism. A system may want to enforce the sysfs fallback mechanism at all + * times, it can do this by setting ignore_sysfs_fallback to false and + * force_sysfs_fallback to true. + * Enabling force_sysfs_fallback is functionally equivalent to build a kernel + * with CONFIG_FW_LOADER_USER_HELPER_FALLBACK. + **/ +int firmware_fallback_sysfs(struct firmware *fw, const char *name, + struct device *device, + u32 opt_flags, + int ret) +{ + if (!fw_run_sysfs_fallback(opt_flags)) + return ret; + + if (!(opt_flags & FW_OPT_NO_WARN)) + dev_warn(device, "Falling back to sysfs fallback for: %s\n", + name); + else + dev_dbg(device, "Falling back to sysfs fallback for: %s\n", + name); + return fw_load_from_user_helper(fw, name, device, opt_flags); +} diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h new file mode 100644 index 000000000..3af7205b3 --- /dev/null +++ b/drivers/base/firmware_loader/fallback.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FIRMWARE_FALLBACK_H +#define __FIRMWARE_FALLBACK_H + +#include <linux/firmware.h> +#include <linux/device.h> + +#include "firmware.h" + +/** + * struct firmware_fallback_config - firmware fallback configuration settings + * + * Helps describe and fine tune the fallback mechanism. + * + * @force_sysfs_fallback: force the sysfs fallback mechanism to be used + * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y. + * Useful to help debug a CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y + * functionality on a kernel where that config entry has been disabled. + * @ignore_sysfs_fallback: force to disable the sysfs fallback mechanism. + * This emulates the behaviour as if we had set the kernel + * config CONFIG_FW_LOADER_USER_HELPER=n. + * @old_timeout: for internal use + * @loading_timeout: the timeout to wait for the fallback mechanism before + * giving up, in seconds. 
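+ *
+ * Both knobs are exposed as sysctls and can be flipped at runtime, for
+ * example:
+ *
+ *	echo 1 > /proc/sys/kernel/firmware_config/force_sysfs_fallback
+ *	echo 0 > /proc/sys/kernel/firmware_config/ignore_sysfs_fallback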
+ */ +struct firmware_fallback_config { + unsigned int force_sysfs_fallback; + unsigned int ignore_sysfs_fallback; + int old_timeout; + int loading_timeout; +}; + +#ifdef CONFIG_FW_LOADER_USER_HELPER +int firmware_fallback_sysfs(struct firmware *fw, const char *name, + struct device *device, + u32 opt_flags, + int ret); +void kill_pending_fw_fallback_reqs(bool only_kill_custom); + +void fw_fallback_set_cache_timeout(void); +void fw_fallback_set_default_timeout(void); + +int register_sysfs_loader(void); +void unregister_sysfs_loader(void); +#else /* CONFIG_FW_LOADER_USER_HELPER */ +static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name, + struct device *device, + u32 opt_flags, + int ret) +{ + /* Keep carrying over the same error */ + return ret; +} + +static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { } +static inline void fw_fallback_set_cache_timeout(void) { } +static inline void fw_fallback_set_default_timeout(void) { } + +static inline int register_sysfs_loader(void) +{ + return 0; +} + +static inline void unregister_sysfs_loader(void) +{ +} +#endif /* CONFIG_FW_LOADER_USER_HELPER */ + +#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE +int firmware_fallback_platform(struct fw_priv *fw_priv); +#else +static inline int firmware_fallback_platform(struct fw_priv *fw_priv) +{ + return -ENOENT; +} +#endif + +#endif /* __FIRMWARE_FALLBACK_H */ diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c new file mode 100644 index 000000000..00af99f0a --- /dev/null +++ b/drivers/base/firmware_loader/fallback_platform.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/efi_embedded_fw.h> +#include <linux/property.h> +#include <linux/security.h> +#include <linux/vmalloc.h> + +#include "fallback.h" +#include "firmware.h" + +int firmware_fallback_platform(struct fw_priv *fw_priv) +{ + const u8 *data; + size_t size; + int rc; + + if (!(fw_priv->opt_flags & FW_OPT_FALLBACK_PLATFORM)) + return -ENOENT; + + rc = security_kernel_load_data(LOADING_FIRMWARE, true); + if (rc) + return rc; + + rc = efi_get_embedded_fw(fw_priv->fw_name, &data, &size); + if (rc) + return rc; /* rc == -ENOENT when the fw was not found */ + + if (fw_priv->data && size > fw_priv->allocated_size) + return -ENOMEM; + + rc = security_kernel_post_load_data((u8 *)data, size, LOADING_FIRMWARE, + "platform"); + if (rc) + return rc; + + if (!fw_priv->data) + fw_priv->data = vmalloc(size); + if (!fw_priv->data) + return -ENOMEM; + + memcpy(fw_priv->data, data, size); + fw_priv->size = size; + fw_state_done(fw_priv); + return 0; +} diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c new file mode 100644 index 000000000..46a731ded --- /dev/null +++ b/drivers/base/firmware_loader/fallback_table.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/types.h> +#include <linux/kconfig.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/security.h> +#include <linux/highmem.h> +#include <linux/umh.h> +#include <linux/sysctl.h> + +#include "fallback.h" +#include "firmware.h" + +/* + * firmware fallback configuration table + */ + +struct firmware_fallback_config fw_fallback_config = { + .force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK), + .loading_timeout = 60, + .old_timeout = 60, +}; +EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE); + +#ifdef CONFIG_SYSCTL +struct ctl_table firmware_config_table[] = { + 
{ + .procname = "force_sysfs_fallback", + .data = &fw_fallback_config.force_sysfs_fallback, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "ignore_sysfs_fallback", + .data = &fw_fallback_config.ignore_sysfs_fallback, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { } +}; +#endif diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h new file mode 100644 index 000000000..a3014e9e2 --- /dev/null +++ b/drivers/base/firmware_loader/firmware.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __FIRMWARE_LOADER_H +#define __FIRMWARE_LOADER_H + +#include <linux/bitops.h> +#include <linux/firmware.h> +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/completion.h> + +#include <generated/utsrelease.h> + +/** + * enum fw_opt - options to control firmware loading behaviour + * + * @FW_OPT_UEVENT: Enables the fallback mechanism to send a kobject uevent + * when the firmware is not found. Userspace is in charge to load the + * firmware using the sysfs loading facility. + * @FW_OPT_NOWAIT: Used to describe the firmware request is asynchronous. + * @FW_OPT_USERHELPER: Enable the fallback mechanism, in case the direct + * filesystem lookup fails at finding the firmware. For details refer to + * firmware_fallback_sysfs(). + * @FW_OPT_NO_WARN: Quiet, avoid printing warning messages. + * @FW_OPT_NOCACHE: Disables firmware caching. Firmware caching is used to + * cache the firmware upon suspend, so that upon resume races against the + * firmware file lookup on storage is avoided. Used for calls where the + * file may be too big, or where the driver takes charge of its own + * firmware caching mechanism. + * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes + * precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER. + * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in + * the platform's main firmware. If both this fallback and the sysfs + * fallback are enabled, then this fallback will be tried first. + * @FW_OPT_PARTIAL: Allow partial read of firmware instead of needing to read + * entire file. + */ +enum fw_opt { + FW_OPT_UEVENT = BIT(0), + FW_OPT_NOWAIT = BIT(1), + FW_OPT_USERHELPER = BIT(2), + FW_OPT_NO_WARN = BIT(3), + FW_OPT_NOCACHE = BIT(4), + FW_OPT_NOFALLBACK_SYSFS = BIT(5), + FW_OPT_FALLBACK_PLATFORM = BIT(6), + FW_OPT_PARTIAL = BIT(7), +}; + +enum fw_status { + FW_STATUS_UNKNOWN, + FW_STATUS_LOADING, + FW_STATUS_DONE, + FW_STATUS_ABORTED, +}; + +/* + * Concurrent request_firmware() for the same firmware need to be + * serialized. struct fw_state is simple state machine which hold the + * state of the firmware loading. 
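+ *
+ * A typical lifetime of the state machine (illustrative):
+ *
+ *	FW_STATUS_UNKNOWN -> FW_STATUS_LOADING -> FW_STATUS_DONE
+ *	FW_STATUS_UNKNOWN -> FW_STATUS_LOADING -> FW_STATUS_ABORTED
+ *
+ * Reaching DONE or ABORTED calls complete_all(), so every waiter in
+ * __fw_state_wait_common() is released at once.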
+ */ +struct fw_state { + struct completion completion; + enum fw_status status; +}; + +struct fw_priv { + struct kref ref; + struct list_head list; + struct firmware_cache *fwc; + struct fw_state fw_st; + void *data; + size_t size; + size_t allocated_size; + size_t offset; + u32 opt_flags; +#ifdef CONFIG_FW_LOADER_PAGED_BUF + bool is_paged_buf; + struct page **pages; + int nr_pages; + int page_array_size; +#endif +#ifdef CONFIG_FW_LOADER_USER_HELPER + bool need_uevent; + struct list_head pending_list; +#endif + const char *fw_name; +}; + +extern struct mutex fw_lock; + +static inline bool __fw_state_check(struct fw_priv *fw_priv, + enum fw_status status) +{ + struct fw_state *fw_st = &fw_priv->fw_st; + + return fw_st->status == status; +} + +static inline int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout) +{ + struct fw_state *fw_st = &fw_priv->fw_st; + long ret; + + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); + if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) + return -ENOENT; + if (!ret) + return -ETIMEDOUT; + + return ret < 0 ? ret : 0; +} + +static inline void __fw_state_set(struct fw_priv *fw_priv, + enum fw_status status) +{ + struct fw_state *fw_st = &fw_priv->fw_st; + + WRITE_ONCE(fw_st->status, status); + + if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) { +#ifdef CONFIG_FW_LOADER_USER_HELPER + /* + * Doing this here ensures that the fw_priv is deleted from + * the pending list in all abort/done paths. + */ + list_del_init(&fw_priv->pending_list); +#endif + complete_all(&fw_st->completion); + } +} + +static inline void fw_state_aborted(struct fw_priv *fw_priv) +{ + __fw_state_set(fw_priv, FW_STATUS_ABORTED); +} + +static inline bool fw_state_is_aborted(struct fw_priv *fw_priv) +{ + return __fw_state_check(fw_priv, FW_STATUS_ABORTED); +} + +static inline void fw_state_start(struct fw_priv *fw_priv) +{ + __fw_state_set(fw_priv, FW_STATUS_LOADING); +} + +static inline void fw_state_done(struct fw_priv *fw_priv) +{ + __fw_state_set(fw_priv, FW_STATUS_DONE); +} + +int assign_fw(struct firmware *fw, struct device *device); + +#ifdef CONFIG_FW_LOADER_PAGED_BUF +void fw_free_paged_buf(struct fw_priv *fw_priv); +int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed); +int fw_map_paged_buf(struct fw_priv *fw_priv); +bool fw_is_paged_buf(struct fw_priv *fw_priv); +#else +static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {} +static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; } +static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; } +static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; } +#endif + +#endif /* __FIRMWARE_LOADER_H */ diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c new file mode 100644 index 000000000..a4dd500bc --- /dev/null +++ b/drivers/base/firmware_loader/main.c @@ -0,0 +1,1589 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * main.c - Multi purpose firmware loading support + * + * Copyright (c) 2003 Manuel Estrada Sainz + * + * Please see Documentation/driver-api/firmware/ for more information. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/capability.h> +#include <linux/device.h> +#include <linux/kernel_read_file.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/timer.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/bitops.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/highmem.h> +#include <linux/firmware.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/file.h> +#include <linux/list.h> +#include <linux/fs.h> +#include <linux/async.h> +#include <linux/pm.h> +#include <linux/suspend.h> +#include <linux/syscore_ops.h> +#include <linux/reboot.h> +#include <linux/security.h> +#include <linux/xz.h> + +#include <generated/utsrelease.h> + +#include "../base.h" +#include "firmware.h" +#include "fallback.h" + +MODULE_AUTHOR("Manuel Estrada Sainz"); +MODULE_DESCRIPTION("Multi purpose firmware loading support"); +MODULE_LICENSE("GPL"); + +struct firmware_cache { + /* firmware_buf instance will be added into the below list */ + spinlock_t lock; + struct list_head head; + int state; + +#ifdef CONFIG_FW_CACHE + /* + * Names of firmware images which have been cached successfully + * will be added into the below list so that device uncache + * helper can trace which firmware images have been cached + * before. + */ + spinlock_t name_lock; + struct list_head fw_names; + + struct delayed_work work; + + struct notifier_block pm_notify; +#endif +}; + +struct fw_cache_entry { + struct list_head list; + const char *name; +}; + +struct fw_name_devm { + unsigned long magic; + const char *name; +}; + +static inline struct fw_priv *to_fw_priv(struct kref *ref) +{ + return container_of(ref, struct fw_priv, ref); +} + +#define FW_LOADER_NO_CACHE 0 +#define FW_LOADER_START_CACHE 1 + +/* fw_lock could be moved to 'struct fw_sysfs' but since it is just + * guarding for corner cases a global lock should be OK */ +DEFINE_MUTEX(fw_lock); + +static struct firmware_cache fw_cache; + +/* Builtin firmware support */ + +#ifdef CONFIG_FW_LOADER + +extern struct builtin_fw __start_builtin_fw[]; +extern struct builtin_fw __end_builtin_fw[]; + +static bool fw_copy_to_prealloc_buf(struct firmware *fw, + void *buf, size_t size) +{ + if (!buf) + return true; + if (size < fw->size) + return false; + memcpy(buf, fw->data, fw->size); + return true; +} + +static bool fw_get_builtin_firmware(struct firmware *fw, const char *name, + void *buf, size_t size) +{ + struct builtin_fw *b_fw; + + for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) { + if (strcmp(name, b_fw->name) == 0) { + fw->size = b_fw->size; + fw->data = b_fw->data; + return fw_copy_to_prealloc_buf(fw, buf, size); + } + } + + return false; +} + +static bool fw_is_builtin_firmware(const struct firmware *fw) +{ + struct builtin_fw *b_fw; + + for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) + if (fw->data == b_fw->data) + return true; + + return false; +} + +#else /* Module case - no builtin firmware support */ + +static inline bool fw_get_builtin_firmware(struct firmware *fw, + const char *name, void *buf, + size_t size) +{ + return false; +} + +static inline bool fw_is_builtin_firmware(const struct firmware *fw) +{ + return false; +} +#endif + +static void fw_state_init(struct fw_priv *fw_priv) +{ + struct fw_state *fw_st = &fw_priv->fw_st; + + init_completion(&fw_st->completion); + fw_st->status = FW_STATUS_UNKNOWN; +} + +static inline int fw_state_wait(struct fw_priv *fw_priv) +{ + return 
__fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT); +} + +static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv); + +static struct fw_priv *__allocate_fw_priv(const char *fw_name, + struct firmware_cache *fwc, + void *dbuf, + size_t size, + size_t offset, + u32 opt_flags) +{ + struct fw_priv *fw_priv; + + /* For a partial read, the buffer must be preallocated. */ + if ((opt_flags & FW_OPT_PARTIAL) && !dbuf) + return NULL; + + /* Only partial reads are allowed to use an offset. */ + if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL)) + return NULL; + + fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC); + if (!fw_priv) + return NULL; + + fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC); + if (!fw_priv->fw_name) { + kfree(fw_priv); + return NULL; + } + + kref_init(&fw_priv->ref); + fw_priv->fwc = fwc; + fw_priv->data = dbuf; + fw_priv->allocated_size = size; + fw_priv->offset = offset; + fw_priv->opt_flags = opt_flags; + fw_state_init(fw_priv); +#ifdef CONFIG_FW_LOADER_USER_HELPER + INIT_LIST_HEAD(&fw_priv->pending_list); +#endif + + pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv); + + return fw_priv; +} + +static struct fw_priv *__lookup_fw_priv(const char *fw_name) +{ + struct fw_priv *tmp; + struct firmware_cache *fwc = &fw_cache; + + list_for_each_entry(tmp, &fwc->head, list) + if (!strcmp(tmp->fw_name, fw_name)) + return tmp; + return NULL; +} + +/* Returns 1 for batching firmware requests with the same name */ +static int alloc_lookup_fw_priv(const char *fw_name, + struct firmware_cache *fwc, + struct fw_priv **fw_priv, + void *dbuf, + size_t size, + size_t offset, + u32 opt_flags) +{ + struct fw_priv *tmp; + + spin_lock(&fwc->lock); + /* + * Do not merge requests that are marked to be non-cached or + * are performing partial reads. + */ + if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) { + tmp = __lookup_fw_priv(fw_name); + if (tmp) { + kref_get(&tmp->ref); + spin_unlock(&fwc->lock); + *fw_priv = tmp; + pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n"); + return 1; + } + } + + tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags); + if (tmp) { + INIT_LIST_HEAD(&tmp->list); + if (!(opt_flags & FW_OPT_NOCACHE)) + list_add(&tmp->list, &fwc->head); + } + spin_unlock(&fwc->lock); + + *fw_priv = tmp; + + return tmp ? 
0 : -ENOMEM; +} + +static void __free_fw_priv(struct kref *ref) + __releases(&fwc->lock) +{ + struct fw_priv *fw_priv = to_fw_priv(ref); + struct firmware_cache *fwc = fw_priv->fwc; + + pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n", + __func__, fw_priv->fw_name, fw_priv, fw_priv->data, + (unsigned int)fw_priv->size); + + list_del(&fw_priv->list); + spin_unlock(&fwc->lock); + + if (fw_is_paged_buf(fw_priv)) + fw_free_paged_buf(fw_priv); + else if (!fw_priv->allocated_size) + vfree(fw_priv->data); + + kfree_const(fw_priv->fw_name); + kfree(fw_priv); +} + +static void free_fw_priv(struct fw_priv *fw_priv) +{ + struct firmware_cache *fwc = fw_priv->fwc; + spin_lock(&fwc->lock); + if (!kref_put(&fw_priv->ref, __free_fw_priv)) + spin_unlock(&fwc->lock); +} + +#ifdef CONFIG_FW_LOADER_PAGED_BUF +bool fw_is_paged_buf(struct fw_priv *fw_priv) +{ + return fw_priv->is_paged_buf; +} + +void fw_free_paged_buf(struct fw_priv *fw_priv) +{ + int i; + + if (!fw_priv->pages) + return; + + vunmap(fw_priv->data); + + for (i = 0; i < fw_priv->nr_pages; i++) + __free_page(fw_priv->pages[i]); + kvfree(fw_priv->pages); + fw_priv->pages = NULL; + fw_priv->page_array_size = 0; + fw_priv->nr_pages = 0; +} + +int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) +{ + /* If the array of pages is too small, grow it */ + if (fw_priv->page_array_size < pages_needed) { + int new_array_size = max(pages_needed, + fw_priv->page_array_size * 2); + struct page **new_pages; + + new_pages = kvmalloc_array(new_array_size, sizeof(void *), + GFP_KERNEL); + if (!new_pages) + return -ENOMEM; + memcpy(new_pages, fw_priv->pages, + fw_priv->page_array_size * sizeof(void *)); + memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * + (new_array_size - fw_priv->page_array_size)); + kvfree(fw_priv->pages); + fw_priv->pages = new_pages; + fw_priv->page_array_size = new_array_size; + } + + while (fw_priv->nr_pages < pages_needed) { + fw_priv->pages[fw_priv->nr_pages] = + alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + + if (!fw_priv->pages[fw_priv->nr_pages]) + return -ENOMEM; + fw_priv->nr_pages++; + } + + return 0; +} + +int fw_map_paged_buf(struct fw_priv *fw_priv) +{ + /* one pages buffer should be mapped/unmapped only once */ + if (!fw_priv->pages) + return 0; + + vunmap(fw_priv->data); + fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0, + PAGE_KERNEL_RO); + if (!fw_priv->data) + return -ENOMEM; + + return 0; +} +#endif + +/* + * XZ-compressed firmware support + */ +#ifdef CONFIG_FW_LOADER_COMPRESS +/* show an error and return the standard error code */ +static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret) +{ + if (xz_ret != XZ_STREAM_END) { + dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret); + return xz_ret == XZ_MEM_ERROR ? 
-ENOMEM : -EINVAL; + } + return 0; +} + +/* single-shot decompression onto the pre-allocated buffer */ +static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv, + size_t in_size, const void *in_buffer) +{ + struct xz_dec *xz_dec; + struct xz_buf xz_buf; + enum xz_ret xz_ret; + + xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1); + if (!xz_dec) + return -ENOMEM; + + xz_buf.in_size = in_size; + xz_buf.in = in_buffer; + xz_buf.in_pos = 0; + xz_buf.out_size = fw_priv->allocated_size; + xz_buf.out = fw_priv->data; + xz_buf.out_pos = 0; + + xz_ret = xz_dec_run(xz_dec, &xz_buf); + xz_dec_end(xz_dec); + + fw_priv->size = xz_buf.out_pos; + return fw_decompress_xz_error(dev, xz_ret); +} + +/* decompression on paged buffer and map it */ +static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv, + size_t in_size, const void *in_buffer) +{ + struct xz_dec *xz_dec; + struct xz_buf xz_buf; + enum xz_ret xz_ret; + struct page *page; + int err = 0; + + xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1); + if (!xz_dec) + return -ENOMEM; + + xz_buf.in_size = in_size; + xz_buf.in = in_buffer; + xz_buf.in_pos = 0; + + fw_priv->is_paged_buf = true; + fw_priv->size = 0; + do { + if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) { + err = -ENOMEM; + goto out; + } + + /* decompress onto the new allocated page */ + page = fw_priv->pages[fw_priv->nr_pages - 1]; + xz_buf.out = kmap(page); + xz_buf.out_pos = 0; + xz_buf.out_size = PAGE_SIZE; + xz_ret = xz_dec_run(xz_dec, &xz_buf); + kunmap(page); + fw_priv->size += xz_buf.out_pos; + /* partial decompression means either end or error */ + if (xz_buf.out_pos != PAGE_SIZE) + break; + } while (xz_ret == XZ_OK); + + err = fw_decompress_xz_error(dev, xz_ret); + if (!err) + err = fw_map_paged_buf(fw_priv); + + out: + xz_dec_end(xz_dec); + return err; +} + +static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv, + size_t in_size, const void *in_buffer) +{ + /* if the buffer is pre-allocated, we can perform in single-shot mode */ + if (fw_priv->data) + return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer); + else + return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer); +} +#endif /* CONFIG_FW_LOADER_COMPRESS */ + +/* direct firmware loading support */ +static char fw_path_para[256]; +static const char * const fw_path[] = { + fw_path_para, + "/lib/firmware/updates/" UTS_RELEASE, + "/lib/firmware/updates", + "/lib/firmware/" UTS_RELEASE, + "/lib/firmware" +}; + +/* + * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH' + * from kernel command line because firmware_class is generally built in + * kernel instead of module. 
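+ *
+ * For example, booting with firmware_class.path=/vendor/firmware makes that
+ * directory the first entry of fw_path[] above; since the parameter is 0644,
+ * the path can also be updated at runtime through
+ * /sys/module/firmware_class/parameters/path.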
+ */ +module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644); +MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path"); + +static int +fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv, + const char *suffix, + int (*decompress)(struct device *dev, + struct fw_priv *fw_priv, + size_t in_size, + const void *in_buffer)) +{ + size_t size; + int i, len; + int rc = -ENOENT; + char *path; + size_t msize = INT_MAX; + void *buffer = NULL; + + /* Already populated data member means we're loading into a buffer */ + if (!decompress && fw_priv->data) { + buffer = fw_priv->data; + msize = fw_priv->allocated_size; + } + + path = __getname(); + if (!path) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(fw_path); i++) { + size_t file_size = 0; + size_t *file_size_ptr = NULL; + + /* skip the unset customized path */ + if (!fw_path[i][0]) + continue; + + len = snprintf(path, PATH_MAX, "%s/%s%s", + fw_path[i], fw_priv->fw_name, suffix); + if (len >= PATH_MAX) { + rc = -ENAMETOOLONG; + break; + } + + fw_priv->size = 0; + + /* + * The total file size is only examined when doing a partial + * read; the "full read" case needs to fail if the whole + * firmware was not completely loaded. + */ + if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer) + file_size_ptr = &file_size; + + /* load firmware files from the mount namespace of init */ + rc = kernel_read_file_from_path_initns(path, fw_priv->offset, + &buffer, msize, + file_size_ptr, + READING_FIRMWARE); + if (rc < 0) { + if (rc != -ENOENT) + dev_warn(device, "loading %s failed with error %d\n", + path, rc); + else + dev_dbg(device, "loading %s failed for no such file or directory.\n", + path); + continue; + } + size = rc; + rc = 0; + + dev_dbg(device, "Loading firmware from %s\n", path); + if (decompress) { + dev_dbg(device, "f/w decompressing %s\n", + fw_priv->fw_name); + rc = decompress(device, fw_priv, size, buffer); + /* discard the superfluous original content */ + vfree(buffer); + buffer = NULL; + if (rc) { + fw_free_paged_buf(fw_priv); + continue; + } + } else { + dev_dbg(device, "direct-loading %s\n", + fw_priv->fw_name); + if (!fw_priv->data) + fw_priv->data = buffer; + fw_priv->size = size; + } + fw_state_done(fw_priv); + break; + } + __putname(path); + + return rc; +} + +/* firmware holds the ownership of pages */ +static void firmware_free_data(const struct firmware *fw) +{ + /* Loaded directly? 
*/ + if (!fw->priv) { + vfree(fw->data); + return; + } + free_fw_priv(fw->priv); +} + +/* store the pages buffer info firmware from buf */ +static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw) +{ + fw->priv = fw_priv; + fw->size = fw_priv->size; + fw->data = fw_priv->data; + + pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n", + __func__, fw_priv->fw_name, fw_priv, fw_priv->data, + (unsigned int)fw_priv->size); +} + +#ifdef CONFIG_FW_CACHE +static void fw_name_devm_release(struct device *dev, void *res) +{ + struct fw_name_devm *fwn = res; + + if (fwn->magic == (unsigned long)&fw_cache) + pr_debug("%s: fw_name-%s devm-%p released\n", + __func__, fwn->name, res); + kfree_const(fwn->name); +} + +static int fw_devm_match(struct device *dev, void *res, + void *match_data) +{ + struct fw_name_devm *fwn = res; + + return (fwn->magic == (unsigned long)&fw_cache) && + !strcmp(fwn->name, match_data); +} + +static struct fw_name_devm *fw_find_devm_name(struct device *dev, + const char *name) +{ + struct fw_name_devm *fwn; + + fwn = devres_find(dev, fw_name_devm_release, + fw_devm_match, (void *)name); + return fwn; +} + +static bool fw_cache_is_setup(struct device *dev, const char *name) +{ + struct fw_name_devm *fwn; + + fwn = fw_find_devm_name(dev, name); + if (fwn) + return true; + + return false; +} + +/* add firmware name into devres list */ +static int fw_add_devm_name(struct device *dev, const char *name) +{ + struct fw_name_devm *fwn; + + if (fw_cache_is_setup(dev, name)) + return 0; + + fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm), + GFP_KERNEL); + if (!fwn) + return -ENOMEM; + fwn->name = kstrdup_const(name, GFP_KERNEL); + if (!fwn->name) { + devres_free(fwn); + return -ENOMEM; + } + + fwn->magic = (unsigned long)&fw_cache; + devres_add(dev, fwn); + + return 0; +} +#else +static bool fw_cache_is_setup(struct device *dev, const char *name) +{ + return false; +} + +static int fw_add_devm_name(struct device *dev, const char *name) +{ + return 0; +} +#endif + +int assign_fw(struct firmware *fw, struct device *device) +{ + struct fw_priv *fw_priv = fw->priv; + int ret; + + mutex_lock(&fw_lock); + if (!fw_priv->size || fw_state_is_aborted(fw_priv)) { + mutex_unlock(&fw_lock); + return -ENOENT; + } + + /* + * add firmware name into devres list so that we can auto cache + * and uncache firmware for device. + * + * device may has been deleted already, but the problem + * should be fixed in devres or driver core. + */ + /* don't cache firmware handled without uevent */ + if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) && + !(fw_priv->opt_flags & FW_OPT_NOCACHE)) { + ret = fw_add_devm_name(device, fw_priv->fw_name); + if (ret) { + mutex_unlock(&fw_lock); + return ret; + } + } + + /* + * After caching firmware image is started, let it piggyback + * on request firmware. 
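+ * fwc->state is switched to FW_LOADER_START_CACHE by device_cache_fw_images()
+ * from the PM notifier, so the piggybacking below only happens while a
+ * suspend/hibernate cycle is being prepared.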
+ */ + if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) && + fw_priv->fwc->state == FW_LOADER_START_CACHE) + fw_cache_piggyback_on_request(fw_priv); + + /* pass the pages buffer to driver at the last minute */ + fw_set_page_data(fw_priv, fw); + mutex_unlock(&fw_lock); + return 0; +} + +/* prepare firmware and firmware_buf structs; + * return 0 if a firmware is already assigned, 1 if need to load one, + * or a negative error code + */ +static int +_request_firmware_prepare(struct firmware **firmware_p, const char *name, + struct device *device, void *dbuf, size_t size, + size_t offset, u32 opt_flags) +{ + struct firmware *firmware; + struct fw_priv *fw_priv; + int ret; + + *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); + if (!firmware) { + dev_err(device, "%s: kmalloc(struct firmware) failed\n", + __func__); + return -ENOMEM; + } + + if (fw_get_builtin_firmware(firmware, name, dbuf, size)) { + dev_dbg(device, "using built-in %s\n", name); + return 0; /* assigned */ + } + + ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size, + offset, opt_flags); + + /* + * bind with 'priv' now to avoid warning in failure path + * of requesting firmware. + */ + firmware->priv = fw_priv; + + if (ret > 0) { + ret = fw_state_wait(fw_priv); + if (!ret) { + fw_set_page_data(fw_priv, firmware); + return 0; /* assigned */ + } + } + + if (ret < 0) + return ret; + return 1; /* need to load */ +} + +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct fw_priv and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct fw_priv *fw_priv; + + /* Loaded directly? */ + if (!fw || !fw->priv) + return; + + fw_priv = fw->priv; + mutex_lock(&fw_lock); + if (!fw_state_is_aborted(fw_priv)) + fw_state_aborted(fw_priv); + mutex_unlock(&fw_lock); +} + +/* called from request_firmware() and request_firmware_work_func() */ +static int +_request_firmware(const struct firmware **firmware_p, const char *name, + struct device *device, void *buf, size_t size, + size_t offset, u32 opt_flags) +{ + struct firmware *fw = NULL; + struct cred *kern_cred = NULL; + const struct cred *old_cred; + bool nondirect = false; + int ret; + + if (!firmware_p) + return -EINVAL; + + if (!name || name[0] == '\0') { + ret = -EINVAL; + goto out; + } + + ret = _request_firmware_prepare(&fw, name, device, buf, size, + offset, opt_flags); + if (ret <= 0) /* error or already assigned */ + goto out; + + /* + * We are about to try to access the firmware file. Because we may have been + * called by a driver when serving an unrelated request from userland, we use + * the kernel credentials to read the file. + */ + kern_cred = prepare_kernel_cred(NULL); + if (!kern_cred) { + ret = -ENOMEM; + goto out; + } + old_cred = override_creds(kern_cred); + + ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL); + + /* Only full reads can support decompression, platform, and sysfs. 
*/ + if (!(opt_flags & FW_OPT_PARTIAL)) + nondirect = true; + +#ifdef CONFIG_FW_LOADER_COMPRESS + if (ret == -ENOENT && nondirect) + ret = fw_get_filesystem_firmware(device, fw->priv, ".xz", + fw_decompress_xz); +#endif + if (ret == -ENOENT && nondirect) + ret = firmware_fallback_platform(fw->priv); + + if (ret) { + if (!(opt_flags & FW_OPT_NO_WARN)) + dev_warn(device, + "Direct firmware load for %s failed with error %d\n", + name, ret); + if (nondirect) + ret = firmware_fallback_sysfs(fw, name, device, + opt_flags, ret); + } else + ret = assign_fw(fw, device); + + revert_creds(old_cred); + put_cred(kern_cred); + + out: + if (ret < 0) { + fw_abort_batch_reqs(fw); + release_firmware(fw); + fw = NULL; + } + + *firmware_p = fw; + return ret; +} + +/** + * request_firmware() - send firmware request and wait for it + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * @firmware_p will be used to return a firmware image by the name + * of @name for device @device. + * + * Should be called from user context where sleeping is allowed. + * + * @name will be used as $FIRMWARE in the uevent environment and + * should be distinctive enough not to be confused with any other + * firmware image for this or any other device. + * + * Caller must hold the reference count of @device. + * + * The function can be called safely inside device's suspend and + * resume callback. + **/ +int +request_firmware(const struct firmware **firmware_p, const char *name, + struct device *device) +{ + int ret; + + /* Need to pin this module until return */ + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, NULL, 0, 0, + FW_OPT_UEVENT); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL(request_firmware); + +/** + * firmware_request_nowarn() - request for an optional fw module + * @firmware: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function is similar in behaviour to request_firmware(), except it + * doesn't produce warning messages when the file is not found. The sysfs + * fallback mechanism is enabled if direct filesystem lookup fails. However, + * failures to find the firmware file with it are still suppressed. It is + * therefore up to the driver to check for the return value of this call and to + * decide when to inform the users of errors. + **/ +int firmware_request_nowarn(const struct firmware **firmware, const char *name, + struct device *device) +{ + int ret; + + /* Need to pin this module until return */ + __module_get(THIS_MODULE); + ret = _request_firmware(firmware, name, device, NULL, 0, 0, + FW_OPT_UEVENT | FW_OPT_NO_WARN); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL_GPL(firmware_request_nowarn); + +/** + * request_firmware_direct() - load firmware directly without usermode helper + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function works pretty much like request_firmware(), but this doesn't + * fall back to usermode helper even if the firmware couldn't be loaded + * directly from fs. Hence it's useful for loading optional firmwares, which + * aren't always present, without extra long timeouts of udev. 
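+ *
+ * A minimal, illustrative call from a driver (the firmware name and the dev
+ * pointer are placeholders); a lookup failure is simply treated as "no
+ * optional image present":
+ *
+ *	const struct firmware *fw;
+ *
+ *	if (request_firmware_direct(&fw, "vendor/optional-patch.bin", dev))
+ *		return 0;
+ *	...
+ *	release_firmware(fw);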
+ **/ +int request_firmware_direct(const struct firmware **firmware_p, + const char *name, struct device *device) +{ + int ret; + + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, NULL, 0, 0, + FW_OPT_UEVENT | FW_OPT_NO_WARN | + FW_OPT_NOFALLBACK_SYSFS); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL_GPL(request_firmware_direct); + +/** + * firmware_request_platform() - request firmware with platform-fw fallback + * @firmware: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded + * + * This function is similar in behaviour to request_firmware, except that if + * direct filesystem lookup fails, it will fallback to looking for a copy of the + * requested firmware embedded in the platform's main (e.g. UEFI) firmware. + **/ +int firmware_request_platform(const struct firmware **firmware, + const char *name, struct device *device) +{ + int ret; + + /* Need to pin this module until return */ + __module_get(THIS_MODULE); + ret = _request_firmware(firmware, name, device, NULL, 0, 0, + FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL_GPL(firmware_request_platform); + +/** + * firmware_request_cache() - cache firmware for suspend so resume can use it + * @name: name of firmware file + * @device: device for which firmware should be cached for + * + * There are some devices with an optimization that enables the device to not + * require loading firmware on system reboot. This optimization may still + * require the firmware present on resume from suspend. This routine can be + * used to ensure the firmware is present on resume from suspend in these + * situations. This helper is not compatible with drivers which use + * request_firmware_into_buf() or request_firmware_nowait() with no uevent set. + **/ +int firmware_request_cache(struct device *device, const char *name) +{ + int ret; + + mutex_lock(&fw_lock); + ret = fw_add_devm_name(device, name); + mutex_unlock(&fw_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(firmware_request_cache); + +/** + * request_firmware_into_buf() - load firmware into a previously allocated buffer + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded and DMA region allocated + * @buf: address of buffer to load firmware into + * @size: size of buffer + * + * This function works pretty much like request_firmware(), but it doesn't + * allocate a buffer to hold the firmware data. Instead, the firmware + * is loaded directly into the buffer pointed to by @buf and the @firmware_p + * data member is pointed at @buf. + * + * This function doesn't cache firmware either. 
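+ *
+ * Illustrative use with a preallocated buffer (all names are placeholders):
+ *
+ *	const struct firmware *fw;
+ *	int err;
+ *
+ *	err = request_firmware_into_buf(&fw, "vendor/blob.bin", dev,
+ *					priv->fw_buf, priv->fw_buf_size);
+ *	if (err)
+ *		return err;
+ *	...
+ *	release_firmware(fw);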
+ */ +int +request_firmware_into_buf(const struct firmware **firmware_p, const char *name, + struct device *device, void *buf, size_t size) +{ + int ret; + + if (fw_cache_is_setup(device, name)) + return -EOPNOTSUPP; + + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, buf, size, 0, + FW_OPT_UEVENT | FW_OPT_NOCACHE); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL(request_firmware_into_buf); + +/** + * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer + * @firmware_p: pointer to firmware image + * @name: name of firmware file + * @device: device for which firmware is being loaded and DMA region allocated + * @buf: address of buffer to load firmware into + * @size: size of buffer + * @offset: offset into file to read + * + * This function works pretty much like request_firmware_into_buf except + * it allows a partial read of the file. + */ +int +request_partial_firmware_into_buf(const struct firmware **firmware_p, + const char *name, struct device *device, + void *buf, size_t size, size_t offset) +{ + int ret; + + if (fw_cache_is_setup(device, name)) + return -EOPNOTSUPP; + + __module_get(THIS_MODULE); + ret = _request_firmware(firmware_p, name, device, buf, size, offset, + FW_OPT_UEVENT | FW_OPT_NOCACHE | + FW_OPT_PARTIAL); + module_put(THIS_MODULE); + return ret; +} +EXPORT_SYMBOL(request_partial_firmware_into_buf); + +/** + * release_firmware() - release the resource associated with a firmware image + * @fw: firmware resource to release + **/ +void release_firmware(const struct firmware *fw) +{ + if (fw) { + if (!fw_is_builtin_firmware(fw)) + firmware_free_data(fw); + kfree(fw); + } +} +EXPORT_SYMBOL(release_firmware); + +/* Async support */ +struct firmware_work { + struct work_struct work; + struct module *module; + const char *name; + struct device *device; + void *context; + void (*cont)(const struct firmware *fw, void *context); + u32 opt_flags; +}; + +static void request_firmware_work_func(struct work_struct *work) +{ + struct firmware_work *fw_work; + const struct firmware *fw; + + fw_work = container_of(work, struct firmware_work, work); + + _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0, + fw_work->opt_flags); + fw_work->cont(fw, fw_work->context); + put_device(fw_work->device); /* taken in request_firmware_nowait() */ + + module_put(fw_work->module); + kfree_const(fw_work->name); + kfree(fw_work); +} + +/** + * request_firmware_nowait() - asynchronous version of request_firmware + * @module: module requesting the firmware + * @uevent: sends uevent to copy the firmware image if this flag + * is non-zero else the firmware copy must be done manually. + * @name: name of firmware file + * @device: device for which firmware is being loaded + * @gfp: allocation flags + * @context: will be passed over to @cont, and + * @fw may be %NULL if firmware request fails. + * @cont: function will be called asynchronously when the firmware + * request is over. + * + * Caller must hold the reference count of @device. + * + * Asynchronous variant of request_firmware() for user contexts: + * - sleep for as small periods as possible since it may + * increase kernel boot time of built-in device drivers + * requesting firmware in their ->probe() methods, if + * @gfp is GFP_KERNEL. + * + * - can't sleep at all if @gfp is GFP_ATOMIC. 
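+ *
+ * Illustrative asynchronous request (the callback and names are placeholders;
+ * FW_ACTION_HOTPLUG selects the uevent-enabled mode):
+ *
+ *	static void example_fw_cont(const struct firmware *fw, void *context)
+ *	{
+ *		struct example_priv *priv = context;
+ *
+ *		if (!fw)
+ *			return;
+ *		example_consume(priv, fw->data, fw->size);
+ *		release_firmware(fw);
+ *	}
+ *
+ *	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ *				      "vendor/blob.bin", dev, GFP_KERNEL,
+ *				      priv, example_fw_cont);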
+ **/ +int +request_firmware_nowait( + struct module *module, bool uevent, + const char *name, struct device *device, gfp_t gfp, void *context, + void (*cont)(const struct firmware *fw, void *context)) +{ + struct firmware_work *fw_work; + + fw_work = kzalloc(sizeof(struct firmware_work), gfp); + if (!fw_work) + return -ENOMEM; + + fw_work->module = module; + fw_work->name = kstrdup_const(name, gfp); + if (!fw_work->name) { + kfree(fw_work); + return -ENOMEM; + } + fw_work->device = device; + fw_work->context = context; + fw_work->cont = cont; + fw_work->opt_flags = FW_OPT_NOWAIT | + (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER); + + if (!uevent && fw_cache_is_setup(device, name)) { + kfree_const(fw_work->name); + kfree(fw_work); + return -EOPNOTSUPP; + } + + if (!try_module_get(module)) { + kfree_const(fw_work->name); + kfree(fw_work); + return -EFAULT; + } + + get_device(fw_work->device); + INIT_WORK(&fw_work->work, request_firmware_work_func); + schedule_work(&fw_work->work); + return 0; +} +EXPORT_SYMBOL(request_firmware_nowait); + +#ifdef CONFIG_FW_CACHE +static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain); + +/** + * cache_firmware() - cache one firmware image in kernel memory space + * @fw_name: the firmware image name + * + * Cache firmware in kernel memory so that drivers can use it when + * system isn't ready for them to request firmware image from userspace. + * Once it returns successfully, driver can use request_firmware or its + * nowait version to get the cached firmware without any interacting + * with userspace + * + * Return 0 if the firmware image has been cached successfully + * Return !0 otherwise + * + */ +static int cache_firmware(const char *fw_name) +{ + int ret; + const struct firmware *fw; + + pr_debug("%s: %s\n", __func__, fw_name); + + ret = request_firmware(&fw, fw_name, NULL); + if (!ret) + kfree(fw); + + pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret); + + return ret; +} + +static struct fw_priv *lookup_fw_priv(const char *fw_name) +{ + struct fw_priv *tmp; + struct firmware_cache *fwc = &fw_cache; + + spin_lock(&fwc->lock); + tmp = __lookup_fw_priv(fw_name); + spin_unlock(&fwc->lock); + + return tmp; +} + +/** + * uncache_firmware() - remove one cached firmware image + * @fw_name: the firmware image name + * + * Uncache one firmware image which has been cached successfully + * before. 
+ * + * Return 0 if the firmware cache has been removed successfully + * Return !0 otherwise + * + */ +static int uncache_firmware(const char *fw_name) +{ + struct fw_priv *fw_priv; + struct firmware fw; + + pr_debug("%s: %s\n", __func__, fw_name); + + if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0)) + return 0; + + fw_priv = lookup_fw_priv(fw_name); + if (fw_priv) { + free_fw_priv(fw_priv); + return 0; + } + + return -EINVAL; +} + +static struct fw_cache_entry *alloc_fw_cache_entry(const char *name) +{ + struct fw_cache_entry *fce; + + fce = kzalloc(sizeof(*fce), GFP_ATOMIC); + if (!fce) + goto exit; + + fce->name = kstrdup_const(name, GFP_ATOMIC); + if (!fce->name) { + kfree(fce); + fce = NULL; + goto exit; + } +exit: + return fce; +} + +static int __fw_entry_found(const char *name) +{ + struct firmware_cache *fwc = &fw_cache; + struct fw_cache_entry *fce; + + list_for_each_entry(fce, &fwc->fw_names, list) { + if (!strcmp(fce->name, name)) + return 1; + } + return 0; +} + +static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv) +{ + const char *name = fw_priv->fw_name; + struct firmware_cache *fwc = fw_priv->fwc; + struct fw_cache_entry *fce; + + spin_lock(&fwc->name_lock); + if (__fw_entry_found(name)) + goto found; + + fce = alloc_fw_cache_entry(name); + if (fce) { + list_add(&fce->list, &fwc->fw_names); + kref_get(&fw_priv->ref); + pr_debug("%s: fw: %s\n", __func__, name); + } +found: + spin_unlock(&fwc->name_lock); +} + +static void free_fw_cache_entry(struct fw_cache_entry *fce) +{ + kfree_const(fce->name); + kfree(fce); +} + +static void __async_dev_cache_fw_image(void *fw_entry, + async_cookie_t cookie) +{ + struct fw_cache_entry *fce = fw_entry; + struct firmware_cache *fwc = &fw_cache; + int ret; + + ret = cache_firmware(fce->name); + if (ret) { + spin_lock(&fwc->name_lock); + list_del(&fce->list); + spin_unlock(&fwc->name_lock); + + free_fw_cache_entry(fce); + } +} + +/* called with dev->devres_lock held */ +static void dev_create_fw_entry(struct device *dev, void *res, + void *data) +{ + struct fw_name_devm *fwn = res; + const char *fw_name = fwn->name; + struct list_head *head = data; + struct fw_cache_entry *fce; + + fce = alloc_fw_cache_entry(fw_name); + if (fce) + list_add(&fce->list, head); +} + +static int devm_name_match(struct device *dev, void *res, + void *match_data) +{ + struct fw_name_devm *fwn = res; + return (fwn->magic == (unsigned long)match_data); +} + +static void dev_cache_fw_image(struct device *dev, void *data) +{ + LIST_HEAD(todo); + struct fw_cache_entry *fce; + struct fw_cache_entry *fce_next; + struct firmware_cache *fwc = &fw_cache; + + devres_for_each_res(dev, fw_name_devm_release, + devm_name_match, &fw_cache, + dev_create_fw_entry, &todo); + + list_for_each_entry_safe(fce, fce_next, &todo, list) { + list_del(&fce->list); + + spin_lock(&fwc->name_lock); + /* only one cache entry for one firmware */ + if (!__fw_entry_found(fce->name)) { + list_add(&fce->list, &fwc->fw_names); + } else { + free_fw_cache_entry(fce); + fce = NULL; + } + spin_unlock(&fwc->name_lock); + + if (fce) + async_schedule_domain(__async_dev_cache_fw_image, + (void *)fce, + &fw_cache_domain); + } +} + +static void __device_uncache_fw_images(void) +{ + struct firmware_cache *fwc = &fw_cache; + struct fw_cache_entry *fce; + + spin_lock(&fwc->name_lock); + while (!list_empty(&fwc->fw_names)) { + fce = list_entry(fwc->fw_names.next, + struct fw_cache_entry, list); + list_del(&fce->list); + spin_unlock(&fwc->name_lock); + + uncache_firmware(fce->name); + 
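+		/* fce was already unlinked under name_lock, free it lockless */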
free_fw_cache_entry(fce); + + spin_lock(&fwc->name_lock); + } + spin_unlock(&fwc->name_lock); +} + +/** + * device_cache_fw_images() - cache devices' firmware + * + * If one device called request_firmware or its nowait version + * successfully before, the firmware names are recored into the + * device's devres link list, so device_cache_fw_images can call + * cache_firmware() to cache these firmwares for the device, + * then the device driver can load its firmwares easily at + * time when system is not ready to complete loading firmware. + */ +static void device_cache_fw_images(void) +{ + struct firmware_cache *fwc = &fw_cache; + DEFINE_WAIT(wait); + + pr_debug("%s\n", __func__); + + /* cancel uncache work */ + cancel_delayed_work_sync(&fwc->work); + + fw_fallback_set_cache_timeout(); + + mutex_lock(&fw_lock); + fwc->state = FW_LOADER_START_CACHE; + dpm_for_each_dev(NULL, dev_cache_fw_image); + mutex_unlock(&fw_lock); + + /* wait for completion of caching firmware for all devices */ + async_synchronize_full_domain(&fw_cache_domain); + + fw_fallback_set_default_timeout(); +} + +/** + * device_uncache_fw_images() - uncache devices' firmware + * + * uncache all firmwares which have been cached successfully + * by device_uncache_fw_images earlier + */ +static void device_uncache_fw_images(void) +{ + pr_debug("%s\n", __func__); + __device_uncache_fw_images(); +} + +static void device_uncache_fw_images_work(struct work_struct *work) +{ + device_uncache_fw_images(); +} + +/** + * device_uncache_fw_images_delay() - uncache devices firmwares + * @delay: number of milliseconds to delay uncache device firmwares + * + * uncache all devices's firmwares which has been cached successfully + * by device_cache_fw_images after @delay milliseconds. + */ +static void device_uncache_fw_images_delay(unsigned long delay) +{ + queue_delayed_work(system_power_efficient_wq, &fw_cache.work, + msecs_to_jiffies(delay)); +} + +static int fw_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) +{ + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: + /* + * kill pending fallback requests with a custom fallback + * to avoid stalling suspend. + */ + kill_pending_fw_fallback_reqs(true); + device_cache_fw_images(); + break; + + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + /* + * In case that system sleep failed and syscore_suspend is + * not called. 
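+		 * Reset the cache state here as well, then schedule the
+		 * delayed uncache of everything grabbed during the
+		 * PREPARE phase.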
+ */ + mutex_lock(&fw_lock); + fw_cache.state = FW_LOADER_NO_CACHE; + mutex_unlock(&fw_lock); + + device_uncache_fw_images_delay(10 * MSEC_PER_SEC); + break; + } + + return 0; +} + +/* stop caching firmware once syscore_suspend is reached */ +static int fw_suspend(void) +{ + fw_cache.state = FW_LOADER_NO_CACHE; + return 0; +} + +static struct syscore_ops fw_syscore_ops = { + .suspend = fw_suspend, +}; + +static int __init register_fw_pm_ops(void) +{ + int ret; + + spin_lock_init(&fw_cache.name_lock); + INIT_LIST_HEAD(&fw_cache.fw_names); + + INIT_DELAYED_WORK(&fw_cache.work, + device_uncache_fw_images_work); + + fw_cache.pm_notify.notifier_call = fw_pm_notify; + ret = register_pm_notifier(&fw_cache.pm_notify); + if (ret) + return ret; + + register_syscore_ops(&fw_syscore_ops); + + return ret; +} + +static inline void unregister_fw_pm_ops(void) +{ + unregister_syscore_ops(&fw_syscore_ops); + unregister_pm_notifier(&fw_cache.pm_notify); +} +#else +static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv) +{ +} +static inline int register_fw_pm_ops(void) +{ + return 0; +} +static inline void unregister_fw_pm_ops(void) +{ +} +#endif + +static void __init fw_cache_init(void) +{ + spin_lock_init(&fw_cache.lock); + INIT_LIST_HEAD(&fw_cache.head); + fw_cache.state = FW_LOADER_NO_CACHE; +} + +static int fw_shutdown_notify(struct notifier_block *unused1, + unsigned long unused2, void *unused3) +{ + /* + * Kill all pending fallback requests to avoid both stalling shutdown, + * and avoid a deadlock with the usermode_lock. + */ + kill_pending_fw_fallback_reqs(false); + + return NOTIFY_DONE; +} + +static struct notifier_block fw_shutdown_nb = { + .notifier_call = fw_shutdown_notify, +}; + +static int __init firmware_class_init(void) +{ + int ret; + + /* No need to unfold these on exit */ + fw_cache_init(); + + ret = register_fw_pm_ops(); + if (ret) + return ret; + + ret = register_reboot_notifier(&fw_shutdown_nb); + if (ret) + goto out; + + return register_sysfs_loader(); + +out: + unregister_fw_pm_ops(); + return ret; +} + +static void __exit firmware_class_exit(void) +{ + unregister_fw_pm_ops(); + unregister_reboot_notifier(&fw_shutdown_nb); + unregister_sysfs_loader(); +} + +fs_initcall(firmware_class_init); +module_exit(firmware_class_exit); diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c new file mode 100644 index 000000000..1ce59b4b5 --- /dev/null +++ b/drivers/base/hypervisor.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * hypervisor.c - /sys/hypervisor subsystem. + * + * Copyright (C) IBM Corp. 2006 + * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de> + * Copyright (C) 2007 Novell Inc. + */ + +#include <linux/kobject.h> +#include <linux/device.h> +#include <linux/export.h> +#include "base.h" + +struct kobject *hypervisor_kobj; +EXPORT_SYMBOL_GPL(hypervisor_kobj); + +int __init hypervisor_init(void) +{ + hypervisor_kobj = kobject_create_and_add("hypervisor", NULL); + if (!hypervisor_kobj) + return -ENOMEM; + return 0; +} diff --git a/drivers/base/init.c b/drivers/base/init.c new file mode 100644 index 000000000..908e6520e --- /dev/null +++ b/drivers/base/init.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/memory.h> +#include <linux/of.h> + +#include "base.h" + +/** + * driver_init - initialize driver model. 
+ * + * Call the driver model init functions to initialize their + * subsystems. Called early from init/main.c. + */ +void __init driver_init(void) +{ + /* These are the core pieces */ + devtmpfs_init(); + devices_init(); + buses_init(); + classes_init(); + firmware_init(); + hypervisor_init(); + + /* These are also core pieces, but must come after the + * core core pieces. + */ + of_core_init(); + platform_bus_init(); + cpu_dev_init(); + memory_dev_init(); + container_dev_init(); +} diff --git a/drivers/base/isa.c b/drivers/base/isa.c new file mode 100644 index 000000000..2772f5d19 --- /dev/null +++ b/drivers/base/isa.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ISA bus. + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/isa.h> + +static struct device isa_bus = { + .init_name = "isa" +}; + +struct isa_dev { + struct device dev; + struct device *next; + unsigned int id; +}; + +#define to_isa_dev(x) container_of((x), struct isa_dev, dev) + +static int isa_bus_match(struct device *dev, struct device_driver *driver) +{ + struct isa_driver *isa_driver = to_isa_driver(driver); + + if (dev->platform_data == isa_driver) { + if (!isa_driver->match || + isa_driver->match(dev, to_isa_dev(dev)->id)) + return 1; + dev->platform_data = NULL; + } + return 0; +} + +static int isa_bus_probe(struct device *dev) +{ + struct isa_driver *isa_driver = dev->platform_data; + + if (isa_driver && isa_driver->probe) + return isa_driver->probe(dev, to_isa_dev(dev)->id); + + return 0; +} + +static int isa_bus_remove(struct device *dev) +{ + struct isa_driver *isa_driver = dev->platform_data; + + if (isa_driver && isa_driver->remove) + return isa_driver->remove(dev, to_isa_dev(dev)->id); + + return 0; +} + +static void isa_bus_shutdown(struct device *dev) +{ + struct isa_driver *isa_driver = dev->platform_data; + + if (isa_driver && isa_driver->shutdown) + isa_driver->shutdown(dev, to_isa_dev(dev)->id); +} + +static int isa_bus_suspend(struct device *dev, pm_message_t state) +{ + struct isa_driver *isa_driver = dev->platform_data; + + if (isa_driver && isa_driver->suspend) + return isa_driver->suspend(dev, to_isa_dev(dev)->id, state); + + return 0; +} + +static int isa_bus_resume(struct device *dev) +{ + struct isa_driver *isa_driver = dev->platform_data; + + if (isa_driver && isa_driver->resume) + return isa_driver->resume(dev, to_isa_dev(dev)->id); + + return 0; +} + +static struct bus_type isa_bus_type = { + .name = "isa", + .match = isa_bus_match, + .probe = isa_bus_probe, + .remove = isa_bus_remove, + .shutdown = isa_bus_shutdown, + .suspend = isa_bus_suspend, + .resume = isa_bus_resume +}; + +static void isa_dev_release(struct device *dev) +{ + kfree(to_isa_dev(dev)); +} + +void isa_unregister_driver(struct isa_driver *isa_driver) +{ + struct device *dev = isa_driver->devices; + + while (dev) { + struct device *tmp = to_isa_dev(dev)->next; + device_unregister(dev); + dev = tmp; + } + driver_unregister(&isa_driver->driver); +} +EXPORT_SYMBOL_GPL(isa_unregister_driver); + +int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev) +{ + int error; + unsigned int id; + + isa_driver->driver.bus = &isa_bus_type; + isa_driver->devices = NULL; + + error = driver_register(&isa_driver->driver); + if (error) + return error; + + for (id = 0; id < ndev; id++) { + struct isa_dev *isa_dev; + + isa_dev = kzalloc(sizeof *isa_dev, GFP_KERNEL); + if (!isa_dev) { 
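+			/* stop here; devices registered so far are torn
+			 * down below via isa_unregister_driver() */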
+ error = -ENOMEM; + break; + } + + isa_dev->dev.parent = &isa_bus; + isa_dev->dev.bus = &isa_bus_type; + + dev_set_name(&isa_dev->dev, "%s.%u", + isa_driver->driver.name, id); + isa_dev->dev.platform_data = isa_driver; + isa_dev->dev.release = isa_dev_release; + isa_dev->id = id; + + isa_dev->dev.coherent_dma_mask = DMA_BIT_MASK(24); + isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask; + + error = device_register(&isa_dev->dev); + if (error) { + put_device(&isa_dev->dev); + break; + } + + if (isa_dev->dev.platform_data) { + isa_dev->next = isa_driver->devices; + isa_driver->devices = &isa_dev->dev; + } else + device_unregister(&isa_dev->dev); + } + + if (!error && !isa_driver->devices) + error = -ENODEV; + + if (error) + isa_unregister_driver(isa_driver); + + return error; +} +EXPORT_SYMBOL_GPL(isa_register_driver); + +static int __init isa_bus_init(void) +{ + int error; + + error = bus_register(&isa_bus_type); + if (!error) { + error = device_register(&isa_bus); + if (error) + bus_unregister(&isa_bus_type); + } + return error; +} + +postcore_initcall(isa_bus_init); diff --git a/drivers/base/map.c b/drivers/base/map.c new file mode 100644 index 000000000..5650ab2b2 --- /dev/null +++ b/drivers/base/map.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/drivers/base/map.c + * + * (C) Copyright Al Viro 2002,2003 + * + * NOTE: data structure needs to be changed. It works, but for large dev_t + * it will be too slow. It is isolated, though, so these changes will be + * local to that file. + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/kdev_t.h> +#include <linux/kobject.h> +#include <linux/kobj_map.h> + +struct kobj_map { + struct probe { + struct probe *next; + dev_t dev; + unsigned long range; + struct module *owner; + kobj_probe_t *get; + int (*lock)(dev_t, void *); + void *data; + } *probes[255]; + struct mutex *lock; +}; + +int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, + struct module *module, kobj_probe_t *probe, + int (*lock)(dev_t, void *), void *data) +{ + unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; + unsigned index = MAJOR(dev); + unsigned i; + struct probe *p; + + if (n > 255) + n = 255; + + p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL); + if (p == NULL) + return -ENOMEM; + + for (i = 0; i < n; i++, p++) { + p->owner = module; + p->get = probe; + p->lock = lock; + p->dev = dev; + p->range = range; + p->data = data; + } + mutex_lock(domain->lock); + for (i = 0, p -= n; i < n; i++, p++, index++) { + struct probe **s = &domain->probes[index % 255]; + while (*s && (*s)->range < range) + s = &(*s)->next; + p->next = *s; + *s = p; + } + mutex_unlock(domain->lock); + return 0; +} + +void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) +{ + unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; + unsigned index = MAJOR(dev); + unsigned i; + struct probe *found = NULL; + + if (n > 255) + n = 255; + + mutex_lock(domain->lock); + for (i = 0; i < n; i++, index++) { + struct probe **s; + for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { + struct probe *p = *s; + if (p->dev == dev && p->range == range) { + *s = p->next; + if (!found) + found = p; + break; + } + } + } + mutex_unlock(domain->lock); + kfree(found); +} + +struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) +{ + struct kobject *kobj; + struct probe *p; + unsigned long best = ~0UL; + +retry: + mutex_lock(domain->lock); + for (p = domain->probes[MAJOR(dev) % 
255]; p; p = p->next) { + struct kobject *(*probe)(dev_t, int *, void *); + struct module *owner; + void *data; + + if (p->dev > dev || p->dev + p->range - 1 < dev) + continue; + if (p->range - 1 >= best) + break; + if (!try_module_get(p->owner)) + continue; + owner = p->owner; + data = p->data; + probe = p->get; + best = p->range - 1; + *index = dev - p->dev; + if (p->lock && p->lock(dev, data) < 0) { + module_put(owner); + continue; + } + mutex_unlock(domain->lock); + kobj = probe(dev, index, data); + /* Currently ->owner protects _only_ ->probe() itself. */ + module_put(owner); + if (kobj) + return kobj; + goto retry; + } + mutex_unlock(domain->lock); + return NULL; +} + +struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock) +{ + struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL); + struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL); + int i; + + if ((p == NULL) || (base == NULL)) { + kfree(p); + kfree(base); + return NULL; + } + + base->dev = 1; + base->range = ~0; + base->get = base_probe; + for (i = 0; i < 255; i++) + p->probes[i] = base; + p->lock = lock; + return p; +} diff --git a/drivers/base/memory.c b/drivers/base/memory.c new file mode 100644 index 000000000..49eb14271 --- /dev/null +++ b/drivers/base/memory.c @@ -0,0 +1,823 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Memory subsystem support + * + * Written by Matt Tolentino <matthew.e.tolentino@intel.com> + * Dave Hansen <haveblue@us.ibm.com> + * + * This file provides the necessary infrastructure to represent + * a SPARSEMEM-memory-model system's physical memory in /sysfs. + * All arch-independent code that assumes MEMORY_HOTPLUG requires + * SPARSEMEM should be contained here, or in mm/memory_hotplug.c. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/topology.h> +#include <linux/capability.h> +#include <linux/device.h> +#include <linux/memory.h> +#include <linux/memory_hotplug.h> +#include <linux/mm.h> +#include <linux/stat.h> +#include <linux/slab.h> +#include <linux/xarray.h> + +#include <linux/atomic.h> +#include <linux/uaccess.h> + +#define MEMORY_CLASS_NAME "memory" + +static const char *const online_type_to_str[] = { + [MMOP_OFFLINE] = "offline", + [MMOP_ONLINE] = "online", + [MMOP_ONLINE_KERNEL] = "online_kernel", + [MMOP_ONLINE_MOVABLE] = "online_movable", +}; + +int memhp_online_type_from_str(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) { + if (sysfs_streq(str, online_type_to_str[i])) + return i; + } + return -EINVAL; +} + +#define to_memory_block(dev) container_of(dev, struct memory_block, dev) + +static int sections_per_block; + +static inline unsigned long memory_block_id(unsigned long section_nr) +{ + return section_nr / sections_per_block; +} + +static inline unsigned long pfn_to_block_id(unsigned long pfn) +{ + return memory_block_id(pfn_to_section_nr(pfn)); +} + +static inline unsigned long phys_to_block_id(unsigned long phys) +{ + return pfn_to_block_id(PFN_DOWN(phys)); +} + +static int memory_subsys_online(struct device *dev); +static int memory_subsys_offline(struct device *dev); + +static struct bus_type memory_subsys = { + .name = MEMORY_CLASS_NAME, + .dev_name = MEMORY_CLASS_NAME, + .online = memory_subsys_online, + .offline = memory_subsys_offline, +}; + +/* + * Memory blocks are cached in a local radix tree to avoid + * a costly linear search for the corresponding device on + * the subsystem bus. 
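+ * Blocks are indexed by their block id (see memory_block_id()), so finding
+ * one is a single xarray lookup instead of a bus walk.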
+ */ +static DEFINE_XARRAY(memory_blocks); + +static BLOCKING_NOTIFIER_HEAD(memory_chain); + +int register_memory_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&memory_chain, nb); +} +EXPORT_SYMBOL(register_memory_notifier); + +void unregister_memory_notifier(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&memory_chain, nb); +} +EXPORT_SYMBOL(unregister_memory_notifier); + +static void memory_block_release(struct device *dev) +{ + struct memory_block *mem = to_memory_block(dev); + + kfree(mem); +} + +unsigned long __weak memory_block_size_bytes(void) +{ + return MIN_MEMORY_BLOCK_SIZE; +} +EXPORT_SYMBOL_GPL(memory_block_size_bytes); + +/* + * Show the first physical section index (number) of this memory block. + */ +static ssize_t phys_index_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + unsigned long phys_index; + + phys_index = mem->start_section_nr / sections_per_block; + + return sysfs_emit(buf, "%08lx\n", phys_index); +} + +/* + * Legacy interface that we cannot remove. Always indicate "removable" + * with CONFIG_MEMORY_HOTREMOVE - bad heuristic. + */ +static ssize_t removable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)); +} + +/* + * online, offline, going offline, etc. + */ +static ssize_t state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + const char *output; + + /* + * We can probably put these states in a nice little array + * so that they're not open-coded + */ + switch (mem->state) { + case MEM_ONLINE: + output = "online"; + break; + case MEM_OFFLINE: + output = "offline"; + break; + case MEM_GOING_OFFLINE: + output = "going-offline"; + break; + default: + WARN_ON(1); + return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state); + } + + return sysfs_emit(buf, "%s\n", output); +} + +int memory_notify(unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&memory_chain, val, v); +} + +/* + * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is + * OK to have direct references to sparsemem variables in here. + */ +static int +memory_block_action(unsigned long start_section_nr, unsigned long action, + int online_type, int nid) +{ + unsigned long start_pfn; + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + int ret; + + start_pfn = section_nr_to_pfn(start_section_nr); + + switch (action) { + case MEM_ONLINE: + ret = online_pages(start_pfn, nr_pages, online_type, nid); + break; + case MEM_OFFLINE: + ret = offline_pages(start_pfn, nr_pages); + break; + default: + WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " + "%ld\n", __func__, start_section_nr, action, action); + ret = -EINVAL; + } + + return ret; +} + +static int memory_block_change_state(struct memory_block *mem, + unsigned long to_state, unsigned long from_state_req) +{ + int ret = 0; + + if (mem->state != from_state_req) + return -EINVAL; + + if (to_state == MEM_OFFLINE) + mem->state = MEM_GOING_OFFLINE; + + ret = memory_block_action(mem->start_section_nr, to_state, + mem->online_type, mem->nid); + + mem->state = ret ? 
from_state_req : to_state; + + return ret; +} + +/* The device lock serializes operations on memory_subsys_[online|offline] */ +static int memory_subsys_online(struct device *dev) +{ + struct memory_block *mem = to_memory_block(dev); + int ret; + + if (mem->state == MEM_ONLINE) + return 0; + + /* + * When called via device_online() without configuring the online_type, + * we want to default to MMOP_ONLINE. + */ + if (mem->online_type == MMOP_OFFLINE) + mem->online_type = MMOP_ONLINE; + + ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); + mem->online_type = MMOP_OFFLINE; + + return ret; +} + +static int memory_subsys_offline(struct device *dev) +{ + struct memory_block *mem = to_memory_block(dev); + + if (mem->state == MEM_OFFLINE) + return 0; + + return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); +} + +static ssize_t state_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + const int online_type = memhp_online_type_from_str(buf); + struct memory_block *mem = to_memory_block(dev); + int ret; + + if (online_type < 0) + return -EINVAL; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + switch (online_type) { + case MMOP_ONLINE_KERNEL: + case MMOP_ONLINE_MOVABLE: + case MMOP_ONLINE: + /* mem->online_type is protected by device_hotplug_lock */ + mem->online_type = online_type; + ret = device_online(&mem->dev); + break; + case MMOP_OFFLINE: + ret = device_offline(&mem->dev); + break; + default: + ret = -EINVAL; /* should never happen */ + } + + unlock_device_hotplug(); + + if (ret < 0) + return ret; + if (ret) + return -EINVAL; + + return count; +} + +/* + * Legacy interface that we cannot remove: s390x exposes the storage increment + * covered by a memory block, allowing for identifying which memory blocks + * comprise a storage increment. Since a memory block spans complete + * storage increments nowadays, this interface is basically unused. Other + * archs never exposed != 0. + */ +static ssize_t phys_device_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); + + return sysfs_emit(buf, "%d\n", + arch_get_memory_phys_device(start_pfn)); +} + +#ifdef CONFIG_MEMORY_HOTREMOVE +static int print_allowed_zone(char *buf, int len, int nid, + unsigned long start_pfn, unsigned long nr_pages, + int online_type, struct zone *default_zone) +{ + struct zone *zone; + + zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages); + if (zone == default_zone) + return 0; + + return sysfs_emit_at(buf, len, " %s", zone->name); +} + +static ssize_t valid_zones_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + struct zone *default_zone; + int len = 0; + int nid; + + /* + * Check the existing zone. Make sure that we do that only on the + * online nodes otherwise the page_zone is not reliable + */ + if (mem->state == MEM_ONLINE) { + /* + * The block contains more than one zone can not be offlined. + * This can happen e.g. 
for ZONE_DMA and ZONE_DMA32 + */ + default_zone = test_pages_in_a_zone(start_pfn, + start_pfn + nr_pages); + if (!default_zone) + return sysfs_emit(buf, "%s\n", "none"); + len += sysfs_emit_at(buf, len, "%s", default_zone->name); + goto out; + } + + nid = mem->nid; + default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn, + nr_pages); + + len += sysfs_emit_at(buf, len, "%s", default_zone->name); + len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages, + MMOP_ONLINE_KERNEL, default_zone); + len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages, + MMOP_ONLINE_MOVABLE, default_zone); +out: + len += sysfs_emit_at(buf, len, "\n"); + return len; +} +static DEVICE_ATTR_RO(valid_zones); +#endif + +static DEVICE_ATTR_RO(phys_index); +static DEVICE_ATTR_RW(state); +static DEVICE_ATTR_RO(phys_device); +static DEVICE_ATTR_RO(removable); + +/* + * Show the memory block size (shared by all memory blocks). + */ +static ssize_t block_size_bytes_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%lx\n", memory_block_size_bytes()); +} + +static DEVICE_ATTR_RO(block_size_bytes); + +/* + * Memory auto online policy. + */ + +static ssize_t auto_online_blocks_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%s\n", + online_type_to_str[memhp_default_online_type]); +} + +static ssize_t auto_online_blocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + const int online_type = memhp_online_type_from_str(buf); + + if (online_type < 0) + return -EINVAL; + + memhp_default_online_type = online_type; + return count; +} + +static DEVICE_ATTR_RW(auto_online_blocks); + +/* + * Some architectures will have custom drivers to do this, and + * will not need to do it from userspace. The fake hot-add code + * as well as ppc64 will do all of their discovery in userspace + * and will require this interface. + */ +#ifdef CONFIG_ARCH_MEMORY_PROBE +static ssize_t probe_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + u64 phys_addr; + int nid, ret; + unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block; + + ret = kstrtoull(buf, 0, &phys_addr); + if (ret) + return ret; + + if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1)) + return -EINVAL; + + ret = lock_device_hotplug_sysfs(); + if (ret) + return ret; + + nid = memory_add_physaddr_to_nid(phys_addr); + ret = __add_memory(nid, phys_addr, + MIN_MEMORY_BLOCK_SIZE * sections_per_block, + MHP_NONE); + + if (ret) + goto out; + + ret = count; +out: + unlock_device_hotplug(); + return ret; +} + +static DEVICE_ATTR_WO(probe); +#endif + +#ifdef CONFIG_MEMORY_FAILURE +/* + * Support for offlining pages of memory + */ + +/* Soft offline a page */ +static ssize_t soft_offline_page_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + u64 pfn; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (kstrtoull(buf, 0, &pfn) < 0) + return -EINVAL; + pfn >>= PAGE_SHIFT; + ret = soft_offline_page(pfn, 0); + return ret == 0 ? count : ret; +} + +/* Forcibly offline a page, including killing processes. */ +static ssize_t hard_offline_page_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + u64 pfn; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (kstrtoull(buf, 0, &pfn) < 0) + return -EINVAL; + pfn >>= PAGE_SHIFT; + ret = memory_failure(pfn, 0); + return ret ? 
ret : count; +} + +static DEVICE_ATTR_WO(soft_offline_page); +static DEVICE_ATTR_WO(hard_offline_page); +#endif + +/* See phys_device_show(). */ +int __weak arch_get_memory_phys_device(unsigned long start_pfn) +{ + return 0; +} + +/* + * A reference for the returned memory block device is acquired. + * + * Called under device_hotplug_lock. + */ +static struct memory_block *find_memory_block_by_id(unsigned long block_id) +{ + struct memory_block *mem; + + mem = xa_load(&memory_blocks, block_id); + if (mem) + get_device(&mem->dev); + return mem; +} + +/* + * Called under device_hotplug_lock. + */ +struct memory_block *find_memory_block(struct mem_section *section) +{ + unsigned long block_id = memory_block_id(__section_nr(section)); + + return find_memory_block_by_id(block_id); +} + +static struct attribute *memory_memblk_attrs[] = { + &dev_attr_phys_index.attr, + &dev_attr_state.attr, + &dev_attr_phys_device.attr, + &dev_attr_removable.attr, +#ifdef CONFIG_MEMORY_HOTREMOVE + &dev_attr_valid_zones.attr, +#endif + NULL +}; + +static struct attribute_group memory_memblk_attr_group = { + .attrs = memory_memblk_attrs, +}; + +static const struct attribute_group *memory_memblk_attr_groups[] = { + &memory_memblk_attr_group, + NULL, +}; + +/* + * register_memory - Setup a sysfs device for a memory block + */ +static +int register_memory(struct memory_block *memory) +{ + int ret; + + memory->dev.bus = &memory_subsys; + memory->dev.id = memory->start_section_nr / sections_per_block; + memory->dev.release = memory_block_release; + memory->dev.groups = memory_memblk_attr_groups; + memory->dev.offline = memory->state == MEM_OFFLINE; + + ret = device_register(&memory->dev); + if (ret) { + put_device(&memory->dev); + return ret; + } + ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory, + GFP_KERNEL)); + if (ret) + device_unregister(&memory->dev); + + return ret; +} + +static int init_memory_block(unsigned long block_id, unsigned long state) +{ + struct memory_block *mem; + int ret = 0; + + mem = find_memory_block_by_id(block_id); + if (mem) { + put_device(&mem->dev); + return -EEXIST; + } + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return -ENOMEM; + + mem->start_section_nr = block_id * sections_per_block; + mem->state = state; + mem->nid = NUMA_NO_NODE; + + ret = register_memory(mem); + + return ret; +} + +static int add_memory_block(unsigned long base_section_nr) +{ + int section_count = 0; + unsigned long nr; + + for (nr = base_section_nr; nr < base_section_nr + sections_per_block; + nr++) + if (present_section_nr(nr)) + section_count++; + + if (section_count == 0) + return 0; + return init_memory_block(memory_block_id(base_section_nr), + MEM_ONLINE); +} + +static void unregister_memory(struct memory_block *memory) +{ + if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys)) + return; + + WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL); + + /* drop the ref. we got via find_memory_block() */ + put_device(&memory->dev); + device_unregister(&memory->dev); +} + +/* + * Create memory block devices for the given memory area. Start and size + * have to be aligned to memory block granularity. Memory block devices + * will be initialized as offline. + * + * Called under device_hotplug_lock. 
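+ *
+ * Worked example (assuming an illustrative 128 MiB memory block size):
+ * hot adding 1 GiB at physical address 0x100000000 creates eight block
+ * devices, covering block ids 32 through 39, each starting out offline.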
+ */ +int create_memory_block_devices(unsigned long start, unsigned long size) +{ + const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); + unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); + struct memory_block *mem; + unsigned long block_id; + int ret = 0; + + if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || + !IS_ALIGNED(size, memory_block_size_bytes()))) + return -EINVAL; + + for (block_id = start_block_id; block_id != end_block_id; block_id++) { + ret = init_memory_block(block_id, MEM_OFFLINE); + if (ret) + break; + } + if (ret) { + end_block_id = block_id; + for (block_id = start_block_id; block_id != end_block_id; + block_id++) { + mem = find_memory_block_by_id(block_id); + if (WARN_ON_ONCE(!mem)) + continue; + unregister_memory(mem); + } + } + return ret; +} + +/* + * Remove memory block devices for the given memory area. Start and size + * have to be aligned to memory block granularity. Memory block devices + * have to be offline. + * + * Called under device_hotplug_lock. + */ +void remove_memory_block_devices(unsigned long start, unsigned long size) +{ + const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start)); + const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size)); + struct memory_block *mem; + unsigned long block_id; + + if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) || + !IS_ALIGNED(size, memory_block_size_bytes()))) + return; + + for (block_id = start_block_id; block_id != end_block_id; block_id++) { + mem = find_memory_block_by_id(block_id); + if (WARN_ON_ONCE(!mem)) + continue; + unregister_memory_block_under_nodes(mem); + unregister_memory(mem); + } +} + +/* return true if the memory block is offlined, otherwise, return false */ +bool is_memblock_offlined(struct memory_block *mem) +{ + return mem->state == MEM_OFFLINE; +} + +static struct attribute *memory_root_attrs[] = { +#ifdef CONFIG_ARCH_MEMORY_PROBE + &dev_attr_probe.attr, +#endif + +#ifdef CONFIG_MEMORY_FAILURE + &dev_attr_soft_offline_page.attr, + &dev_attr_hard_offline_page.attr, +#endif + + &dev_attr_block_size_bytes.attr, + &dev_attr_auto_online_blocks.attr, + NULL +}; + +static struct attribute_group memory_root_attr_group = { + .attrs = memory_root_attrs, +}; + +static const struct attribute_group *memory_root_attr_groups[] = { + &memory_root_attr_group, + NULL, +}; + +/* + * Initialize the sysfs support for memory devices. At the time this function + * is called, we cannot have concurrent creation/deletion of memory block + * devices, the device_hotplug_lock is not needed. 
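+ *
+ * For illustration: if memory_block_size_bytes() reports 2 GiB and
+ * MIN_MEMORY_BLOCK_SIZE is 128 MiB, sections_per_block is computed as 16
+ * and each memory block device then covers 16 consecutive sections.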
+ */ +void __init memory_dev_init(void) +{ + int ret; + unsigned long block_sz, nr; + + /* Validate the configured memory block size */ + block_sz = memory_block_size_bytes(); + if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE) + panic("Memory block size not suitable: 0x%lx\n", block_sz); + sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; + + ret = subsys_system_register(&memory_subsys, memory_root_attr_groups); + if (ret) + panic("%s() failed to register subsystem: %d\n", __func__, ret); + + /* + * Create entries for memory sections that were found + * during boot and have been initialized + */ + for (nr = 0; nr <= __highest_present_section_nr; + nr += sections_per_block) { + ret = add_memory_block(nr); + if (ret) + panic("%s() failed to add memory block: %d\n", __func__, + ret); + } +} + +/** + * walk_memory_blocks - walk through all present memory blocks overlapped + * by the range [start, start + size) + * + * @start: start address of the memory range + * @size: size of the memory range + * @arg: argument passed to func + * @func: callback for each memory section walked + * + * This function walks through all present memory blocks overlapped by the + * range [start, start + size), calling func on each memory block. + * + * In case func() returns an error, walking is aborted and the error is + * returned. + * + * Called under device_hotplug_lock. + */ +int walk_memory_blocks(unsigned long start, unsigned long size, + void *arg, walk_memory_blocks_func_t func) +{ + const unsigned long start_block_id = phys_to_block_id(start); + const unsigned long end_block_id = phys_to_block_id(start + size - 1); + struct memory_block *mem; + unsigned long block_id; + int ret = 0; + + if (!size) + return 0; + + for (block_id = start_block_id; block_id <= end_block_id; block_id++) { + mem = find_memory_block_by_id(block_id); + if (!mem) + continue; + + ret = func(mem, arg); + put_device(&mem->dev); + if (ret) + break; + } + return ret; +} + +struct for_each_memory_block_cb_data { + walk_memory_blocks_func_t func; + void *arg; +}; + +static int for_each_memory_block_cb(struct device *dev, void *data) +{ + struct memory_block *mem = to_memory_block(dev); + struct for_each_memory_block_cb_data *cb_data = data; + + return cb_data->func(mem, cb_data->arg); +} + +/** + * for_each_memory_block - walk through all present memory blocks + * + * @arg: argument passed to func + * @func: callback for each memory block walked + * + * This function walks through all present memory blocks, calling func on + * each memory block. + * + * In case func() returns an error, walking is aborted and the error is + * returned. 
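+ *
+ * Minimal usage sketch (the callback name and counter are hypothetical):
+ *
+ *	static int count_block(struct memory_block *mem, void *arg)
+ *	{
+ *		(*(unsigned long *)arg)++;
+ *		return 0;
+ *	}
+ *
+ *	unsigned long nr_blocks = 0;
+ *	for_each_memory_block(&nr_blocks, count_block);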
+ */ +int for_each_memory_block(void *arg, walk_memory_blocks_func_t func) +{ + struct for_each_memory_block_cb_data cb_data = { + .func = func, + .arg = arg, + }; + + return bus_for_each_dev(&memory_subsys, NULL, &cb_data, + for_each_memory_block_cb); +} diff --git a/drivers/base/module.c b/drivers/base/module.c new file mode 100644 index 000000000..46ad4d636 --- /dev/null +++ b/drivers/base/module.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * module.c - module sysfs fun for drivers + */ +#include <linux/device.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "base.h" + +static char *make_driver_name(struct device_driver *drv) +{ + char *driver_name; + + driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name); + if (!driver_name) + return NULL; + + return driver_name; +} + +static void module_create_drivers_dir(struct module_kobject *mk) +{ + static DEFINE_MUTEX(drivers_dir_mutex); + + mutex_lock(&drivers_dir_mutex); + if (mk && !mk->drivers_dir) + mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); + mutex_unlock(&drivers_dir_mutex); +} + +void module_add_driver(struct module *mod, struct device_driver *drv) +{ + char *driver_name; + int no_warn; + struct module_kobject *mk = NULL; + + if (!drv) + return; + + if (mod) + mk = &mod->mkobj; + else if (drv->mod_name) { + struct kobject *mkobj; + + /* Lookup built-in module entry in /sys/modules */ + mkobj = kset_find_obj(module_kset, drv->mod_name); + if (mkobj) { + mk = container_of(mkobj, struct module_kobject, kobj); + /* remember our module structure */ + drv->p->mkobj = mk; + /* kset_find_obj took a reference */ + kobject_put(mkobj); + } + } + + if (!mk) + return; + + /* Don't check return codes; these calls are idempotent */ + no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module"); + driver_name = make_driver_name(drv); + if (driver_name) { + module_create_drivers_dir(mk); + no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, + driver_name); + kfree(driver_name); + } +} + +void module_remove_driver(struct device_driver *drv) +{ + struct module_kobject *mk = NULL; + char *driver_name; + + if (!drv) + return; + + sysfs_remove_link(&drv->p->kobj, "module"); + + if (drv->owner) + mk = &drv->owner->mkobj; + else if (drv->p->mkobj) + mk = drv->p->mkobj; + if (mk && mk->drivers_dir) { + driver_name = make_driver_name(drv); + if (driver_name) { + sysfs_remove_link(mk->drivers_dir, driver_name); + kfree(driver_name); + } + } +} diff --git a/drivers/base/node.c b/drivers/base/node.c new file mode 100644 index 000000000..5f745c906 --- /dev/null +++ b/drivers/base/node.c @@ -0,0 +1,1067 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Basic Node interface support + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/memory.h> +#include <linux/vmstat.h> +#include <linux/notifier.h> +#include <linux/node.h> +#include <linux/hugetlb.h> +#include <linux/compaction.h> +#include <linux/cpumask.h> +#include <linux/topology.h> +#include <linux/nodemask.h> +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/pm_runtime.h> +#include <linux/swap.h> +#include <linux/slab.h> + +static struct bus_type node_subsys = { + .name = "node", + .dev_name = "node", +}; + + +static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) +{ + ssize_t n; + cpumask_var_t mask; + struct node *node_dev = to_node(dev); + + /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 
32 bits. */ + BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); + + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return 0; + + cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask); + n = cpumap_print_to_pagebuf(list, buf, mask); + free_cpumask_var(mask); + + return n; +} + +static inline ssize_t cpumap_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return node_read_cpumap(dev, false, buf); +} + +static DEVICE_ATTR_RO(cpumap); + +static inline ssize_t cpulist_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return node_read_cpumap(dev, true, buf); +} + +static DEVICE_ATTR_RO(cpulist); + +/** + * struct node_access_nodes - Access class device to hold user visible + * relationships to other nodes. + * @dev: Device for this memory access class + * @list_node: List element in the node's access list + * @access: The access class rank + * @hmem_attrs: Heterogeneous memory performance attributes + */ +struct node_access_nodes { + struct device dev; + struct list_head list_node; + unsigned access; +#ifdef CONFIG_HMEM_REPORTING + struct node_hmem_attrs hmem_attrs; +#endif +}; +#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev) + +static struct attribute *node_init_access_node_attrs[] = { + NULL, +}; + +static struct attribute *node_targ_access_node_attrs[] = { + NULL, +}; + +static const struct attribute_group initiators = { + .name = "initiators", + .attrs = node_init_access_node_attrs, +}; + +static const struct attribute_group targets = { + .name = "targets", + .attrs = node_targ_access_node_attrs, +}; + +static const struct attribute_group *node_access_node_groups[] = { + &initiators, + &targets, + NULL, +}; + +static void node_remove_accesses(struct node *node) +{ + struct node_access_nodes *c, *cnext; + + list_for_each_entry_safe(c, cnext, &node->access_list, list_node) { + list_del(&c->list_node); + device_unregister(&c->dev); + } +} + +static void node_access_release(struct device *dev) +{ + kfree(to_access_nodes(dev)); +} + +static struct node_access_nodes *node_init_node_access(struct node *node, + unsigned access) +{ + struct node_access_nodes *access_node; + struct device *dev; + + list_for_each_entry(access_node, &node->access_list, list_node) + if (access_node->access == access) + return access_node; + + access_node = kzalloc(sizeof(*access_node), GFP_KERNEL); + if (!access_node) + return NULL; + + access_node->access = access; + dev = &access_node->dev; + dev->parent = &node->dev; + dev->release = node_access_release; + dev->groups = node_access_node_groups; + if (dev_set_name(dev, "access%u", access)) + goto free; + + if (device_register(dev)) + goto free_name; + + pm_runtime_no_callbacks(dev); + list_add_tail(&access_node->list_node, &node->access_list); + return access_node; +free_name: + kfree_const(dev->kobj.name); +free: + kfree(access_node); + return NULL; +} + +#ifdef CONFIG_HMEM_REPORTING +#define ACCESS_ATTR(name) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sysfs_emit(buf, "%u\n", \ + to_access_nodes(dev)->hmem_attrs.name); \ +} \ +static DEVICE_ATTR_RO(name) + +ACCESS_ATTR(read_bandwidth); +ACCESS_ATTR(read_latency); +ACCESS_ATTR(write_bandwidth); +ACCESS_ATTR(write_latency); + +static struct attribute *access_attrs[] = { + &dev_attr_read_bandwidth.attr, + &dev_attr_read_latency.attr, + &dev_attr_write_bandwidth.attr, + &dev_attr_write_latency.attr, + NULL, +}; + +/** + * node_set_perf_attrs - Set the 
performance values for given access class + * @nid: Node identifier to be set + * @hmem_attrs: Heterogeneous memory performance attributes + * @access: The access class the for the given attributes + */ +void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs, + unsigned access) +{ + struct node_access_nodes *c; + struct node *node; + int i; + + if (WARN_ON_ONCE(!node_online(nid))) + return; + + node = node_devices[nid]; + c = node_init_node_access(node, access); + if (!c) + return; + + c->hmem_attrs = *hmem_attrs; + for (i = 0; access_attrs[i] != NULL; i++) { + if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i], + "initiators")) { + pr_info("failed to add performance attribute to node %d\n", + nid); + break; + } + } +} + +/** + * struct node_cache_info - Internal tracking for memory node caches + * @dev: Device represeting the cache level + * @node: List element for tracking in the node + * @cache_attrs:Attributes for this cache level + */ +struct node_cache_info { + struct device dev; + struct list_head node; + struct node_cache_attrs cache_attrs; +}; +#define to_cache_info(device) container_of(device, struct node_cache_info, dev) + +#define CACHE_ATTR(name, fmt) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return sysfs_emit(buf, fmt "\n", \ + to_cache_info(dev)->cache_attrs.name); \ +} \ +DEVICE_ATTR_RO(name); + +CACHE_ATTR(size, "%llu") +CACHE_ATTR(line_size, "%u") +CACHE_ATTR(indexing, "%u") +CACHE_ATTR(write_policy, "%u") + +static struct attribute *cache_attrs[] = { + &dev_attr_indexing.attr, + &dev_attr_size.attr, + &dev_attr_line_size.attr, + &dev_attr_write_policy.attr, + NULL, +}; +ATTRIBUTE_GROUPS(cache); + +static void node_cache_release(struct device *dev) +{ + kfree(dev); +} + +static void node_cacheinfo_release(struct device *dev) +{ + struct node_cache_info *info = to_cache_info(dev); + kfree(info); +} + +static void node_init_cache_dev(struct node *node) +{ + struct device *dev; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return; + + device_initialize(dev); + dev->parent = &node->dev; + dev->release = node_cache_release; + if (dev_set_name(dev, "memory_side_cache")) + goto put_device; + + if (device_add(dev)) + goto put_device; + + pm_runtime_no_callbacks(dev); + node->cache_dev = dev; + return; +put_device: + put_device(dev); +} + +/** + * node_add_cache() - add cache attribute to a memory node + * @nid: Node identifier that has new cache attributes + * @cache_attrs: Attributes for the cache being added + */ +void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs) +{ + struct node_cache_info *info; + struct device *dev; + struct node *node; + + if (!node_online(nid) || !node_devices[nid]) + return; + + node = node_devices[nid]; + list_for_each_entry(info, &node->cache_attrs, node) { + if (info->cache_attrs.level == cache_attrs->level) { + dev_warn(&node->dev, + "attempt to add duplicate cache level:%d\n", + cache_attrs->level); + return; + } + } + + if (!node->cache_dev) + node_init_cache_dev(node); + if (!node->cache_dev) + return; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return; + + dev = &info->dev; + device_initialize(dev); + dev->parent = node->cache_dev; + dev->release = node_cacheinfo_release; + dev->groups = cache_groups; + if (dev_set_name(dev, "index%d", cache_attrs->level)) + goto put_device; + + info->cache_attrs = *cache_attrs; + if (device_add(dev)) { + dev_warn(&node->dev, "failed to add cache level:%d\n", + 
cache_attrs->level); + goto put_device; + } + pm_runtime_no_callbacks(dev); + list_add_tail(&info->node, &node->cache_attrs); + return; +put_device: + put_device(dev); +} + +static void node_remove_caches(struct node *node) +{ + struct node_cache_info *info, *next; + + if (!node->cache_dev) + return; + + list_for_each_entry_safe(info, next, &node->cache_attrs, node) { + list_del(&info->node); + device_unregister(&info->dev); + } + device_unregister(node->cache_dev); +} + +static void node_init_caches(unsigned int nid) +{ + INIT_LIST_HEAD(&node_devices[nid]->cache_attrs); +} +#else +static void node_init_caches(unsigned int nid) { } +static void node_remove_caches(struct node *node) { } +#endif + +#define K(x) ((x) << (PAGE_SHIFT - 10)) +static ssize_t node_read_meminfo(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int len = 0; + int nid = dev->id; + struct pglist_data *pgdat = NODE_DATA(nid); + struct sysinfo i; + unsigned long sreclaimable, sunreclaimable; + + si_meminfo_node(&i, nid); + sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B); + sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B); + len = sysfs_emit_at(buf, len, + "Node %d MemTotal: %8lu kB\n" + "Node %d MemFree: %8lu kB\n" + "Node %d MemUsed: %8lu kB\n" + "Node %d Active: %8lu kB\n" + "Node %d Inactive: %8lu kB\n" + "Node %d Active(anon): %8lu kB\n" + "Node %d Inactive(anon): %8lu kB\n" + "Node %d Active(file): %8lu kB\n" + "Node %d Inactive(file): %8lu kB\n" + "Node %d Unevictable: %8lu kB\n" + "Node %d Mlocked: %8lu kB\n", + nid, K(i.totalram), + nid, K(i.freeram), + nid, K(i.totalram - i.freeram), + nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) + + node_page_state(pgdat, NR_ACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) + + node_page_state(pgdat, NR_INACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)), + nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)), + nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)), + nid, K(node_page_state(pgdat, NR_UNEVICTABLE)), + nid, K(sum_zone_node_page_state(nid, NR_MLOCK))); + +#ifdef CONFIG_HIGHMEM + len += sysfs_emit_at(buf, len, + "Node %d HighTotal: %8lu kB\n" + "Node %d HighFree: %8lu kB\n" + "Node %d LowTotal: %8lu kB\n" + "Node %d LowFree: %8lu kB\n", + nid, K(i.totalhigh), + nid, K(i.freehigh), + nid, K(i.totalram - i.totalhigh), + nid, K(i.freeram - i.freehigh)); +#endif + len += sysfs_emit_at(buf, len, + "Node %d Dirty: %8lu kB\n" + "Node %d Writeback: %8lu kB\n" + "Node %d FilePages: %8lu kB\n" + "Node %d Mapped: %8lu kB\n" + "Node %d AnonPages: %8lu kB\n" + "Node %d Shmem: %8lu kB\n" + "Node %d KernelStack: %8lu kB\n" +#ifdef CONFIG_SHADOW_CALL_STACK + "Node %d ShadowCallStack:%8lu kB\n" +#endif + "Node %d PageTables: %8lu kB\n" + "Node %d NFS_Unstable: %8lu kB\n" + "Node %d Bounce: %8lu kB\n" + "Node %d WritebackTmp: %8lu kB\n" + "Node %d KReclaimable: %8lu kB\n" + "Node %d Slab: %8lu kB\n" + "Node %d SReclaimable: %8lu kB\n" + "Node %d SUnreclaim: %8lu kB\n" +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + "Node %d AnonHugePages: %8lu kB\n" + "Node %d ShmemHugePages: %8lu kB\n" + "Node %d ShmemPmdMapped: %8lu kB\n" + "Node %d FileHugePages: %8lu kB\n" + "Node %d FilePmdMapped: %8lu kB\n" +#endif + , + nid, K(node_page_state(pgdat, NR_FILE_DIRTY)), + nid, K(node_page_state(pgdat, NR_WRITEBACK)), + nid, K(node_page_state(pgdat, NR_FILE_PAGES)), + nid, K(node_page_state(pgdat, NR_FILE_MAPPED)), + nid, K(node_page_state(pgdat, NR_ANON_MAPPED)), + nid, 
K(i.sharedram), + nid, node_page_state(pgdat, NR_KERNEL_STACK_KB), +#ifdef CONFIG_SHADOW_CALL_STACK + nid, node_page_state(pgdat, NR_KERNEL_SCS_KB), +#endif + nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)), + nid, 0UL, + nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)), + nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + nid, K(sreclaimable + + node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)), + nid, K(sreclaimable + sunreclaimable), + nid, K(sreclaimable), + nid, K(sunreclaimable) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + , + nid, K(node_page_state(pgdat, NR_ANON_THPS) * + HPAGE_PMD_NR), + nid, K(node_page_state(pgdat, NR_SHMEM_THPS) * + HPAGE_PMD_NR), + nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) * + HPAGE_PMD_NR), + nid, K(node_page_state(pgdat, NR_FILE_THPS) * + HPAGE_PMD_NR), + nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) * + HPAGE_PMD_NR) +#endif + ); + len += hugetlb_report_node_meminfo(buf, len, nid); + return len; +} + +#undef K +static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL); + +static ssize_t node_read_numastat(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, + "numa_hit %lu\n" + "numa_miss %lu\n" + "numa_foreign %lu\n" + "interleave_hit %lu\n" + "local_node %lu\n" + "other_node %lu\n", + sum_zone_numa_state(dev->id, NUMA_HIT), + sum_zone_numa_state(dev->id, NUMA_MISS), + sum_zone_numa_state(dev->id, NUMA_FOREIGN), + sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT), + sum_zone_numa_state(dev->id, NUMA_LOCAL), + sum_zone_numa_state(dev->id, NUMA_OTHER)); +} +static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL); + +static ssize_t node_read_vmstat(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int nid = dev->id; + struct pglist_data *pgdat = NODE_DATA(nid); + int i; + int len = 0; + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + len += sysfs_emit_at(buf, len, "%s %lu\n", + zone_stat_name(i), + sum_zone_node_page_state(nid, i)); + +#ifdef CONFIG_NUMA + for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) + len += sysfs_emit_at(buf, len, "%s %lu\n", + numa_stat_name(i), + sum_zone_numa_state(nid, i)); + +#endif + for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) + len += sysfs_emit_at(buf, len, "%s %lu\n", + node_stat_name(i), + node_page_state_pages(pgdat, i)); + + return len; +} +static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL); + +static ssize_t node_read_distance(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int nid = dev->id; + int len = 0; + int i; + + /* + * buf is currently PAGE_SIZE in length and each node needs 4 chars + * at the most (distance + space or newline). + */ + BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE); + + for_each_online_node(i) { + len += sysfs_emit_at(buf, len, "%s%d", + i ? " " : "", node_distance(nid, i)); + } + + len += sysfs_emit_at(buf, len, "\n"); + return len; +} +static DEVICE_ATTR(distance, 0444, node_read_distance, NULL); + +static struct attribute *node_dev_attrs[] = { + &dev_attr_cpumap.attr, + &dev_attr_cpulist.attr, + &dev_attr_meminfo.attr, + &dev_attr_numastat.attr, + &dev_attr_distance.attr, + &dev_attr_vmstat.attr, + NULL +}; +ATTRIBUTE_GROUPS(node_dev); + +#ifdef CONFIG_HUGETLBFS +/* + * hugetlbfs per node attributes registration interface: + * When/if hugetlb[fs] subsystem initializes [sometime after this module], + * it will register its per node attributes for all online nodes with + * memory. 
It will also call register_hugetlbfs_with_node(), below, to + * register its attribute registration functions with this node driver. + * Once these hooks have been initialized, the node driver will call into + * the hugetlb module to [un]register attributes for hot-plugged nodes. + */ +static node_registration_func_t __hugetlb_register_node; +static node_registration_func_t __hugetlb_unregister_node; + +static inline bool hugetlb_register_node(struct node *node) +{ + if (__hugetlb_register_node && + node_state(node->dev.id, N_MEMORY)) { + __hugetlb_register_node(node); + return true; + } + return false; +} + +static inline void hugetlb_unregister_node(struct node *node) +{ + if (__hugetlb_unregister_node) + __hugetlb_unregister_node(node); +} + +void register_hugetlbfs_with_node(node_registration_func_t doregister, + node_registration_func_t unregister) +{ + __hugetlb_register_node = doregister; + __hugetlb_unregister_node = unregister; +} +#else +static inline void hugetlb_register_node(struct node *node) {} + +static inline void hugetlb_unregister_node(struct node *node) {} +#endif + +static void node_device_release(struct device *dev) +{ + struct node *node = to_node(dev); + +#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) + /* + * We schedule the work only when a memory section is + * onlined/offlined on this node. When we come here, + * all the memory on this node has been offlined, + * so we won't enqueue new work to this work. + * + * The work is using node->node_work, so we should + * flush work before freeing the memory. + */ + flush_work(&node->node_work); +#endif + kfree(node); +} + +/* + * register_node - Setup a sysfs device for a node. + * @num - Node number to use when creating the device. + * + * Initialize and register the node device. + */ +static int register_node(struct node *node, int num) +{ + int error; + + node->dev.id = num; + node->dev.bus = &node_subsys; + node->dev.release = node_device_release; + node->dev.groups = node_dev_groups; + error = device_register(&node->dev); + + if (error) + put_device(&node->dev); + else { + hugetlb_register_node(node); + + compaction_register_node(node); + } + return error; +} + +/** + * unregister_node - unregister a node device + * @node: node going away + * + * Unregisters a node device @node. All the devices on the node must be + * unregistered before calling this function. + */ +void unregister_node(struct node *node) +{ + compaction_unregister_node(node); + hugetlb_unregister_node(node); /* no-op, if memoryless node */ + node_remove_accesses(node); + node_remove_caches(node); + device_unregister(&node->dev); +} + +struct node *node_devices[MAX_NUMNODES]; + +/* + * register cpu under node + */ +int register_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + int ret; + struct device *obj; + + if (!node_online(nid)) + return 0; + + obj = get_cpu_device(cpu); + if (!obj) + return 0; + + ret = sysfs_create_link(&node_devices[nid]->dev.kobj, + &obj->kobj, + kobject_name(&obj->kobj)); + if (ret) + return ret; + + return sysfs_create_link(&obj->kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); +} + +/** + * register_memory_node_under_compute_node - link memory node to its compute + * node for a given access class. + * @mem_nid: Memory node number + * @cpu_nid: Cpu node number + * @access: Access class to register + * + * Description: + * For use with platforms that may have separate memory and compute nodes. 
+ * This function will export node relationships linking which memory + * initiator nodes can access memory targets at a given ranked access + * class. + */ +int register_memory_node_under_compute_node(unsigned int mem_nid, + unsigned int cpu_nid, + unsigned access) +{ + struct node *init_node, *targ_node; + struct node_access_nodes *initiator, *target; + int ret; + + if (!node_online(cpu_nid) || !node_online(mem_nid)) + return -ENODEV; + + init_node = node_devices[cpu_nid]; + targ_node = node_devices[mem_nid]; + initiator = node_init_node_access(init_node, access); + target = node_init_node_access(targ_node, access); + if (!initiator || !target) + return -ENOMEM; + + ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets", + &targ_node->dev.kobj, + dev_name(&targ_node->dev)); + if (ret) + return ret; + + ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators", + &init_node->dev.kobj, + dev_name(&init_node->dev)); + if (ret) + goto err; + + return 0; + err: + sysfs_remove_link_from_group(&initiator->dev.kobj, "targets", + dev_name(&targ_node->dev)); + return ret; +} + +int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) +{ + struct device *obj; + + if (!node_online(nid)) + return 0; + + obj = get_cpu_device(cpu); + if (!obj) + return 0; + + sysfs_remove_link(&node_devices[nid]->dev.kobj, + kobject_name(&obj->kobj)); + sysfs_remove_link(&obj->kobj, + kobject_name(&node_devices[nid]->dev.kobj)); + + return 0; +} + +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +static int __ref get_nid_for_pfn(unsigned long pfn) +{ + if (!pfn_valid_within(pfn)) + return -1; +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + if (system_state < SYSTEM_RUNNING) + return early_pfn_to_nid(pfn); +#endif + return pfn_to_nid(pfn); +} + +static void do_register_memory_block_under_node(int nid, + struct memory_block *mem_blk) +{ + int ret; + + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. + */ + mem_blk->nid = nid; + + ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + if (ret && ret != -EEXIST) + dev_err_ratelimited(&node_devices[nid]->dev, + "can't create link to %s in sysfs (%d)\n", + kobject_name(&mem_blk->dev.kobj), ret); + + ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); + if (ret && ret != -EEXIST) + dev_err_ratelimited(&mem_blk->dev, + "can't create link to %s in sysfs (%d)\n", + kobject_name(&node_devices[nid]->dev.kobj), + ret); +} + +/* register memory section under specified node if it spans that node */ +static int register_mem_block_under_node_early(struct memory_block *mem_blk, + void *arg) +{ + unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE; + unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); + unsigned long end_pfn = start_pfn + memory_block_pfns - 1; + int nid = *(int *)arg; + unsigned long pfn; + + for (pfn = start_pfn; pfn <= end_pfn; pfn++) { + int page_nid; + + /* + * memory block could have several absent sections from start. + * skip pfn range from absent section + */ + if (!pfn_in_present_section(pfn)) { + pfn = round_down(pfn + PAGES_PER_SECTION, + PAGES_PER_SECTION) - 1; + continue; + } + + /* + * We need to check if page belongs to nid only at the boot + * case because node's ranges can be interleaved. 
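+ * (At boot a single memory block can contain pfns from more than one
+ * node, which is why each present pfn's node id is resolved and compared
+ * against the requested nid below.)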
+ */ + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; + + do_register_memory_block_under_node(nid, mem_blk); + return 0; + } + /* mem section does not span the specified node */ + return 0; +} + +/* + * During hotplug we know that all pages in the memory block belong to the same + * node. + */ +static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, + void *arg) +{ + int nid = *(int *)arg; + + do_register_memory_block_under_node(nid, mem_blk); + return 0; +} + +/* + * Unregister a memory block device under the node it spans. Memory blocks + * with multiple nodes cannot be offlined and therefore also never be removed. + */ +void unregister_memory_block_under_nodes(struct memory_block *mem_blk) +{ + if (mem_blk->nid == NUMA_NO_NODE) + return; + + sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + sysfs_remove_link(&mem_blk->dev.kobj, + kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); +} + +void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, + enum meminit_context context) +{ + walk_memory_blocks_func_t func; + + if (context == MEMINIT_HOTPLUG) + func = register_mem_block_under_node_hotplug; + else + func = register_mem_block_under_node_early; + + walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), + (void *)&nid, func); + return; +} + +#ifdef CONFIG_HUGETLBFS +/* + * Handle per node hstate attribute [un]registration on transistions + * to/from memoryless state. + */ +static void node_hugetlb_work(struct work_struct *work) +{ + struct node *node = container_of(work, struct node, node_work); + + /* + * We only get here when a node transitions to/from memoryless state. + * We can detect which transition occurred by examining whether the + * node has memory now. hugetlb_register_node() already check this + * so we try to register the attributes. If that fails, then the + * node has transitioned to memoryless, try to unregister the + * attributes. + */ + if (!hugetlb_register_node(node)) + hugetlb_unregister_node(node); +} + +static void init_node_hugetlb_work(int nid) +{ + INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work); +} + +static int node_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_notify *mnb = arg; + int nid = mnb->status_change_nid; + + switch (action) { + case MEM_ONLINE: + case MEM_OFFLINE: + /* + * offload per node hstate [un]registration to a work thread + * when transitioning to/from memoryless state. 
+ */ + if (nid != NUMA_NO_NODE) + schedule_work(&node_devices[nid]->node_work); + break; + + case MEM_GOING_ONLINE: + case MEM_GOING_OFFLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + default: + break; + } + + return NOTIFY_OK; +} +#endif /* CONFIG_HUGETLBFS */ +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \ + !defined(CONFIG_HUGETLBFS) +static inline int node_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + return NOTIFY_OK; +} + +static void init_node_hugetlb_work(int nid) { } + +#endif + +int __register_one_node(int nid) +{ + int error; + int cpu; + + node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL); + if (!node_devices[nid]) + return -ENOMEM; + + error = register_node(node_devices[nid], nid); + + /* link cpu under this node */ + for_each_present_cpu(cpu) { + if (cpu_to_node(cpu) == nid) + register_cpu_under_node(cpu, nid); + } + + INIT_LIST_HEAD(&node_devices[nid]->access_list); + /* initialize work queue for memory hot plug */ + init_node_hugetlb_work(nid); + node_init_caches(nid); + + return error; +} + +void unregister_one_node(int nid) +{ + if (!node_devices[nid]) + return; + + unregister_node(node_devices[nid]); + node_devices[nid] = NULL; +} + +/* + * node states attributes + */ + +struct node_attr { + struct device_attribute attr; + enum node_states state; +}; + +static ssize_t show_node_state(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct node_attr *na = container_of(attr, struct node_attr, attr); + + return sysfs_emit(buf, "%*pbl\n", + nodemask_pr_args(&node_states[na->state])); +} + +#define _NODE_ATTR(name, state) \ + { __ATTR(name, 0444, show_node_state, NULL), state } + +static struct node_attr node_state_attr[] = { + [N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE), + [N_ONLINE] = _NODE_ATTR(online, N_ONLINE), + [N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY), +#ifdef CONFIG_HIGHMEM + [N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY), +#endif + [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY), + [N_CPU] = _NODE_ATTR(has_cpu, N_CPU), + [N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator, + N_GENERIC_INITIATOR), +}; + +static struct attribute *node_state_attrs[] = { + &node_state_attr[N_POSSIBLE].attr.attr, + &node_state_attr[N_ONLINE].attr.attr, + &node_state_attr[N_NORMAL_MEMORY].attr.attr, +#ifdef CONFIG_HIGHMEM + &node_state_attr[N_HIGH_MEMORY].attr.attr, +#endif + &node_state_attr[N_MEMORY].attr.attr, + &node_state_attr[N_CPU].attr.attr, + &node_state_attr[N_GENERIC_INITIATOR].attr.attr, + NULL +}; + +static struct attribute_group memory_root_attr_group = { + .attrs = node_state_attrs, +}; + +static const struct attribute_group *cpu_root_attr_groups[] = { + &memory_root_attr_group, + NULL, +}; + +#define NODE_CALLBACK_PRI 2 /* lower than SLAB */ +static int __init register_node_type(void) +{ + int ret; + + BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES); + BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES); + + ret = subsys_system_register(&node_subsys, cpu_root_attr_groups); + if (!ret) { + static struct notifier_block node_memory_callback_nb = { + .notifier_call = node_memory_callback, + .priority = NODE_CALLBACK_PRI, + }; + register_hotmemory_notifier(&node_memory_callback_nb); + } + + /* + * Note: we're not going to unregister the node class if we fail + * to register the node state class attribute files. 
+ */ + return ret; +} +postcore_initcall(register_node_type); diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c new file mode 100644 index 000000000..c22864458 --- /dev/null +++ b/drivers/base/pinctrl.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver core interface to the pinctrl subsystem. + * + * Copyright (C) 2012 ST-Ericsson SA + * Written on behalf of Linaro for ST-Ericsson + * Based on bits of regulator core, gpio core and clk core + * + * Author: Linus Walleij <linus.walleij@linaro.org> + */ + +#include <linux/device.h> +#include <linux/pinctrl/devinfo.h> +#include <linux/pinctrl/consumer.h> +#include <linux/slab.h> + +/** + * pinctrl_bind_pins() - called by the device core before probe + * @dev: the device that is just about to probe + */ +int pinctrl_bind_pins(struct device *dev) +{ + int ret; + + if (dev->of_node_reused) + return 0; + + dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL); + if (!dev->pins) + return -ENOMEM; + + dev->pins->p = devm_pinctrl_get(dev); + if (IS_ERR(dev->pins->p)) { + dev_dbg(dev, "no pinctrl handle\n"); + ret = PTR_ERR(dev->pins->p); + goto cleanup_alloc; + } + + dev->pins->default_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(dev->pins->default_state)) { + dev_dbg(dev, "no default pinctrl state\n"); + ret = 0; + goto cleanup_get; + } + + dev->pins->init_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_INIT); + if (IS_ERR(dev->pins->init_state)) { + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no init pinctrl state\n"); + + ret = pinctrl_select_state(dev->pins->p, + dev->pins->default_state); + } else { + ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state); + } + + if (ret) { + dev_dbg(dev, "failed to activate initial pinctrl state\n"); + goto cleanup_get; + } + +#ifdef CONFIG_PM + /* + * If power management is enabled, we also look for the optional + * sleep and idle pin states, with semantics as defined in + * <linux/pinctrl/pinctrl-state.h> + */ + dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_SLEEP); + if (IS_ERR(dev->pins->sleep_state)) + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no sleep pinctrl state\n"); + + dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p, + PINCTRL_STATE_IDLE); + if (IS_ERR(dev->pins->idle_state)) + /* Not supplying this state is perfectly legal */ + dev_dbg(dev, "no idle pinctrl state\n"); +#endif + + return 0; + + /* + * If no pinctrl handle or default state was found for this device, + * let's explicitly free the pin container in the device, there is + * no point in keeping it around. + */ +cleanup_get: + devm_pinctrl_put(dev->pins->p); +cleanup_alloc: + devm_kfree(dev, dev->pins); + dev->pins = NULL; + + /* Return deferrals */ + if (ret == -EPROBE_DEFER) + return ret; + /* Return serious errors */ + if (ret == -EINVAL) + return ret; + /* We ignore errors like -ENOENT meaning no pinctrl state */ + + return 0; +} diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c new file mode 100644 index 000000000..c4a17e5ed --- /dev/null +++ b/drivers/base/platform-msi.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MSI framework for platform devices + * + * Copyright (C) 2015 ARM Limited, All Rights Reserved. 
+ * Author: Marc Zyngier <marc.zyngier@arm.com> + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include <linux/slab.h> + +#define DEV_ID_SHIFT 21 +#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT)) + +/* + * Internal data structure containing a (made up, but unique) devid + * and the callback to write the MSI message. + */ +struct platform_msi_priv_data { + struct device *dev; + void *host_data; + msi_alloc_info_t arg; + irq_write_msi_msg_t write_msg; + int devid; +}; + +/* The devid allocator */ +static DEFINE_IDA(platform_msi_devid_ida); + +#ifdef GENERIC_MSI_DOMAIN_OPS +/* + * Convert an msi_desc to a globaly unique identifier (per-device + * devid + msi_desc position in the msi_list). + */ +static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) +{ + u32 devid; + + devid = desc->platform.msi_priv_data->devid; + + return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; +} + +static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) +{ + arg->desc = desc; + arg->hwirq = platform_msi_calc_hwirq(desc); +} + +static int platform_msi_init(struct irq_domain *domain, + struct msi_domain_info *info, + unsigned int virq, irq_hw_number_t hwirq, + msi_alloc_info_t *arg) +{ + return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + info->chip, info->chip_data); +} +#else +#define platform_msi_set_desc NULL +#define platform_msi_init NULL +#endif + +static void platform_msi_update_dom_ops(struct msi_domain_info *info) +{ + struct msi_domain_ops *ops = info->ops; + + BUG_ON(!ops); + + if (ops->msi_init == NULL) + ops->msi_init = platform_msi_init; + if (ops->set_desc == NULL) + ops->set_desc = platform_msi_set_desc; +} + +static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct msi_desc *desc = irq_data_get_msi_desc(data); + struct platform_msi_priv_data *priv_data; + + priv_data = desc->platform.msi_priv_data; + + priv_data->write_msg(desc, msg); +} + +static void platform_msi_update_chip_ops(struct msi_domain_info *info) +{ + struct irq_chip *chip = info->chip; + + BUG_ON(!chip); + if (!chip->irq_mask) + chip->irq_mask = irq_chip_mask_parent; + if (!chip->irq_unmask) + chip->irq_unmask = irq_chip_unmask_parent; + if (!chip->irq_eoi) + chip->irq_eoi = irq_chip_eoi_parent; + if (!chip->irq_set_affinity) + chip->irq_set_affinity = msi_domain_set_affinity; + if (!chip->irq_write_msi_msg) + chip->irq_write_msi_msg = platform_msi_write_msg; + if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) && + !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI))) + info->flags &= ~MSI_FLAG_LEVEL_CAPABLE; +} + +static void platform_msi_free_descs(struct device *dev, int base, int nvec) +{ + struct msi_desc *desc, *tmp; + + list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { + if (desc->platform.msi_index >= base && + desc->platform.msi_index < (base + nvec)) { + list_del(&desc->list); + free_msi_entry(desc); + } + } +} + +static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq, + int nvec, + struct platform_msi_priv_data *data) + +{ + struct msi_desc *desc; + int i, base = 0; + + if (!list_empty(dev_to_msi_list(dev))) { + desc = list_last_entry(dev_to_msi_list(dev), + struct msi_desc, list); + base = desc->platform.msi_index + 1; + } + + for (i = 0; i < nvec; i++) { + desc = alloc_msi_entry(dev, 1, NULL); + if (!desc) + break; + + desc->platform.msi_priv_data = data; + desc->platform.msi_index = base + i; + desc->irq = virq ? 
virq + i : 0; + + list_add_tail(&desc->list, dev_to_msi_list(dev)); + } + + if (i != nvec) { + /* Clean up the mess */ + platform_msi_free_descs(dev, base, nvec); + + return -ENOMEM; + } + + return 0; +} + +static int platform_msi_alloc_descs(struct device *dev, int nvec, + struct platform_msi_priv_data *data) + +{ + return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data); +} + +/** + * platform_msi_create_irq_domain - Create a platform MSI interrupt domain + * @fwnode: Optional fwnode of the interrupt controller + * @info: MSI domain info + * @parent: Parent irq domain + * + * Updates the domain and chip ops and creates a platform MSI + * interrupt domain. + * + * Returns: + * A domain pointer or NULL in case of failure. + */ +struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, + struct msi_domain_info *info, + struct irq_domain *parent) +{ + struct irq_domain *domain; + + if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) + platform_msi_update_dom_ops(info); + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) + platform_msi_update_chip_ops(info); + + domain = msi_create_irq_domain(fwnode, info, parent); + if (domain) + irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI); + + return domain; +} + +static struct platform_msi_priv_data * +platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct platform_msi_priv_data *datap; + /* + * Limit the number of interrupts to 2048 per device. Should we + * need to bump this up, DEV_ID_SHIFT should be adjusted + * accordingly (which would impact the max number of MSI + * capable devices). + */ + if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS) + return ERR_PTR(-EINVAL); + + if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { + dev_err(dev, "Incompatible msi_domain, giving up\n"); + return ERR_PTR(-EINVAL); + } + + /* Already had a helping of MSI? Greed... 
*/ + if (!list_empty(dev_to_msi_list(dev))) + return ERR_PTR(-EBUSY); + + datap = kzalloc(sizeof(*datap), GFP_KERNEL); + if (!datap) + return ERR_PTR(-ENOMEM); + + datap->devid = ida_simple_get(&platform_msi_devid_ida, + 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); + if (datap->devid < 0) { + int err = datap->devid; + kfree(datap); + return ERR_PTR(err); + } + + datap->write_msg = write_msi_msg; + datap->dev = dev; + + return datap; +} + +static void platform_msi_free_priv_data(struct platform_msi_priv_data *data) +{ + ida_simple_remove(&platform_msi_devid_ida, data->devid); + kfree(data); +} + +/** + * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev + * @dev: The device for which to allocate interrupts + * @nvec: The number of interrupts to allocate + * @write_msi_msg: Callback to write an interrupt message for @dev + * + * Returns: + * Zero for success, or an error code in case of failure + */ +int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, + irq_write_msi_msg_t write_msi_msg) +{ + struct platform_msi_priv_data *priv_data; + int err; + + priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (IS_ERR(priv_data)) + return PTR_ERR(priv_data); + + err = platform_msi_alloc_descs(dev, nvec, priv_data); + if (err) + goto out_free_priv_data; + + err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); + if (err) + goto out_free_desc; + + return 0; + +out_free_desc: + platform_msi_free_descs(dev, 0, nvec); +out_free_priv_data: + platform_msi_free_priv_data(priv_data); + + return err; +} +EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs); + +/** + * platform_msi_domain_free_irqs - Free MSI interrupts for @dev + * @dev: The device for which to free interrupts + */ +void platform_msi_domain_free_irqs(struct device *dev) +{ + if (!list_empty(dev_to_msi_list(dev))) { + struct msi_desc *desc; + + desc = first_msi_entry(dev); + platform_msi_free_priv_data(desc->platform.msi_priv_data); + } + + msi_domain_free_irqs(dev->msi_domain, dev); + platform_msi_free_descs(dev, 0, MAX_DEV_MSIS); +} +EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs); + +/** + * platform_msi_get_host_data - Query the private data associated with + * a platform-msi domain + * @domain: The platform-msi domain + * + * Returns the private data provided when calling + * platform_msi_create_device_domain. + */ +void *platform_msi_get_host_data(struct irq_domain *domain) +{ + struct platform_msi_priv_data *data = domain->host_data; + return data->host_data; +} + +/** + * platform_msi_create_device_domain - Create a platform-msi domain + * + * @dev: The device generating the MSIs + * @nvec: The number of MSIs that need to be allocated + * @write_msi_msg: Callback to write an interrupt message for @dev + * @ops: The hierarchy domain operations to use + * @host_data: Private data associated to this domain + * + * Returns an irqdomain for @nvec interrupts + */ +struct irq_domain * +__platform_msi_create_device_domain(struct device *dev, + unsigned int nvec, + bool is_tree, + irq_write_msi_msg_t write_msi_msg, + const struct irq_domain_ops *ops, + void *host_data) +{ + struct platform_msi_priv_data *data; + struct irq_domain *domain; + int err; + + data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg); + if (IS_ERR(data)) + return NULL; + + data->host_data = host_data; + domain = irq_domain_create_hierarchy(dev->msi_domain, 0, + is_tree ? 
0 : nvec, + dev->fwnode, ops, data); + if (!domain) + goto free_priv; + + err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg); + if (err) + goto free_domain; + + return domain; + +free_domain: + irq_domain_remove(domain); +free_priv: + platform_msi_free_priv_data(data); + return NULL; +} + +/** + * platform_msi_domain_free - Free interrupts associated with a platform-msi + * domain + * + * @domain: The platform-msi domain + * @virq: The base irq from which to perform the free operation + * @nvec: How many interrupts to free from @virq + */ +void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nvec) +{ + struct platform_msi_priv_data *data = domain->host_data; + struct msi_desc *desc, *tmp; + for_each_msi_entry_safe(desc, tmp, data->dev) { + if (WARN_ON(!desc->irq || desc->nvec_used != 1)) + return; + if (!(desc->irq >= virq && desc->irq < (virq + nvec))) + continue; + + irq_domain_free_irqs_common(domain, desc->irq, 1); + list_del(&desc->list); + free_msi_entry(desc); + } +} + +/** + * platform_msi_domain_alloc - Allocate interrupts associated with + * a platform-msi domain + * + * @domain: The platform-msi domain + * @virq: The base irq from which to perform the allocate operation + * @nr_irqs: How many interrupts to free from @virq + * + * Return 0 on success, or an error code on failure. Must be called + * with irq_domain_mutex held (which can only be done as part of a + * top-level interrupt allocation). + */ +int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct platform_msi_priv_data *data = domain->host_data; + int err; + + err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data); + if (err) + return err; + + err = msi_domain_populate_irqs(domain->parent, data->dev, + virq, nr_irqs, &data->arg); + if (err) + platform_msi_domain_free(domain, virq, nr_irqs); + + return err; +} diff --git a/drivers/base/platform.c b/drivers/base/platform.c new file mode 100644 index 000000000..647066229 --- /dev/null +++ b/drivers/base/platform.c @@ -0,0 +1,1387 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * platform.c - platform 'pseudo' bus for legacy devices + * + * Copyright (c) 2002-3 Patrick Mochel + * Copyright (c) 2002-3 Open Source Development Labs + * + * Please see Documentation/driver-api/driver-model/platform.rst for more + * information. + */ + +#include <linux/string.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/memblock.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/idr.h> +#include <linux/acpi.h> +#include <linux/clk/clk-conf.h> +#include <linux/limits.h> +#include <linux/property.h> +#include <linux/kmemleak.h> +#include <linux/types.h> + +#include "base.h" +#include "power/power.h" + +/* For automatically allocated device IDs */ +static DEFINE_IDA(platform_devid_ida); + +struct device platform_bus = { + .init_name = "platform", +}; +EXPORT_SYMBOL_GPL(platform_bus); + +/** + * platform_get_resource - get a resource for a device + * @dev: platform device + * @type: resource type + * @num: resource index + * + * Return: a pointer to the resource or NULL on failure. 
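+ *
+ * For example, a probe() routine might look up its first memory region like
+ * this (a minimal sketch; pdev is a hypothetical platform device)::
+ *
+ *		struct resource *regs;
+ *
+ *		regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ *		if (!regs)
+ *			return -ENODEV;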
+ */ +struct resource *platform_get_resource(struct platform_device *dev, + unsigned int type, unsigned int num) +{ + u32 i; + + for (i = 0; i < dev->num_resources; i++) { + struct resource *r = &dev->resource[i]; + + if (type == resource_type(r) && num-- == 0) + return r; + } + return NULL; +} +EXPORT_SYMBOL_GPL(platform_get_resource); + +#ifdef CONFIG_HAS_IOMEM +/** + * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a + * platform device and get resource + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @index: resource index + * @res: optional output parameter to store a pointer to the obtained resource. + * + * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code + * on failure. + */ +void __iomem * +devm_platform_get_and_ioremap_resource(struct platform_device *pdev, + unsigned int index, struct resource **res) +{ + struct resource *r; + + r = platform_get_resource(pdev, IORESOURCE_MEM, index); + if (res) + *res = r; + return devm_ioremap_resource(&pdev->dev, r); +} +EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); + +/** + * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform + * device + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @index: resource index + * + * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code + * on failure. + */ +void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, + unsigned int index) +{ + return devm_platform_get_and_ioremap_resource(pdev, index, NULL); +} +EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); + +/** + * devm_platform_ioremap_resource_wc - write-combined variant of + * devm_platform_ioremap_resource() + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @index: resource index + * + * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code + * on failure. + */ +void __iomem *devm_platform_ioremap_resource_wc(struct platform_device *pdev, + unsigned int index) +{ + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, index); + return devm_ioremap_resource_wc(&pdev->dev, res); +} + +/** + * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for + * a platform device, retrieve the + * resource by name + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @name: name of the resource + * + * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code + * on failure. + */ +void __iomem * +devm_platform_ioremap_resource_byname(struct platform_device *pdev, + const char *name) +{ + struct resource *res; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + return devm_ioremap_resource(&pdev->dev, res); +} +EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname); +#endif /* CONFIG_HAS_IOMEM */ + +/** + * platform_get_irq_optional - get an optional IRQ for a device + * @dev: platform device + * @num: IRQ number index + * + * Gets an IRQ for a platform device. Device drivers should check the return + * value for errors so as to not pass a negative integer value to the + * request_irq() APIs. This is the same as platform_get_irq(), except that it + * does not print an error message if an IRQ can not be obtained. 
+ * + * For example:: + * + * int irq = platform_get_irq_optional(pdev, 0); + * if (irq < 0) + * return irq; + * + * Return: non-zero IRQ number on success, negative error number on failure. + */ +int platform_get_irq_optional(struct platform_device *dev, unsigned int num) +{ + int ret; +#ifdef CONFIG_SPARC + /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ + if (!dev || num >= dev->archdata.num_irqs) + return -ENXIO; + ret = dev->archdata.irqs[num]; + goto out; +#else + struct resource *r; + + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + ret = of_irq_get(dev->dev.of_node, num); + if (ret > 0 || ret == -EPROBE_DEFER) + goto out; + } + + r = platform_get_resource(dev, IORESOURCE_IRQ, num); + if (has_acpi_companion(&dev->dev)) { + if (r && r->flags & IORESOURCE_DISABLED) { + ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r); + if (ret) + goto out; + } + } + + /* + * The resources may pass trigger flags to the irqs that need + * to be set up. It so happens that the trigger flags for + * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* + * settings. + */ + if (r && r->flags & IORESOURCE_BITS) { + struct irq_data *irqd; + + irqd = irq_get_irq_data(r->start); + if (!irqd) { + ret = -ENXIO; + goto out; + } + irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); + } + + if (r) { + ret = r->start; + goto out; + } + + /* + * For the index 0 interrupt, allow falling back to GpioInt + * resources. While a device could have both Interrupt and GpioInt + * resources, making this fallback ambiguous, in many common cases + * the device will only expose one IRQ, and this fallback + * allows a common code path across either kind of resource. + */ + if (num == 0 && has_acpi_companion(&dev->dev)) { + ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); + /* Our callers expect -ENXIO for missing IRQs. */ + if (ret >= 0 || ret == -EPROBE_DEFER) + goto out; + } + + ret = -ENXIO; +#endif +out: + WARN(ret == 0, "0 is an invalid IRQ number\n"); + return ret; +} +EXPORT_SYMBOL_GPL(platform_get_irq_optional); + +/** + * platform_get_irq - get an IRQ for a device + * @dev: platform device + * @num: IRQ number index + * + * Gets an IRQ for a platform device and prints an error message if finding the + * IRQ fails. Device drivers should check the return value for errors so as to + * not pass a negative integer value to the request_irq() APIs. + * + * For example:: + * + * int irq = platform_get_irq(pdev, 0); + * if (irq < 0) + * return irq; + * + * Return: non-zero IRQ number on success, negative error number on failure. 
+ */ +int platform_get_irq(struct platform_device *dev, unsigned int num) +{ + int ret; + + ret = platform_get_irq_optional(dev, num); + if (ret < 0 && ret != -EPROBE_DEFER) + dev_err(&dev->dev, "IRQ index %u not found\n", num); + + return ret; +} +EXPORT_SYMBOL_GPL(platform_get_irq); + +/** + * platform_irq_count - Count the number of IRQs a platform device uses + * @dev: platform device + * + * Return: Number of IRQs a platform device uses or EPROBE_DEFER + */ +int platform_irq_count(struct platform_device *dev) +{ + int ret, nr = 0; + + while ((ret = platform_get_irq_optional(dev, nr)) >= 0) + nr++; + + if (ret == -EPROBE_DEFER) + return ret; + + return nr; +} +EXPORT_SYMBOL_GPL(platform_irq_count); + +/** + * platform_get_resource_byname - get a resource for a device by name + * @dev: platform device + * @type: resource type + * @name: resource name + */ +struct resource *platform_get_resource_byname(struct platform_device *dev, + unsigned int type, + const char *name) +{ + u32 i; + + for (i = 0; i < dev->num_resources; i++) { + struct resource *r = &dev->resource[i]; + + if (unlikely(!r->name)) + continue; + + if (type == resource_type(r) && !strcmp(r->name, name)) + return r; + } + return NULL; +} +EXPORT_SYMBOL_GPL(platform_get_resource_byname); + +static int __platform_get_irq_byname(struct platform_device *dev, + const char *name) +{ + struct resource *r; + int ret; + + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { + ret = of_irq_get_byname(dev->dev.of_node, name); + if (ret > 0 || ret == -EPROBE_DEFER) + return ret; + } + + r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); + if (r) { + WARN(r->start == 0, "0 is an invalid IRQ number\n"); + return r->start; + } + + return -ENXIO; +} + +/** + * platform_get_irq_byname - get an IRQ for a device by name + * @dev: platform device + * @name: IRQ name + * + * Get an IRQ like platform_get_irq(), but then by name rather then by index. + * + * Return: non-zero IRQ number on success, negative error number on failure. + */ +int platform_get_irq_byname(struct platform_device *dev, const char *name) +{ + int ret; + + ret = __platform_get_irq_byname(dev, name); + if (ret < 0 && ret != -EPROBE_DEFER) + dev_err(&dev->dev, "IRQ %s not found\n", name); + + return ret; +} +EXPORT_SYMBOL_GPL(platform_get_irq_byname); + +/** + * platform_get_irq_byname_optional - get an optional IRQ for a device by name + * @dev: platform device + * @name: IRQ name + * + * Get an optional IRQ by name like platform_get_irq_byname(). Except that it + * does not print an error message if an IRQ can not be obtained. + * + * Return: non-zero IRQ number on success, negative error number on failure. + */ +int platform_get_irq_byname_optional(struct platform_device *dev, + const char *name) +{ + return __platform_get_irq_byname(dev, name); +} +EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); + +/** + * platform_add_devices - add a numbers of platform devices + * @devs: array of platform devices to add + * @num: number of platform devices in array + */ +int platform_add_devices(struct platform_device **devs, int num) +{ + int i, ret = 0; + + for (i = 0; i < num; i++) { + ret = platform_device_register(devs[i]); + if (ret) { + while (--i >= 0) + platform_device_unregister(devs[i]); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(platform_add_devices); + +struct platform_object { + struct platform_device pdev; + char name[]; +}; + +/* + * Set up default DMA mask for platform devices if the they weren't + * previously set by the architecture / DT. 
+ */ +static void setup_pdev_dma_masks(struct platform_device *pdev) +{ + pdev->dev.dma_parms = &pdev->dma_parms; + + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + if (!pdev->dev.dma_mask) { + pdev->platform_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->platform_dma_mask; + } +}; + +/** + * platform_device_put - destroy a platform device + * @pdev: platform device to free + * + * Free all memory associated with a platform device. This function must + * _only_ be externally called in error cases. All other usage is a bug. + */ +void platform_device_put(struct platform_device *pdev) +{ + if (!IS_ERR_OR_NULL(pdev)) + put_device(&pdev->dev); +} +EXPORT_SYMBOL_GPL(platform_device_put); + +static void platform_device_release(struct device *dev) +{ + struct platform_object *pa = container_of(dev, struct platform_object, + pdev.dev); + + of_device_node_put(&pa->pdev.dev); + kfree(pa->pdev.dev.platform_data); + kfree(pa->pdev.mfd_cell); + kfree(pa->pdev.resource); + kfree(pa->pdev.driver_override); + kfree(pa); +} + +/** + * platform_device_alloc - create a platform device + * @name: base name of the device we're adding + * @id: instance id + * + * Create a platform device object which can have other objects attached + * to it, and which will have attached objects freed when it is released. + */ +struct platform_device *platform_device_alloc(const char *name, int id) +{ + struct platform_object *pa; + + pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); + if (pa) { + strcpy(pa->name, name); + pa->pdev.name = pa->name; + pa->pdev.id = id; + device_initialize(&pa->pdev.dev); + pa->pdev.dev.release = platform_device_release; + setup_pdev_dma_masks(&pa->pdev); + } + + return pa ? &pa->pdev : NULL; +} +EXPORT_SYMBOL_GPL(platform_device_alloc); + +/** + * platform_device_add_resources - add resources to a platform device + * @pdev: platform device allocated by platform_device_alloc to add resources to + * @res: set of resources that needs to be allocated for the device + * @num: number of resources + * + * Add a copy of the resources to the platform device. The memory + * associated with the resources will be freed when the platform device is + * released. + */ +int platform_device_add_resources(struct platform_device *pdev, + const struct resource *res, unsigned int num) +{ + struct resource *r = NULL; + + if (res) { + r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); + if (!r) + return -ENOMEM; + } + + kfree(pdev->resource); + pdev->resource = r; + pdev->num_resources = num; + return 0; +} +EXPORT_SYMBOL_GPL(platform_device_add_resources); + +/** + * platform_device_add_data - add platform-specific data to a platform device + * @pdev: platform device allocated by platform_device_alloc to add resources to + * @data: platform specific data for this platform device + * @size: size of platform specific data + * + * Add a copy of platform specific data to the platform device's + * platform_data pointer. The memory associated with the platform data + * will be freed when the platform device is released. 
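+ *
+ * For example (a minimal sketch; struct foo_platform_data, its bar member
+ * and pdev are hypothetical)::
+ *
+ *		struct foo_platform_data pdata = { .bar = 42 };
+ *		int err;
+ *
+ *		err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+ *		if (err)
+ *			return err;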
+ */ +int platform_device_add_data(struct platform_device *pdev, const void *data, + size_t size) +{ + void *d = NULL; + + if (data) { + d = kmemdup(data, size, GFP_KERNEL); + if (!d) + return -ENOMEM; + } + + kfree(pdev->dev.platform_data); + pdev->dev.platform_data = d; + return 0; +} +EXPORT_SYMBOL_GPL(platform_device_add_data); + +/** + * platform_device_add_properties - add built-in properties to a platform device + * @pdev: platform device to add properties to + * @properties: null terminated array of properties to add + * + * The function will take deep copy of @properties and attach the copy to the + * platform device. The memory associated with properties will be freed when the + * platform device is released. + */ +int platform_device_add_properties(struct platform_device *pdev, + const struct property_entry *properties) +{ + return device_add_properties(&pdev->dev, properties); +} +EXPORT_SYMBOL_GPL(platform_device_add_properties); + +/** + * platform_device_add - add a platform device to device hierarchy + * @pdev: platform device we're adding + * + * This is part 2 of platform_device_register(), though may be called + * separately _iff_ pdev was allocated by platform_device_alloc(). + */ +int platform_device_add(struct platform_device *pdev) +{ + u32 i; + int ret; + + if (!pdev) + return -EINVAL; + + if (!pdev->dev.parent) + pdev->dev.parent = &platform_bus; + + pdev->dev.bus = &platform_bus_type; + + switch (pdev->id) { + default: + dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); + break; + case PLATFORM_DEVID_NONE: + dev_set_name(&pdev->dev, "%s", pdev->name); + break; + case PLATFORM_DEVID_AUTO: + /* + * Automatically allocated device ID. We mark it as such so + * that we remember it must be freed, and we append a suffix + * to avoid namespace collision with explicit IDs. + */ + ret = ida_alloc(&platform_devid_ida, GFP_KERNEL); + if (ret < 0) + goto err_out; + pdev->id = ret; + pdev->id_auto = true; + dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id); + break; + } + + for (i = 0; i < pdev->num_resources; i++) { + struct resource *p, *r = &pdev->resource[i]; + + if (r->name == NULL) + r->name = dev_name(&pdev->dev); + + p = r->parent; + if (!p) { + if (resource_type(r) == IORESOURCE_MEM) + p = &iomem_resource; + else if (resource_type(r) == IORESOURCE_IO) + p = &ioport_resource; + } + + if (p) { + ret = insert_resource(p, r); + if (ret) { + dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r); + goto failed; + } + } + } + + pr_debug("Registering platform device '%s'. Parent at %s\n", + dev_name(&pdev->dev), dev_name(pdev->dev.parent)); + + ret = device_add(&pdev->dev); + if (ret == 0) + return ret; + + failed: + if (pdev->id_auto) { + ida_free(&platform_devid_ida, pdev->id); + pdev->id = PLATFORM_DEVID_AUTO; + } + + while (i--) { + struct resource *r = &pdev->resource[i]; + if (r->parent) + release_resource(r); + } + + err_out: + return ret; +} +EXPORT_SYMBOL_GPL(platform_device_add); + +/** + * platform_device_del - remove a platform-level device + * @pdev: platform device we're removing + * + * Note that this function will also release all memory- and port-based + * resources owned by the device (@dev->resource). This function must + * _only_ be externally called in error cases. All other usage is a bug. 
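+ *
+ * For example, an error path that runs after a successful
+ * platform_device_add() could undo the registration like this (a minimal
+ * sketch; some_further_setup() is hypothetical)::
+ *
+ *		err = some_further_setup(pdev);
+ *		if (err) {
+ *			platform_device_del(pdev);
+ *			platform_device_put(pdev);
+ *			return err;
+ *		}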
+ */ +void platform_device_del(struct platform_device *pdev) +{ + u32 i; + + if (!IS_ERR_OR_NULL(pdev)) { + device_del(&pdev->dev); + + if (pdev->id_auto) { + ida_free(&platform_devid_ida, pdev->id); + pdev->id = PLATFORM_DEVID_AUTO; + } + + for (i = 0; i < pdev->num_resources; i++) { + struct resource *r = &pdev->resource[i]; + if (r->parent) + release_resource(r); + } + } +} +EXPORT_SYMBOL_GPL(platform_device_del); + +/** + * platform_device_register - add a platform-level device + * @pdev: platform device we're adding + */ +int platform_device_register(struct platform_device *pdev) +{ + device_initialize(&pdev->dev); + setup_pdev_dma_masks(pdev); + return platform_device_add(pdev); +} +EXPORT_SYMBOL_GPL(platform_device_register); + +/** + * platform_device_unregister - unregister a platform-level device + * @pdev: platform device we're unregistering + * + * Unregistration is done in 2 steps. First we release all resources + * and remove it from the subsystem, then we drop reference count by + * calling platform_device_put(). + */ +void platform_device_unregister(struct platform_device *pdev) +{ + platform_device_del(pdev); + platform_device_put(pdev); +} +EXPORT_SYMBOL_GPL(platform_device_unregister); + +/** + * platform_device_register_full - add a platform-level device with + * resources and platform-specific data + * + * @pdevinfo: data used to create device + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +struct platform_device *platform_device_register_full( + const struct platform_device_info *pdevinfo) +{ + int ret; + struct platform_device *pdev; + + pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); + if (!pdev) + return ERR_PTR(-ENOMEM); + + pdev->dev.parent = pdevinfo->parent; + pdev->dev.fwnode = pdevinfo->fwnode; + pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); + pdev->dev.of_node_reused = pdevinfo->of_node_reused; + + if (pdevinfo->dma_mask) { + pdev->platform_dma_mask = pdevinfo->dma_mask; + pdev->dev.dma_mask = &pdev->platform_dma_mask; + pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; + } + + ret = platform_device_add_resources(pdev, + pdevinfo->res, pdevinfo->num_res); + if (ret) + goto err; + + ret = platform_device_add_data(pdev, + pdevinfo->data, pdevinfo->size_data); + if (ret) + goto err; + + if (pdevinfo->properties) { + ret = platform_device_add_properties(pdev, + pdevinfo->properties); + if (ret) + goto err; + } + + ret = platform_device_add(pdev); + if (ret) { +err: + ACPI_COMPANION_SET(&pdev->dev, NULL); + platform_device_put(pdev); + return ERR_PTR(ret); + } + + return pdev; +} +EXPORT_SYMBOL_GPL(platform_device_register_full); + +static int platform_drv_probe(struct device *_dev) +{ + struct platform_driver *drv = to_platform_driver(_dev->driver); + struct platform_device *dev = to_platform_device(_dev); + int ret; + + ret = of_clk_set_defaults(_dev->of_node, false); + if (ret < 0) + return ret; + + ret = dev_pm_domain_attach(_dev, true); + if (ret) + goto out; + + if (drv->probe) { + ret = drv->probe(dev); + if (ret) + dev_pm_domain_detach(_dev, true); + } + +out: + if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { + dev_warn(_dev, "probe deferral not supported\n"); + ret = -ENXIO; + } + + return ret; +} + +static int platform_drv_probe_fail(struct device *_dev) +{ + return -ENXIO; +} + +static int platform_drv_remove(struct device *_dev) +{ + struct platform_driver *drv = to_platform_driver(_dev->driver); + struct platform_device *dev = to_platform_device(_dev); + int ret = 0; + + 
if (drv->remove) + ret = drv->remove(dev); + dev_pm_domain_detach(_dev, true); + + return ret; +} + +static void platform_drv_shutdown(struct device *_dev) +{ + struct platform_driver *drv = to_platform_driver(_dev->driver); + struct platform_device *dev = to_platform_device(_dev); + + if (drv->shutdown) + drv->shutdown(dev); +} + +/** + * __platform_driver_register - register a driver for platform-level devices + * @drv: platform driver structure + * @owner: owning module/driver + */ +int __platform_driver_register(struct platform_driver *drv, + struct module *owner) +{ + drv->driver.owner = owner; + drv->driver.bus = &platform_bus_type; + drv->driver.probe = platform_drv_probe; + drv->driver.remove = platform_drv_remove; + drv->driver.shutdown = platform_drv_shutdown; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(__platform_driver_register); + +/** + * platform_driver_unregister - unregister a driver for platform-level devices + * @drv: platform driver structure + */ +void platform_driver_unregister(struct platform_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL_GPL(platform_driver_unregister); + +/** + * __platform_driver_probe - register driver for non-hotpluggable device + * @drv: platform driver structure + * @probe: the driver probe routine, probably from an __init section + * @module: module which will be the owner of the driver + * + * Use this instead of platform_driver_register() when you know the device + * is not hotpluggable and has already been registered, and you want to + * remove its run-once probe() infrastructure from memory after the driver + * has bound to the device. + * + * One typical use for this would be with drivers for controllers integrated + * into system-on-chip processors, where the controller devices have been + * configured as part of board setup. + * + * Note that this is incompatible with deferred probing. + * + * Returns zero if the driver registered and bound to a device, else returns + * a negative error code and with the driver not registered. + */ +int __init_or_module __platform_driver_probe(struct platform_driver *drv, + int (*probe)(struct platform_device *), struct module *module) +{ + int retval, code; + + if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { + pr_err("%s: drivers registered with %s can not be probed asynchronously\n", + drv->driver.name, __func__); + return -EINVAL; + } + + /* + * We have to run our probes synchronously because we check if + * we find any devices to bind to and exit with error if there + * are any. + */ + drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; + + /* + * Prevent driver from requesting probe deferral to avoid further + * futile probe attempts. + */ + drv->prevent_deferred_probe = true; + + /* make sure driver won't have bind/unbind attributes */ + drv->driver.suppress_bind_attrs = true; + + /* temporary section violation during probe() */ + drv->probe = probe; + retval = code = __platform_driver_register(drv, module); + if (retval) + return retval; + + /* + * Fixup that section violation, being paranoid about code scanning + * the list of drivers in order to probe new devices. Check to see + * if the probe was successful, and make sure any forced probes of + * new devices fail. 
+ */ + spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); + drv->probe = NULL; + if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) + retval = -ENODEV; + drv->driver.probe = platform_drv_probe_fail; + spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); + + if (code != retval) + platform_driver_unregister(drv); + return retval; +} +EXPORT_SYMBOL_GPL(__platform_driver_probe); + +/** + * __platform_create_bundle - register driver and create corresponding device + * @driver: platform driver structure + * @probe: the driver probe routine, probably from an __init section + * @res: set of resources that needs to be allocated for the device + * @n_res: number of resources + * @data: platform specific data for this platform device + * @size: size of platform specific data + * @module: module which will be the owner of the driver + * + * Use this in legacy-style modules that probe hardware directly and + * register a single platform device and corresponding platform driver. + * + * Returns &struct platform_device pointer on success, or ERR_PTR() on error. + */ +struct platform_device * __init_or_module __platform_create_bundle( + struct platform_driver *driver, + int (*probe)(struct platform_device *), + struct resource *res, unsigned int n_res, + const void *data, size_t size, struct module *module) +{ + struct platform_device *pdev; + int error; + + pdev = platform_device_alloc(driver->driver.name, -1); + if (!pdev) { + error = -ENOMEM; + goto err_out; + } + + error = platform_device_add_resources(pdev, res, n_res); + if (error) + goto err_pdev_put; + + error = platform_device_add_data(pdev, data, size); + if (error) + goto err_pdev_put; + + error = platform_device_add(pdev); + if (error) + goto err_pdev_put; + + error = __platform_driver_probe(driver, probe, module); + if (error) + goto err_pdev_del; + + return pdev; + +err_pdev_del: + platform_device_del(pdev); +err_pdev_put: + platform_device_put(pdev); +err_out: + return ERR_PTR(error); +} +EXPORT_SYMBOL_GPL(__platform_create_bundle); + +/** + * __platform_register_drivers - register an array of platform drivers + * @drivers: an array of drivers to register + * @count: the number of drivers to register + * @owner: module owning the drivers + * + * Registers platform drivers specified by an array. On failure to register a + * driver, all previously registered drivers will be unregistered. Callers of + * this API should use platform_unregister_drivers() to unregister drivers in + * the reverse order. + * + * Returns: 0 on success or a negative error code on failure. + */ +int __platform_register_drivers(struct platform_driver * const *drivers, + unsigned int count, struct module *owner) +{ + unsigned int i; + int err; + + for (i = 0; i < count; i++) { + pr_debug("registering platform driver %ps\n", drivers[i]); + + err = __platform_driver_register(drivers[i], owner); + if (err < 0) { + pr_err("failed to register platform driver %ps: %d\n", + drivers[i], err); + goto error; + } + } + + return 0; + +error: + while (i--) { + pr_debug("unregistering platform driver %ps\n", drivers[i]); + platform_driver_unregister(drivers[i]); + } + + return err; +} +EXPORT_SYMBOL_GPL(__platform_register_drivers); + +/** + * platform_unregister_drivers - unregister an array of platform drivers + * @drivers: an array of drivers to unregister + * @count: the number of drivers to unregister + * + * Unregisters platform drivers specified by an array. This is typically used + * to complement an earlier call to platform_register_drivers(). 
Drivers are + * unregistered in the reverse order in which they were registered. + */ +void platform_unregister_drivers(struct platform_driver * const *drivers, + unsigned int count) +{ + while (count--) { + pr_debug("unregistering platform driver %ps\n", drivers[count]); + platform_driver_unregister(drivers[count]); + } +} +EXPORT_SYMBOL_GPL(platform_unregister_drivers); + +/* modalias support enables more hands-off userspace setup: + * (a) environment variable lets new-style hotplug events work once system is + * fully running: "modprobe $MODALIAS" + * (b) sysfs attribute lets new-style coldplug recover from hotplug events + * mishandled before system is fully running: "modprobe $(cat modalias)" + */ +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + int len; + + len = of_device_modalias(dev, buf, PAGE_SIZE); + if (len != -ENODEV) + return len; + + len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); + if (len != -ENODEV) + return len; + + return sysfs_emit(buf, "platform:%s\n", pdev->name); +} +static DEVICE_ATTR_RO(modalias); + +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + int ret; + + ret = driver_set_override(dev, &pdev->driver_override, buf, count); + if (ret) + return ret; + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + ssize_t len; + + device_lock(dev); + len = sysfs_emit(buf, "%s\n", pdev->driver_override); + device_unlock(dev); + + return len; +} +static DEVICE_ATTR_RW(driver_override); + +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", dev_to_node(dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, + int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + + if (a == &dev_attr_numa_node.attr && + dev_to_node(dev) == NUMA_NO_NODE) + return 0; + + return a->mode; +} + +static struct attribute *platform_dev_attrs[] = { + &dev_attr_modalias.attr, + &dev_attr_numa_node.attr, + &dev_attr_driver_override.attr, + NULL, +}; + +static struct attribute_group platform_dev_group = { + .attrs = platform_dev_attrs, + .is_visible = platform_dev_attrs_visible, +}; +__ATTRIBUTE_GROUPS(platform_dev); + +static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct platform_device *pdev = to_platform_device(dev); + int rc; + + /* Some devices have extra OF data and an OF-style MODALIAS */ + rc = of_device_uevent_modalias(dev, env); + if (rc != -ENODEV) + return rc; + + rc = acpi_device_uevent_modalias(dev, env); + if (rc != -ENODEV) + return rc; + + add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, + pdev->name); + return 0; +} + +static const struct platform_device_id *platform_match_id( + const struct platform_device_id *id, + struct platform_device *pdev) +{ + while (id->name[0]) { + if (strcmp(pdev->name, id->name) == 0) { + pdev->id_entry = id; + return id; + } + id++; + } + return NULL; +} + +/** + * platform_match - bind platform device to platform driver. + * @dev: device. + * @drv: driver. 
+ * + * Platform device IDs are assumed to be encoded like this: + * "<name><instance>", where <name> is a short description of the type of + * device, like "pci" or "floppy", and <instance> is the enumerated + * instance of the device, like '0' or '42'. Driver IDs are simply + * "<name>". So, extract the <name> from the platform_device structure, + * and compare it against the name of the driver. Return whether they match + * or not. + */ +static int platform_match(struct device *dev, struct device_driver *drv) +{ + struct platform_device *pdev = to_platform_device(dev); + struct platform_driver *pdrv = to_platform_driver(drv); + + /* When driver_override is set, only bind to the matching driver */ + if (pdev->driver_override) + return !strcmp(pdev->driver_override, drv->name); + + /* Attempt an OF style match first */ + if (of_driver_match_device(dev, drv)) + return 1; + + /* Then try ACPI style match */ + if (acpi_driver_match_device(dev, drv)) + return 1; + + /* Then try to match against the id table */ + if (pdrv->id_table) + return platform_match_id(pdrv->id_table, pdev) != NULL; + + /* fall-back to driver name match */ + return (strcmp(pdev->name, drv->name) == 0); +} + +#ifdef CONFIG_PM_SLEEP + +static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct platform_driver *pdrv = to_platform_driver(dev->driver); + struct platform_device *pdev = to_platform_device(dev); + int ret = 0; + + if (dev->driver && pdrv->suspend) + ret = pdrv->suspend(pdev, mesg); + + return ret; +} + +static int platform_legacy_resume(struct device *dev) +{ + struct platform_driver *pdrv = to_platform_driver(dev->driver); + struct platform_device *pdev = to_platform_device(dev); + int ret = 0; + + if (dev->driver && pdrv->resume) + ret = pdrv->resume(pdev); + + return ret; +} + +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_SUSPEND + +int platform_pm_suspend(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->suspend) + ret = drv->pm->suspend(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_SUSPEND); + } + + return ret; +} + +int platform_pm_resume(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume) + ret = drv->pm->resume(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +#endif /* CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATE_CALLBACKS + +int platform_pm_freeze(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze) + ret = drv->pm->freeze(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_FREEZE); + } + + return ret; +} + +int platform_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->thaw) + ret = drv->pm->thaw(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +int platform_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff) + ret = drv->pm->poweroff(dev); + } else { + ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return ret; +} + +int platform_pm_restore(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore) + ret = 
drv->pm->restore(dev); + } else { + ret = platform_legacy_resume(dev); + } + + return ret; +} + +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + +int platform_dma_configure(struct device *dev) +{ + enum dev_dma_attr attr; + int ret = 0; + + if (dev->of_node) { + ret = of_dma_configure(dev, dev->of_node, true); + } else if (has_acpi_companion(dev)) { + attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode)); + ret = acpi_dma_configure(dev, attr); + } + + return ret; +} + +static const struct dev_pm_ops platform_dev_pm_ops = { + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, + USE_PLATFORM_PM_SLEEP_OPS +}; + +struct bus_type platform_bus_type = { + .name = "platform", + .dev_groups = platform_dev_groups, + .match = platform_match, + .uevent = platform_uevent, + .dma_configure = platform_dma_configure, + .pm = &platform_dev_pm_ops, +}; +EXPORT_SYMBOL_GPL(platform_bus_type); + +static inline int __platform_match(struct device *dev, const void *drv) +{ + return platform_match(dev, (struct device_driver *)drv); +} + +/** + * platform_find_device_by_driver - Find a platform device with a given + * driver. + * @start: The device to start the search from. + * @drv: The device driver to look for. + */ +struct device *platform_find_device_by_driver(struct device *start, + const struct device_driver *drv) +{ + return bus_find_device(&platform_bus_type, start, drv, + __platform_match); +} +EXPORT_SYMBOL_GPL(platform_find_device_by_driver); + +void __weak __init early_platform_cleanup(void) { } + +int __init platform_bus_init(void) +{ + int error; + + early_platform_cleanup(); + + error = device_register(&platform_bus); + if (error) { + put_device(&platform_bus); + return error; + } + error = bus_register(&platform_bus_type); + if (error) + device_unregister(&platform_bus); + of_platform_register_reconfig_notifier(); + return error; +} diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile new file mode 100644 index 000000000..8fdd0073e --- /dev/null +++ b/drivers/base/power/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o +obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o +obj-$(CONFIG_PM_TRACE_RTC) += trace.o +obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o +obj-$(CONFIG_HAVE_CLK) += clock_ops.o +obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o + +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c new file mode 100644 index 000000000..ced6863a1 --- /dev/null +++ b/drivers/base/power/clock_ops.c @@ -0,0 +1,647 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks + * + * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/pm.h> +#include <linux/pm_clock.h> +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/of_clk.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/pm_domain.h> +#include <linux/pm_runtime.h> + +#ifdef CONFIG_PM_CLK + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED, + PCE_STATUS_ENABLED, + PCE_STATUS_ERROR, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; +}; + +/** + * pm_clk_enable - Enable a clock, reporting any errors + * @dev: The device for the given clock + * @ce: PM clock entry corresponding to the clock. + */ +static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce) +{ + int ret; + + if (ce->status < PCE_STATUS_ERROR) { + ret = clk_enable(ce->clk); + if (!ret) + ce->status = PCE_STATUS_ENABLED; + else + dev_err(dev, "%s: failed to enable clk %p, error %d\n", + __func__, ce->clk, ret); + } +} + +/** + * pm_clk_acquire - Acquire a device clock. + * @dev: Device whose clock is to be acquired. + * @ce: PM clock entry corresponding to the clock. + */ +static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) +{ + if (!ce->clk) + ce->clk = clk_get(dev, ce->con_id); + if (IS_ERR(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + } else { + if (clk_prepare(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + dev_err(dev, "clk_prepare() failed\n"); + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, + "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); + } + } +} + +static int __pm_clk_add(struct device *dev, const char *con_id, + struct clk *clk) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + + if (!psd) + return -EINVAL; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) + return -ENOMEM; + + if (con_id) { + ce->con_id = kstrdup(con_id, GFP_KERNEL); + if (!ce->con_id) { + kfree(ce); + return -ENOMEM; + } + } else { + if (IS_ERR(clk)) { + kfree(ce); + return -ENOENT; + } + ce->clk = clk; + } + + pm_clk_acquire(dev, ce); + + spin_lock_irq(&psd->lock); + list_add_tail(&ce->node, &psd->clock_list); + spin_unlock_irq(&psd->lock); + return 0; +} + +/** + * pm_clk_add - Start using a device clock for power management. + * @dev: Device whose clock is going to be used for power management. + * @con_id: Connection ID of the clock. + * + * Add the clock represented by @con_id to the list of clocks used for + * the power management of @dev. + */ +int pm_clk_add(struct device *dev, const char *con_id) +{ + return __pm_clk_add(dev, con_id, NULL); +} +EXPORT_SYMBOL_GPL(pm_clk_add); + +/** + * pm_clk_add_clk - Start using a device clock for power management. + * @dev: Device whose clock is going to be used for power management. + * @clk: Clock pointer + * + * Add the clock to the list of clocks used for the power management of @dev. + * The power-management code will take control of the clock reference, so + * callers should not call clk_put() on @clk after this function sucessfully + * returned. + */ +int pm_clk_add_clk(struct device *dev, struct clk *clk) +{ + return __pm_clk_add(dev, NULL, clk); +} +EXPORT_SYMBOL_GPL(pm_clk_add_clk); + + +/** + * of_pm_clk_add_clk - Start using a device clock for power management. + * @dev: Device whose clock is going to be used for power management. + * @name: Name of clock that is going to be used for power management. 
+ * + * Add the clock described in the 'clocks' device-tree node that matches + * with the 'name' provided, to the list of clocks used for the power + * management of @dev. On success, returns 0. Returns a negative error + * code if the clock is not found or cannot be added. + */ +int of_pm_clk_add_clk(struct device *dev, const char *name) +{ + struct clk *clk; + int ret; + + if (!dev || !dev->of_node || !name) + return -EINVAL; + + clk = of_clk_get_by_name(dev->of_node, name); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + ret = pm_clk_add_clk(dev, clk); + if (ret) { + clk_put(clk); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(of_pm_clk_add_clk); + +/** + * of_pm_clk_add_clks - Start using device clock(s) for power management. + * @dev: Device whose clock(s) is going to be used for power management. + * + * Add a series of clocks described in the 'clocks' device-tree node for + * a device to the list of clocks used for the power management of @dev. + * On success, returns the number of clocks added. Returns a negative + * error code if there are no clocks in the device node for the device + * or if adding a clock fails. + */ +int of_pm_clk_add_clks(struct device *dev) +{ + struct clk **clks; + int i, count; + int ret; + + if (!dev || !dev->of_node) + return -EINVAL; + + count = of_clk_get_parent_count(dev->of_node); + if (count <= 0) + return -ENODEV; + + clks = kcalloc(count, sizeof(*clks), GFP_KERNEL); + if (!clks) + return -ENOMEM; + + for (i = 0; i < count; i++) { + clks[i] = of_clk_get(dev->of_node, i); + if (IS_ERR(clks[i])) { + ret = PTR_ERR(clks[i]); + goto error; + } + + ret = pm_clk_add_clk(dev, clks[i]); + if (ret) { + clk_put(clks[i]); + goto error; + } + } + + kfree(clks); + + return i; + +error: + while (i--) + pm_clk_remove_clk(dev, clks[i]); + + kfree(clks); + + return ret; +} +EXPORT_SYMBOL_GPL(of_pm_clk_add_clks); + +/** + * __pm_clk_remove - Destroy PM clock entry. + * @ce: PM clock entry to destroy. + */ +static void __pm_clk_remove(struct pm_clock_entry *ce) +{ + if (!ce) + return; + + if (ce->status < PCE_STATUS_ERROR) { + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); + + if (ce->status >= PCE_STATUS_ACQUIRED) { + clk_unprepare(ce->clk); + clk_put(ce->clk); + } + } + + kfree(ce->con_id); + kfree(ce); +} + +/** + * pm_clk_remove - Stop using a device clock for power management. + * @dev: Device whose clock should not be used for PM any more. + * @con_id: Connection ID of the clock. + * + * Remove the clock represented by @con_id from the list of clocks used for + * the power management of @dev. + */ +void pm_clk_remove(struct device *dev, const char *con_id) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + + if (!psd) + return; + + spin_lock_irq(&psd->lock); + + list_for_each_entry(ce, &psd->clock_list, node) { + if (!con_id && !ce->con_id) + goto remove; + else if (!con_id || !ce->con_id) + continue; + else if (!strcmp(con_id, ce->con_id)) + goto remove; + } + + spin_unlock_irq(&psd->lock); + return; + + remove: + list_del(&ce->node); + spin_unlock_irq(&psd->lock); + + __pm_clk_remove(ce); +} +EXPORT_SYMBOL_GPL(pm_clk_remove); + +/** + * pm_clk_remove_clk - Stop using a device clock for power management. + * @dev: Device whose clock should not be used for PM any more. + * @clk: Clock pointer + * + * Remove the clock pointed to by @clk from the list of clocks used for + * the power management of @dev. 
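+ *
+ * For example, a clock handed over earlier with pm_clk_add_clk() can be
+ * given back like this (a minimal sketch; the "fck" con_id is hypothetical)::
+ *
+ *		clk = clk_get(dev, "fck");
+ *		if (IS_ERR(clk))
+ *			return PTR_ERR(clk);
+ *		err = pm_clk_add_clk(dev, clk);	/* on success the list owns the reference */
+ *		if (err) {
+ *			clk_put(clk);
+ *			return err;
+ *		}
+ *		...
+ *		pm_clk_remove_clk(dev, clk);	/* drops the entry and puts the clock */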
+ */ +void pm_clk_remove_clk(struct device *dev, struct clk *clk) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + + if (!psd || !clk) + return; + + spin_lock_irq(&psd->lock); + + list_for_each_entry(ce, &psd->clock_list, node) { + if (clk == ce->clk) + goto remove; + } + + spin_unlock_irq(&psd->lock); + return; + + remove: + list_del(&ce->node); + spin_unlock_irq(&psd->lock); + + __pm_clk_remove(ce); +} +EXPORT_SYMBOL_GPL(pm_clk_remove_clk); + +/** + * pm_clk_init - Initialize a device's list of power management clocks. + * @dev: Device to initialize the list of PM clocks for. + * + * Initialize the lock and clock_list members of the device's pm_subsys_data + * object. + */ +void pm_clk_init(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + if (psd) + INIT_LIST_HEAD(&psd->clock_list); +} +EXPORT_SYMBOL_GPL(pm_clk_init); + +/** + * pm_clk_create - Create and initialize a device's list of PM clocks. + * @dev: Device to create and initialize the list of PM clocks for. + * + * Allocate a struct pm_subsys_data object, initialize its lock and clock_list + * members and make the @dev's power.subsys_data field point to it. + */ +int pm_clk_create(struct device *dev) +{ + return dev_pm_get_subsys_data(dev); +} +EXPORT_SYMBOL_GPL(pm_clk_create); + +/** + * pm_clk_destroy - Destroy a device's list of power management clocks. + * @dev: Device to destroy the list of PM clocks for. + * + * Clear the @dev's power.subsys_data field, remove the list of clock entries + * from the struct pm_subsys_data object pointed to by it before and free + * that object. + */ +void pm_clk_destroy(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce, *c; + struct list_head list; + + if (!psd) + return; + + INIT_LIST_HEAD(&list); + + spin_lock_irq(&psd->lock); + + list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node) + list_move(&ce->node, &list); + + spin_unlock_irq(&psd->lock); + + dev_pm_put_subsys_data(dev); + + list_for_each_entry_safe_reverse(ce, c, &list, node) { + list_del(&ce->node); + __pm_clk_remove(ce); + } +} +EXPORT_SYMBOL_GPL(pm_clk_destroy); + +/** + * pm_clk_suspend - Disable clocks in a device's PM clock list. + * @dev: Device to disable the clocks for. + */ +int pm_clk_suspend(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + unsigned long flags; + + dev_dbg(dev, "%s()\n", __func__); + + if (!psd) + return 0; + + spin_lock_irqsave(&psd->lock, flags); + + list_for_each_entry_reverse(ce, &psd->clock_list, node) { + if (ce->status < PCE_STATUS_ERROR) { + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); + ce->status = PCE_STATUS_ACQUIRED; + } + } + + spin_unlock_irqrestore(&psd->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(pm_clk_suspend); + +/** + * pm_clk_resume - Enable clocks in a device's PM clock list. + * @dev: Device to enable the clocks for. + */ +int pm_clk_resume(struct device *dev) +{ + struct pm_subsys_data *psd = dev_to_psd(dev); + struct pm_clock_entry *ce; + unsigned long flags; + + dev_dbg(dev, "%s()\n", __func__); + + if (!psd) + return 0; + + spin_lock_irqsave(&psd->lock, flags); + + list_for_each_entry(ce, &psd->clock_list, node) + __pm_clk_enable(dev, ce); + + spin_unlock_irqrestore(&psd->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(pm_clk_resume); + +/** + * pm_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. 
+ * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the pm_domain member of that object is copied to the device's + * pm_domain field and its con_ids member is used to populate the device's list + * of PM clocks, depending on @action. + * + * If the device's pm_domain field is already populated with a value different + * from the one stored in the struct pm_clk_notifier_block object, the function + * does nothing. + */ +static int pm_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char **con_id; + int error; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (dev->pm_domain) + break; + + error = pm_clk_create(dev); + if (error) + break; + + dev_pm_domain_set(dev, clknb->pm_domain); + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + pm_clk_add(dev, *con_id); + } else { + pm_clk_add(dev, NULL); + } + + break; + case BUS_NOTIFY_DEL_DEVICE: + if (dev->pm_domain != clknb->pm_domain) + break; + + dev_pm_domain_set(dev, NULL); + pm_clk_destroy(dev); + break; + } + + return 0; +} + +int pm_clk_runtime_suspend(struct device *dev) +{ + int ret; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_generic_runtime_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend device\n"); + return ret; + } + + ret = pm_clk_suspend(dev); + if (ret) { + dev_err(dev, "failed to suspend clock\n"); + pm_generic_runtime_resume(dev); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend); + +int pm_clk_runtime_resume(struct device *dev) +{ + int ret; + + dev_dbg(dev, "%s\n", __func__); + + ret = pm_clk_resume(dev); + if (ret) { + dev_err(dev, "failed to resume clock\n"); + return ret; + } + + return pm_generic_runtime_resume(dev); +} +EXPORT_SYMBOL_GPL(pm_clk_runtime_resume); + +#else /* !CONFIG_PM_CLK */ + +/** + * enable_clock - Enable a device clock. + * @dev: Device whose clock is to be enabled. + * @con_id: Connection ID of the clock. + */ +static void enable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_prepare_enable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced on.\n"); + } +} + +/** + * disable_clock - Disable a device clock. + * @dev: Device whose clock is to be disabled. + * @con_id: Connection ID of the clock. + */ +static void disable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_disable_unprepare(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced off.\n"); + } +} + +/** + * pm_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the con_ids member of that object is used to enable or disable + * the device's clocks, depending on @action. 
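+ *
+ * Bus code typically wires this up via pm_clk_add_notifier(), which fills in
+ * the notifier_call member itself. A minimal sketch (the con_id names are
+ * hypothetical)::
+ *
+ *		static struct pm_clk_notifier_block platform_bus_notifier = {
+ *			.con_ids = { "fck", "ick", NULL, },
+ *		};
+ *
+ *		pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);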
+ */ +static int pm_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char **con_id; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_BIND_DRIVER: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + enable_clock(dev, *con_id); + } else { + enable_clock(dev, NULL); + } + break; + case BUS_NOTIFY_DRIVER_NOT_BOUND: + case BUS_NOTIFY_UNBOUND_DRIVER: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + disable_clock(dev, *con_id); + } else { + disable_clock(dev, NULL); + } + break; + } + + return 0; +} + +#endif /* !CONFIG_PM_CLK */ + +/** + * pm_clk_add_notifier - Add bus type notifier for power management clocks. + * @bus: Bus type to add the notifier to. + * @clknb: Notifier to be added to the given bus type. + * + * The nb member of @clknb is not expected to be initialized and its + * notifier_call member will be replaced with pm_clk_notify(). However, + * the remaining members of @clknb should be populated prior to calling this + * routine. + */ +void pm_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb) +{ + if (!bus || !clknb) + return; + + clknb->nb.notifier_call = pm_clk_notify; + bus_register_notifier(bus, &clknb->nb); +} +EXPORT_SYMBOL_GPL(pm_clk_add_notifier); diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c new file mode 100644 index 000000000..bbddb267c --- /dev/null +++ b/drivers/base/power/common.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/common.c - Common device power management code. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/pm_clock.h> +#include <linux/acpi.h> +#include <linux/pm_domain.h> + +#include "power.h" + +/** + * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device. + * @dev: Device to handle. + * + * If power.subsys_data is NULL, point it to a new object, otherwise increment + * its reference counter. Return 0 if new object has been created or refcount + * increased, otherwise negative error code. + */ +int dev_pm_get_subsys_data(struct device *dev) +{ + struct pm_subsys_data *psd; + + psd = kzalloc(sizeof(*psd), GFP_KERNEL); + if (!psd) + return -ENOMEM; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.subsys_data) { + dev->power.subsys_data->refcount++; + } else { + spin_lock_init(&psd->lock); + psd->refcount = 1; + dev->power.subsys_data = psd; + pm_clk_init(dev); + psd = NULL; + } + + spin_unlock_irq(&dev->power.lock); + + /* kfree() verifies that its argument is nonzero. */ + kfree(psd); + + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); + +/** + * dev_pm_put_subsys_data - Drop reference to power.subsys_data. + * @dev: Device to handle. + * + * If the reference counter of power.subsys_data is zero after dropping the + * reference, power.subsys_data is removed. 
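+ *
+ * Callers are expected to pair this with an earlier dev_pm_get_subsys_data(),
+ * for example (a minimal sketch)::
+ *
+ *		ret = dev_pm_get_subsys_data(dev);
+ *		if (ret)
+ *			return ret;
+ *		...
+ *		dev_pm_put_subsys_data(dev);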
+ */ +void dev_pm_put_subsys_data(struct device *dev) +{ + struct pm_subsys_data *psd; + + spin_lock_irq(&dev->power.lock); + + psd = dev_to_psd(dev); + if (!psd) + goto out; + + if (--psd->refcount == 0) + dev->power.subsys_data = NULL; + else + psd = NULL; + + out: + spin_unlock_irq(&dev->power.lock); + kfree(psd); +} +EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data); + +/** + * dev_pm_domain_attach - Attach a device to its PM domain. + * @dev: Device to attach. + * @power_on: Used to indicate whether we should power on the device. + * + * The @dev may only be attached to a single PM domain. By iterating through + * the available alternatives we try to find a valid PM domain for the device. + * As attachment succeeds, the ->detach() callback in the struct dev_pm_domain + * should be assigned by the corresponding attach function. + * + * This function should typically be invoked from subsystem level code during + * the probe phase. Especially for those that holds devices which requires + * power management through PM domains. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + * + * Returns 0 on successfully attached PM domain, or when it is found that the + * device doesn't need a PM domain, else a negative error code. + */ +int dev_pm_domain_attach(struct device *dev, bool power_on) +{ + int ret; + + if (dev->pm_domain) + return 0; + + ret = acpi_dev_pm_attach(dev, power_on); + if (!ret) + ret = genpd_dev_pm_attach(dev); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(dev_pm_domain_attach); + +/** + * dev_pm_domain_attach_by_id - Associate a device with one of its PM domains. + * @dev: The device used to lookup the PM domain. + * @index: The index of the PM domain. + * + * As @dev may only be attached to a single PM domain, the backend PM domain + * provider creates a virtual device to attach instead. If attachment succeeds, + * the ->detach() callback in the struct dev_pm_domain are assigned by the + * corresponding backend attach function, as to deal with detaching of the + * created virtual device. + * + * This function should typically be invoked by a driver during the probe phase, + * in case its device requires power management through multiple PM domains. The + * driver may benefit from using the received device, to configure device-links + * towards its original device. Depending on the use-case and if needed, the + * links may be dynamically changed by the driver, which allows it to control + * the power to the PM domains independently from each other. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + * + * Returns the virtual created device when successfully attached to its PM + * domain, NULL in case @dev don't need a PM domain, else an ERR_PTR(). + * Note that, to detach the returned virtual device, the driver shall call + * dev_pm_domain_detach() on it, typically during the remove phase. + */ +struct device *dev_pm_domain_attach_by_id(struct device *dev, + unsigned int index) +{ + if (dev->pm_domain) + return ERR_PTR(-EEXIST); + + return genpd_dev_pm_attach_by_id(dev, index); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); + +/** + * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains. + * @dev: The device used to lookup the PM domain. + * @name: The name of the PM domain. + * + * For a detailed function description, see dev_pm_domain_attach_by_id(). 
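+ *
+ * For example, a probe() routine might attach to a named domain and detach
+ * the returned virtual device on remove (a minimal sketch; the "perf" domain
+ * name is hypothetical)::
+ *
+ *		pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
+ *		if (IS_ERR(pd_dev))
+ *			return PTR_ERR(pd_dev);
+ *		...
+ *		if (pd_dev)
+ *			dev_pm_domain_detach(pd_dev, true);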
+ */ +struct device *dev_pm_domain_attach_by_name(struct device *dev, + const char *name) +{ + if (dev->pm_domain) + return ERR_PTR(-EEXIST); + + return genpd_dev_pm_attach_by_name(dev, name); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name); + +/** + * dev_pm_domain_detach - Detach a device from its PM domain. + * @dev: Device to detach. + * @power_off: Used to indicate whether we should power off the device. + * + * This functions will reverse the actions from dev_pm_domain_attach() and + * dev_pm_domain_attach_by_id(), thus it detaches @dev from its PM domain. + * Typically it should be invoked during the remove phase, either from + * subsystem level code or from drivers. + * + * Callers must ensure proper synchronization of this function with power + * management callbacks. + */ +void dev_pm_domain_detach(struct device *dev, bool power_off) +{ + if (dev->pm_domain && dev->pm_domain->detach) + dev->pm_domain->detach(dev, power_off); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_detach); + +/** + * dev_pm_domain_start - Start the device through its PM domain. + * @dev: Device to start. + * + * This function should typically be called during probe by a subsystem/driver, + * when it needs to start its device from the PM domain's perspective. Note + * that, it's assumed that the PM domain is already powered on when this + * function is called. + * + * Returns 0 on success and negative error values on failures. + */ +int dev_pm_domain_start(struct device *dev) +{ + if (dev->pm_domain && dev->pm_domain->start) + return dev->pm_domain->start(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_domain_start); + +/** + * dev_pm_domain_set - Set PM domain of a device. + * @dev: Device whose PM domain is to be set. + * @pd: PM domain to be set, or NULL. + * + * Sets the PM domain the device belongs to. The PM domain of a device needs + * to be set before its probe finishes (it's bound to a driver). + * + * This function must be called with the device lock held. + */ +void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) +{ + if (dev->pm_domain == pd) + return; + + WARN(pd && device_is_bound(dev), + "PM domains can only be changed for unbound devices\n"); + dev->pm_domain = pd; + device_pm_check_callbacks(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_domain_set); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c new file mode 100644 index 000000000..8a90f08c9 --- /dev/null +++ b/drivers/base/power/domain.c @@ -0,0 +1,3223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/domain.c - Common code related to device power domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. 
+ */ +#define pr_fmt(fmt) "PM: " fmt + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> +#include <linux/pm_domain.h> +#include <linux/pm_qos.h> +#include <linux/pm_clock.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/sched.h> +#include <linux/suspend.h> +#include <linux/export.h> +#include <linux/cpu.h> + +#include "power.h" + +#define GENPD_RETRY_MAX_MS 250 /* Approximate */ + +#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ +({ \ + type (*__routine)(struct device *__d); \ + type __ret = (type)0; \ + \ + __routine = genpd->dev_ops.callback; \ + if (__routine) { \ + __ret = __routine(dev); \ + } \ + __ret; \ +}) + +static LIST_HEAD(gpd_list); +static DEFINE_MUTEX(gpd_list_lock); + +struct genpd_lock_ops { + void (*lock)(struct generic_pm_domain *genpd); + void (*lock_nested)(struct generic_pm_domain *genpd, int depth); + int (*lock_interruptible)(struct generic_pm_domain *genpd); + void (*unlock)(struct generic_pm_domain *genpd); +}; + +static void genpd_lock_mtx(struct generic_pm_domain *genpd) +{ + mutex_lock(&genpd->mlock); +} + +static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd, + int depth) +{ + mutex_lock_nested(&genpd->mlock, depth); +} + +static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd) +{ + return mutex_lock_interruptible(&genpd->mlock); +} + +static void genpd_unlock_mtx(struct generic_pm_domain *genpd) +{ + return mutex_unlock(&genpd->mlock); +} + +static const struct genpd_lock_ops genpd_mtx_ops = { + .lock = genpd_lock_mtx, + .lock_nested = genpd_lock_nested_mtx, + .lock_interruptible = genpd_lock_interruptible_mtx, + .unlock = genpd_unlock_mtx, +}; + +static void genpd_lock_spin(struct generic_pm_domain *genpd) + __acquires(&genpd->slock) +{ + unsigned long flags; + + spin_lock_irqsave(&genpd->slock, flags); + genpd->lock_flags = flags; +} + +static void genpd_lock_nested_spin(struct generic_pm_domain *genpd, + int depth) + __acquires(&genpd->slock) +{ + unsigned long flags; + + spin_lock_irqsave_nested(&genpd->slock, flags, depth); + genpd->lock_flags = flags; +} + +static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd) + __acquires(&genpd->slock) +{ + unsigned long flags; + + spin_lock_irqsave(&genpd->slock, flags); + genpd->lock_flags = flags; + return 0; +} + +static void genpd_unlock_spin(struct generic_pm_domain *genpd) + __releases(&genpd->slock) +{ + spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags); +} + +static const struct genpd_lock_ops genpd_spin_ops = { + .lock = genpd_lock_spin, + .lock_nested = genpd_lock_nested_spin, + .lock_interruptible = genpd_lock_interruptible_spin, + .unlock = genpd_unlock_spin, +}; + +#define genpd_lock(p) p->lock_ops->lock(p) +#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d) +#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p) +#define genpd_unlock(p) p->lock_ops->unlock(p) + +#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON) +#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE) +#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON) +#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP) +#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN) +#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON) + +static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev, 
+ const struct generic_pm_domain *genpd) +{ + bool ret; + + ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd); + + /* + * Warn once if an IRQ safe device is attached to a no sleep domain, as + * to indicate a suboptimal configuration for PM. For an always on + * domain this isn't case, thus don't warn. + */ + if (ret && !genpd_is_always_on(genpd)) + dev_warn_once(dev, "PM domain %s will not be powered off\n", + genpd->name); + + return ret; +} + +static int genpd_runtime_suspend(struct device *dev); + +/* + * Get the generic PM domain for a particular struct device. + * This validates the struct device pointer, the PM domain pointer, + * and checks that the PM domain pointer is a real generic PM domain. + * Any failure results in NULL being returned. + */ +static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain)) + return NULL; + + /* A genpd's always have its ->runtime_suspend() callback assigned. */ + if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend) + return pd_to_genpd(dev->pm_domain); + + return NULL; +} + +/* + * This should only be used where we are certain that the pm_domain + * attached to the device is a genpd domain. + */ +static struct generic_pm_domain *dev_to_genpd(struct device *dev) +{ + if (IS_ERR_OR_NULL(dev->pm_domain)) + return ERR_PTR(-EINVAL); + + return pd_to_genpd(dev->pm_domain); +} + +static int genpd_stop_dev(const struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, stop, dev); +} + +static int genpd_start_dev(const struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, start, dev); +} + +static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) +{ + bool ret = false; + + if (!WARN_ON(atomic_read(&genpd->sd_count) == 0)) + ret = !!atomic_dec_and_test(&genpd->sd_count); + + return ret; +} + +static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) +{ + atomic_inc(&genpd->sd_count); + smp_mb__after_atomic(); +} + +#ifdef CONFIG_DEBUG_FS +static void genpd_update_accounting(struct generic_pm_domain *genpd) +{ + ktime_t delta, now; + + now = ktime_get(); + delta = ktime_sub(now, genpd->accounting_time); + + /* + * If genpd->status is active, it means we are just + * out of off and so update the idle time and vice + * versa. + */ + if (genpd->status == GENPD_STATE_ON) { + int state_idx = genpd->state_idx; + + genpd->states[state_idx].idle_time = + ktime_add(genpd->states[state_idx].idle_time, delta); + } else { + genpd->on_time = ktime_add(genpd->on_time, delta); + } + + genpd->accounting_time = now; +} +#else +static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} +#endif + +static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct generic_pm_domain_data *pd_data; + struct pm_domain_data *pdd; + struct gpd_link *link; + + /* New requested state is same as Max requested state */ + if (state == genpd->performance_state) + return state; + + /* New requested state is higher than Max requested state */ + if (state > genpd->performance_state) + return state; + + /* Traverse all devices within the domain */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + pd_data = to_gpd_data(pdd); + + if (pd_data->performance_state > state) + state = pd_data->performance_state; + } + + /* + * Traverse all sub-domains within the domain. 
This can be + * done without any additional locking as the link->performance_state + * field is protected by the parent genpd->lock, which is already taken. + * + * Also note that link->performance_state (subdomain's performance state + * requirement to parent domain) is different from + * link->child->performance_state (current performance state requirement + * of the devices/sub-domains of the subdomain) and so can have a + * different value. + * + * Note that we also take vote from powered-off sub-domains into account + * as the same is done for devices right now. + */ + list_for_each_entry(link, &genpd->parent_links, parent_node) { + if (link->performance_state > state) + state = link->performance_state; + } + + return state; +} + +static int _genpd_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state, int depth) +{ + struct generic_pm_domain *parent; + struct gpd_link *link; + int parent_state, ret; + + if (state == genpd->performance_state) + return 0; + + /* Propagate to parents of genpd */ + list_for_each_entry(link, &genpd->child_links, child_node) { + parent = link->parent; + + if (!parent->set_performance_state) + continue; + + /* Find parent's performance state */ + ret = dev_pm_opp_xlate_performance_state(genpd->opp_table, + parent->opp_table, + state); + if (unlikely(ret < 0)) + goto err; + + parent_state = ret; + + genpd_lock_nested(parent, depth + 1); + + link->prev_performance_state = link->performance_state; + link->performance_state = parent_state; + parent_state = _genpd_reeval_performance_state(parent, + parent_state); + ret = _genpd_set_performance_state(parent, parent_state, depth + 1); + if (ret) + link->performance_state = link->prev_performance_state; + + genpd_unlock(parent); + + if (ret) + goto err; + } + + ret = genpd->set_performance_state(genpd, state); + if (ret) + goto err; + + genpd->performance_state = state; + return 0; + +err: + /* Encountered an error, lets rollback */ + list_for_each_entry_continue_reverse(link, &genpd->child_links, + child_node) { + parent = link->parent; + + if (!parent->set_performance_state) + continue; + + genpd_lock_nested(parent, depth + 1); + + parent_state = link->prev_performance_state; + link->performance_state = parent_state; + + parent_state = _genpd_reeval_performance_state(parent, + parent_state); + if (_genpd_set_performance_state(parent, parent_state, depth + 1)) { + pr_err("%s: Failed to roll back to %d performance state\n", + parent->name, parent_state); + } + + genpd_unlock(parent); + } + + return ret; +} + +/** + * dev_pm_genpd_set_performance_state- Set performance state of device's power + * domain. + * + * @dev: Device for which the performance-state needs to be set. + * @state: Target performance state of the device. This can be set as 0 when the + * device doesn't have any performance state constraints left (And so + * the device wouldn't participate anymore to find the target + * performance state of the genpd). + * + * It is assumed that the users guarantee that the genpd wouldn't be detached + * while this routine is getting called. + * + * Returns 0 on success and negative error values on failures. 
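+ *
+ * A consumer-side sketch (the state value 2 is purely illustrative; real
+ * values are normally derived from the device's OPP/required-opps data):
+ *
+ *	ret = dev_pm_genpd_set_performance_state(dev, 2);
+ *	if (ret)
+ *		return ret;
+ *	... run the use case ...
+ *	dev_pm_genpd_set_performance_state(dev, 0);
+ *
+ * Passing 0 afterwards drops the device's constraint again, as described for
+ * @state above.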
+ */ +int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state) +{ + struct generic_pm_domain *genpd; + struct generic_pm_domain_data *gpd_data; + unsigned int prev; + int ret; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return -ENODEV; + + if (unlikely(!genpd->set_performance_state)) + return -EINVAL; + + if (WARN_ON(!dev->power.subsys_data || + !dev->power.subsys_data->domain_data)) + return -EINVAL; + + genpd_lock(genpd); + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + prev = gpd_data->performance_state; + gpd_data->performance_state = state; + + state = _genpd_reeval_performance_state(genpd, state); + ret = _genpd_set_performance_state(genpd, state, 0); + if (ret) + gpd_data->performance_state = prev; + + genpd_unlock(genpd); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state); + +static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) +{ + unsigned int state_idx = genpd->state_idx; + ktime_t time_start; + s64 elapsed_ns; + int ret; + + /* Notify consumers that we are about to power on. */ + ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, + GENPD_NOTIFY_PRE_ON, + GENPD_NOTIFY_OFF, NULL); + ret = notifier_to_errno(ret); + if (ret) + return ret; + + if (!genpd->power_on) + goto out; + + if (!timed) { + ret = genpd->power_on(genpd); + if (ret) + goto err; + + goto out; + } + + time_start = ktime_get(); + ret = genpd->power_on(genpd); + if (ret) + goto err; + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns) + goto out; + + genpd->states[state_idx].power_on_latency_ns = elapsed_ns; + genpd->max_off_time_changed = true; + pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", + genpd->name, "on", elapsed_ns); + +out: + raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); + return 0; +err: + raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, + NULL); + return ret; +} + +static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed) +{ + unsigned int state_idx = genpd->state_idx; + ktime_t time_start; + s64 elapsed_ns; + int ret; + + /* Notify consumers that we are about to power off. */ + ret = raw_notifier_call_chain_robust(&genpd->power_notifiers, + GENPD_NOTIFY_PRE_OFF, + GENPD_NOTIFY_ON, NULL); + ret = notifier_to_errno(ret); + if (ret) + return ret; + + if (!genpd->power_off) + goto out; + + if (!timed) { + ret = genpd->power_off(genpd); + if (ret) + goto busy; + + goto out; + } + + time_start = ktime_get(); + ret = genpd->power_off(genpd); + if (ret) + goto busy; + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns) + goto out; + + genpd->states[state_idx].power_off_latency_ns = elapsed_ns; + genpd->max_off_time_changed = true; + pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n", + genpd->name, "off", elapsed_ns); + +out: + raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF, + NULL); + return 0; +busy: + raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL); + return ret; +} + +/** + * genpd_queue_power_off_work - Queue up the execution of genpd_power_off(). + * @genpd: PM domain to power off. + * + * Queue up the execution of genpd_power_off() unless it's already been done + * before. 
+ */ +static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) +{ + queue_work(pm_wq, &genpd->power_off_work); +} + +/** + * genpd_power_off - Remove power from a given PM domain. + * @genpd: PM domain to power down. + * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the + * RPM status of the releated device is in an intermediate state, not yet turned + * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not + * be RPM_SUSPENDED, while it tries to power off the PM domain. + * + * If all of the @genpd's devices have been suspended and all of its subdomains + * have been powered down, remove power from @genpd. + */ +static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, + unsigned int depth) +{ + struct pm_domain_data *pdd; + struct gpd_link *link; + unsigned int not_suspended = 0; + int ret; + + /* + * Do not try to power off the domain in the following situations: + * (1) The domain is already in the "power off" state. + * (2) System suspend is in progress. + */ + if (!genpd_status_on(genpd) || genpd->prepared_count > 0) + return 0; + + /* + * Abort power off for the PM domain in the following situations: + * (1) The domain is configured as always on. + * (2) When the domain has a subdomain being powered on. + */ + if (genpd_is_always_on(genpd) || + genpd_is_rpm_always_on(genpd) || + atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + enum pm_qos_flags_status stat; + + stat = dev_pm_qos_flags(pdd->dev, PM_QOS_FLAG_NO_POWER_OFF); + if (stat > PM_QOS_FLAGS_NONE) + return -EBUSY; + + /* + * Do not allow PM domain to be powered off, when an IRQ safe + * device is part of a non-IRQ safe domain. + */ + if (!pm_runtime_suspended(pdd->dev) || + irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd)) + not_suspended++; + } + + if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on)) + return -EBUSY; + + if (genpd->gov && genpd->gov->power_down_ok) { + if (!genpd->gov->power_down_ok(&genpd->domain)) + return -EAGAIN; + } + + /* Default to shallowest state. */ + if (!genpd->gov) + genpd->state_idx = 0; + + /* Don't power off, if a child domain is waiting to power on. */ + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; + + ret = _genpd_power_off(genpd, true); + if (ret) { + genpd->states[genpd->state_idx].rejected++; + return ret; + } + + genpd->status = GENPD_STATE_OFF; + genpd_update_accounting(genpd); + genpd->states[genpd->state_idx].usage++; + + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_dec(link->parent); + genpd_lock_nested(link->parent, depth + 1); + genpd_power_off(link->parent, false, depth + 1); + genpd_unlock(link->parent); + } + + return 0; +} + +/** + * genpd_power_on - Restore power to a given PM domain and its parents. + * @genpd: PM domain to power up. + * @depth: nesting count for lockdep. + * + * Restore power to @genpd and all of its parents so that it is possible to + * resume a device belonging to it. + */ +static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) +{ + struct gpd_link *link; + int ret = 0; + + if (genpd_status_on(genpd)) + return 0; + + /* + * The list is guaranteed not to change while the loop below is being + * executed, unless one of the parents' .power_on() callbacks fiddles + * with it. 
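+ *
+ * Each parent's subdomain counter is incremented before that parent is
+ * powered on, so the parent cannot be powered off again until this
+ * power-up either completes or is rolled back in the error path below.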
+ */ + list_for_each_entry(link, &genpd->child_links, child_node) { + struct generic_pm_domain *parent = link->parent; + + genpd_sd_counter_inc(parent); + + genpd_lock_nested(parent, depth + 1); + ret = genpd_power_on(parent, depth + 1); + genpd_unlock(parent); + + if (ret) { + genpd_sd_counter_dec(parent); + goto err; + } + } + + ret = _genpd_power_on(genpd, true); + if (ret) + goto err; + + genpd->status = GENPD_STATE_ON; + genpd_update_accounting(genpd); + + return 0; + + err: + list_for_each_entry_continue_reverse(link, + &genpd->child_links, + child_node) { + genpd_sd_counter_dec(link->parent); + genpd_lock_nested(link->parent, depth + 1); + genpd_power_off(link->parent, false, depth + 1); + genpd_unlock(link->parent); + } + + return ret; +} + +static int genpd_dev_pm_start(struct device *dev) +{ + struct generic_pm_domain *genpd = dev_to_genpd(dev); + + return genpd_start_dev(genpd, dev); +} + +static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, + unsigned long val, void *ptr) +{ + struct generic_pm_domain_data *gpd_data; + struct device *dev; + + gpd_data = container_of(nb, struct generic_pm_domain_data, nb); + dev = gpd_data->base.dev; + + for (;;) { + struct generic_pm_domain *genpd; + struct pm_domain_data *pdd; + + spin_lock_irq(&dev->power.lock); + + pdd = dev->power.subsys_data ? + dev->power.subsys_data->domain_data : NULL; + if (pdd) { + to_gpd_data(pdd)->td.constraint_changed = true; + genpd = dev_to_genpd(dev); + } else { + genpd = ERR_PTR(-ENODATA); + } + + spin_unlock_irq(&dev->power.lock); + + if (!IS_ERR(genpd)) { + genpd_lock(genpd); + genpd->max_off_time_changed = true; + genpd_unlock(genpd); + } + + dev = dev->parent; + if (!dev || dev->power.ignore_children) + break; + } + + return NOTIFY_DONE; +} + +/** + * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0. + * @work: Work structure used for scheduling the execution of this function. + */ +static void genpd_power_off_work_fn(struct work_struct *work) +{ + struct generic_pm_domain *genpd; + + genpd = container_of(work, struct generic_pm_domain, power_off_work); + + genpd_lock(genpd); + genpd_power_off(genpd, false, 0); + genpd_unlock(genpd); +} + +/** + * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks + * @dev: Device to handle. + */ +static int __genpd_runtime_suspend(struct device *dev) +{ + int (*cb)(struct device *__dev); + + if (dev->type && dev->type->pm) + cb = dev->type->pm->runtime_suspend; + else if (dev->class && dev->class->pm) + cb = dev->class->pm->runtime_suspend; + else if (dev->bus && dev->bus->pm) + cb = dev->bus->pm->runtime_suspend; + else + cb = NULL; + + if (!cb && dev->driver && dev->driver->pm) + cb = dev->driver->pm->runtime_suspend; + + return cb ? cb(dev) : 0; +} + +/** + * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks + * @dev: Device to handle. + */ +static int __genpd_runtime_resume(struct device *dev) +{ + int (*cb)(struct device *__dev); + + if (dev->type && dev->type->pm) + cb = dev->type->pm->runtime_resume; + else if (dev->class && dev->class->pm) + cb = dev->class->pm->runtime_resume; + else if (dev->bus && dev->bus->pm) + cb = dev->bus->pm->runtime_resume; + else + cb = NULL; + + if (!cb && dev->driver && dev->driver->pm) + cb = dev->driver->pm->runtime_resume; + + return cb ? cb(dev) : 0; +} + +/** + * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain. + * @dev: Device to suspend. 
+ * + * Carry out a runtime suspend of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int genpd_runtime_suspend(struct device *dev) +{ + struct generic_pm_domain *genpd; + bool (*suspend_ok)(struct device *__dev); + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * A runtime PM centric subsystem/driver may re-use the runtime PM + * callbacks for other purposes than runtime PM. In those scenarios + * runtime PM is disabled. Under these circumstances, we shall skip + * validating/measuring the PM QoS latency. + */ + suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL; + if (runtime_pm && suspend_ok && !suspend_ok(dev)) + return -EBUSY; + + /* Measure suspend latency. */ + time_start = 0; + if (runtime_pm) + time_start = ktime_get(); + + ret = __genpd_runtime_suspend(dev); + if (ret) + return ret; + + ret = genpd_stop_dev(genpd, dev); + if (ret) { + __genpd_runtime_resume(dev); + return ret; + } + + /* Update suspend latency value if the measured time exceeds it. */ + if (runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->suspend_latency_ns) { + td->suspend_latency_ns = elapsed_ns; + dev_dbg(dev, "suspend latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } + + /* + * If power.irq_safe is set, this routine may be run with + * IRQs disabled, so suspend only if the PM domain also is irq_safe. + */ + if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) + return 0; + + genpd_lock(genpd); + genpd_power_off(genpd, true, 0); + genpd_unlock(genpd); + + return 0; +} + +/** + * genpd_runtime_resume - Resume a device belonging to I/O PM domain. + * @dev: Device to resume. + * + * Carry out a runtime resume of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a PM domain consisting of I/O devices. + */ +static int genpd_runtime_resume(struct device *dev) +{ + struct generic_pm_domain *genpd; + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + bool runtime_pm = pm_runtime_enabled(dev); + ktime_t time_start; + s64 elapsed_ns; + int ret; + bool timed = true; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * As we don't power off a non IRQ safe domain, which holds + * an IRQ safe device, we don't need to restore power to it. + */ + if (irq_safe_dev_in_no_sleep_domain(dev, genpd)) { + timed = false; + goto out; + } + + genpd_lock(genpd); + ret = genpd_power_on(genpd, 0); + genpd_unlock(genpd); + + if (ret) + return ret; + + out: + /* Measure resume latency. */ + time_start = 0; + if (timed && runtime_pm) + time_start = ktime_get(); + + ret = genpd_start_dev(genpd, dev); + if (ret) + goto err_poweroff; + + ret = __genpd_runtime_resume(dev); + if (ret) + goto err_stop; + + /* Update resume latency value if the measured time exceeds it. 
*/ + if (timed && runtime_pm) { + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > td->resume_latency_ns) { + td->resume_latency_ns = elapsed_ns; + dev_dbg(dev, "resume latency exceeded, %lld ns\n", + elapsed_ns); + genpd->max_off_time_changed = true; + td->constraint_changed = true; + } + } + + return 0; + +err_stop: + genpd_stop_dev(genpd, dev); +err_poweroff: + if (!pm_runtime_is_irq_safe(dev) || + (pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) { + genpd_lock(genpd); + genpd_power_off(genpd, true, 0); + genpd_unlock(genpd); + } + + return ret; +} + +static bool pd_ignore_unused; +static int __init pd_ignore_unused_setup(char *__unused) +{ + pd_ignore_unused = true; + return 1; +} +__setup("pd_ignore_unused", pd_ignore_unused_setup); + +/** + * genpd_power_off_unused - Power off all PM domains with no devices in use. + */ +static int __init genpd_power_off_unused(void) +{ + struct generic_pm_domain *genpd; + + if (pd_ignore_unused) { + pr_warn("genpd: Not disabling unused power domains\n"); + return 0; + } + + mutex_lock(&gpd_list_lock); + + list_for_each_entry(genpd, &gpd_list, gpd_list_node) + genpd_queue_power_off_work(genpd); + + mutex_unlock(&gpd_list_lock); + + return 0; +} +late_initcall(genpd_power_off_unused); + +#ifdef CONFIG_PM_SLEEP + +/** + * genpd_sync_power_off - Synchronously power off a PM domain and its parents. + * @genpd: PM domain to power off, if possible. + * @use_lock: use the lock. + * @depth: nesting count for lockdep. + * + * Check if the given PM domain can be powered off (during system suspend or + * hibernation) and do that if so. Also, in that case propagate to its parents. + * + * This function is only called in "noirq" and "syscore" stages of system power + * transitions. The "noirq" callbacks may be executed asynchronously, thus in + * these cases the lock must be held. + */ +static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock, + unsigned int depth) +{ + struct gpd_link *link; + + if (!genpd_status_on(genpd) || genpd_is_always_on(genpd)) + return; + + if (genpd->suspended_count != genpd->device_count + || atomic_read(&genpd->sd_count) > 0) + return; + + /* Choose the deepest state when suspending */ + genpd->state_idx = genpd->state_count - 1; + if (_genpd_power_off(genpd, false)) + return; + + genpd->status = GENPD_STATE_OFF; + + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_dec(link->parent); + + if (use_lock) + genpd_lock_nested(link->parent, depth + 1); + + genpd_sync_power_off(link->parent, use_lock, depth + 1); + + if (use_lock) + genpd_unlock(link->parent); + } +} + +/** + * genpd_sync_power_on - Synchronously power on a PM domain and its parents. + * @genpd: PM domain to power on. + * @use_lock: use the lock. + * @depth: nesting count for lockdep. + * + * This function is only called in "noirq" and "syscore" stages of system power + * transitions. The "noirq" callbacks may be executed asynchronously, thus in + * these cases the lock must be held. 
+ */ +static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock, + unsigned int depth) +{ + struct gpd_link *link; + + if (genpd_status_on(genpd)) + return; + + list_for_each_entry(link, &genpd->child_links, child_node) { + genpd_sd_counter_inc(link->parent); + + if (use_lock) + genpd_lock_nested(link->parent, depth + 1); + + genpd_sync_power_on(link->parent, use_lock, depth + 1); + + if (use_lock) + genpd_unlock(link->parent); + } + + _genpd_power_on(genpd, false); + genpd->status = GENPD_STATE_ON; +} + +/** + * resume_needed - Check whether to resume a device before system suspend. + * @dev: Device to check. + * @genpd: PM domain the device belongs to. + * + * There are two cases in which a device that can wake up the system from sleep + * states should be resumed by genpd_prepare(): (1) if the device is enabled + * to wake up the system and it has to remain active for this purpose while the + * system is in the sleep state and (2) if the device is not enabled to wake up + * the system from sleep states and it generally doesn't generate wakeup signals + * by itself (those signals are generated on its behalf by other parts of the + * system). In the latter case it may be necessary to reconfigure the device's + * wakeup settings during system suspend, because it may have been set up to + * signal remote wakeup from the system's working state as needed by runtime PM. + * Return 'true' in either of the above cases. + */ +static bool resume_needed(struct device *dev, + const struct generic_pm_domain *genpd) +{ + bool active_wakeup; + + if (!device_can_wakeup(dev)) + return false; + + active_wakeup = genpd_is_active_wakeup(genpd); + return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; +} + +/** + * genpd_prepare - Start power transition of a device in a PM domain. + * @dev: Device to start the transition of. + * + * Start a power transition of a device (during a system-wide power transition) + * under the assumption that its pm_domain field points to the domain member of + * an object of type struct generic_pm_domain representing a PM domain + * consisting of I/O devices. + */ +static int genpd_prepare(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * If a wakeup request is pending for the device, it should be woken up + * at this point and a system wakeup event should be reported if it's + * set up to wake up the system from sleep states. + */ + if (resume_needed(dev, genpd)) + pm_runtime_resume(dev); + + genpd_lock(genpd); + + if (genpd->prepared_count++ == 0) + genpd->suspended_count = 0; + + genpd_unlock(genpd); + + ret = pm_generic_prepare(dev); + if (ret < 0) { + genpd_lock(genpd); + + genpd->prepared_count--; + + genpd_unlock(genpd); + } + + /* Never return 1, as genpd don't cope with the direct_complete path. */ + return ret >= 0 ? 0 : ret; +} + +/** + * genpd_finish_suspend - Completion of suspend or hibernation of device in an + * I/O pm domain. + * @dev: Device to suspend. + * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback. + * + * Stop the device and remove power from the domain if all devices in it have + * been stopped. 
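+ *
+ * If the device remains part of the wakeup path and the domain is configured
+ * for active wakeup (GENPD_FLAG_ACTIVE_WAKEUP), the device is left running
+ * and the domain stays powered on.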
+ */ +static int genpd_finish_suspend(struct device *dev, bool poweroff) +{ + struct generic_pm_domain *genpd; + int ret = 0; + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (poweroff) + ret = pm_generic_poweroff_noirq(dev); + else + ret = pm_generic_suspend_noirq(dev); + if (ret) + return ret; + + if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) + return 0; + + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_stop_dev(genpd, dev); + if (ret) { + if (poweroff) + pm_generic_restore_noirq(dev); + else + pm_generic_resume_noirq(dev); + return ret; + } + } + + genpd_lock(genpd); + genpd->suspended_count++; + genpd_sync_power_off(genpd, true, 0); + genpd_unlock(genpd); + + return 0; +} + +/** + * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain. + * @dev: Device to suspend. + * + * Stop the device and remove power from the domain if all devices in it have + * been stopped. + */ +static int genpd_suspend_noirq(struct device *dev) +{ + dev_dbg(dev, "%s()\n", __func__); + + return genpd_finish_suspend(dev, false); +} + +/** + * genpd_resume_noirq - Start of resume of device in an I/O PM domain. + * @dev: Device to resume. + * + * Restore power to the device's PM domain, if necessary, and start the device. + */ +static int genpd_resume_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) + return pm_generic_resume_noirq(dev); + + genpd_lock(genpd); + genpd_sync_power_on(genpd, true, 0); + genpd->suspended_count--; + genpd_unlock(genpd); + + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_start_dev(genpd, dev); + if (ret) + return ret; + } + + return pm_generic_resume_noirq(dev); +} + +/** + * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain. + * @dev: Device to freeze. + * + * Carry out a late freeze of a device under the assumption that its + * pm_domain field points to the domain member of an object of type + * struct generic_pm_domain representing a power domain consisting of I/O + * devices. + */ +static int genpd_freeze_noirq(struct device *dev) +{ + const struct generic_pm_domain *genpd; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + ret = pm_generic_freeze_noirq(dev); + if (ret) + return ret; + + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) + ret = genpd_stop_dev(genpd, dev); + + return ret; +} + +/** + * genpd_thaw_noirq - Early thaw of device in an I/O PM domain. + * @dev: Device to thaw. + * + * Start the device, unless power has been removed from the domain already + * before the system transition. + */ +static int genpd_thaw_noirq(struct device *dev) +{ + const struct generic_pm_domain *genpd; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_start_dev(genpd, dev); + if (ret) + return ret; + } + + return pm_generic_thaw_noirq(dev); +} + +/** + * genpd_poweroff_noirq - Completion of hibernation of device in an + * I/O PM domain. + * @dev: Device to poweroff. 
+ * + * Stop the device and remove power from the domain if all devices in it have + * been stopped. + */ +static int genpd_poweroff_noirq(struct device *dev) +{ + dev_dbg(dev, "%s()\n", __func__); + + return genpd_finish_suspend(dev, true); +} + +/** + * genpd_restore_noirq - Start of restore of device in an I/O PM domain. + * @dev: Device to resume. + * + * Make sure the domain will be in the same power state as before the + * hibernation the system is resuming from and start the device if necessary. + */ +static int genpd_restore_noirq(struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return -EINVAL; + + /* + * At this point suspended_count == 0 means we are being run for the + * first time for the given domain in the present cycle. + */ + genpd_lock(genpd); + if (genpd->suspended_count++ == 0) { + /* + * The boot kernel might put the domain into arbitrary state, + * so make it appear as powered off to genpd_sync_power_on(), + * so that it tries to power it on in case it was really off. + */ + genpd->status = GENPD_STATE_OFF; + } + + genpd_sync_power_on(genpd, true, 0); + genpd_unlock(genpd); + + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_start_dev(genpd, dev); + if (ret) + return ret; + } + + return pm_generic_restore_noirq(dev); +} + +/** + * genpd_complete - Complete power transition of a device in a power domain. + * @dev: Device to complete the transition of. + * + * Complete a power transition of a device (during a system-wide power + * transition) under the assumption that its pm_domain field points to the + * domain member of an object of type struct generic_pm_domain representing + * a power domain consisting of I/O devices. + */ +static void genpd_complete(struct device *dev) +{ + struct generic_pm_domain *genpd; + + dev_dbg(dev, "%s()\n", __func__); + + genpd = dev_to_genpd(dev); + if (IS_ERR(genpd)) + return; + + pm_generic_complete(dev); + + genpd_lock(genpd); + + genpd->prepared_count--; + if (!genpd->prepared_count) + genpd_queue_power_off_work(genpd); + + genpd_unlock(genpd); +} + +/** + * genpd_syscore_switch - Switch power during system core suspend or resume. + * @dev: Device that normally is marked as "always on" to switch power for. + * + * This routine may only be called during the system core (syscore) suspend or + * resume phase for devices whose "always on" flags are set. 
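+ *
+ * A sketch of the intended use through the wrappers below (the timer device
+ * and syscore_ops are hypothetical): a driver for an "always on" device calls
+ * pm_genpd_syscore_poweroff() on its way into the syscore suspend phase and
+ * pm_genpd_syscore_poweron() on the way out, so the domain is only off while
+ * the device genuinely does not need to be functional:
+ *
+ *	static int my_timer_syscore_suspend(void)
+ *	{
+ *		pm_genpd_syscore_poweroff(my_timer_dev);
+ *		return 0;
+ *	}
+ *
+ *	static void my_timer_syscore_resume(void)
+ *	{
+ *		pm_genpd_syscore_poweron(my_timer_dev);
+ *	}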
+ */ +static void genpd_syscore_switch(struct device *dev, bool suspend) +{ + struct generic_pm_domain *genpd; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return; + + if (suspend) { + genpd->suspended_count++; + genpd_sync_power_off(genpd, false, 0); + } else { + genpd_sync_power_on(genpd, false, 0); + genpd->suspended_count--; + } +} + +void pm_genpd_syscore_poweroff(struct device *dev) +{ + genpd_syscore_switch(dev, true); +} +EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff); + +void pm_genpd_syscore_poweron(struct device *dev) +{ + genpd_syscore_switch(dev, false); +} +EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); + +#else /* !CONFIG_PM_SLEEP */ + +#define genpd_prepare NULL +#define genpd_suspend_noirq NULL +#define genpd_resume_noirq NULL +#define genpd_freeze_noirq NULL +#define genpd_thaw_noirq NULL +#define genpd_poweroff_noirq NULL +#define genpd_restore_noirq NULL +#define genpd_complete NULL + +#endif /* CONFIG_PM_SLEEP */ + +static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev) +{ + struct generic_pm_domain_data *gpd_data; + int ret; + + ret = dev_pm_get_subsys_data(dev); + if (ret) + return ERR_PTR(ret); + + gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); + if (!gpd_data) { + ret = -ENOMEM; + goto err_put; + } + + gpd_data->base.dev = dev; + gpd_data->td.constraint_changed = true; + gpd_data->td.effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; + gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; + + spin_lock_irq(&dev->power.lock); + + if (dev->power.subsys_data->domain_data) { + ret = -EINVAL; + goto err_free; + } + + dev->power.subsys_data->domain_data = &gpd_data->base; + + spin_unlock_irq(&dev->power.lock); + + return gpd_data; + + err_free: + spin_unlock_irq(&dev->power.lock); + kfree(gpd_data); + err_put: + dev_pm_put_subsys_data(dev); + return ERR_PTR(ret); +} + +static void genpd_free_dev_data(struct device *dev, + struct generic_pm_domain_data *gpd_data) +{ + spin_lock_irq(&dev->power.lock); + + dev->power.subsys_data->domain_data = NULL; + + spin_unlock_irq(&dev->power.lock); + + kfree(gpd_data); + dev_pm_put_subsys_data(dev); +} + +static void genpd_update_cpumask(struct generic_pm_domain *genpd, + int cpu, bool set, unsigned int depth) +{ + struct gpd_link *link; + + if (!genpd_is_cpu_domain(genpd)) + return; + + list_for_each_entry(link, &genpd->child_links, child_node) { + struct generic_pm_domain *parent = link->parent; + + genpd_lock_nested(parent, depth + 1); + genpd_update_cpumask(parent, cpu, set, depth + 1); + genpd_unlock(parent); + } + + if (set) + cpumask_set_cpu(cpu, genpd->cpus); + else + cpumask_clear_cpu(cpu, genpd->cpus); +} + +static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu) +{ + if (cpu >= 0) + genpd_update_cpumask(genpd, cpu, true, 0); +} + +static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu) +{ + if (cpu >= 0) + genpd_update_cpumask(genpd, cpu, false, 0); +} + +static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev) +{ + int cpu; + + if (!genpd_is_cpu_domain(genpd)) + return -1; + + for_each_possible_cpu(cpu) { + if (get_cpu_device(cpu) == dev) + return cpu; + } + + return -1; +} + +static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + struct device *base_dev) +{ + struct generic_pm_domain_data *gpd_data; + int ret; + + dev_dbg(dev, "%s()\n", __func__); + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)) + return -EINVAL; + + gpd_data = genpd_alloc_dev_data(dev); + if (IS_ERR(gpd_data)) + return 
PTR_ERR(gpd_data); + + gpd_data->cpu = genpd_get_cpu(genpd, base_dev); + + ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; + if (ret) + goto out; + + genpd_lock(genpd); + + genpd_set_cpumask(genpd, gpd_data->cpu); + dev_pm_domain_set(dev, &genpd->domain); + + genpd->device_count++; + genpd->max_off_time_changed = true; + + list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); + + genpd_unlock(genpd); + out: + if (ret) + genpd_free_dev_data(dev, gpd_data); + else + dev_pm_qos_add_notifier(dev, &gpd_data->nb, + DEV_PM_QOS_RESUME_LATENCY); + + return ret; +} + +/** + * pm_genpd_add_device - Add a device to an I/O PM domain. + * @genpd: PM domain to add the device to. + * @dev: Device to be added. + */ +int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) +{ + int ret; + + mutex_lock(&gpd_list_lock); + ret = genpd_add_device(genpd, dev, dev); + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_add_device); + +static int genpd_remove_device(struct generic_pm_domain *genpd, + struct device *dev) +{ + struct generic_pm_domain_data *gpd_data; + struct pm_domain_data *pdd; + int ret = 0; + + dev_dbg(dev, "%s()\n", __func__); + + pdd = dev->power.subsys_data->domain_data; + gpd_data = to_gpd_data(pdd); + dev_pm_qos_remove_notifier(dev, &gpd_data->nb, + DEV_PM_QOS_RESUME_LATENCY); + + genpd_lock(genpd); + + if (genpd->prepared_count > 0) { + ret = -EAGAIN; + goto out; + } + + genpd->device_count--; + genpd->max_off_time_changed = true; + + genpd_clear_cpumask(genpd, gpd_data->cpu); + dev_pm_domain_set(dev, NULL); + + list_del_init(&pdd->list_node); + + genpd_unlock(genpd); + + if (genpd->detach_dev) + genpd->detach_dev(genpd, dev); + + genpd_free_dev_data(dev, gpd_data); + + return 0; + + out: + genpd_unlock(genpd); + dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY); + + return ret; +} + +/** + * pm_genpd_remove_device - Remove a device from an I/O PM domain. + * @dev: Device to be removed. + */ +int pm_genpd_remove_device(struct device *dev) +{ + struct generic_pm_domain *genpd = dev_to_genpd_safe(dev); + + if (!genpd) + return -EINVAL; + + return genpd_remove_device(genpd, dev); +} +EXPORT_SYMBOL_GPL(pm_genpd_remove_device); + +/** + * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev + * + * @dev: Device that should be associated with the notifier + * @nb: The notifier block to register + * + * Users may call this function to add a genpd power on/off notifier for an + * attached @dev. Only one notifier per device is allowed. The notifier is + * sent when genpd is powering on/off the PM domain. + * + * It is assumed that the user guarantee that the genpd wouldn't be detached + * while this routine is getting called. + * + * Returns 0 on success and negative error values on failures. 
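+ *
+ * A minimal consumer sketch (the callback and its helpers are hypothetical).
+ * The notifier receives the GENPD_NOTIFY_PRE_OFF/OFF/PRE_ON/ON events sent by
+ * _genpd_power_on() and _genpd_power_off() above; returning NOTIFY_BAD (or a
+ * notifier_from_errno() value) from a PRE_* notification makes the power
+ * transition fail:
+ *
+ *	static int foo_genpd_cb(struct notifier_block *nb,
+ *				unsigned long action, void *data)
+ *	{
+ *		if (action == GENPD_NOTIFY_PRE_OFF)
+ *			foo_save_context();
+ *		else if (action == GENPD_NOTIFY_ON)
+ *			foo_restore_context();
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block foo_nb = { .notifier_call = foo_genpd_cb };
+ *
+ *	ret = dev_pm_genpd_add_notifier(dev, &foo_nb);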
+ */ +int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb) +{ + struct generic_pm_domain *genpd; + struct generic_pm_domain_data *gpd_data; + int ret; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return -ENODEV; + + if (WARN_ON(!dev->power.subsys_data || + !dev->power.subsys_data->domain_data)) + return -EINVAL; + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + if (gpd_data->power_nb) + return -EEXIST; + + genpd_lock(genpd); + ret = raw_notifier_chain_register(&genpd->power_notifiers, nb); + genpd_unlock(genpd); + + if (ret) { + dev_warn(dev, "failed to add notifier for PM domain %s\n", + genpd->name); + return ret; + } + + gpd_data->power_nb = nb; + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier); + +/** + * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev + * + * @dev: Device that is associated with the notifier + * + * Users may call this function to remove a genpd power on/off notifier for an + * attached @dev. + * + * It is assumed that the user guarantee that the genpd wouldn't be detached + * while this routine is getting called. + * + * Returns 0 on success and negative error values on failures. + */ +int dev_pm_genpd_remove_notifier(struct device *dev) +{ + struct generic_pm_domain *genpd; + struct generic_pm_domain_data *gpd_data; + int ret; + + genpd = dev_to_genpd_safe(dev); + if (!genpd) + return -ENODEV; + + if (WARN_ON(!dev->power.subsys_data || + !dev->power.subsys_data->domain_data)) + return -EINVAL; + + gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); + if (!gpd_data->power_nb) + return -ENODEV; + + genpd_lock(genpd); + ret = raw_notifier_chain_unregister(&genpd->power_notifiers, + gpd_data->power_nb); + genpd_unlock(genpd); + + if (ret) { + dev_warn(dev, "failed to remove notifier for PM domain %s\n", + genpd->name); + return ret; + } + + gpd_data->power_nb = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier); + +static int genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) +{ + struct gpd_link *link, *itr; + int ret = 0; + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain) + || genpd == subdomain) + return -EINVAL; + + /* + * If the domain can be powered on/off in an IRQ safe + * context, ensure that the subdomain can also be + * powered on/off in that context. + */ + if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) { + WARN(1, "Parent %s of subdomain %s must be IRQ safe\n", + genpd->name, subdomain->name); + return -EINVAL; + } + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + genpd_lock(subdomain); + genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); + + if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) { + ret = -EINVAL; + goto out; + } + + list_for_each_entry(itr, &genpd->parent_links, parent_node) { + if (itr->child == subdomain && itr->parent == genpd) { + ret = -EINVAL; + goto out; + } + } + + link->parent = genpd; + list_add_tail(&link->parent_node, &genpd->parent_links); + link->child = subdomain; + list_add_tail(&link->child_node, &subdomain->child_links); + if (genpd_status_on(subdomain)) + genpd_sd_counter_inc(genpd); + + out: + genpd_unlock(genpd); + genpd_unlock(subdomain); + if (ret) + kfree(link); + return ret; +} + +/** + * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. + * @genpd: Leader PM domain to add the subdomain to. + * @subdomain: Subdomain to be added. 
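+ *
+ * A provider-side sketch (the domain names are hypothetical): once both
+ * domains have been set up with pm_genpd_init(), the child is linked under
+ * its parent with:
+ *
+ *	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
+ *
+ * As genpd_add_subdomain() above checks, the parent must itself be IRQ safe
+ * whenever the subdomain is IRQ safe.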
+ */ +int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) +{ + int ret; + + mutex_lock(&gpd_list_lock); + ret = genpd_add_subdomain(genpd, subdomain); + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain); + +/** + * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. + * @genpd: Leader PM domain to remove the subdomain from. + * @subdomain: Subdomain to be removed. + */ +int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, + struct generic_pm_domain *subdomain) +{ + struct gpd_link *l, *link; + int ret = -EINVAL; + + if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) + return -EINVAL; + + genpd_lock(subdomain); + genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING); + + if (!list_empty(&subdomain->parent_links) || subdomain->device_count) { + pr_warn("%s: unable to remove subdomain %s\n", + genpd->name, subdomain->name); + ret = -EBUSY; + goto out; + } + + list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) { + if (link->child != subdomain) + continue; + + list_del(&link->parent_node); + list_del(&link->child_node); + kfree(link); + if (genpd_status_on(subdomain)) + genpd_sd_counter_dec(genpd); + + ret = 0; + break; + } + +out: + genpd_unlock(genpd); + genpd_unlock(subdomain); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain); + +static void genpd_free_default_power_state(struct genpd_power_state *states, + unsigned int state_count) +{ + kfree(states); +} + +static int genpd_set_default_power_state(struct generic_pm_domain *genpd) +{ + struct genpd_power_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + genpd->states = state; + genpd->state_count = 1; + genpd->free_states = genpd_free_default_power_state; + + return 0; +} + +static void genpd_lock_init(struct generic_pm_domain *genpd) +{ + if (genpd->flags & GENPD_FLAG_IRQ_SAFE) { + spin_lock_init(&genpd->slock); + genpd->lock_ops = &genpd_spin_ops; + } else { + mutex_init(&genpd->mlock); + genpd->lock_ops = &genpd_mtx_ops; + } +} + +/** + * pm_genpd_init - Initialize a generic I/O PM domain object. + * @genpd: PM domain object to initialize. + * @gov: PM domain governor to associate with the domain (may be NULL). + * @is_off: Initial value of the domain's power_is_off field. + * + * Returns 0 on successful initialization, else a negative error code. + */ +int pm_genpd_init(struct generic_pm_domain *genpd, + struct dev_power_governor *gov, bool is_off) +{ + int ret; + + if (IS_ERR_OR_NULL(genpd)) + return -EINVAL; + + INIT_LIST_HEAD(&genpd->parent_links); + INIT_LIST_HEAD(&genpd->child_links); + INIT_LIST_HEAD(&genpd->dev_list); + RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers); + genpd_lock_init(genpd); + genpd->gov = gov; + INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); + atomic_set(&genpd->sd_count, 0); + genpd->status = is_off ? 
GENPD_STATE_OFF : GENPD_STATE_ON; + genpd->device_count = 0; + genpd->max_off_time_ns = -1; + genpd->max_off_time_changed = true; + genpd->provider = NULL; + genpd->has_provider = false; + genpd->accounting_time = ktime_get(); + genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; + genpd->domain.ops.runtime_resume = genpd_runtime_resume; + genpd->domain.ops.prepare = genpd_prepare; + genpd->domain.ops.suspend_noirq = genpd_suspend_noirq; + genpd->domain.ops.resume_noirq = genpd_resume_noirq; + genpd->domain.ops.freeze_noirq = genpd_freeze_noirq; + genpd->domain.ops.thaw_noirq = genpd_thaw_noirq; + genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq; + genpd->domain.ops.restore_noirq = genpd_restore_noirq; + genpd->domain.ops.complete = genpd_complete; + genpd->domain.start = genpd_dev_pm_start; + + if (genpd->flags & GENPD_FLAG_PM_CLK) { + genpd->dev_ops.stop = pm_clk_suspend; + genpd->dev_ops.start = pm_clk_resume; + } + + /* Always-on domains must be powered on at initialization. */ + if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) && + !genpd_status_on(genpd)) + return -EINVAL; + + if (genpd_is_cpu_domain(genpd) && + !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL)) + return -ENOMEM; + + /* Use only one "off" state if there were no states declared */ + if (genpd->state_count == 0) { + ret = genpd_set_default_power_state(genpd); + if (ret) { + if (genpd_is_cpu_domain(genpd)) + free_cpumask_var(genpd->cpus); + return ret; + } + } else if (!gov && genpd->state_count > 1) { + pr_warn("%s: no governor for states\n", genpd->name); + } + + device_initialize(&genpd->dev); + dev_set_name(&genpd->dev, "%s", genpd->name); + + mutex_lock(&gpd_list_lock); + list_add(&genpd->gpd_list_node, &gpd_list); + mutex_unlock(&gpd_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(pm_genpd_init); + +static int genpd_remove(struct generic_pm_domain *genpd) +{ + struct gpd_link *l, *link; + + if (IS_ERR_OR_NULL(genpd)) + return -EINVAL; + + genpd_lock(genpd); + + if (genpd->has_provider) { + genpd_unlock(genpd); + pr_err("Provider present, unable to remove %s\n", genpd->name); + return -EBUSY; + } + + if (!list_empty(&genpd->parent_links) || genpd->device_count) { + genpd_unlock(genpd); + pr_err("%s: unable to remove %s\n", __func__, genpd->name); + return -EBUSY; + } + + list_for_each_entry_safe(link, l, &genpd->child_links, child_node) { + list_del(&link->parent_node); + list_del(&link->child_node); + kfree(link); + } + + list_del(&genpd->gpd_list_node); + genpd_unlock(genpd); + cancel_work_sync(&genpd->power_off_work); + if (genpd_is_cpu_domain(genpd)) + free_cpumask_var(genpd->cpus); + if (genpd->free_states) + genpd->free_states(genpd->states, genpd->state_count); + + pr_debug("%s: removed %s\n", __func__, genpd->name); + + return 0; +} + +/** + * pm_genpd_remove - Remove a generic I/O PM domain + * @genpd: Pointer to PM domain that is to be removed. + * + * To remove the PM domain, this function: + * - Removes the PM domain as a subdomain to any parent domains, + * if it was added. + * - Removes the PM domain from the list of registered PM domains. + * + * The PM domain will only be removed, if the associated provider has + * been removed, it is not a parent to any other PM domain and has no + * devices associated with it. 
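+ *
+ * A teardown sketch for a provider driver (names hypothetical). As checked in
+ * genpd_remove() above, the provider must be deleted first, otherwise the
+ * call fails with -EBUSY:
+ *
+ *	of_genpd_del_provider(np);
+ *	ret = pm_genpd_remove(&my_pd);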
+ */ +int pm_genpd_remove(struct generic_pm_domain *genpd) +{ + int ret; + + mutex_lock(&gpd_list_lock); + ret = genpd_remove(genpd); + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_remove); + +#ifdef CONFIG_PM_GENERIC_DOMAINS_OF + +/* + * Device Tree based PM domain providers. + * + * The code below implements generic device tree based PM domain providers that + * bind device tree nodes with generic PM domains registered in the system. + * + * Any driver that registers generic PM domains and needs to support binding of + * devices to these domains is supposed to register a PM domain provider, which + * maps a PM domain specifier retrieved from the device tree to a PM domain. + * + * Two simple mapping functions have been provided for convenience: + * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping. + * - genpd_xlate_onecell() for mapping of multiple PM domains per node by + * index. + */ + +/** + * struct of_genpd_provider - PM domain provider registration structure + * @link: Entry in global list of PM domain providers + * @node: Pointer to device tree node of PM domain provider + * @xlate: Provider-specific xlate callback mapping a set of specifier cells + * into a PM domain. + * @data: context pointer to be passed into @xlate callback + */ +struct of_genpd_provider { + struct list_head link; + struct device_node *node; + genpd_xlate_t xlate; + void *data; +}; + +/* List of registered PM domain providers. */ +static LIST_HEAD(of_genpd_providers); +/* Mutex to protect the list above. */ +static DEFINE_MUTEX(of_genpd_mutex); + +/** + * genpd_xlate_simple() - Xlate function for direct node-domain mapping + * @genpdspec: OF phandle args to map into a PM domain + * @data: xlate function private data - pointer to struct generic_pm_domain + * + * This is a generic xlate function that can be used to model PM domains that + * have their own device tree nodes. The private data of xlate function needs + * to be a valid pointer to struct generic_pm_domain. + */ +static struct generic_pm_domain *genpd_xlate_simple( + struct of_phandle_args *genpdspec, + void *data) +{ + return data; +} + +/** + * genpd_xlate_onecell() - Xlate function using a single index. + * @genpdspec: OF phandle args to map into a PM domain + * @data: xlate function private data - pointer to struct genpd_onecell_data + * + * This is a generic xlate function that can be used to model simple PM domain + * controllers that have one device tree node and provide multiple PM domains. + * A single cell is used as an index into an array of PM domains specified in + * the genpd_onecell_data struct when registering the provider. + */ +static struct generic_pm_domain *genpd_xlate_onecell( + struct of_phandle_args *genpdspec, + void *data) +{ + struct genpd_onecell_data *genpd_data = data; + unsigned int idx = genpdspec->args[0]; + + if (genpdspec->args_count != 1) + return ERR_PTR(-EINVAL); + + if (idx >= genpd_data->num_domains) { + pr_err("%s: invalid domain index %u\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + if (!genpd_data->domains[idx]) + return ERR_PTR(-ENOENT); + + return genpd_data->domains[idx]; +} + +/** + * genpd_add_provider() - Register a PM domain provider for a node + * @np: Device node pointer associated with the PM domain provider. + * @xlate: Callback for decoding PM domain from phandle arguments. + * @data: Context pointer for @xlate callback. 
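+ *
+ * Drivers do not call this helper directly; they register through the
+ * of_genpd_add_provider_simple()/of_genpd_add_provider_onecell() wrappers
+ * below. A registration sketch for a simple provider (names hypothetical):
+ *
+ *	pm_genpd_init(&my_pd, NULL, false);
+ *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);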
+ */ +static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, + void *data) +{ + struct of_genpd_provider *cp; + + cp = kzalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) + return -ENOMEM; + + cp->node = of_node_get(np); + cp->data = data; + cp->xlate = xlate; + + mutex_lock(&of_genpd_mutex); + list_add(&cp->link, &of_genpd_providers); + mutex_unlock(&of_genpd_mutex); + pr_debug("Added domain provider from %pOF\n", np); + + return 0; +} + +static bool genpd_present(const struct generic_pm_domain *genpd) +{ + const struct generic_pm_domain *gpd; + + list_for_each_entry(gpd, &gpd_list, gpd_list_node) + if (gpd == genpd) + return true; + return false; +} + +/** + * of_genpd_add_provider_simple() - Register a simple PM domain provider + * @np: Device node pointer associated with the PM domain provider. + * @genpd: Pointer to PM domain associated with the PM domain provider. + */ +int of_genpd_add_provider_simple(struct device_node *np, + struct generic_pm_domain *genpd) +{ + int ret = -EINVAL; + + if (!np || !genpd) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + + if (!genpd_present(genpd)) + goto unlock; + + genpd->dev.of_node = np; + + /* Parse genpd OPP table */ + if (genpd->set_performance_state) { + ret = dev_pm_opp_of_add_table(&genpd->dev); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&genpd->dev, "Failed to add OPP table: %d\n", + ret); + goto unlock; + } + + /* + * Save table for faster processing while setting performance + * state. + */ + genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev); + WARN_ON(IS_ERR(genpd->opp_table)); + } + + ret = genpd_add_provider(np, genpd_xlate_simple, genpd); + if (ret) { + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); + dev_pm_opp_of_remove_table(&genpd->dev); + } + + goto unlock; + } + + genpd->provider = &np->fwnode; + genpd->has_provider = true; + +unlock: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple); + +/** + * of_genpd_add_provider_onecell() - Register a onecell PM domain provider + * @np: Device node pointer associated with the PM domain provider. + * @data: Pointer to the data associated with the PM domain provider. + */ +int of_genpd_add_provider_onecell(struct device_node *np, + struct genpd_onecell_data *data) +{ + struct generic_pm_domain *genpd; + unsigned int i; + int ret = -EINVAL; + + if (!np || !data) + return -EINVAL; + + mutex_lock(&gpd_list_lock); + + if (!data->xlate) + data->xlate = genpd_xlate_onecell; + + for (i = 0; i < data->num_domains; i++) { + genpd = data->domains[i]; + + if (!genpd) + continue; + if (!genpd_present(genpd)) + goto error; + + genpd->dev.of_node = np; + + /* Parse genpd OPP table */ + if (genpd->set_performance_state) { + ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&genpd->dev, "Failed to add OPP table for index %d: %d\n", + i, ret); + goto error; + } + + /* + * Save table for faster processing while setting + * performance state. 
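/*
 * Continuing the hypothetical foo_pd sketch: after pm_genpd_init() a
 * platform driver usually exposes the domain to DT consumers by registering
 * a simple (1:1) provider for its own OF node, and drops the provider again
 * before removing the domain.
 */
static int foo_pd_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_genpd_init(&foo_pd, NULL, true);
	if (ret)
		return ret;

	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &foo_pd);
	if (ret)
		pm_genpd_remove(&foo_pd);

	return ret;
}

static int foo_pd_remove_drv(struct platform_device *pdev)
{
	of_genpd_del_provider(pdev->dev.of_node);
	return pm_genpd_remove(&foo_pd);
}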
+ */ + genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i); + WARN_ON(IS_ERR(genpd->opp_table)); + } + + genpd->provider = &np->fwnode; + genpd->has_provider = true; + } + + ret = genpd_add_provider(np, data->xlate, data); + if (ret < 0) + goto error; + + mutex_unlock(&gpd_list_lock); + + return 0; + +error: + while (i--) { + genpd = data->domains[i]; + + if (!genpd) + continue; + + genpd->provider = NULL; + genpd->has_provider = false; + + if (genpd->set_performance_state) { + dev_pm_opp_put_opp_table(genpd->opp_table); + dev_pm_opp_of_remove_table(&genpd->dev); + } + } + + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell); + +/** + * of_genpd_del_provider() - Remove a previously registered PM domain provider + * @np: Device node pointer associated with the PM domain provider + */ +void of_genpd_del_provider(struct device_node *np) +{ + struct of_genpd_provider *cp, *tmp; + struct generic_pm_domain *gpd; + + mutex_lock(&gpd_list_lock); + mutex_lock(&of_genpd_mutex); + list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) { + if (cp->node == np) { + /* + * For each PM domain associated with the + * provider, set the 'has_provider' to false + * so that the PM domain can be safely removed. + */ + list_for_each_entry(gpd, &gpd_list, gpd_list_node) { + if (gpd->provider == &np->fwnode) { + gpd->has_provider = false; + + if (!gpd->set_performance_state) + continue; + + dev_pm_opp_put_opp_table(gpd->opp_table); + dev_pm_opp_of_remove_table(&gpd->dev); + } + } + + list_del(&cp->link); + of_node_put(cp->node); + kfree(cp); + break; + } + } + mutex_unlock(&of_genpd_mutex); + mutex_unlock(&gpd_list_lock); +} +EXPORT_SYMBOL_GPL(of_genpd_del_provider); + +/** + * genpd_get_from_provider() - Look-up PM domain + * @genpdspec: OF phandle args to use for look-up + * + * Looks for a PM domain provider under the node specified by @genpdspec and if + * found, uses xlate function of the provider to map phandle args to a PM + * domain. + * + * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR() + * on failure. + */ +static struct generic_pm_domain *genpd_get_from_provider( + struct of_phandle_args *genpdspec) +{ + struct generic_pm_domain *genpd = ERR_PTR(-ENOENT); + struct of_genpd_provider *provider; + + if (!genpdspec) + return ERR_PTR(-EINVAL); + + mutex_lock(&of_genpd_mutex); + + /* Check if we have such a provider in our array */ + list_for_each_entry(provider, &of_genpd_providers, link) { + if (provider->node == genpdspec->np) + genpd = provider->xlate(genpdspec, provider->data); + if (!IS_ERR(genpd)) + break; + } + + mutex_unlock(&of_genpd_mutex); + + return genpd; +} + +/** + * of_genpd_add_device() - Add a device to an I/O PM domain + * @genpdspec: OF phandle args to use for look-up PM domain + * @dev: Device to be added. + * + * Looks-up an I/O PM domain based upon phandle args provided and adds + * the device to the PM domain. Returns a negative error code on failure. + */ +int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev) +{ + struct generic_pm_domain *genpd; + int ret; + + mutex_lock(&gpd_list_lock); + + genpd = genpd_get_from_provider(genpdspec); + if (IS_ERR(genpd)) { + ret = PTR_ERR(genpd); + goto out; + } + + ret = genpd_add_device(genpd, dev, dev); + +out: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_device); + +/** + * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 
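/*
 * Sketch of the onecell variant: one provider node exposes several domains,
 * and a consumer selects one with a single cell, e.g.
 * "power-domains = <&foo_pd_ctrl FOO_PD_GPU>" together with
 * "#power-domain-cells = <1>" in the provider node.  All foo_* identifiers
 * are hypothetical; power_on/power_off callbacks and the pm_genpd_init()
 * calls are omitted (see the earlier sketch).
 */
#define FOO_PD_GPU	0
#define FOO_PD_DISP	1

static struct generic_pm_domain foo_gpu_pd = { .name = "foo-gpu-pd" };
static struct generic_pm_domain foo_disp_pd = { .name = "foo-disp-pd" };

static struct generic_pm_domain *foo_pd_domains[] = {
	[FOO_PD_GPU]	= &foo_gpu_pd,
	[FOO_PD_DISP]	= &foo_disp_pd,
};

static struct genpd_onecell_data foo_pd_data = {
	.domains	= foo_pd_domains,
	.num_domains	= ARRAY_SIZE(foo_pd_domains),
	/* .xlate left NULL, so genpd_xlate_onecell() above is used. */
};

static int foo_pd_ctrl_register(struct device_node *np)
{
	return of_genpd_add_provider_onecell(np, &foo_pd_data);
}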
+ * @parent_spec: OF phandle args to use for parent PM domain look-up + * @subdomain_spec: OF phandle args to use for subdomain look-up + * + * Looks-up a parent PM domain and subdomain based upon phandle args + * provided and adds the subdomain to the parent PM domain. Returns a + * negative error code on failure. + */ +int of_genpd_add_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec) +{ + struct generic_pm_domain *parent, *subdomain; + int ret; + + mutex_lock(&gpd_list_lock); + + parent = genpd_get_from_provider(parent_spec); + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); + goto out; + } + + subdomain = genpd_get_from_provider(subdomain_spec); + if (IS_ERR(subdomain)) { + ret = PTR_ERR(subdomain); + goto out; + } + + ret = genpd_add_subdomain(parent, subdomain); + +out: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_add_subdomain); + +/** + * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. + * @parent_spec: OF phandle args to use for parent PM domain look-up + * @subdomain_spec: OF phandle args to use for subdomain look-up + * + * Looks-up a parent PM domain and subdomain based upon phandle args + * provided and removes the subdomain from the parent PM domain. Returns a + * negative error code on failure. + */ +int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec, + struct of_phandle_args *subdomain_spec) +{ + struct generic_pm_domain *parent, *subdomain; + int ret; + + mutex_lock(&gpd_list_lock); + + parent = genpd_get_from_provider(parent_spec); + if (IS_ERR(parent)) { + ret = PTR_ERR(parent); + goto out; + } + + subdomain = genpd_get_from_provider(subdomain_spec); + if (IS_ERR(subdomain)) { + ret = PTR_ERR(subdomain); + goto out; + } + + ret = pm_genpd_remove_subdomain(parent, subdomain); + +out: + mutex_unlock(&gpd_list_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain); + +/** + * of_genpd_remove_last - Remove the last PM domain registered for a provider + * @provider: Pointer to device structure associated with provider + * + * Find the last PM domain that was added by a particular provider and + * remove this PM domain from the list of PM domains. The provider is + * identified by the 'provider' device structure that is passed. The PM + * domain will only be removed, if the provider associated with domain + * has been removed. + * + * Returns a valid pointer to struct generic_pm_domain on success or + * ERR_PTR() on failure. + */ +struct generic_pm_domain *of_genpd_remove_last(struct device_node *np) +{ + struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT); + int ret; + + if (IS_ERR_OR_NULL(np)) + return ERR_PTR(-EINVAL); + + mutex_lock(&gpd_list_lock); + list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) { + if (gpd->provider == &np->fwnode) { + ret = genpd_remove(gpd); + genpd = ret ? ERR_PTR(ret) : gpd; + break; + } + } + mutex_unlock(&gpd_list_lock); + + return genpd; +} +EXPORT_SYMBOL_GPL(of_genpd_remove_last); + +static void genpd_release_dev(struct device *dev) +{ + of_node_put(dev->of_node); + kfree(dev); +} + +static struct bus_type genpd_bus_type = { + .name = "genpd", +}; + +/** + * genpd_dev_pm_detach - Detach a device from its PM domain. + * @dev: Device to detach. + * @power_off: Currently not used + * + * Try to locate a corresponding generic PM domain, which the device was + * attached to previously. If such is found, the device is detached from it. 
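/*
 * Sketch of linking two DT-described domains with of_genpd_add_subdomain().
 * Both provider nodes are assumed to use "#power-domain-cells = <0>", so the
 * of_phandle_args can be built by hand with no argument cells; the helper
 * name is hypothetical.
 */
static int foo_pd_link_subdomain(struct device_node *parent_np,
				 struct device_node *child_np)
{
	struct of_phandle_args parent = { .np = parent_np, .args_count = 0 };
	struct of_phandle_args child = { .np = child_np, .args_count = 0 };

	return of_genpd_add_subdomain(&parent, &child);
}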
+ */ +static void genpd_dev_pm_detach(struct device *dev, bool power_off) +{ + struct generic_pm_domain *pd; + unsigned int i; + int ret = 0; + + pd = dev_to_genpd(dev); + if (IS_ERR(pd)) + return; + + dev_dbg(dev, "removing from PM domain %s\n", pd->name); + + for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) { + ret = genpd_remove_device(pd, dev); + if (ret != -EAGAIN) + break; + + mdelay(i); + cond_resched(); + } + + if (ret < 0) { + dev_err(dev, "failed to remove from PM domain %s: %d", + pd->name, ret); + return; + } + + /* Check if PM domain can be powered off after removing this device. */ + genpd_queue_power_off_work(pd); + + /* Unregister the device if it was created by genpd. */ + if (dev->bus == &genpd_bus_type) + device_unregister(dev); +} + +static void genpd_dev_pm_sync(struct device *dev) +{ + struct generic_pm_domain *pd; + + pd = dev_to_genpd(dev); + if (IS_ERR(pd)) + return; + + genpd_queue_power_off_work(pd); +} + +static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev, + unsigned int index, bool power_on) +{ + struct of_phandle_args pd_args; + struct generic_pm_domain *pd; + int ret; + + ret = of_parse_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells", index, &pd_args); + if (ret < 0) + return ret; + + mutex_lock(&gpd_list_lock); + pd = genpd_get_from_provider(&pd_args); + of_node_put(pd_args.np); + if (IS_ERR(pd)) { + mutex_unlock(&gpd_list_lock); + dev_dbg(dev, "%s() failed to find PM domain: %ld\n", + __func__, PTR_ERR(pd)); + return driver_deferred_probe_check_state(base_dev); + } + + dev_dbg(dev, "adding to PM domain %s\n", pd->name); + + ret = genpd_add_device(pd, dev, base_dev); + mutex_unlock(&gpd_list_lock); + + if (ret < 0) { + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to add to PM domain %s: %d", + pd->name, ret); + return ret; + } + + dev->pm_domain->detach = genpd_dev_pm_detach; + dev->pm_domain->sync = genpd_dev_pm_sync; + + if (power_on) { + genpd_lock(pd); + ret = genpd_power_on(pd, 0); + genpd_unlock(pd); + } + + if (ret) + genpd_remove_device(pd, dev); + + return ret ? -EPROBE_DEFER : 1; +} + +/** + * genpd_dev_pm_attach - Attach a device to its PM domain using DT. + * @dev: Device to attach. + * + * Parse device's OF node to find a PM domain specifier. If such is found, + * attaches the device to retrieved pm_domain ops. + * + * Returns 1 on successfully attached PM domain, 0 when the device don't need a + * PM domain or when multiple power-domains exists for it, else a negative error + * code. Note that if a power-domain exists for the device, but it cannot be + * found or turned on, then return -EPROBE_DEFER to ensure that the device is + * not probed and to re-try again later. + */ +int genpd_dev_pm_attach(struct device *dev) +{ + if (!dev->of_node) + return 0; + + /* + * Devices with multiple PM domains must be attached separately, as we + * can only attach one PM domain per device. + */ + if (of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells") != 1) + return 0; + + return __genpd_dev_pm_attach(dev, dev, 0, true); +} +EXPORT_SYMBOL_GPL(genpd_dev_pm_attach); + +/** + * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains. + * @dev: The device used to lookup the PM domain. + * @index: The index of the PM domain. + * + * Parse device's OF node to find a PM domain specifier at the provided @index. + * If such is found, creates a virtual device and attaches it to the retrieved + * pm_domain ops. 
To deal with detaching of the virtual device, the ->detach() + * callback in the struct dev_pm_domain are assigned to genpd_dev_pm_detach(). + * + * Returns the created virtual device if successfully attached PM domain, NULL + * when the device don't need a PM domain, else an ERR_PTR() in case of + * failures. If a power-domain exists for the device, but cannot be found or + * turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that the device + * is not probed and to re-try again later. + */ +struct device *genpd_dev_pm_attach_by_id(struct device *dev, + unsigned int index) +{ + struct device *virt_dev; + int num_domains; + int ret; + + if (!dev->of_node) + return NULL; + + /* Verify that the index is within a valid range. */ + num_domains = of_count_phandle_with_args(dev->of_node, "power-domains", + "#power-domain-cells"); + if (index >= num_domains) + return NULL; + + /* Allocate and register device on the genpd bus. */ + virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL); + if (!virt_dev) + return ERR_PTR(-ENOMEM); + + dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev)); + virt_dev->bus = &genpd_bus_type; + virt_dev->release = genpd_release_dev; + virt_dev->of_node = of_node_get(dev->of_node); + + ret = device_register(virt_dev); + if (ret) { + put_device(virt_dev); + return ERR_PTR(ret); + } + + /* Try to attach the device to the PM domain at the specified index. */ + ret = __genpd_dev_pm_attach(virt_dev, dev, index, false); + if (ret < 1) { + device_unregister(virt_dev); + return ret ? ERR_PTR(ret) : NULL; + } + + pm_runtime_enable(virt_dev); + genpd_queue_power_off_work(dev_to_genpd(virt_dev)); + + return virt_dev; +} +EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); + +/** + * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains. + * @dev: The device used to lookup the PM domain. + * @name: The name of the PM domain. + * + * Parse device's OF node to find a PM domain specifier using the + * power-domain-names DT property. For further description see + * genpd_dev_pm_attach_by_id(). 
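/*
 * Consumer-side sketch: a device listing more than one entry in its
 * "power-domains" property is not attached automatically by
 * genpd_dev_pm_attach() above, so its driver attaches each domain through a
 * virtual device and ties it to itself with a device link.
 * dev_pm_domain_attach_by_name() (drivers/base/power/common.c) is the usual
 * wrapper around genpd_dev_pm_attach_by_name(); the "mx" domain name is
 * hypothetical.
 */
static int foo_attach_one_domain(struct device *dev)
{
	struct device *pd_dev;
	struct device_link *link;

	pd_dev = dev_pm_domain_attach_by_name(dev, "mx");
	if (IS_ERR(pd_dev))
		return PTR_ERR(pd_dev);
	if (!pd_dev)
		return 0;	/* no "mx" domain described for this device */

	/* Power the domain up and down together with the consumer. */
	link = device_link_add(dev, pd_dev, DL_FLAG_STATELESS |
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link) {
		dev_pm_domain_detach(pd_dev, true);
		return -EINVAL;
	}

	return 0;
}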
+ */ +struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) +{ + int index; + + if (!dev->of_node) + return NULL; + + index = of_property_match_string(dev->of_node, "power-domain-names", + name); + if (index < 0) + return NULL; + + return genpd_dev_pm_attach_by_id(dev, index); +} + +static const struct of_device_id idle_state_match[] = { + { .compatible = "domain-idle-state", }, + { } +}; + +static int genpd_parse_state(struct genpd_power_state *genpd_state, + struct device_node *state_node) +{ + int err; + u32 residency; + u32 entry_latency, exit_latency; + + err = of_property_read_u32(state_node, "entry-latency-us", + &entry_latency); + if (err) { + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); + return -EINVAL; + } + + err = of_property_read_u32(state_node, "exit-latency-us", + &exit_latency); + if (err) { + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); + return -EINVAL; + } + + err = of_property_read_u32(state_node, "min-residency-us", &residency); + if (!err) + genpd_state->residency_ns = 1000LL * residency; + + genpd_state->power_on_latency_ns = 1000LL * exit_latency; + genpd_state->power_off_latency_ns = 1000LL * entry_latency; + genpd_state->fwnode = &state_node->fwnode; + + return 0; +} + +static int genpd_iterate_idle_states(struct device_node *dn, + struct genpd_power_state *states) +{ + int ret; + struct of_phandle_iterator it; + struct device_node *np; + int i = 0; + + ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL); + if (ret <= 0) + return ret == -ENOENT ? 0 : ret; + + /* Loop over the phandles until all the requested entry is found */ + of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) { + np = it.node; + if (!of_match_node(idle_state_match, np)) + continue; + + if (!of_device_is_available(np)) + continue; + + if (states) { + ret = genpd_parse_state(&states[i], np); + if (ret) { + pr_err("Parsing idle state node %pOF failed with err %d\n", + np, ret); + of_node_put(np); + return ret; + } + } + i++; + } + + return i; +} + +/** + * of_genpd_parse_idle_states: Return array of idle states for the genpd. + * + * @dn: The genpd device node + * @states: The pointer to which the state array will be saved. + * @n: The count of elements in the array returned from this function. + * + * Returns the device states parsed from the OF node. The memory for the states + * is allocated by this function and is the responsibility of the caller to + * free the memory after use. If any or zero compatible domain idle states is + * found it returns 0 and in case of errors, a negative error code is returned. + */ +int of_genpd_parse_idle_states(struct device_node *dn, + struct genpd_power_state **states, int *n) +{ + struct genpd_power_state *st; + int ret; + + ret = genpd_iterate_idle_states(dn, NULL); + if (ret < 0) + return ret; + + if (!ret) { + *states = NULL; + *n = 0; + return 0; + } + + st = kcalloc(ret, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + ret = genpd_iterate_idle_states(dn, st); + if (ret <= 0) { + kfree(st); + return ret < 0 ? ret : -EINVAL; + } + + *states = st; + *n = ret; + + return 0; +} +EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states); + +/** + * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node. + * + * @genpd_dev: Genpd's device for which the performance-state needs to be found. + * @opp: struct dev_pm_opp of the OPP for which we need to find performance + * state. 
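/*
 * Provider-side sketch of of_genpd_parse_idle_states(): the parsed array is
 * handed to the domain as genpd->states/state_count before pm_genpd_init(),
 * and free_states lets genpd_remove() release it again.  foo_pd and the
 * helper names are hypothetical.
 */
static void foo_pd_free_states(struct genpd_power_state *states,
			       unsigned int state_count)
{
	kfree(states);
}

static int foo_pd_init_idle_states(struct device_node *np)
{
	struct genpd_power_state *states;
	int nr_states, ret;

	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
	if (ret)
		return ret;

	if (nr_states) {
		foo_pd.states = states;
		foo_pd.state_count = nr_states;
		foo_pd.free_states = foo_pd_free_states;
	}

	return 0;
}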
+ * + * Returns performance state encoded in the OPP of the genpd. This calls + * platform specific genpd->opp_to_performance_state() callback to translate + * power domain OPP to performance state. + * + * Returns performance state on success and 0 on failure. + */ +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) +{ + struct generic_pm_domain *genpd = NULL; + int state; + + genpd = container_of(genpd_dev, struct generic_pm_domain, dev); + + if (unlikely(!genpd->opp_to_performance_state)) + return 0; + + genpd_lock(genpd); + state = genpd->opp_to_performance_state(genpd, opp); + genpd_unlock(genpd); + + return state; +} +EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state); + +static int __init genpd_bus_init(void) +{ + return bus_register(&genpd_bus_type); +} +core_initcall(genpd_bus_init); + +#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */ + + +/*** debugfs support ***/ + +#ifdef CONFIG_DEBUG_FS +#include <linux/pm.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/init.h> +#include <linux/kobject.h> +static struct dentry *genpd_debugfs_dir; + +/* + * TODO: This function is a slightly modified version of rtpm_status_show + * from sysfs.c, so generalize it. + */ +static void rtpm_status_str(struct seq_file *s, struct device *dev) +{ + static const char * const status_lookup[] = { + [RPM_ACTIVE] = "active", + [RPM_RESUMING] = "resuming", + [RPM_SUSPENDED] = "suspended", + [RPM_SUSPENDING] = "suspending" + }; + const char *p = ""; + + if (dev->power.runtime_error) + p = "error"; + else if (dev->power.disable_depth) + p = "unsupported"; + else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup)) + p = status_lookup[dev->power.runtime_status]; + else + WARN_ON(1); + + seq_puts(s, p); +} + +static int genpd_summary_one(struct seq_file *s, + struct generic_pm_domain *genpd) +{ + static const char * const status_lookup[] = { + [GENPD_STATE_ON] = "on", + [GENPD_STATE_OFF] = "off" + }; + struct pm_domain_data *pm_data; + const char *kobj_path; + struct gpd_link *link; + char state[16]; + int ret; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + if (!genpd_status_on(genpd)) + snprintf(state, sizeof(state), "%s-%u", + status_lookup[genpd->status], genpd->state_idx); + else + snprintf(state, sizeof(state), "%s", + status_lookup[genpd->status]); + seq_printf(s, "%-30s %-15s ", genpd->name, state); + + /* + * Modifications on the list require holding locks on both + * parent and child, so we are safe. + * Also genpd->name is immutable. + */ + list_for_each_entry(link, &genpd->parent_links, parent_node) { + seq_printf(s, "%s", link->child->name); + if (!list_is_last(&link->parent_node, &genpd->parent_links)) + seq_puts(s, ", "); + } + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? 
+ GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "\n %-50s ", kobj_path); + rtpm_status_str(s, pm_data->dev); + kfree(kobj_path); + } + + seq_puts(s, "\n"); +exit: + genpd_unlock(genpd); + + return 0; +} + +static int summary_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd; + int ret = 0; + + seq_puts(s, "domain status children\n"); + seq_puts(s, " /device runtime status\n"); + seq_puts(s, "----------------------------------------------------------------------\n"); + + ret = mutex_lock_interruptible(&gpd_list_lock); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + ret = genpd_summary_one(s, genpd); + if (ret) + break; + } + mutex_unlock(&gpd_list_lock); + + return ret; +} + +static int status_show(struct seq_file *s, void *data) +{ + static const char * const status_lookup[] = { + [GENPD_STATE_ON] = "on", + [GENPD_STATE_OFF] = "off" + }; + + struct generic_pm_domain *genpd = s->private; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + + if (genpd->status == GENPD_STATE_OFF) + seq_printf(s, "%s-%u\n", status_lookup[genpd->status], + genpd->state_idx); + else + seq_printf(s, "%s\n", status_lookup[genpd->status]); +exit: + genpd_unlock(genpd); + return ret; +} + +static int sub_domains_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct gpd_link *link; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(link, &genpd->parent_links, parent_node) + seq_printf(s, "%s\n", link->child->name); + + genpd_unlock(genpd); + return ret; +} + +static int idle_states_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + seq_puts(s, "State Time Spent(ms) Usage Rejected\n"); + + for (i = 0; i < genpd->state_count; i++) { + ktime_t delta = 0; + s64 msecs; + + if ((genpd->status == GENPD_STATE_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + msecs = ktime_to_ms( + ktime_add(genpd->states[i].idle_time, delta)); + seq_printf(s, "S%-13i %-14lld %-14llu %llu\n", i, msecs, + genpd->states[i].usage, genpd->states[i].rejected); + } + + genpd_unlock(genpd); + return ret; +} + +static int active_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (genpd->status == GENPD_STATE_ON) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + seq_printf(s, "%lld ms\n", ktime_to_ms( + ktime_add(genpd->on_time, delta))); + + genpd_unlock(genpd); + return ret; +} + +static int total_idle_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0, total = 0; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + for (i = 0; i < genpd->state_count; i++) { + + if ((genpd->status == GENPD_STATE_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + total = ktime_add(total, genpd->states[i].idle_time); + } + total = ktime_add(total, delta); + + seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + 
+ genpd_unlock(genpd); + return ret; +} + + +static int devices_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct pm_domain_data *pm_data; + const char *kobj_path; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? + GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "%s\n", kobj_path); + kfree(kobj_path); + } + + genpd_unlock(genpd); + return ret; +} + +static int perf_state_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + + if (genpd_lock_interruptible(genpd)) + return -ERESTARTSYS; + + seq_printf(s, "%u\n", genpd->performance_state); + + genpd_unlock(genpd); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(summary); +DEFINE_SHOW_ATTRIBUTE(status); +DEFINE_SHOW_ATTRIBUTE(sub_domains); +DEFINE_SHOW_ATTRIBUTE(idle_states); +DEFINE_SHOW_ATTRIBUTE(active_time); +DEFINE_SHOW_ATTRIBUTE(total_idle_time); +DEFINE_SHOW_ATTRIBUTE(devices); +DEFINE_SHOW_ATTRIBUTE(perf_state); + +static int __init genpd_debug_init(void) +{ + struct dentry *d; + struct generic_pm_domain *genpd; + + genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); + + debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, + NULL, &summary_fops); + + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); + + debugfs_create_file("current_state", 0444, + d, genpd, &status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, &devices_fops); + if (genpd->set_performance_state) + debugfs_create_file("perf_state", 0444, + d, genpd, &perf_state_fops); + } + + return 0; +} +late_initcall(genpd_debug_init); + +static void __exit genpd_debug_exit(void) +{ + debugfs_remove_recursive(genpd_debugfs_dir); +} +__exitcall(genpd_debug_exit); +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c new file mode 100644 index 000000000..490ed7deb --- /dev/null +++ b/drivers/base/power/domain_governor.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/domain_governor.c - Governors for device PM domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + */ +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_qos.h> +#include <linux/hrtimer.h> +#include <linux/cpuidle.h> +#include <linux/cpumask.h> +#include <linux/ktime.h> + +static int dev_update_qos_constraint(struct device *dev, void *data) +{ + s64 *constraint_ns_p = data; + s64 constraint_ns; + + if (dev->power.subsys_data && dev->power.subsys_data->domain_data) { + /* + * Only take suspend-time QoS constraints of devices into + * account, because constraints updated after the device has + * been suspended are not guaranteed to be taken into account + * anyway. In order for them to take effect, the device has to + * be resumed and suspended again. 
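/*
 * For reference, the debugfs code above produces (with a hypothetical
 * domain called "foo-pd"):
 *
 *   /sys/kernel/debug/pm_genpd/pm_genpd_summary
 *   /sys/kernel/debug/pm_genpd/foo-pd/current_state
 *   /sys/kernel/debug/pm_genpd/foo-pd/sub_domains
 *   /sys/kernel/debug/pm_genpd/foo-pd/idle_states
 *   /sys/kernel/debug/pm_genpd/foo-pd/active_time
 *   /sys/kernel/debug/pm_genpd/foo-pd/total_idle_time
 *   /sys/kernel/debug/pm_genpd/foo-pd/devices
 *   /sys/kernel/debug/pm_genpd/foo-pd/perf_state  (only if set_performance_state is set)
 *
 * pm_genpd_summary prints one "domain  status  children" line per domain,
 * followed by one indented "/device  runtime status" line per member device,
 * exactly as built by genpd_summary_one() above.
 */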
+ */ + constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; + } else { + /* + * The child is not in a domain and there's no info on its + * suspend/resume latencies, so assume them to be negligible and + * take its current PM QoS constraint (that's the only thing + * known at this point anyway). + */ + constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY); + constraint_ns *= NSEC_PER_USEC; + } + + if (constraint_ns < *constraint_ns_p) + *constraint_ns_p = constraint_ns; + + return 0; +} + +/** + * default_suspend_ok - Default PM domain governor routine to suspend devices. + * @dev: Device to check. + */ +static bool default_suspend_ok(struct device *dev) +{ + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + unsigned long flags; + s64 constraint_ns; + + dev_dbg(dev, "%s()\n", __func__); + + spin_lock_irqsave(&dev->power.lock, flags); + + if (!td->constraint_changed) { + bool ret = td->cached_suspend_ok; + + spin_unlock_irqrestore(&dev->power.lock, flags); + return ret; + } + td->constraint_changed = false; + td->cached_suspend_ok = false; + td->effective_constraint_ns = 0; + constraint_ns = __dev_pm_qos_resume_latency(dev); + + spin_unlock_irqrestore(&dev->power.lock, flags); + + if (constraint_ns == 0) + return false; + + constraint_ns *= NSEC_PER_USEC; + /* + * We can walk the children without any additional locking, because + * they all have been suspended at this point and their + * effective_constraint_ns fields won't be modified in parallel with us. + */ + if (!dev->power.ignore_children) + device_for_each_child(dev, &constraint_ns, + dev_update_qos_constraint); + + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) { + /* "No restriction", so the device is allowed to suspend. */ + td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS; + td->cached_suspend_ok = true; + } else if (constraint_ns == 0) { + /* + * This triggers if one of the children that don't belong to a + * domain has a zero PM QoS constraint and it's better not to + * suspend then. effective_constraint_ns is zero already and + * cached_suspend_ok is false, so bail out. + */ + return false; + } else { + constraint_ns -= td->suspend_latency_ns + + td->resume_latency_ns; + /* + * effective_constraint_ns is zero already and cached_suspend_ok + * is false, so if the computed value is not positive, return + * right away. + */ + if (constraint_ns <= 0) + return false; + + td->effective_constraint_ns = constraint_ns; + td->cached_suspend_ok = true; + } + + /* + * The children have been suspended already, so we don't need to take + * their suspend latencies into account here. + */ + return td->cached_suspend_ok; +} + +static bool __default_power_down_ok(struct dev_pm_domain *pd, + unsigned int state) +{ + struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct gpd_link *link; + struct pm_domain_data *pdd; + s64 min_off_time_ns; + s64 off_on_time_ns; + + off_on_time_ns = genpd->states[state].power_off_latency_ns + + genpd->states[state].power_on_latency_ns; + + min_off_time_ns = -1; + /* + * Check if subdomains can be off for enough time. + * + * All subdomains have been powered off already at this point. 
+ */ + list_for_each_entry(link, &genpd->parent_links, parent_node) { + struct generic_pm_domain *sd = link->child; + s64 sd_max_off_ns = sd->max_off_time_ns; + + if (sd_max_off_ns < 0) + continue; + + /* + * Check if the subdomain is allowed to be off long enough for + * the current domain to turn off and on (that's how much time + * it will have to wait worst case). + */ + if (sd_max_off_ns <= off_on_time_ns) + return false; + + if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0) + min_off_time_ns = sd_max_off_ns; + } + + /* + * Check if the devices in the domain can be off enough time. + */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + struct gpd_timing_data *td; + s64 constraint_ns; + + /* + * Check if the device is allowed to be off long enough for the + * domain to turn off and on (that's how much time it will + * have to wait worst case). + */ + td = &to_gpd_data(pdd)->td; + constraint_ns = td->effective_constraint_ns; + /* + * Zero means "no suspend at all" and this runs only when all + * devices in the domain are suspended, so it must be positive. + */ + if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) + continue; + + if (constraint_ns <= off_on_time_ns) + return false; + + if (min_off_time_ns > constraint_ns || min_off_time_ns < 0) + min_off_time_ns = constraint_ns; + } + + /* + * If the computed minimum device off time is negative, there are no + * latency constraints, so the domain can spend arbitrary time in the + * "off" state. + */ + if (min_off_time_ns < 0) + return true; + + /* + * The difference between the computed minimum subdomain or device off + * time and the time needed to turn the domain on is the maximum + * theoretical time this domain can spend in the "off" state. + */ + genpd->max_off_time_ns = min_off_time_ns - + genpd->states[state].power_on_latency_ns; + return true; +} + +/** + * default_power_down_ok - Default generic PM domain power off governor routine. + * @pd: PM domain to check. + * + * This routine must be executed under the PM domain's lock. + */ +static bool default_power_down_ok(struct dev_pm_domain *pd) +{ + struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct gpd_link *link; + + if (!genpd->max_off_time_changed) { + genpd->state_idx = genpd->cached_power_down_state_idx; + return genpd->cached_power_down_ok; + } + + /* + * We have to invalidate the cached results for the parents, so + * use the observation that default_power_down_ok() is not + * going to be called for any parent until this instance + * returns. + */ + list_for_each_entry(link, &genpd->child_links, child_node) + link->parent->max_off_time_changed = true; + + genpd->max_off_time_ns = -1; + genpd->max_off_time_changed = false; + genpd->cached_power_down_ok = true; + genpd->state_idx = genpd->state_count - 1; + + /* Find a state to power down to, starting from the deepest. */ + while (!__default_power_down_ok(pd, genpd->state_idx)) { + if (genpd->state_idx == 0) { + genpd->cached_power_down_ok = false; + break; + } + genpd->state_idx--; + } + + genpd->cached_power_down_state_idx = genpd->state_idx; + return genpd->cached_power_down_ok; +} + +static bool always_on_power_down_ok(struct dev_pm_domain *domain) +{ + return false; +} + +#ifdef CONFIG_CPU_IDLE +static bool cpu_power_down_ok(struct dev_pm_domain *pd) +{ + struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct cpuidle_device *dev; + ktime_t domain_wakeup, next_hrtimer; + s64 idle_duration_ns; + int cpu, i; + + /* Validate dev PM QoS constraints. 
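/*
 * Worked example of the arithmetic above (all numbers hypothetical):
 * a member device has a resume-latency QoS constraint of 2000 us and
 * measured suspend/resume latencies of 100 us and 300 us, so
 * default_suspend_ok() stores
 *
 *   effective_constraint_ns = 2000000 - (100000 + 300000) = 1600000
 *
 * For a domain state whose power_off_latency_ns + power_on_latency_ns is
 * 2000000 ns, __default_power_down_ok() rejects the state because
 * 1600000 <= 2000000, and default_power_down_ok() retries with the next
 * shallower state.  A state whose off+on latency is 500000 ns is accepted,
 * and max_off_time_ns becomes 1600000 minus that state's
 * power_on_latency_ns.
 */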
*/ + if (!default_power_down_ok(pd)) + return false; + + if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) + return true; + + /* + * Find the next wakeup for any of the online CPUs within the PM domain + * and its subdomains. Note, we only need the genpd->cpus, as it already + * contains a mask of all CPUs from subdomains. + */ + domain_wakeup = ktime_set(KTIME_SEC_MAX, 0); + for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) { + dev = per_cpu(cpuidle_devices, cpu); + if (dev) { + next_hrtimer = READ_ONCE(dev->next_hrtimer); + if (ktime_before(next_hrtimer, domain_wakeup)) + domain_wakeup = next_hrtimer; + } + } + + /* The minimum idle duration is from now - until the next wakeup. */ + idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get())); + if (idle_duration_ns <= 0) + return false; + + /* + * Find the deepest idle state that has its residency value satisfied + * and by also taking into account the power off latency for the state. + * Start at the state picked by the dev PM QoS constraint validation. + */ + i = genpd->state_idx; + do { + if (idle_duration_ns >= (genpd->states[i].residency_ns + + genpd->states[i].power_off_latency_ns)) { + genpd->state_idx = i; + return true; + } + } while (--i >= 0); + + return false; +} + +struct dev_power_governor pm_domain_cpu_gov = { + .suspend_ok = default_suspend_ok, + .power_down_ok = cpu_power_down_ok, +}; +#endif + +struct dev_power_governor simple_qos_governor = { + .suspend_ok = default_suspend_ok, + .power_down_ok = default_power_down_ok, +}; + +/** + * pm_genpd_gov_always_on - A governor implementing an always-on policy + */ +struct dev_power_governor pm_domain_always_on_gov = { + .power_down_ok = always_on_power_down_ok, + .suspend_ok = default_suspend_ok, +}; diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c new file mode 100644 index 000000000..4fa525668 --- /dev/null +++ b/drivers/base/power/generic_ops.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems + * + * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + */ +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/export.h> + +#ifdef CONFIG_PM +/** + * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. + * @dev: Device to suspend. + * + * If PM operations are defined for the @dev's driver and they include + * ->runtime_suspend(), execute it and return its error code. Otherwise, + * return 0. + */ +int pm_generic_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int ret; + + ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0; + + return ret; +} +EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); + +/** + * pm_generic_runtime_resume - Generic runtime resume callback for subsystems. + * @dev: Device to resume. + * + * If PM operations are defined for the @dev's driver and they include + * ->runtime_resume(), execute it and return its error code. Otherwise, + * return 0. + */ +int pm_generic_runtime_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int ret; + + ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0; + + return ret; +} +EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_SLEEP +/** + * pm_generic_prepare - Generic routine preparing a device for power transition. + * @dev: Device to prepare. 
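/*
 * Sketch of how a provider picks one of the governors defined above when
 * registering its domains: simple_qos_governor for QoS-aware I/O domains,
 * pm_domain_cpu_gov (CONFIG_CPU_IDLE) for a CPU cluster domain flagged with
 * GENPD_FLAG_CPU_DOMAIN, and pm_domain_always_on_gov for a domain that must
 * never power down.  The foo_* domains are hypothetical and their other
 * fields are omitted (see the earlier sketches).
 */
static struct generic_pm_domain foo_io_pd, foo_cluster_pd, foo_ao_pd;

static void foo_register_domains(void)
{
	pm_genpd_init(&foo_io_pd, &simple_qos_governor, true);

	foo_cluster_pd.flags |= GENPD_FLAG_CPU_DOMAIN;
	pm_genpd_init(&foo_cluster_pd, &pm_domain_cpu_gov, false);

	pm_genpd_init(&foo_ao_pd, &pm_domain_always_on_gov, false);
}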
+ * + * Prepare a device for a system-wide power transition. + */ +int pm_generic_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +/** + * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. + * @dev: Device to suspend. + */ +int pm_generic_suspend_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); + +/** + * pm_generic_suspend_late - Generic suspend_late callback for subsystems. + * @dev: Device to suspend. + */ +int pm_generic_suspend_late(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->suspend_late ? pm->suspend_late(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_suspend_late); + +/** + * pm_generic_suspend - Generic suspend callback for subsystems. + * @dev: Device to suspend. + */ +int pm_generic_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->suspend ? pm->suspend(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_suspend); + +/** + * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); + +/** + * pm_generic_freeze_late - Generic freeze_late callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze_late(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->freeze_late ? pm->freeze_late(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_freeze_late); + +/** + * pm_generic_freeze - Generic freeze callback for subsystems. + * @dev: Device to freeze. + */ +int pm_generic_freeze(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->freeze ? pm->freeze(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_freeze); + +/** + * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); + +/** + * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff_late(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff_late); + +/** + * pm_generic_poweroff - Generic poweroff callback for subsystems. + * @dev: Device to handle. + */ +int pm_generic_poweroff(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->poweroff ? pm->poweroff(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_poweroff); + +/** + * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems. + * @dev: Device to thaw. + */ +int pm_generic_thaw_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + + return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); + +/** + * pm_generic_thaw_early - Generic thaw_early callback for subsystems. + * @dev: Device to thaw. + */ +int pm_generic_thaw_early(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->thaw_early ? pm->thaw_early(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_thaw_early); + +/** + * pm_generic_thaw - Generic thaw callback for subsystems. + * @dev: Device to thaw. + */ +int pm_generic_thaw(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->thaw ? pm->thaw(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_thaw); + +/** + * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. + * @dev: Device to resume. + */ +int pm_generic_resume_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); + +/** + * pm_generic_resume_early - Generic resume_early callback for subsystems. + * @dev: Device to resume. + */ +int pm_generic_resume_early(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->resume_early ? pm->resume_early(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_resume_early); + +/** + * pm_generic_resume - Generic resume callback for subsystems. + * @dev: Device to resume. + */ +int pm_generic_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->resume ? pm->resume(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_resume); + +/** + * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems. + * @dev: Device to restore. + */ +int pm_generic_restore_noirq(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); + +/** + * pm_generic_restore_early - Generic restore_early callback for subsystems. + * @dev: Device to resume. + */ +int pm_generic_restore_early(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->restore_early ? pm->restore_early(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_restore_early); + +/** + * pm_generic_restore - Generic restore callback for subsystems. + * @dev: Device to restore. + */ +int pm_generic_restore(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + return pm && pm->restore ? pm->restore(dev) : 0; +} +EXPORT_SYMBOL_GPL(pm_generic_restore); + +/** + * pm_generic_complete - Generic routine completing a device power transition. + * @dev: Device to handle. + * + * Complete a device power transition during a system-wide power transition. + */ +void pm_generic_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} +#endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c new file mode 100644 index 000000000..1dbaaddf5 --- /dev/null +++ b/drivers/base/power/main.c @@ -0,0 +1,2015 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/main.c - Where the driver meets power management. 
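/*
 * Sketch of how a subsystem reuses the generic callbacks above as its
 * defaults, so member drivers only supply dev_pm_ops of their own when they
 * need more than the plain driver-callback pass-through.  foo_bus_type is
 * hypothetical (compare the platform bus, which wires these up the same way).
 */
static const struct dev_pm_ops foo_bus_pm_ops = {
	.prepare		= pm_generic_prepare,
	.suspend		= pm_generic_suspend,
	.resume			= pm_generic_resume,
	.freeze			= pm_generic_freeze,
	.thaw			= pm_generic_thaw,
	.poweroff		= pm_generic_poweroff,
	.restore		= pm_generic_restore,
	.complete		= pm_generic_complete,
	.runtime_suspend	= pm_generic_runtime_suspend,
	.runtime_resume		= pm_generic_runtime_resume,
};

static struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};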
+ * + * Copyright (c) 2003 Patrick Mochel + * Copyright (c) 2003 Open Source Development Lab + * + * The driver model core calls device_pm_add() when a device is registered. + * This will initialize the embedded device_pm_info object in the device + * and add it to the list of power-controlled devices. sysfs entries for + * controlling device power management will also be added. + * + * A separate list is used for keeping track of power info, because the power + * domain dependencies may differ from the ancestral dependencies that the + * subsystem list maintains. + */ + +#define pr_fmt(fmt) "PM: " fmt + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/pm-trace.h> +#include <linux/pm_wakeirq.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/async.h> +#include <linux/suspend.h> +#include <trace/events/power.h> +#include <linux/cpufreq.h> +#include <linux/cpuidle.h> +#include <linux/devfreq.h> +#include <linux/timer.h> + +#include "../base.h" +#include "power.h" + +typedef int (*pm_callback_t)(struct device *); + +#define list_for_each_entry_rcu_locked(pos, head, member) \ + list_for_each_entry_rcu(pos, head, member, \ + device_links_read_lock_held()) + +/* + * The entries in the dpm_list list are in a depth first order, simply + * because children are guaranteed to be discovered after parents, and + * are inserted at the back of the list on discovery. + * + * Since device_pm_add() may be called with a device lock held, + * we must never try to acquire a device lock while holding + * dpm_list_mutex. + */ + +LIST_HEAD(dpm_list); +static LIST_HEAD(dpm_prepared_list); +static LIST_HEAD(dpm_suspended_list); +static LIST_HEAD(dpm_late_early_list); +static LIST_HEAD(dpm_noirq_list); + +struct suspend_stats suspend_stats; +static DEFINE_MUTEX(dpm_list_mtx); +static pm_message_t pm_transition; + +static int async_error; + +static const char *pm_verb(int event) +{ + switch (event) { + case PM_EVENT_SUSPEND: + return "suspend"; + case PM_EVENT_RESUME: + return "resume"; + case PM_EVENT_FREEZE: + return "freeze"; + case PM_EVENT_QUIESCE: + return "quiesce"; + case PM_EVENT_HIBERNATE: + return "hibernate"; + case PM_EVENT_THAW: + return "thaw"; + case PM_EVENT_RESTORE: + return "restore"; + case PM_EVENT_RECOVER: + return "recover"; + default: + return "(unknown PM event)"; + } +} + +/** + * device_pm_sleep_init - Initialize system suspend-related device fields. + * @dev: Device object being initialized. + */ +void device_pm_sleep_init(struct device *dev) +{ + dev->power.is_prepared = false; + dev->power.is_suspended = false; + dev->power.is_noirq_suspended = false; + dev->power.is_late_suspended = false; + init_completion(&dev->power.completion); + complete_all(&dev->power.completion); + dev->power.wakeup = NULL; + INIT_LIST_HEAD(&dev->power.entry); +} + +/** + * device_pm_lock - Lock the list of active devices used by the PM core. + */ +void device_pm_lock(void) +{ + mutex_lock(&dpm_list_mtx); +} + +/** + * device_pm_unlock - Unlock the list of active devices used by the PM core. + */ +void device_pm_unlock(void) +{ + mutex_unlock(&dpm_list_mtx); +} + +/** + * device_pm_add - Add a device to the PM core's list of active devices. + * @dev: Device to add to the list. + */ +void device_pm_add(struct device *dev) +{ + /* Skip PM setup/initialization. 
*/ + if (device_pm_not_required(dev)) + return; + + pr_debug("Adding info for %s:%s\n", + dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); + device_pm_check_callbacks(dev); + mutex_lock(&dpm_list_mtx); + if (dev->parent && dev->parent->power.is_prepared) + dev_warn(dev, "parent %s should not be sleeping\n", + dev_name(dev->parent)); + list_add_tail(&dev->power.entry, &dpm_list); + dev->power.in_dpm_list = true; + mutex_unlock(&dpm_list_mtx); +} + +/** + * device_pm_remove - Remove a device from the PM core's list of active devices. + * @dev: Device to be removed from the list. + */ +void device_pm_remove(struct device *dev) +{ + if (device_pm_not_required(dev)) + return; + + pr_debug("Removing info for %s:%s\n", + dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); + complete_all(&dev->power.completion); + mutex_lock(&dpm_list_mtx); + list_del_init(&dev->power.entry); + dev->power.in_dpm_list = false; + mutex_unlock(&dpm_list_mtx); + device_wakeup_disable(dev); + pm_runtime_remove(dev); + device_pm_check_callbacks(dev); +} + +/** + * device_pm_move_before - Move device in the PM core's list of active devices. + * @deva: Device to move in dpm_list. + * @devb: Device @deva should come before. + */ +void device_pm_move_before(struct device *deva, struct device *devb) +{ + pr_debug("Moving %s:%s before %s:%s\n", + deva->bus ? deva->bus->name : "No Bus", dev_name(deva), + devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); + /* Delete deva from dpm_list and reinsert before devb. */ + list_move_tail(&deva->power.entry, &devb->power.entry); +} + +/** + * device_pm_move_after - Move device in the PM core's list of active devices. + * @deva: Device to move in dpm_list. + * @devb: Device @deva should come after. + */ +void device_pm_move_after(struct device *deva, struct device *devb) +{ + pr_debug("Moving %s:%s after %s:%s\n", + deva->bus ? deva->bus->name : "No Bus", dev_name(deva), + devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); + /* Delete deva from dpm_list and reinsert after devb. */ + list_move(&deva->power.entry, &devb->power.entry); +} + +/** + * device_pm_move_last - Move device to end of the PM core's list of devices. + * @dev: Device to move in dpm_list. + */ +void device_pm_move_last(struct device *dev) +{ + pr_debug("Moving %s:%s to end of list\n", + dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); + list_move_tail(&dev->power.entry, &dpm_list); +} + +static ktime_t initcall_debug_start(struct device *dev, void *cb) +{ + if (!pm_print_times_enabled) + return 0; + + dev_info(dev, "calling %pS @ %i, parent: %s\n", cb, + task_pid_nr(current), + dev->parent ? dev_name(dev->parent) : "none"); + return ktime_get(); +} + +static void initcall_debug_report(struct device *dev, ktime_t calltime, + void *cb, int error) +{ + ktime_t rettime; + s64 nsecs; + + if (!pm_print_times_enabled) + return; + + rettime = ktime_get(); + nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime)); + + dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error, + (unsigned long long)nsecs >> 10); +} + +/** + * dpm_wait - Wait for a PM operation to complete. + * @dev: Device to wait for. + * @async: If unset, wait only if the device's power.async_suspend flag is set. 
+ */ +static void dpm_wait(struct device *dev, bool async) +{ + if (!dev) + return; + + if (async || (pm_async_enabled && dev->power.async_suspend)) + wait_for_completion(&dev->power.completion); +} + +static int dpm_wait_fn(struct device *dev, void *async_ptr) +{ + dpm_wait(dev, *((bool *)async_ptr)); + return 0; +} + +static void dpm_wait_for_children(struct device *dev, bool async) +{ + device_for_each_child(dev, &async, dpm_wait_fn); +} + +static void dpm_wait_for_suppliers(struct device *dev, bool async) +{ + struct device_link *link; + int idx; + + idx = device_links_read_lock(); + + /* + * If the supplier goes away right after we've checked the link to it, + * we'll wait for its completion to change the state, but that's fine, + * because the only things that will block as a result are the SRCU + * callbacks freeing the link objects for the links in the list we're + * walking. + */ + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) + if (READ_ONCE(link->status) != DL_STATE_DORMANT) + dpm_wait(link->supplier, async); + + device_links_read_unlock(idx); +} + +static bool dpm_wait_for_superior(struct device *dev, bool async) +{ + struct device *parent; + + /* + * If the device is resumed asynchronously and the parent's callback + * deletes both the device and the parent itself, the parent object may + * be freed while this function is running, so avoid that by reference + * counting the parent once more unless the device has been deleted + * already (in which case return right away). + */ + mutex_lock(&dpm_list_mtx); + + if (!device_pm_initialized(dev)) { + mutex_unlock(&dpm_list_mtx); + return false; + } + + parent = get_device(dev->parent); + + mutex_unlock(&dpm_list_mtx); + + dpm_wait(parent, async); + put_device(parent); + + dpm_wait_for_suppliers(dev, async); + + /* + * If the parent's callback has deleted the device, attempting to resume + * it would be invalid, so avoid doing that then. + */ + return device_pm_initialized(dev); +} + +static void dpm_wait_for_consumers(struct device *dev, bool async) +{ + struct device_link *link; + int idx; + + idx = device_links_read_lock(); + + /* + * The status of a device link can only be changed from "dormant" by a + * probe, but that cannot happen during system suspend/resume. In + * theory it can change to "dormant" at that time, but then it is + * reasonable to wait for the target device anyway (eg. if it goes + * away, it's better to wait for it to go away completely and then + * continue instead of trying to continue in parallel with its + * unregistration). + */ + list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node) + if (READ_ONCE(link->status) != DL_STATE_DORMANT) + dpm_wait(link->consumer, async); + + device_links_read_unlock(idx); +} + +static void dpm_wait_for_subordinate(struct device *dev, bool async) +{ + dpm_wait_for_children(dev, async); + dpm_wait_for_consumers(dev, async); +} + +/** + * pm_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. 
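/*
 * The completion-based dpm_wait*() helpers above are what make asynchronous
 * suspend/resume safe to order against parents, children and device links.
 * A driver opts a device into the async path by setting power.async_suspend
 * through the helper below; the user can still disable it system-wide via
 * /sys/power/pm_async.  Sketch only; foo_probe is hypothetical.
 */
static int foo_probe(struct device *dev)
{
	/* Let this device suspend/resume in parallel with unrelated devices. */
	device_enable_async_suspend(dev);

	return 0;
}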
+ */ +static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) +{ + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + return ops->suspend; + case PM_EVENT_RESUME: + return ops->resume; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return ops->freeze; + case PM_EVENT_HIBERNATE: + return ops->poweroff; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + return ops->thaw; + case PM_EVENT_RESTORE: + return ops->restore; +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + } + + return NULL; +} + +/** + * pm_late_early_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * Runtime PM is disabled for @dev while this function is being executed. + */ +static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops, + pm_message_t state) +{ + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + return ops->suspend_late; + case PM_EVENT_RESUME: + return ops->resume_early; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return ops->freeze_late; + case PM_EVENT_HIBERNATE: + return ops->poweroff_late; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + return ops->thaw_early; + case PM_EVENT_RESTORE: + return ops->restore_early; +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + } + + return NULL; +} + +/** + * pm_noirq_op - Return the PM operation appropriate for given PM event. + * @ops: PM operations to choose from. + * @state: PM transition of the system being carried out. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. + */ +static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) +{ + switch (state.event) { +#ifdef CONFIG_SUSPEND + case PM_EVENT_SUSPEND: + return ops->suspend_noirq; + case PM_EVENT_RESUME: + return ops->resume_noirq; +#endif /* CONFIG_SUSPEND */ +#ifdef CONFIG_HIBERNATE_CALLBACKS + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return ops->freeze_noirq; + case PM_EVENT_HIBERNATE: + return ops->poweroff_noirq; + case PM_EVENT_THAW: + case PM_EVENT_RECOVER: + return ops->thaw_noirq; + case PM_EVENT_RESTORE: + return ops->restore_noirq; +#endif /* CONFIG_HIBERNATE_CALLBACKS */ + } + + return NULL; +} + +static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info) +{ + dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), + ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? + ", may wakeup" : ""); +} + +static void pm_dev_err(struct device *dev, pm_message_t state, const char *info, + int error) +{ + pr_err("Device %s failed to %s%s: error %d\n", + dev_name(dev), pm_verb(state.event), info, error); +} + +static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, + const char *info) +{ + ktime_t calltime; + u64 usecs64; + int usecs; + + calltime = ktime_get(); + usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); + do_div(usecs64, NSEC_PER_USEC); + usecs = usecs64; + if (usecs == 0) + usecs = 1; + + pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + error ? 
"aborted" : "complete", + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); +} + +static int dpm_run_callback(pm_callback_t cb, struct device *dev, + pm_message_t state, const char *info) +{ + ktime_t calltime; + int error; + + if (!cb) + return 0; + + calltime = initcall_debug_start(dev, cb); + + pm_dev_dbg(dev, state, info); + trace_device_pm_callback_start(dev, info, state.event); + error = cb(dev); + trace_device_pm_callback_end(dev, error); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, cb, error); + + return error; +} + +#ifdef CONFIG_DPM_WATCHDOG +struct dpm_watchdog { + struct device *dev; + struct task_struct *tsk; + struct timer_list timer; +}; + +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \ + struct dpm_watchdog wd + +/** + * dpm_watchdog_handler - Driver suspend / resume watchdog handler. + * @t: The timer that PM watchdog depends on. + * + * Called when a driver has timed out suspending or resuming. + * There's not much we can do here to recover so panic() to + * capture a crash-dump in pstore. + */ +static void dpm_watchdog_handler(struct timer_list *t) +{ + struct dpm_watchdog *wd = from_timer(wd, t, timer); + + dev_emerg(wd->dev, "**** DPM device timeout ****\n"); + show_stack(wd->tsk, NULL, KERN_EMERG); + panic("%s %s: unrecoverable failure\n", + dev_driver_string(wd->dev), dev_name(wd->dev)); +} + +/** + * dpm_watchdog_set - Enable pm watchdog for given device. + * @wd: Watchdog. Must be allocated on the stack. + * @dev: Device to handle. + */ +static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) +{ + struct timer_list *timer = &wd->timer; + + wd->dev = dev; + wd->tsk = current; + + timer_setup_on_stack(timer, dpm_watchdog_handler, 0); + /* use same timeout value for both suspend and resume */ + timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; + add_timer(timer); +} + +/** + * dpm_watchdog_clear - Disable suspend/resume watchdog. + * @wd: Watchdog to disable. + */ +static void dpm_watchdog_clear(struct dpm_watchdog *wd) +{ + struct timer_list *timer = &wd->timer; + + del_timer_sync(timer); + destroy_timer_on_stack(timer); +} +#else +#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) +#define dpm_watchdog_set(x, y) +#define dpm_watchdog_clear(x) +#endif + +/*------------------------- Resume routines -------------------------*/ + +/** + * dev_pm_skip_resume - System-wide device resume optimization check. + * @dev: Target device. + * + * Return: + * - %false if the transition under way is RESTORE. + * - Return value of dev_pm_skip_suspend() if the transition under way is THAW. + * - The logical negation of %power.must_resume otherwise (that is, when the + * transition under way is RESUME). + */ +bool dev_pm_skip_resume(struct device *dev) +{ + if (pm_transition.event == PM_EVENT_RESTORE) + return false; + + if (pm_transition.event == PM_EVENT_THAW) + return dev_pm_skip_suspend(dev); + + return !dev->power.must_resume; +} + +/** + * device_resume_noirq - Execute a "noirq resume" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being resumed asynchronously. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. 
+ */ +static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + const char *info = NULL; + bool skip_resume; + int error = 0; + + TRACE_DEVICE(dev); + TRACE_RESUME(0); + + if (dev->power.syscore || dev->power.direct_complete) + goto Out; + + if (!dev->power.is_noirq_suspended) + goto Out; + + if (!dpm_wait_for_superior(dev, async)) + goto Out; + + skip_resume = dev_pm_skip_resume(dev); + /* + * If the driver callback is skipped below or by the middle layer + * callback and device_resume_early() also skips the driver callback for + * this device later, it needs to appear as "suspended" to PM-runtime, + * so change its status accordingly. + * + * Otherwise, the device is going to be resumed, so set its PM-runtime + * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set + * to avoid confusing drivers that don't use it. + */ + if (skip_resume) + pm_runtime_set_suspended(dev); + else if (dev_pm_skip_suspend(dev)) + pm_runtime_set_active(dev); + + if (dev->pm_domain) { + info = "noirq power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "noirq type "; + callback = pm_noirq_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "noirq class "; + callback = pm_noirq_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "noirq bus "; + callback = pm_noirq_op(dev->bus->pm, state); + } + if (callback) + goto Run; + + if (skip_resume) + goto Skip; + + if (dev->driver && dev->driver->pm) { + info = "noirq driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + +Run: + error = dpm_run_callback(callback, dev, state, info); + +Skip: + dev->power.is_noirq_suspended = false; + +Out: + complete_all(&dev->power.completion); + TRACE_RESUME(error); + return error; +} + +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + +static bool dpm_async_fn(struct device *dev, async_func_t func) +{ + reinit_completion(&dev->power.completion); + + if (is_async(dev)) { + get_device(dev); + async_schedule_dev(func, dev); + return true; + } + + return false; +} + +static void async_resume_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_noirq(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + +static void dpm_noirq_resume_devices(pm_message_t state) +{ + struct device *dev; + ktime_t starttime = ktime_get(); + + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); + mutex_lock(&dpm_list_mtx); + pm_transition = state; + + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. 
+ */ + list_for_each_entry(dev, &dpm_noirq_list, power.entry) + dpm_async_fn(dev, async_resume_noirq); + + while (!list_empty(&dpm_noirq_list)) { + dev = to_device(dpm_noirq_list.next); + get_device(dev); + list_move_tail(&dev->power.entry, &dpm_late_early_list); + mutex_unlock(&dpm_list_mtx); + + if (!is_async(dev)) { + int error; + + error = device_resume_noirq(dev, state, false); + if (error) { + suspend_stats.failed_resume_noirq++; + dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " noirq", error); + } + } + + mutex_lock(&dpm_list_mtx); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + dpm_show_time(starttime, state, 0, "noirq"); + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); +} + +/** + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and + * allow device drivers' interrupt handlers to be called. + */ +void dpm_resume_noirq(pm_message_t state) +{ + dpm_noirq_resume_devices(state); + + resume_device_irqs(); + device_wakeup_disarm_wake_irqs(); + + cpuidle_resume(); +} + +/** + * device_resume_early - Execute an "early resume" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being resumed asynchronously. + * + * Runtime PM is disabled for @dev while this function is being executed. + */ +static int device_resume_early(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + const char *info = NULL; + int error = 0; + + TRACE_DEVICE(dev); + TRACE_RESUME(0); + + if (dev->power.syscore || dev->power.direct_complete) + goto Out; + + if (!dev->power.is_late_suspended) + goto Out; + + if (!dpm_wait_for_superior(dev, async)) + goto Out; + + if (dev->pm_domain) { + info = "early power domain "; + callback = pm_late_early_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "early type "; + callback = pm_late_early_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "early class "; + callback = pm_late_early_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "early bus "; + callback = pm_late_early_op(dev->bus->pm, state); + } + if (callback) + goto Run; + + if (dev_pm_skip_resume(dev)) + goto Skip; + + if (dev->driver && dev->driver->pm) { + info = "early driver "; + callback = pm_late_early_op(dev->driver->pm, state); + } + +Run: + error = dpm_run_callback(callback, dev, state, info); + +Skip: + dev->power.is_late_suspended = false; + +Out: + TRACE_RESUME(error); + + pm_runtime_enable(dev); + complete_all(&dev->power.completion); + return error; +} + +static void async_resume_early(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume_early(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + + put_device(dev); +} + +/** + * dpm_resume_early - Execute "early resume" callbacks for all devices. + * @state: PM transition of the system being carried out. 
+ */ +void dpm_resume_early(pm_message_t state) +{ + struct device *dev; + ktime_t starttime = ktime_get(); + + trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); + mutex_lock(&dpm_list_mtx); + pm_transition = state; + + /* + * Advanced the async threads upfront, + * in case the starting of async threads is + * delayed by non-async resuming devices. + */ + list_for_each_entry(dev, &dpm_late_early_list, power.entry) + dpm_async_fn(dev, async_resume_early); + + while (!list_empty(&dpm_late_early_list)) { + dev = to_device(dpm_late_early_list.next); + get_device(dev); + list_move_tail(&dev->power.entry, &dpm_suspended_list); + mutex_unlock(&dpm_list_mtx); + + if (!is_async(dev)) { + int error; + + error = device_resume_early(dev, state, false); + if (error) { + suspend_stats.failed_resume_early++; + dpm_save_failed_step(SUSPEND_RESUME_EARLY); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, " early", error); + } + } + mutex_lock(&dpm_list_mtx); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + dpm_show_time(starttime, state, 0, "early"); + trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); +} + +/** + * dpm_resume_start - Execute "noirq" and "early" device callbacks. + * @state: PM transition of the system being carried out. + */ +void dpm_resume_start(pm_message_t state) +{ + dpm_resume_noirq(state); + dpm_resume_early(state); +} +EXPORT_SYMBOL_GPL(dpm_resume_start); + +/** + * device_resume - Execute "resume" callbacks for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being resumed asynchronously. + */ +static int device_resume(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + const char *info = NULL; + int error = 0; + DECLARE_DPM_WATCHDOG_ON_STACK(wd); + + TRACE_DEVICE(dev); + TRACE_RESUME(0); + + if (dev->power.syscore) + goto Complete; + + if (dev->power.direct_complete) { + /* Match the pm_runtime_disable() in __device_suspend(). */ + pm_runtime_enable(dev); + goto Complete; + } + + if (!dpm_wait_for_superior(dev, async)) + goto Complete; + + dpm_watchdog_set(&wd, dev); + device_lock(dev); + + /* + * This is a fib. But we'll allow new children to be added below + * a resumed device, even if the device hasn't been completed yet. 
+ */ + dev->power.is_prepared = false; + + if (!dev->power.is_suspended) + goto Unlock; + + if (dev->pm_domain) { + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Driver; + } + + if (dev->type && dev->type->pm) { + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Driver; + } + + if (dev->class && dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Driver; + } + + if (dev->bus) { + if (dev->bus->pm) { + info = "bus "; + callback = pm_op(dev->bus->pm, state); + } else if (dev->bus->resume) { + info = "legacy bus "; + callback = dev->bus->resume; + goto End; + } + } + + Driver: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + + End: + error = dpm_run_callback(callback, dev, state, info); + dev->power.is_suspended = false; + + Unlock: + device_unlock(dev); + dpm_watchdog_clear(&wd); + + Complete: + complete_all(&dev->power.completion); + + TRACE_RESUME(error); + + return error; +} + +static void async_resume(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + put_device(dev); +} + +/** + * dpm_resume - Execute "resume" callbacks for non-sysdev devices. + * @state: PM transition of the system being carried out. + * + * Execute the appropriate "resume" callback for all devices whose status + * indicates that they are suspended. + */ +void dpm_resume(pm_message_t state) +{ + struct device *dev; + ktime_t starttime = ktime_get(); + + trace_suspend_resume(TPS("dpm_resume"), state.event, true); + might_sleep(); + + mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + + list_for_each_entry(dev, &dpm_suspended_list, power.entry) + dpm_async_fn(dev, async_resume); + + while (!list_empty(&dpm_suspended_list)) { + dev = to_device(dpm_suspended_list.next); + get_device(dev); + if (!is_async(dev)) { + int error; + + mutex_unlock(&dpm_list_mtx); + + error = device_resume(dev, state, false); + if (error) { + suspend_stats.failed_resume++; + dpm_save_failed_step(SUSPEND_RESUME); + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, state, "", error); + } + + mutex_lock(&dpm_list_mtx); + } + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &dpm_prepared_list); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + dpm_show_time(starttime, state, 0, NULL); + + cpufreq_resume(); + devfreq_resume(); + trace_suspend_resume(TPS("dpm_resume"), state.event, false); +} + +/** + * device_complete - Complete a PM transition for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. 
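+ *
+ * The ->complete() callback invoked here balances the ->prepare() callback
+ * run by device_prepare(), and the final pm_runtime_put() pairs with the
+ * pm_runtime_get_noresume() taken during the prepare phase.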
+ */ +static void device_complete(struct device *dev, pm_message_t state) +{ + void (*callback)(struct device *) = NULL; + const char *info = NULL; + + if (dev->power.syscore) + goto out; + + device_lock(dev); + + if (dev->pm_domain) { + info = "completing power domain "; + callback = dev->pm_domain->ops.complete; + } else if (dev->type && dev->type->pm) { + info = "completing type "; + callback = dev->type->pm->complete; + } else if (dev->class && dev->class->pm) { + info = "completing class "; + callback = dev->class->pm->complete; + } else if (dev->bus && dev->bus->pm) { + info = "completing bus "; + callback = dev->bus->pm->complete; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "completing driver "; + callback = dev->driver->pm->complete; + } + + if (callback) { + pm_dev_dbg(dev, state, info); + callback(dev); + } + + device_unlock(dev); + +out: + pm_runtime_put(dev); +} + +/** + * dpm_complete - Complete a PM transition for all non-sysdev devices. + * @state: PM transition of the system being carried out. + * + * Execute the ->complete() callbacks for all devices whose PM status is not + * DPM_ON (this allows new devices to be registered). + */ +void dpm_complete(pm_message_t state) +{ + struct list_head list; + + trace_suspend_resume(TPS("dpm_complete"), state.event, true); + might_sleep(); + + INIT_LIST_HEAD(&list); + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_prepared_list)) { + struct device *dev = to_device(dpm_prepared_list.prev); + + get_device(dev); + dev->power.is_prepared = false; + list_move(&dev->power.entry, &list); + mutex_unlock(&dpm_list_mtx); + + trace_device_pm_callback_start(dev, "", state.event); + device_complete(dev, state); + trace_device_pm_callback_end(dev, 0); + + mutex_lock(&dpm_list_mtx); + put_device(dev); + } + list_splice(&list, &dpm_list); + mutex_unlock(&dpm_list_mtx); + + /* Allow device probing and trigger re-probing of deferred devices */ + device_unblock_probing(); + trace_suspend_resume(TPS("dpm_complete"), state.event, false); +} + +/** + * dpm_resume_end - Execute "resume" callbacks and complete system transition. + * @state: PM transition of the system being carried out. + * + * Execute "resume" callbacks for all devices and complete the PM transition of + * the system. + */ +void dpm_resume_end(pm_message_t state) +{ + dpm_resume(state); + dpm_complete(state); +} +EXPORT_SYMBOL_GPL(dpm_resume_end); + + +/*------------------------- Suspend routines -------------------------*/ + +/** + * resume_event - Return a "resume" message for given "suspend" sleep state. + * @sleep_state: PM message representing a sleep state. + * + * Return a PM message representing the resume event corresponding to given + * sleep state. + */ +static pm_message_t resume_event(pm_message_t sleep_state) +{ + switch (sleep_state.event) { + case PM_EVENT_SUSPEND: + return PMSG_RESUME; + case PM_EVENT_FREEZE: + case PM_EVENT_QUIESCE: + return PMSG_RECOVER; + case PM_EVENT_HIBERNATE: + return PMSG_RESTORE; + } + return PMSG_ON; +} + +static void dpm_superior_set_must_resume(struct device *dev) +{ + struct device_link *link; + int idx; + + if (dev->parent) + dev->parent->power.must_resume = true; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) + link->supplier->power.must_resume = true; + + device_links_read_unlock(idx); +} + +/** + * __device_suspend_noirq - Execute a "noirq suspend" callback for given device. + * @dev: Device to handle. 
+ * @state: PM transition of the system being carried out. + * @async: If true, the device is being suspended asynchronously. + * + * The driver of @dev will not receive interrupts while this function is being + * executed. + */ +static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + const char *info = NULL; + int error = 0; + + TRACE_DEVICE(dev); + TRACE_SUSPEND(0); + + dpm_wait_for_subordinate(dev, async); + + if (async_error) + goto Complete; + + if (dev->power.syscore || dev->power.direct_complete) + goto Complete; + + if (dev->pm_domain) { + info = "noirq power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "noirq type "; + callback = pm_noirq_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "noirq class "; + callback = pm_noirq_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "noirq bus "; + callback = pm_noirq_op(dev->bus->pm, state); + } + if (callback) + goto Run; + + if (dev_pm_skip_suspend(dev)) + goto Skip; + + if (dev->driver && dev->driver->pm) { + info = "noirq driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + +Run: + error = dpm_run_callback(callback, dev, state, info); + if (error) { + async_error = error; + goto Complete; + } + +Skip: + dev->power.is_noirq_suspended = true; + + /* + * Skipping the resume of devices that were in use right before the + * system suspend (as indicated by their PM-runtime usage counters) + * would be suboptimal. Also resume them if doing that is not allowed + * to be skipped. + */ + if (atomic_read(&dev->power.usage_count) > 1 || + !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume)) + dev->power.must_resume = true; + + if (dev->power.must_resume) + dpm_superior_set_must_resume(dev); + +Complete: + complete_all(&dev->power.completion); + TRACE_SUSPEND(error); + return error; +} + +static void async_suspend_noirq(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_noirq(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + + put_device(dev); +} + +static int device_suspend_noirq(struct device *dev) +{ + if (dpm_async_fn(dev, async_suspend_noirq)) + return 0; + + return __device_suspend_noirq(dev, pm_transition, false); +} + +static int dpm_noirq_suspend_devices(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error = 0; + + trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); + mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + + while (!list_empty(&dpm_late_early_list)) { + struct device *dev = to_device(dpm_late_early_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_suspend_noirq(dev); + + mutex_lock(&dpm_list_mtx); + if (error) { + pm_dev_err(dev, state, " noirq", error); + dpm_save_failed_dev(dev_name(dev)); + put_device(dev); + break; + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_noirq_list); + put_device(dev); + + if (async_error) + break; + } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + if (!error) + error = async_error; + + if (error) { + suspend_stats.failed_suspend_noirq++; + dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); + } + dpm_show_time(starttime, state, error, "noirq"); + 
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); + return error; +} + +/** + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. + * + * Prevent device drivers' interrupt handlers from being called and invoke + * "noirq" suspend callbacks for all non-sysdev devices. + */ +int dpm_suspend_noirq(pm_message_t state) +{ + int ret; + + cpuidle_pause(); + + device_wakeup_arm_wake_irqs(); + suspend_device_irqs(); + + ret = dpm_noirq_suspend_devices(state); + if (ret) + dpm_resume_noirq(resume_event(state)); + + return ret; +} + +static void dpm_propagate_wakeup_to_parent(struct device *dev) +{ + struct device *parent = dev->parent; + + if (!parent) + return; + + spin_lock_irq(&parent->power.lock); + + if (dev->power.wakeup_path && !parent->power.ignore_children) + parent->power.wakeup_path = true; + + spin_unlock_irq(&parent->power.lock); +} + +/** + * __device_suspend_late - Execute a "late suspend" callback for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being suspended asynchronously. + * + * Runtime PM is disabled for @dev while this function is being executed. + */ +static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + const char *info = NULL; + int error = 0; + + TRACE_DEVICE(dev); + TRACE_SUSPEND(0); + + __pm_runtime_disable(dev, false); + + dpm_wait_for_subordinate(dev, async); + + if (async_error) + goto Complete; + + if (pm_wakeup_pending()) { + async_error = -EBUSY; + goto Complete; + } + + if (dev->power.syscore || dev->power.direct_complete) + goto Complete; + + if (dev->pm_domain) { + info = "late power domain "; + callback = pm_late_early_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "late type "; + callback = pm_late_early_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "late class "; + callback = pm_late_early_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "late bus "; + callback = pm_late_early_op(dev->bus->pm, state); + } + if (callback) + goto Run; + + if (dev_pm_skip_suspend(dev)) + goto Skip; + + if (dev->driver && dev->driver->pm) { + info = "late driver "; + callback = pm_late_early_op(dev->driver->pm, state); + } + +Run: + error = dpm_run_callback(callback, dev, state, info); + if (error) { + async_error = error; + goto Complete; + } + dpm_propagate_wakeup_to_parent(dev); + +Skip: + dev->power.is_late_suspended = true; + +Complete: + TRACE_SUSPEND(error); + complete_all(&dev->power.completion); + return error; +} + +static void async_suspend_late(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend_late(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + put_device(dev); +} + +static int device_suspend_late(struct device *dev) +{ + if (dpm_async_fn(dev, async_suspend_late)) + return 0; + + return __device_suspend_late(dev, pm_transition, false); +} + +/** + * dpm_suspend_late - Execute "late suspend" callbacks for all devices. + * @state: PM transition of the system being carried out. 
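+ *
+ * Runtime PM is disabled for each device processed here; it is enabled again
+ * by dpm_resume_early(), which is also called to unwind if this phase fails.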
+ */ +int dpm_suspend_late(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error = 0; + + trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); + mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + + while (!list_empty(&dpm_suspended_list)) { + struct device *dev = to_device(dpm_suspended_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_suspend_late(dev); + + mutex_lock(&dpm_list_mtx); + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_late_early_list); + + if (error) { + pm_dev_err(dev, state, " late", error); + dpm_save_failed_dev(dev_name(dev)); + put_device(dev); + break; + } + put_device(dev); + + if (async_error) + break; + } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + if (!error) + error = async_error; + if (error) { + suspend_stats.failed_suspend_late++; + dpm_save_failed_step(SUSPEND_SUSPEND_LATE); + dpm_resume_early(resume_event(state)); + } + dpm_show_time(starttime, state, error, "late"); + trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); + return error; +} + +/** + * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks. + * @state: PM transition of the system being carried out. + */ +int dpm_suspend_end(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error; + + error = dpm_suspend_late(state); + if (error) + goto out; + + error = dpm_suspend_noirq(state); + if (error) + dpm_resume_early(resume_event(state)); + +out: + dpm_show_time(starttime, state, error, "end"); + return error; +} +EXPORT_SYMBOL_GPL(dpm_suspend_end); + +/** + * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. + * @dev: Device to suspend. + * @state: PM transition of the system being carried out. + * @cb: Suspend callback to execute. + * @info: string description of caller. + */ +static int legacy_suspend(struct device *dev, pm_message_t state, + int (*cb)(struct device *dev, pm_message_t state), + const char *info) +{ + int error; + ktime_t calltime; + + calltime = initcall_debug_start(dev, cb); + + trace_device_pm_callback_start(dev, info, state.event); + error = cb(dev, state); + trace_device_pm_callback_end(dev, error); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, cb, error); + + return error; +} + +static void dpm_clear_superiors_direct_complete(struct device *dev) +{ + struct device_link *link; + int idx; + + if (dev->parent) { + spin_lock_irq(&dev->parent->power.lock); + dev->parent->power.direct_complete = false; + spin_unlock_irq(&dev->parent->power.lock); + } + + idx = device_links_read_lock(); + + list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) { + spin_lock_irq(&link->supplier->power.lock); + link->supplier->power.direct_complete = false; + spin_unlock_irq(&link->supplier->power.lock); + } + + device_links_read_unlock(idx); +} + +/** + * __device_suspend - Execute "suspend" callbacks for given device. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * @async: If true, the device is being suspended asynchronously. 
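+ *
+ * Hedged illustration (dev_pm_set_driver_flags() is assumed from linux/pm.h;
+ * it is not defined in this file): a driver opting into the skip
+ * optimizations consulted by this suspend path would typically do, at probe
+ * time,
+ *
+ *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
+ *				     DPM_FLAG_MAY_SKIP_RESUME);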
+ */ +static int __device_suspend(struct device *dev, pm_message_t state, bool async) +{ + pm_callback_t callback = NULL; + const char *info = NULL; + int error = 0; + DECLARE_DPM_WATCHDOG_ON_STACK(wd); + + TRACE_DEVICE(dev); + TRACE_SUSPEND(0); + + dpm_wait_for_subordinate(dev, async); + + if (async_error) { + dev->power.direct_complete = false; + goto Complete; + } + + /* + * Wait for possible runtime PM transitions of the device in progress + * to complete and if there's a runtime resume request pending for it, + * resume it before proceeding with invoking the system-wide suspend + * callbacks for it. + * + * If the system-wide suspend callbacks below change the configuration + * of the device, they must disable runtime PM for it or otherwise + * ensure that its runtime-resume callbacks will not be confused by that + * change in case they are invoked going forward. + */ + pm_runtime_barrier(dev); + + if (pm_wakeup_pending()) { + dev->power.direct_complete = false; + async_error = -EBUSY; + goto Complete; + } + + if (dev->power.syscore) + goto Complete; + + /* Avoid direct_complete to let wakeup_path propagate. */ + if (device_may_wakeup(dev) || dev->power.wakeup_path) + dev->power.direct_complete = false; + + if (dev->power.direct_complete) { + if (pm_runtime_status_suspended(dev)) { + pm_runtime_disable(dev); + if (pm_runtime_status_suspended(dev)) { + pm_dev_dbg(dev, state, "direct-complete "); + goto Complete; + } + + pm_runtime_enable(dev); + } + dev->power.direct_complete = false; + } + + dev->power.may_skip_resume = true; + dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME); + + dpm_watchdog_set(&wd, dev); + device_lock(dev); + + if (dev->pm_domain) { + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Run; + } + + if (dev->type && dev->type->pm) { + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Run; + } + + if (dev->class && dev->class->pm) { + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Run; + } + + if (dev->bus) { + if (dev->bus->pm) { + info = "bus "; + callback = pm_op(dev->bus->pm, state); + } else if (dev->bus->suspend) { + pm_dev_dbg(dev, state, "legacy bus "); + error = legacy_suspend(dev, state, dev->bus->suspend, + "legacy bus "); + goto End; + } + } + + Run: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + + End: + if (!error) { + dev->power.is_suspended = true; + if (device_may_wakeup(dev)) + dev->power.wakeup_path = true; + + dpm_propagate_wakeup_to_parent(dev); + dpm_clear_superiors_direct_complete(dev); + } + + device_unlock(dev); + dpm_watchdog_clear(&wd); + + Complete: + if (error) + async_error = error; + + complete_all(&dev->power.completion); + TRACE_SUSPEND(error); + return error; +} + +static void async_suspend(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend(dev, pm_transition, true); + if (error) { + dpm_save_failed_dev(dev_name(dev)); + pm_dev_err(dev, pm_transition, " async", error); + } + + put_device(dev); +} + +static int device_suspend(struct device *dev) +{ + if (dpm_async_fn(dev, async_suspend)) + return 0; + + return __device_suspend(dev, pm_transition, false); +} + +/** + * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. + * @state: PM transition of the system being carried out. 
+ */ +int dpm_suspend(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error = 0; + + trace_suspend_resume(TPS("dpm_suspend"), state.event, true); + might_sleep(); + + devfreq_suspend(); + cpufreq_suspend(); + + mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; + while (!list_empty(&dpm_prepared_list)) { + struct device *dev = to_device(dpm_prepared_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + error = device_suspend(dev); + + mutex_lock(&dpm_list_mtx); + if (error) { + pm_dev_err(dev, state, "", error); + dpm_save_failed_dev(dev_name(dev)); + put_device(dev); + break; + } + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &dpm_suspended_list); + put_device(dev); + if (async_error) + break; + } + mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + if (!error) + error = async_error; + if (error) { + suspend_stats.failed_suspend++; + dpm_save_failed_step(SUSPEND_SUSPEND); + } + dpm_show_time(starttime, state, error, NULL); + trace_suspend_resume(TPS("dpm_suspend"), state.event, false); + return error; +} + +/** + * device_prepare - Prepare a device for system power transition. + * @dev: Device to handle. + * @state: PM transition of the system being carried out. + * + * Execute the ->prepare() callback(s) for given device. No new children of the + * device may be registered after this function has returned. + */ +static int device_prepare(struct device *dev, pm_message_t state) +{ + int (*callback)(struct device *) = NULL; + int ret = 0; + + /* + * If a device's parent goes into runtime suspend at the wrong time, + * it won't be possible to resume the device. To prevent this we + * block runtime suspend here, during the prepare phase, and allow + * it again during the complete phase. + */ + pm_runtime_get_noresume(dev); + + if (dev->power.syscore) + return 0; + + device_lock(dev); + + dev->power.wakeup_path = false; + + if (dev->power.no_pm_callbacks) + goto unlock; + + if (dev->pm_domain) + callback = dev->pm_domain->ops.prepare; + else if (dev->type && dev->type->pm) + callback = dev->type->pm->prepare; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->prepare; + else if (dev->bus && dev->bus->pm) + callback = dev->bus->pm->prepare; + + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->prepare; + + if (callback) + ret = callback(dev); + +unlock: + device_unlock(dev); + + if (ret < 0) { + suspend_report_result(callback, ret); + pm_runtime_put(dev); + return ret; + } + /* + * A positive return value from ->prepare() means "this device appears + * to be runtime-suspended and its state is fine, so if it really is + * runtime-suspended, you can leave it in that state provided that you + * will do the same thing with all of its descendants". This only + * applies to suspend transitions, however. + */ + spin_lock_irq(&dev->power.lock); + dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && + (ret > 0 || dev->power.no_pm_callbacks) && + !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE); + spin_unlock_irq(&dev->power.lock); + return 0; +} + +/** + * dpm_prepare - Prepare all non-sysdev devices for a system PM transition. + * @state: PM transition of the system being carried out. + * + * Execute the ->prepare() callback(s) for all devices. 
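+ *
+ * Illustrative sketch of a driver ->prepare() callback (the name my_prepare
+ * is made up) relying on the positive-return convention handled by
+ * device_prepare() above:
+ *
+ *	static int my_prepare(struct device *dev)
+ *	{
+ *		return pm_runtime_status_suspended(dev);
+ *	}
+ *
+ * A positive return may let the core leave a runtime-suspended device in that
+ * state for the whole suspend transition, unless DPM_FLAG_NO_DIRECT_COMPLETE
+ * is set for it.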
+ */ +int dpm_prepare(pm_message_t state) +{ + int error = 0; + + trace_suspend_resume(TPS("dpm_prepare"), state.event, true); + might_sleep(); + + /* + * Give a chance for the known devices to complete their probes, before + * disable probing of devices. This sync point is important at least + * at boot time + hibernation restore. + */ + wait_for_device_probe(); + /* + * It is unsafe if probing of devices will happen during suspend or + * hibernation and system behavior will be unpredictable in this case. + * So, let's prohibit device's probing here and defer their probes + * instead. The normal behavior will be restored in dpm_complete(). + */ + device_block_probing(); + + mutex_lock(&dpm_list_mtx); + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + + trace_device_pm_callback_start(dev, "", state.event); + error = device_prepare(dev, state); + trace_device_pm_callback_end(dev, error); + + mutex_lock(&dpm_list_mtx); + if (error) { + if (error == -EAGAIN) { + put_device(dev); + error = 0; + continue; + } + pr_info("Device %s not prepared for power transition: code %d\n", + dev_name(dev), error); + put_device(dev); + break; + } + dev->power.is_prepared = true; + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &dpm_prepared_list); + put_device(dev); + } + mutex_unlock(&dpm_list_mtx); + trace_suspend_resume(TPS("dpm_prepare"), state.event, false); + return error; +} + +/** + * dpm_suspend_start - Prepare devices for PM transition and suspend them. + * @state: PM transition of the system being carried out. + * + * Prepare all non-sysdev devices for system PM transition and execute "suspend" + * callbacks for them. + */ +int dpm_suspend_start(pm_message_t state) +{ + ktime_t starttime = ktime_get(); + int error; + + error = dpm_prepare(state); + if (error) { + suspend_stats.failed_prepare++; + dpm_save_failed_step(SUSPEND_PREPARE); + } else + error = dpm_suspend(state); + dpm_show_time(starttime, state, error, "start"); + return error; +} +EXPORT_SYMBOL_GPL(dpm_suspend_start); + +void __suspend_report_result(const char *function, void *fn, int ret) +{ + if (ret) + pr_err("%s(): %pS returns %d\n", function, fn, ret); +} +EXPORT_SYMBOL_GPL(__suspend_report_result); + +/** + * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. + * @subordinate: Device that needs to wait for @dev. + * @dev: Device to wait for. + */ +int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) +{ + dpm_wait(dev, subordinate->power.async_suspend); + return async_error; +} +EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); + +/** + * dpm_for_each_dev - device iterator. + * @data: data for the callback. + * @fn: function to be called for each device. + * + * Iterate over devices in dpm_list, and call @fn for each device, + * passing it @data. 
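+ *
+ * Minimal usage sketch (my_show_dev and the message are made up):
+ *
+ *	static void my_show_dev(struct device *dev, void *data)
+ *	{
+ *		dev_info(dev, "%s\n", (char *)data);
+ *	}
+ *
+ *	dpm_for_each_dev("present in dpm_list", my_show_dev);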
+ */ +void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *)) +{ + struct device *dev; + + if (!fn) + return; + + device_pm_lock(); + list_for_each_entry(dev, &dpm_list, power.entry) + fn(dev, data); + device_pm_unlock(); +} +EXPORT_SYMBOL_GPL(dpm_for_each_dev); + +static bool pm_ops_is_empty(const struct dev_pm_ops *ops) +{ + if (!ops) + return true; + + return !ops->prepare && + !ops->suspend && + !ops->suspend_late && + !ops->suspend_noirq && + !ops->resume_noirq && + !ops->resume_early && + !ops->resume && + !ops->complete; +} + +void device_pm_check_callbacks(struct device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + dev->power.no_pm_callbacks = + (!dev->bus || (pm_ops_is_empty(dev->bus->pm) && + !dev->bus->suspend && !dev->bus->resume)) && + (!dev->class || pm_ops_is_empty(dev->class->pm)) && + (!dev->type || pm_ops_is_empty(dev->type->pm)) && + (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) && + (!dev->driver || (pm_ops_is_empty(dev->driver->pm) && + !dev->driver->suspend && !dev->driver->resume)); + spin_unlock_irqrestore(&dev->power.lock, flags); +} + +bool dev_pm_skip_suspend(struct device *dev) +{ + return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) && + pm_runtime_status_suspended(dev); +} diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h new file mode 100644 index 000000000..922ed457d --- /dev/null +++ b/drivers/base/power/power.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/pm_qos.h> + +static inline void device_pm_init_common(struct device *dev) +{ + if (!dev->power.early_init) { + spin_lock_init(&dev->power.lock); + dev->power.qos = NULL; + dev->power.early_init = true; + } +} + +#ifdef CONFIG_PM + +static inline void pm_runtime_early_init(struct device *dev) +{ + dev->power.disable_depth = 1; + device_pm_init_common(dev); +} + +extern void pm_runtime_init(struct device *dev); +extern void pm_runtime_reinit(struct device *dev); +extern void pm_runtime_remove(struct device *dev); +extern u64 pm_runtime_active_time(struct device *dev); + +#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) +#define WAKE_IRQ_DEDICATED_MANAGED BIT(1) +#define WAKE_IRQ_DEDICATED_REVERSE BIT(2) +#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ + WAKE_IRQ_DEDICATED_MANAGED | \ + WAKE_IRQ_DEDICATED_REVERSE) +#define WAKE_IRQ_DEDICATED_ENABLED BIT(3) + +struct wake_irq { + struct device *dev; + unsigned int status; + int irq; + const char *name; +}; + +extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); +extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); +extern void dev_pm_enable_wake_irq_check(struct device *dev, + bool can_change_status); +extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable); +extern void dev_pm_enable_wake_irq_complete(struct device *dev); + +#ifdef CONFIG_PM_SLEEP + +extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq); +extern void device_wakeup_detach_irq(struct device *dev); +extern void device_wakeup_arm_wake_irqs(void); +extern void device_wakeup_disarm_wake_irqs(void); + +#else + +static inline void device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq) {} + +static inline void device_wakeup_detach_irq(struct device *dev) +{ +} + +#endif /* CONFIG_PM_SLEEP */ + +/* + * sysfs.c + */ + +extern int dpm_sysfs_add(struct device *dev); +extern void dpm_sysfs_remove(struct device *dev); +extern void rpm_sysfs_remove(struct device *dev); +extern int 
wakeup_sysfs_add(struct device *dev); +extern void wakeup_sysfs_remove(struct device *dev); +extern int pm_qos_sysfs_add_resume_latency(struct device *dev); +extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); +extern int pm_qos_sysfs_add_flags(struct device *dev); +extern void pm_qos_sysfs_remove_flags(struct device *dev); +extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); +extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev); +extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); + +#else /* CONFIG_PM */ + +static inline void pm_runtime_early_init(struct device *dev) +{ + device_pm_init_common(dev); +} + +static inline void pm_runtime_init(struct device *dev) {} +static inline void pm_runtime_reinit(struct device *dev) {} +static inline void pm_runtime_remove(struct device *dev) {} + +static inline int dpm_sysfs_add(struct device *dev) { return 0; } +static inline void dpm_sysfs_remove(struct device *dev) {} +static inline int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, + kgid_t kgid) { return 0; } + +#endif + +#ifdef CONFIG_PM_SLEEP + +/* kernel/power/main.c */ +extern int pm_async_enabled; + +/* drivers/base/power/main.c */ +extern struct list_head dpm_list; /* The active device list */ + +static inline struct device *to_device(struct list_head *entry) +{ + return container_of(entry, struct device, power.entry); +} + +extern void device_pm_sleep_init(struct device *dev); +extern void device_pm_add(struct device *); +extern void device_pm_remove(struct device *); +extern void device_pm_move_before(struct device *, struct device *); +extern void device_pm_move_after(struct device *, struct device *); +extern void device_pm_move_last(struct device *); +extern void device_pm_check_callbacks(struct device *dev); + +static inline bool device_pm_initialized(struct device *dev) +{ + return dev->power.in_dpm_list; +} + +/* drivers/base/power/wakeup_stats.c */ +extern int wakeup_source_sysfs_add(struct device *parent, + struct wakeup_source *ws); +extern void wakeup_source_sysfs_remove(struct wakeup_source *ws); + +extern int pm_wakeup_source_sysfs_add(struct device *parent); + +#else /* !CONFIG_PM_SLEEP */ + +static inline void device_pm_sleep_init(struct device *dev) {} + +static inline void device_pm_add(struct device *dev) {} + +static inline void device_pm_remove(struct device *dev) +{ + pm_runtime_remove(dev); +} + +static inline void device_pm_move_before(struct device *deva, + struct device *devb) {} +static inline void device_pm_move_after(struct device *deva, + struct device *devb) {} +static inline void device_pm_move_last(struct device *dev) {} + +static inline void device_pm_check_callbacks(struct device *dev) {} + +static inline bool device_pm_initialized(struct device *dev) +{ + return device_is_registered(dev); +} + +static inline int pm_wakeup_source_sysfs_add(struct device *parent) +{ + return 0; +} + +#endif /* !CONFIG_PM_SLEEP */ + +static inline void device_pm_init(struct device *dev) +{ + device_pm_init_common(dev); + device_pm_sleep_init(dev); + pm_runtime_init(dev); +} diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c new file mode 100644 index 000000000..79fc6c441 --- /dev/null +++ b/drivers/base/power/qos-test.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2019 NXP + */ +#include <kunit/test.h> +#include <linux/pm_qos.h> + +/* Basic test for aggregating two "min" requests */ +static void freq_qos_test_min(struct kunit *test) +{ + 
struct freq_constraints qos; + struct freq_qos_request req1, req2; + int ret; + + freq_constraints_init(&qos); + memset(&req1, 0, sizeof(req1)); + memset(&req2, 0, sizeof(req2)); + + ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000); + KUNIT_EXPECT_EQ(test, ret, 1); + ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000); + KUNIT_EXPECT_EQ(test, ret, 1); + + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000); + + ret = freq_qos_remove_request(&req2); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000); + + ret = freq_qos_remove_request(&req1); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), + FREQ_QOS_MIN_DEFAULT_VALUE); +} + +/* Test that requests for MAX_DEFAULT_VALUE have no effect */ +static void freq_qos_test_maxdef(struct kunit *test) +{ + struct freq_constraints qos; + struct freq_qos_request req1, req2; + int ret; + + freq_constraints_init(&qos); + memset(&req1, 0, sizeof(req1)); + memset(&req2, 0, sizeof(req2)); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), + FREQ_QOS_MAX_DEFAULT_VALUE); + + ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX, + FREQ_QOS_MAX_DEFAULT_VALUE); + KUNIT_EXPECT_EQ(test, ret, 0); + ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX, + FREQ_QOS_MAX_DEFAULT_VALUE); + KUNIT_EXPECT_EQ(test, ret, 0); + + /* Add max 1000 */ + ret = freq_qos_update_request(&req1, 1000); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000); + + /* Add max 2000, no impact */ + ret = freq_qos_update_request(&req2, 2000); + KUNIT_EXPECT_EQ(test, ret, 0); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000); + + /* Remove max 1000, new max 2000 */ + ret = freq_qos_remove_request(&req1); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000); +} + +/* + * Test that a freq_qos_request can be added again after removal + * + * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency + * QoS requests after removal") + */ +static void freq_qos_test_readd(struct kunit *test) +{ + struct freq_constraints qos; + struct freq_qos_request req; + int ret; + + freq_constraints_init(&qos); + memset(&req, 0, sizeof(req)); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), + FREQ_QOS_MIN_DEFAULT_VALUE); + + /* Add */ + ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000); + + /* Remove */ + ret = freq_qos_remove_request(&req); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), + FREQ_QOS_MIN_DEFAULT_VALUE); + + /* Add again */ + ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000); + KUNIT_EXPECT_EQ(test, ret, 1); + KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000); +} + +static struct kunit_case pm_qos_test_cases[] = { + KUNIT_CASE(freq_qos_test_min), + KUNIT_CASE(freq_qos_test_maxdef), + KUNIT_CASE(freq_qos_test_readd), + {}, +}; + +static struct kunit_suite pm_qos_test_module = { + .name = "qos-kunit-test", + .test_cases = pm_qos_test_cases, +}; +kunit_test_suites(&pm_qos_test_module); diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c new file mode 100644 index 000000000..8e93167f1 --- /dev/null +++ b/drivers/base/power/qos.c @@ -0,0 +1,982 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Devices PM 
QoS constraints management + * + * Copyright (C) 2011 Texas Instruments, Inc. + * + * This module exposes the interface to kernel space for specifying + * per-device PM QoS dependencies. It provides infrastructure for registration + * of: + * + * Dependents on a QoS value : register requests + * Watchers of QoS value : get notified when target QoS value changes + * + * This QoS design is best effort based. Dependents register their QoS needs. + * Watchers register to keep track of the current QoS needs of the system. + * Watchers can register a per-device notification callback using the + * dev_pm_qos_*_notifier API. The notification chain data is stored in the + * per-device constraint data struct. + * + * Note about the per-device constraint data struct allocation: + * . The per-device constraints data struct ptr is stored into the device + * dev_pm_info. + * . To minimize the data usage by the per-device constraints, the data struct + * is only allocated at the first call to dev_pm_qos_add_request. + * . The data is later free'd when the device is removed from the system. + * . A global mutex protects the constraints users from the data being + * allocated and free'd. + */ + +#include <linux/pm_qos.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/export.h> +#include <linux/pm_runtime.h> +#include <linux/err.h> +#include <trace/events/power.h> + +#include "power.h" + +static DEFINE_MUTEX(dev_pm_qos_mtx); +static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); + +/** + * __dev_pm_qos_flags - Check PM QoS flags for a given device. + * @dev: Device to check the PM QoS flags for. + * @mask: Flags to check against. + * + * This routine must be called with dev->power.lock held. + */ +enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask) +{ + struct dev_pm_qos *qos = dev->power.qos; + struct pm_qos_flags *pqf; + s32 val; + + lockdep_assert_held(&dev->power.lock); + + if (IS_ERR_OR_NULL(qos)) + return PM_QOS_FLAGS_UNDEFINED; + + pqf = &qos->flags; + if (list_empty(&pqf->list)) + return PM_QOS_FLAGS_UNDEFINED; + + val = pqf->effective_flags & mask; + if (val) + return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME; + + return PM_QOS_FLAGS_NONE; +} + +/** + * dev_pm_qos_flags - Check PM QoS flags for a given device (locked). + * @dev: Device to check the PM QoS flags for. + * @mask: Flags to check against. + */ +enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask) +{ + unsigned long irqflags; + enum pm_qos_flags_status ret; + + spin_lock_irqsave(&dev->power.lock, irqflags); + ret = __dev_pm_qos_flags(dev, mask); + spin_unlock_irqrestore(&dev->power.lock, irqflags); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_flags); + +/** + * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device. + * @dev: Device to get the PM QoS constraint value for. + * + * This routine must be called with dev->power.lock held. + */ +s32 __dev_pm_qos_resume_latency(struct device *dev) +{ + lockdep_assert_held(&dev->power.lock); + + return dev_pm_qos_raw_resume_latency(dev); +} + +/** + * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked). + * @dev: Device to get the PM QoS constraint value for. + * @type: QoS request type. 
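+ *
+ * Illustrative use: the aggregated resume latency constraint can be read with
+ *
+ *	s32 latency = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
+ *
+ * which evaluates to PM_QOS_RESUME_LATENCY_NO_CONSTRAINT while no such
+ * constraints have been added for @dev.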
+ */ +s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type) +{ + struct dev_pm_qos *qos = dev->power.qos; + unsigned long flags; + s32 ret; + + spin_lock_irqsave(&dev->power.lock, flags); + + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT + : pm_qos_read_value(&qos->resume_latency); + break; + case DEV_PM_QOS_MIN_FREQUENCY: + ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE + : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN); + break; + case DEV_PM_QOS_MAX_FREQUENCY: + ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE + : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX); + break; + default: + WARN_ON(1); + ret = 0; + } + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return ret; +} + +/** + * apply_constraint - Add/modify/remove device PM QoS request. + * @req: Constraint request to apply + * @action: Action to perform (add/update/remove). + * @value: Value to assign to the QoS request. + * + * Internal function to update the constraints list using the PM QoS core + * code and if needed call the per-device callbacks. + */ +static int apply_constraint(struct dev_pm_qos_request *req, + enum pm_qos_req_action action, s32 value) +{ + struct dev_pm_qos *qos = req->dev->power.qos; + int ret; + + switch(req->type) { + case DEV_PM_QOS_RESUME_LATENCY: + if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0)) + value = 0; + + ret = pm_qos_update_target(&qos->resume_latency, + &req->data.pnode, action, value); + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + ret = pm_qos_update_target(&qos->latency_tolerance, + &req->data.pnode, action, value); + if (ret) { + value = pm_qos_read_value(&qos->latency_tolerance); + req->dev->power.set_latency_tolerance(req->dev, value); + } + break; + case DEV_PM_QOS_MIN_FREQUENCY: + case DEV_PM_QOS_MAX_FREQUENCY: + ret = freq_qos_apply(&req->data.freq, action, value); + break; + case DEV_PM_QOS_FLAGS: + ret = pm_qos_update_flags(&qos->flags, &req->data.flr, + action, value); + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/* + * dev_pm_qos_constraints_allocate + * @dev: device to allocate data for + * + * Called at the first call to add_request, for constraint data allocation + * Must be called with the dev_pm_qos_mtx mutex held + */ +static int dev_pm_qos_constraints_allocate(struct device *dev) +{ + struct dev_pm_qos *qos; + struct pm_qos_constraints *c; + struct blocking_notifier_head *n; + + qos = kzalloc(sizeof(*qos), GFP_KERNEL); + if (!qos) + return -ENOMEM; + + n = kzalloc(3 * sizeof(*n), GFP_KERNEL); + if (!n) { + kfree(qos); + return -ENOMEM; + } + + c = &qos->resume_latency; + plist_head_init(&c->list); + c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + c->notifiers = n; + BLOCKING_INIT_NOTIFIER_HEAD(n); + + c = &qos->latency_tolerance; + plist_head_init(&c->list); + c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE; + c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + c->type = PM_QOS_MIN; + + freq_constraints_init(&qos->freq); + + INIT_LIST_HEAD(&qos->flags.list); + + spin_lock_irq(&dev->power.lock); + dev->power.qos = qos; + spin_unlock_irq(&dev->power.lock); + + return 0; +} + +static void __dev_pm_qos_hide_latency_limit(struct device *dev); +static void __dev_pm_qos_hide_flags(struct 
device *dev); + +/** + * dev_pm_qos_constraints_destroy + * @dev: target device + * + * Called from the device PM subsystem on device removal under device_pm_lock(). + */ +void dev_pm_qos_constraints_destroy(struct device *dev) +{ + struct dev_pm_qos *qos; + struct dev_pm_qos_request *req, *tmp; + struct pm_qos_constraints *c; + struct pm_qos_flags *f; + + mutex_lock(&dev_pm_qos_sysfs_mtx); + + /* + * If the device's PM QoS resume latency limit or PM QoS flags have been + * exposed to user space, they have to be hidden at this point. + */ + pm_qos_sysfs_remove_resume_latency(dev); + pm_qos_sysfs_remove_flags(dev); + + mutex_lock(&dev_pm_qos_mtx); + + __dev_pm_qos_hide_latency_limit(dev); + __dev_pm_qos_hide_flags(dev); + + qos = dev->power.qos; + if (!qos) + goto out; + + /* Flush the constraints lists for the device. */ + c = &qos->resume_latency; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + /* + * Update constraints list and call the notification + * callbacks if needed + */ + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + c = &qos->latency_tolerance; + plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + c = &qos->freq.min_freq; + plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, + PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + c = &qos->freq.max_freq; + plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) { + apply_constraint(req, PM_QOS_REMOVE_REQ, + PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + f = &qos->flags; + list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { + apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + } + + spin_lock_irq(&dev->power.lock); + dev->power.qos = ERR_PTR(-ENODEV); + spin_unlock_irq(&dev->power.lock); + + kfree(qos->resume_latency.notifiers); + kfree(qos); + + out: + mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); +} + +static bool dev_pm_qos_invalid_req_type(struct device *dev, + enum dev_pm_qos_req_type type) +{ + return type == DEV_PM_QOS_LATENCY_TOLERANCE && + !dev->power.set_latency_tolerance; +} + +static int __dev_pm_qos_add_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret = 0; + + if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type)) + return -EINVAL; + + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) + return -EINVAL; + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + trace_dev_pm_qos_add_request(dev_name(dev), type, value); + if (ret) + return ret; + + req->dev = dev; + req->type = type; + if (req->type == DEV_PM_QOS_MIN_FREQUENCY) + ret = freq_qos_add_request(&dev->power.qos->freq, + &req->data.freq, + FREQ_QOS_MIN, value); + else if (req->type == DEV_PM_QOS_MAX_FREQUENCY) + ret = freq_qos_add_request(&dev->power.qos->freq, + &req->data.freq, + FREQ_QOS_MAX, value); + else + ret = apply_constraint(req, PM_QOS_ADD_REQ, value); + + return ret; +} + +/** + * dev_pm_qos_add_request - inserts new qos request into the list + * @dev: target device for the constraint + * @req: pointer to a preallocated handle + * @type: type of the request + * @value: defines the 
qos request + * + * This function inserts a new entry in the device constraints list of + * requested qos performance characteristics. It recomputes the aggregate + * QoS expectations of parameters and initializes the dev_pm_qos_request + * handle. Caller needs to save this handle for later use in updates and + * removal. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory + * to allocate for data structures, -ENODEV if the device has just been removed + * from the system. + * + * Callers should ensure that the target device is not RPM_SUSPENDED before + * using this function for requests of type DEV_PM_QOS_FLAGS. + */ +int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = __dev_pm_qos_add_request(dev, req, type, value); + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); + +/** + * __dev_pm_qos_update_request - Modify an existing device PM QoS request. + * @req : PM QoS request to modify. + * @new_value: New value to request. + */ +static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, + s32 new_value) +{ + s32 curr_value; + int ret = 0; + + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) + return -EINVAL; + + if (IS_ERR_OR_NULL(req->dev->power.qos)) + return -ENODEV; + + switch(req->type) { + case DEV_PM_QOS_RESUME_LATENCY: + case DEV_PM_QOS_LATENCY_TOLERANCE: + curr_value = req->data.pnode.prio; + break; + case DEV_PM_QOS_MIN_FREQUENCY: + case DEV_PM_QOS_MAX_FREQUENCY: + curr_value = req->data.freq.pnode.prio; + break; + case DEV_PM_QOS_FLAGS: + curr_value = req->data.flr.flags; + break; + default: + return -EINVAL; + } + + trace_dev_pm_qos_update_request(dev_name(req->dev), req->type, + new_value); + if (curr_value != new_value) + ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); + + return ret; +} + +/** + * dev_pm_qos_update_request - modifies an existing qos request + * @req : handle to list element holding a dev_pm_qos request to use + * @new_value: defines the qos request + * + * Updates an existing dev PM qos request along with updating the + * target value. + * + * Attempts are made to make this code callable on hot code paths. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENODEV if the device has been + * removed from the system + * + * Callers should ensure that the target device is not RPM_SUSPENDED before + * using this function for requests of type DEV_PM_QOS_FLAGS. 
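 *
 * Editor's illustrative sketch, not part of this file: a hypothetical driver
 * adding and later dropping a resume-latency constraint with this API (the
 * request object name and the value 100 are made up for the example):
 *
 *	static struct dev_pm_qos_request my_qos_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_qos_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	dev_pm_qos_remove_request(&my_qos_req);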
+ */ +int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = __dev_pm_qos_update_request(req, new_value); + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); + +static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) +{ + int ret; + + if (!req) /*guard against callers passing in null */ + return -EINVAL; + + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) + return -EINVAL; + + if (IS_ERR_OR_NULL(req->dev->power.qos)) + return -ENODEV; + + trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type, + PM_QOS_DEFAULT_VALUE); + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); + memset(req, 0, sizeof(*req)); + return ret; +} + +/** + * dev_pm_qos_remove_request - modifies an existing qos request + * @req: handle to request list element + * + * Will remove pm qos request from the list of constraints and + * recompute the current target value. Call this on slow code paths. + * + * Returns 1 if the aggregated constraint value has changed, + * 0 if the aggregated constraint value has not changed, + * -EINVAL in case of wrong parameters, -ENODEV if the device has been + * removed from the system + * + * Callers should ensure that the target device is not RPM_SUSPENDED before + * using this function for requests of type DEV_PM_QOS_FLAGS. + */ +int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = __dev_pm_qos_remove_request(req); + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); + +/** + * dev_pm_qos_add_notifier - sets notification entry for changes to target value + * of per-device PM QoS constraints + * + * @dev: target device for the constraint + * @notifier: notifier block managed by caller. + * @type: request type. + * + * Will register the notifier into a notification chain that gets called + * upon changes to the target value for the device. + * + * If the device's constraints object doesn't exist when this routine is called, + * it will be created (or error code will be returned if that fails). + */ +int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier, + enum dev_pm_qos_req_type type) +{ + int ret = 0; + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR(dev->power.qos)) + ret = -ENODEV; + else if (!dev->power.qos) + ret = dev_pm_qos_constraints_allocate(dev); + + if (ret) + goto unlock; + + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers, + notifier); + break; + case DEV_PM_QOS_MIN_FREQUENCY: + ret = freq_qos_add_notifier(&dev->power.qos->freq, + FREQ_QOS_MIN, notifier); + break; + case DEV_PM_QOS_MAX_FREQUENCY: + ret = freq_qos_add_notifier(&dev->power.qos->freq, + FREQ_QOS_MAX, notifier); + break; + default: + WARN_ON(1); + ret = -EINVAL; + } + +unlock: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); + +/** + * dev_pm_qos_remove_notifier - deletes notification for changes to target value + * of per-device PM QoS constraints + * + * @dev: target device for the constraint + * @notifier: notifier block to be removed. + * @type: request type. + * + * Will remove the notifier from the notification chain that gets called + * upon changes to the target value. 
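 *
 * Editor's illustrative sketch, not part of this file: pairing this call with
 * dev_pm_qos_add_notifier() in a hypothetical user (names are made up); the
 * callback runs when the aggregate constraint value for the device changes:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	dev_pm_qos_add_notifier(dev, &my_qos_nb, DEV_PM_QOS_RESUME_LATENCY);
 *	...
 *	dev_pm_qos_remove_notifier(dev, &my_qos_nb, DEV_PM_QOS_RESUME_LATENCY);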
+ */ +int dev_pm_qos_remove_notifier(struct device *dev, + struct notifier_block *notifier, + enum dev_pm_qos_req_type type) +{ + int ret = 0; + + mutex_lock(&dev_pm_qos_mtx); + + /* Silently return if the constraints object is not present. */ + if (IS_ERR_OR_NULL(dev->power.qos)) + goto unlock; + + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers, + notifier); + break; + case DEV_PM_QOS_MIN_FREQUENCY: + ret = freq_qos_remove_notifier(&dev->power.qos->freq, + FREQ_QOS_MIN, notifier); + break; + case DEV_PM_QOS_MAX_FREQUENCY: + ret = freq_qos_remove_notifier(&dev->power.qos->freq, + FREQ_QOS_MAX, notifier); + break; + default: + WARN_ON(1); + ret = -EINVAL; + } + +unlock: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); + +/** + * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. + * @dev: Device whose ancestor to add the request for. + * @req: Pointer to the preallocated handle. + * @type: Type of the request. + * @value: Constraint latency value. + */ +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, + enum dev_pm_qos_req_type type, s32 value) +{ + struct device *ancestor = dev->parent; + int ret = -ENODEV; + + switch (type) { + case DEV_PM_QOS_RESUME_LATENCY: + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + while (ancestor && !ancestor->power.set_latency_tolerance) + ancestor = ancestor->parent; + + break; + default: + ancestor = NULL; + } + if (ancestor) + ret = dev_pm_qos_add_request(ancestor, req, type, value); + + if (ret < 0) + req->dev = NULL; + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); + +static void __dev_pm_qos_drop_user_request(struct device *dev, + enum dev_pm_qos_req_type type) +{ + struct dev_pm_qos_request *req = NULL; + + switch(type) { + case DEV_PM_QOS_RESUME_LATENCY: + req = dev->power.qos->resume_latency_req; + dev->power.qos->resume_latency_req = NULL; + break; + case DEV_PM_QOS_LATENCY_TOLERANCE: + req = dev->power.qos->latency_tolerance_req; + dev->power.qos->latency_tolerance_req = NULL; + break; + case DEV_PM_QOS_FLAGS: + req = dev->power.qos->flags_req; + dev->power.qos->flags_req = NULL; + break; + default: + WARN_ON(1); + return; + } + __dev_pm_qos_remove_request(req); + kfree(req); +} + +static void dev_pm_qos_drop_user_request(struct device *dev, + enum dev_pm_qos_req_type type) +{ + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_drop_user_request(dev, type); + mutex_unlock(&dev_pm_qos_mtx); +} + +/** + * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. + * @dev: Device whose PM QoS latency limit is to be exposed to user space. + * @value: Initial value of the latency limit. 
+ */ +int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) +{ + struct dev_pm_qos_request *req; + int ret; + + if (!device_is_registered(dev) || value < 0) + return -EINVAL; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); + if (ret < 0) { + kfree(req); + return ret; + } + + mutex_lock(&dev_pm_qos_sysfs_mtx); + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos)) + ret = -ENODEV; + else if (dev->power.qos->resume_latency_req) + ret = -EEXIST; + + if (ret < 0) { + __dev_pm_qos_remove_request(req); + kfree(req); + mutex_unlock(&dev_pm_qos_mtx); + goto out; + } + dev->power.qos->resume_latency_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + + ret = pm_qos_sysfs_add_resume_latency(dev); + if (ret) + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); + + out: + mutex_unlock(&dev_pm_qos_sysfs_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); + +static void __dev_pm_qos_hide_latency_limit(struct device *dev) +{ + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY); +} + +/** + * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. + * @dev: Device whose PM QoS latency limit is to be hidden from user space. + */ +void dev_pm_qos_hide_latency_limit(struct device *dev) +{ + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_resume_latency(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_latency_limit(dev); + mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); + +/** + * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space. + * @dev: Device whose PM QoS flags are to be exposed to user space. + * @val: Initial values of the flags. + */ +int dev_pm_qos_expose_flags(struct device *dev, s32 val) +{ + struct dev_pm_qos_request *req; + int ret; + + if (!device_is_registered(dev)) + return -EINVAL; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); + if (ret < 0) { + kfree(req); + return ret; + } + + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos)) + ret = -ENODEV; + else if (dev->power.qos->flags_req) + ret = -EEXIST; + + if (ret < 0) { + __dev_pm_qos_remove_request(req); + kfree(req); + mutex_unlock(&dev_pm_qos_mtx); + goto out; + } + dev->power.qos->flags_req = req; + + mutex_unlock(&dev_pm_qos_mtx); + + ret = pm_qos_sysfs_add_flags(dev); + if (ret) + dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); + + out: + mutex_unlock(&dev_pm_qos_sysfs_mtx); + pm_runtime_put(dev); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); + +static void __dev_pm_qos_hide_flags(struct device *dev) +{ + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); +} + +/** + * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. + * @dev: Device whose PM QoS flags are to be hidden from user space. 
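 *
 * Editor's illustrative sketch, not part of this file: the expected pairing
 * with dev_pm_qos_expose_flags() and dev_pm_qos_update_flags(), assuming the
 * PM_QOS_FLAG_NO_POWER_OFF flag as the example mask:
 *
 *	dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 *	...
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, false);
 *	...
 *	dev_pm_qos_hide_flags(dev);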
+ */ +void dev_pm_qos_hide_flags(struct device *dev) +{ + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_sysfs_mtx); + + pm_qos_sysfs_remove_flags(dev); + + mutex_lock(&dev_pm_qos_mtx); + __dev_pm_qos_hide_flags(dev); + mutex_unlock(&dev_pm_qos_mtx); + + mutex_unlock(&dev_pm_qos_sysfs_mtx); + pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); + +/** + * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space. + * @dev: Device to update the PM QoS flags request for. + * @mask: Flags to set/clear. + * @set: Whether to set or clear the flags (true means set). + */ +int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set) +{ + s32 value; + int ret; + + pm_runtime_get_sync(dev); + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) { + ret = -EINVAL; + goto out; + } + + value = dev_pm_qos_requested_flags(dev); + if (set) + value |= mask; + else + value &= ~mask; + + ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); + + out: + mutex_unlock(&dev_pm_qos_mtx); + pm_runtime_put(dev); + return ret; +} + +/** + * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance. + * @dev: Device to obtain the user space latency tolerance for. + */ +s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) +{ + s32 ret; + + mutex_lock(&dev_pm_qos_mtx); + ret = IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req ? + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT : + dev->power.qos->latency_tolerance_req->data.pnode.prio; + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} + +/** + * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance. + * @dev: Device to update the user space latency tolerance for. + * @val: New user space latency tolerance for @dev (negative values disable). 
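 *
 * Editor's illustrative sketch, not part of this file: setting a tolerance and
 * later dropping the user-space request again by passing the (negative)
 * PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT value, as handled below:
 *
 *	dev_pm_qos_update_user_latency_tolerance(dev, 20);
 *	...
 *	dev_pm_qos_update_user_latency_tolerance(dev,
 *			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);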
+ */ +int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) +{ + int ret; + + mutex_lock(&dev_pm_qos_mtx); + + if (IS_ERR_OR_NULL(dev->power.qos) + || !dev->power.qos->latency_tolerance_req) { + struct dev_pm_qos_request *req; + + if (val < 0) { + if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) + ret = 0; + else + ret = -EINVAL; + goto out; + } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out; + } + ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); + if (ret < 0) { + kfree(req); + goto out; + } + dev->power.qos->latency_tolerance_req = req; + } else { + if (val < 0) { + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE); + ret = 0; + } else { + ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val); + } + } + + out: + mutex_unlock(&dev_pm_qos_mtx); + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance); + +/** + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace + * @dev: Device whose latency tolerance to expose + */ +int dev_pm_qos_expose_latency_tolerance(struct device *dev) +{ + int ret; + + if (!dev->power.set_latency_tolerance) + return -EINVAL; + + mutex_lock(&dev_pm_qos_sysfs_mtx); + ret = pm_qos_sysfs_add_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); + +/** + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace + * @dev: Device whose latency tolerance to hide + */ +void dev_pm_qos_hide_latency_tolerance(struct device *dev) +{ + mutex_lock(&dev_pm_qos_sysfs_mtx); + pm_qos_sysfs_remove_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + /* Remove the request from user space now */ + pm_runtime_get_sync(dev); + dev_pm_qos_update_user_latency_tolerance(dev, + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); + pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c new file mode 100644 index 000000000..fbbc3ed14 --- /dev/null +++ b/drivers/base/power/runtime.c @@ -0,0 +1,1892 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/runtime.c - Helper functions for device runtime PM + * + * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> + */ +#include <linux/sched/mm.h> +#include <linux/ktime.h> +#include <linux/hrtimer.h> +#include <linux/export.h> +#include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> +#include <trace/events/rpm.h> + +#include "../base.h" +#include "power.h" + +typedef int (*pm_callback_t)(struct device *); + +static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) +{ + pm_callback_t cb; + const struct dev_pm_ops *ops; + + if (dev->pm_domain) + ops = &dev->pm_domain->ops; + else if (dev->type && dev->type->pm) + ops = dev->type->pm; + else if (dev->class && dev->class->pm) + ops = dev->class->pm; + else if (dev->bus && dev->bus->pm) + ops = dev->bus->pm; + else + ops = NULL; + + if (ops) + cb = *(pm_callback_t *)((void *)ops + cb_offset); + else + cb = NULL; + + if (!cb && dev->driver && dev->driver->pm) + cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); + + return cb; +} + +#define RPM_GET_CALLBACK(dev, callback) \ + __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback)) + +static int rpm_resume(struct device *dev, int rpmflags); +static int rpm_suspend(struct device *dev, int rpmflags); + +/** + * update_pm_runtime_accounting - Update the time accounting of power states + * @dev: Device to update the accounting for + * + * In order to be able to have time accounting of the various power states + * (as used by programs such as PowerTOP to show the effectiveness of runtime + * PM), we need to track the time spent in each state. + * update_pm_runtime_accounting must be called each time before the + * runtime_status field is updated, to account the time in the old state + * correctly. + */ +static void update_pm_runtime_accounting(struct device *dev) +{ + u64 now, last, delta; + + if (dev->power.disable_depth > 0) + return; + + last = dev->power.accounting_timestamp; + + now = ktime_get_mono_fast_ns(); + dev->power.accounting_timestamp = now; + + /* + * Because ktime_get_mono_fast_ns() is not monotonic during + * timekeeping updates, ensure that 'now' is after the last saved + * timesptamp. + */ + if (now < last) + return; + + delta = now - last; + + if (dev->power.runtime_status == RPM_SUSPENDED) + dev->power.suspended_time += delta; + else + dev->power.active_time += delta; +} + +static void __update_runtime_status(struct device *dev, enum rpm_status status) +{ + update_pm_runtime_accounting(dev); + dev->power.runtime_status = status; +} + +static u64 rpm_get_accounted_time(struct device *dev, bool suspended) +{ + u64 time; + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + update_pm_runtime_accounting(dev); + time = suspended ? dev->power.suspended_time : dev->power.active_time; + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return time; +} + +u64 pm_runtime_active_time(struct device *dev) +{ + return rpm_get_accounted_time(dev, false); +} + +u64 pm_runtime_suspended_time(struct device *dev) +{ + return rpm_get_accounted_time(dev, true); +} +EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); + +/** + * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. + * @dev: Device to handle. + */ +static void pm_runtime_deactivate_timer(struct device *dev) +{ + if (dev->power.timer_expires > 0) { + hrtimer_try_to_cancel(&dev->power.suspend_timer); + dev->power.timer_expires = 0; + } +} + +/** + * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests. + * @dev: Device to handle. 
+ */ +static void pm_runtime_cancel_pending(struct device *dev) +{ + pm_runtime_deactivate_timer(dev); + /* + * In case there's a request pending, make sure its work function will + * return without doing anything. + */ + dev->power.request = RPM_REQ_NONE; +} + +/* + * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. + * @dev: Device to handle. + * + * Compute the autosuspend-delay expiration time based on the device's + * power.last_busy time. If the delay has already expired or is disabled + * (negative) or the power.use_autosuspend flag isn't set, return 0. + * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero). + * + * This function may be called either with or without dev->power.lock held. + * Either way it can be racy, since power.last_busy may be updated at any time. + */ +u64 pm_runtime_autosuspend_expiration(struct device *dev) +{ + int autosuspend_delay; + u64 expires; + + if (!dev->power.use_autosuspend) + return 0; + + autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); + if (autosuspend_delay < 0) + return 0; + + expires = READ_ONCE(dev->power.last_busy); + expires += (u64)autosuspend_delay * NSEC_PER_MSEC; + if (expires > ktime_get_mono_fast_ns()) + return expires; /* Expires in the future */ + + return 0; +} +EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); + +static int dev_memalloc_noio(struct device *dev, void *data) +{ + return dev->power.memalloc_noio; +} + +/* + * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag. + * @dev: Device to handle. + * @enable: True for setting the flag and False for clearing the flag. + * + * Set the flag for all devices in the path from the device to the + * root device in the device tree if @enable is true, otherwise clear + * the flag for devices in the path whose siblings don't set the flag. + * + * The function should only be called by block device, or network + * device driver for solving the deadlock problem during runtime + * resume/suspend: + * + * If memory allocation with GFP_KERNEL is called inside runtime + * resume/suspend callback of any one of its ancestors(or the + * block device itself), the deadlock may be triggered inside the + * memory allocation since it might not complete until the block + * device becomes active and the involed page I/O finishes. The + * situation is pointed out first by Alan Stern. Network device + * are involved in iSCSI kind of situation. + * + * The lock of dev_hotplug_mutex is held in the function for handling + * hotplug race because pm_runtime_set_memalloc_noio() may be called + * in async probe(). + * + * The function should be called between device_add() and device_del() + * on the affected device(block/network device). + */ +void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) +{ + static DEFINE_MUTEX(dev_hotplug_mutex); + + mutex_lock(&dev_hotplug_mutex); + for (;;) { + bool enabled; + + /* hold power lock since bitfield is not SMP-safe. */ + spin_lock_irq(&dev->power.lock); + enabled = dev->power.memalloc_noio; + dev->power.memalloc_noio = enable; + spin_unlock_irq(&dev->power.lock); + + /* + * not need to enable ancestors any more if the device + * has been enabled. + */ + if (enabled && enable) + break; + + dev = dev->parent; + + /* + * clear flag of the parent device only if all the + * children don't set the flag because ancestor's + * flag was set by any one of the descendants. 
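 *
 * Editor's illustrative sketch, not part of this file: per the kernel-doc of
 * pm_runtime_set_memalloc_noio() above, a hypothetical block or network
 * driver brackets the device's registered lifetime like this:
 *
 *	device_add(dev);
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);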
+ */ + if (!dev || (!enable && + device_for_each_child(dev, NULL, + dev_memalloc_noio))) + break; + } + mutex_unlock(&dev_hotplug_mutex); +} +EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio); + +/** + * rpm_check_suspend_allowed - Test whether a device may be suspended. + * @dev: Device to test. + */ +static int rpm_check_suspend_allowed(struct device *dev) +{ + int retval = 0; + + if (dev->power.runtime_error) + retval = -EINVAL; + else if (dev->power.disable_depth > 0) + retval = -EACCES; + else if (atomic_read(&dev->power.usage_count) > 0) + retval = -EAGAIN; + else if (!dev->power.ignore_children && + atomic_read(&dev->power.child_count)) + retval = -EBUSY; + + /* Pending resume requests take precedence over suspends. */ + else if ((dev->power.deferred_resume + && dev->power.runtime_status == RPM_SUSPENDING) + || (dev->power.request_pending + && dev->power.request == RPM_REQ_RESUME)) + retval = -EAGAIN; + else if (__dev_pm_qos_resume_latency(dev) == 0) + retval = -EPERM; + else if (dev->power.runtime_status == RPM_SUSPENDED) + retval = 1; + + return retval; +} + +static int rpm_get_suppliers(struct device *dev) +{ + struct device_link *link; + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) { + int retval; + + if (!(link->flags & DL_FLAG_PM_RUNTIME)) + continue; + + retval = pm_runtime_get_sync(link->supplier); + /* Ignore suppliers with disabled runtime PM. */ + if (retval < 0 && retval != -EACCES) { + pm_runtime_put_noidle(link->supplier); + return retval; + } + refcount_inc(&link->rpm_active); + } + return 0; +} + +/** + * pm_runtime_release_supplier - Drop references to device link's supplier. + * @link: Target device link. + * + * Drop all runtime PM references associated with @link to its supplier device. + */ +void pm_runtime_release_supplier(struct device_link *link) +{ + struct device *supplier = link->supplier; + + /* + * The additional power.usage_count check is a safety net in case + * the rpm_active refcount becomes saturated, in which case + * refcount_dec_not_one() would return true forever, but it is not + * strictly necessary. + */ + while (refcount_dec_not_one(&link->rpm_active) && + atomic_read(&supplier->power.usage_count) > 0) + pm_runtime_put_noidle(supplier); +} + +static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) +{ + struct device_link *link; + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) { + pm_runtime_release_supplier(link); + if (try_to_suspend) + pm_request_idle(link->supplier); + } +} + +static void rpm_put_suppliers(struct device *dev) +{ + __rpm_put_suppliers(dev, true); +} + +static void rpm_suspend_suppliers(struct device *dev) +{ + struct device_link *link; + int idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) + pm_request_idle(link->supplier); + + device_links_read_unlock(idx); +} + +/** + * __rpm_callback - Run a given runtime PM callback for a given device. + * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. + */ +static int __rpm_callback(int (*cb)(struct device *), struct device *dev) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int retval, idx; + bool use_links = dev->power.links_count > 0; + + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + } else { + spin_unlock_irq(&dev->power.lock); + + /* + * Resume suppliers if necessary. 
+ * + * The device's runtime PM status cannot change until this + * routine returns, so it is safe to read the status outside of + * the lock. + */ + if (use_links && dev->power.runtime_status == RPM_RESUMING) { + idx = device_links_read_lock(); + + retval = rpm_get_suppliers(dev); + if (retval) { + rpm_put_suppliers(dev); + goto fail; + } + + device_links_read_unlock(idx); + } + } + + retval = cb(dev); + + if (dev->power.irq_safe) { + spin_lock(&dev->power.lock); + } else { + /* + * If the device is suspending and the callback has returned + * success, drop the usage counters of the suppliers that have + * been reference counted on its resume. + * + * Do that if resume fails too. + */ + if (use_links + && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) + || (dev->power.runtime_status == RPM_RESUMING && retval))) { + idx = device_links_read_lock(); + + __rpm_put_suppliers(dev, false); + +fail: + device_links_read_unlock(idx); + } + + spin_lock_irq(&dev->power.lock); + } + + return retval; +} + +/** + * rpm_idle - Notify device bus type if the device can be suspended. + * @dev: Device to notify the bus type about. + * @rpmflags: Flag bits. + * + * Check if the device's runtime PM status allows it to be suspended. If + * another idle notification has been started earlier, return immediately. If + * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise + * run the ->runtime_idle() callback directly. If the ->runtime_idle callback + * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_idle(struct device *dev, int rpmflags) +{ + int (*callback)(struct device *); + int retval; + + trace_rpm_idle_rcuidle(dev, rpmflags); + retval = rpm_check_suspend_allowed(dev); + if (retval < 0) + ; /* Conditions are wrong. */ + + /* Idle notifications are allowed only in the RPM_ACTIVE state. */ + else if (dev->power.runtime_status != RPM_ACTIVE) + retval = -EAGAIN; + + /* + * Any pending request other than an idle notification takes + * precedence over us, except that the timer may be running. + */ + else if (dev->power.request_pending && + dev->power.request > RPM_REQ_IDLE) + retval = -EAGAIN; + + /* Act as though RPM_NOWAIT is always set. */ + else if (dev->power.idle_notification) + retval = -EINPROGRESS; + if (retval) + goto out; + + /* Pending requests need to be canceled. */ + dev->power.request = RPM_REQ_NONE; + + callback = RPM_GET_CALLBACK(dev, runtime_idle); + + /* If no callback assume success. */ + if (!callback || dev->power.no_callbacks) + goto out; + + /* Carry out an asynchronous or a synchronous idle notification. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = RPM_REQ_IDLE; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0); + return 0; + } + + dev->power.idle_notification = true; + + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = callback(dev); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); + + dev->power.idle_notification = false; + wake_up_all(&dev->power.wait_queue); + + out: + trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); + return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); +} + +/** + * rpm_callback - Run a given runtime PM callback for a given device. 
+ * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. + */ +static int rpm_callback(int (*cb)(struct device *), struct device *dev) +{ + int retval; + + if (!cb) + return -ENOSYS; + + if (dev->power.memalloc_noio) { + unsigned int noio_flag; + + /* + * Deadlock might be caused if memory allocation with + * GFP_KERNEL happens inside runtime_suspend and + * runtime_resume callbacks of one block device's + * ancestor or the block device itself. Network + * device might be thought as part of iSCSI block + * device, so network device and its ancestor should + * be marked as memalloc_noio too. + */ + noio_flag = memalloc_noio_save(); + retval = __rpm_callback(cb, dev); + memalloc_noio_restore(noio_flag); + } else { + retval = __rpm_callback(cb, dev); + } + + dev->power.runtime_error = retval; + return retval != -EACCES ? retval : -EIO; +} + +/** + * rpm_suspend - Carry out runtime suspend of given device. + * @dev: Device to suspend. + * @rpmflags: Flag bits. + * + * Check if the device's runtime PM status allows it to be suspended. + * Cancel a pending idle notification, autosuspend or suspend. If + * another suspend has been started earlier, either return immediately + * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC + * flags. If the RPM_ASYNC flag is set then queue a suspend request; + * otherwise run the ->runtime_suspend() callback directly. When + * ->runtime_suspend succeeded, if a deferred resume was requested while + * the callback was running then carry it out, otherwise send an idle + * notification for its parent (if the suspend succeeded and both + * ignore_children of parent->power and irq_safe of dev->power are not set). + * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO + * flag is set and the next autosuspend-delay expiration time is in the + * future, schedule another autosuspend attempt. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_suspend(struct device *dev, int rpmflags) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int (*callback)(struct device *); + struct device *parent = NULL; + int retval; + + trace_rpm_suspend_rcuidle(dev, rpmflags); + + repeat: + retval = rpm_check_suspend_allowed(dev); + if (retval < 0) + goto out; /* Conditions are wrong. */ + + /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ + if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) + retval = -EAGAIN; + if (retval) + goto out; + + /* If the autosuspend_delay time hasn't expired yet, reschedule. */ + if ((rpmflags & RPM_AUTO) + && dev->power.runtime_status != RPM_SUSPENDING) { + u64 expires = pm_runtime_autosuspend_expiration(dev); + + if (expires != 0) { + /* Pending requests need to be canceled. */ + dev->power.request = RPM_REQ_NONE; + + /* + * Optimization: If the timer is already running and is + * set to expire at or before the autosuspend delay, + * avoid the overhead of resetting it. Just let it + * expire; pm_suspend_timer_fn() will take care of the + * rest. + */ + if (!(dev->power.timer_expires && + dev->power.timer_expires <= expires)) { + /* + * We add a slack of 25% to gather wakeups + * without sacrificing the granularity. 
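 *
 * (Editor's worked example, not in the original source: the slack is
 * autosuspend_delay * NSEC_PER_MSEC / 4, so an autosuspend_delay of
 * 100 ms yields a 25 ms slack window for hrtimer_start_range_ns().)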
+ */ + u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * + (NSEC_PER_MSEC >> 2); + + dev->power.timer_expires = expires; + hrtimer_start_range_ns(&dev->power.suspend_timer, + ns_to_ktime(expires), + slack, + HRTIMER_MODE_ABS); + } + dev->power.timer_autosuspends = 1; + goto out; + } + } + + /* Other scheduled or pending requests need to be canceled. */ + pm_runtime_cancel_pending(dev); + + if (dev->power.runtime_status == RPM_SUSPENDING) { + DEFINE_WAIT(wait); + + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { + retval = -EINPROGRESS; + goto out; + } + + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + cpu_relax(); + + spin_lock(&dev->power.lock); + goto repeat; + } + + /* Wait for the other suspend running in parallel with us. */ + for (;;) { + prepare_to_wait(&dev->power.wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (dev->power.runtime_status != RPM_SUSPENDING) + break; + + spin_unlock_irq(&dev->power.lock); + + schedule(); + + spin_lock_irq(&dev->power.lock); + } + finish_wait(&dev->power.wait_queue, &wait); + goto repeat; + } + + if (dev->power.no_callbacks) + goto no_callback; /* Assume success. */ + + /* Carry out an asynchronous or a synchronous suspend. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = (rpmflags & RPM_AUTO) ? + RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + goto out; + } + + __update_runtime_status(dev, RPM_SUSPENDING); + + callback = RPM_GET_CALLBACK(dev, runtime_suspend); + + dev_pm_enable_wake_irq_check(dev, true); + retval = rpm_callback(callback, dev); + if (retval) + goto fail; + + dev_pm_enable_wake_irq_complete(dev); + + no_callback: + __update_runtime_status(dev, RPM_SUSPENDED); + pm_runtime_deactivate_timer(dev); + + if (dev->parent) { + parent = dev->parent; + atomic_add_unless(&parent->power.child_count, -1, 0); + } + wake_up_all(&dev->power.wait_queue); + + if (dev->power.deferred_resume) { + dev->power.deferred_resume = false; + rpm_resume(dev, 0); + retval = -EAGAIN; + goto out; + } + + if (dev->power.irq_safe) + goto out; + + /* Maybe the parent is now able to suspend. */ + if (parent && !parent->power.ignore_children) { + spin_unlock(&dev->power.lock); + + spin_lock(&parent->power.lock); + rpm_idle(parent, RPM_ASYNC); + spin_unlock(&parent->power.lock); + + spin_lock(&dev->power.lock); + } + /* Maybe the suppliers are now able to suspend. */ + if (dev->power.links_count > 0) { + spin_unlock_irq(&dev->power.lock); + + rpm_suspend_suppliers(dev); + + spin_lock_irq(&dev->power.lock); + } + + out: + trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); + + return retval; + + fail: + dev_pm_disable_wake_irq_check(dev, true); + __update_runtime_status(dev, RPM_ACTIVE); + dev->power.deferred_resume = false; + wake_up_all(&dev->power.wait_queue); + + if (retval == -EAGAIN || retval == -EBUSY) { + dev->power.runtime_error = 0; + + /* + * If the callback routine failed an autosuspend, and + * if the last_busy time has been updated so that there + * is a new autosuspend expiration time, automatically + * reschedule another autosuspend. + */ + if ((rpmflags & RPM_AUTO) && + pm_runtime_autosuspend_expiration(dev) != 0) + goto repeat; + } else { + pm_runtime_cancel_pending(dev); + } + goto out; +} + +/** + * rpm_resume - Carry out runtime resume of given device. + * @dev: Device to resume. + * @rpmflags: Flag bits. + * + * Check if the device's runtime PM status allows it to be resumed. 
Cancel + * any scheduled or pending requests. If another resume has been started + * earlier, either return immediately or wait for it to finish, depending on the + * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in + * parallel with this function, either tell the other process to resume after + * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC + * flag is set then queue a resume request; otherwise run the + * ->runtime_resume() callback directly. Queue an idle notification for the + * device if the resume succeeded. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_resume(struct device *dev, int rpmflags) + __releases(&dev->power.lock) __acquires(&dev->power.lock) +{ + int (*callback)(struct device *); + struct device *parent = NULL; + int retval = 0; + + trace_rpm_resume_rcuidle(dev, rpmflags); + + repeat: + if (dev->power.runtime_error) + retval = -EINVAL; + else if (dev->power.disable_depth == 1 && dev->power.is_suspended + && dev->power.runtime_status == RPM_ACTIVE) + retval = 1; + else if (dev->power.disable_depth > 0) + retval = -EACCES; + if (retval) + goto out; + + /* + * Other scheduled or pending requests need to be canceled. Small + * optimization: If an autosuspend timer is running, leave it running + * rather than cancelling it now only to restart it again in the near + * future. + */ + dev->power.request = RPM_REQ_NONE; + if (!dev->power.timer_autosuspends) + pm_runtime_deactivate_timer(dev); + + if (dev->power.runtime_status == RPM_ACTIVE) { + retval = 1; + goto out; + } + + if (dev->power.runtime_status == RPM_RESUMING + || dev->power.runtime_status == RPM_SUSPENDING) { + DEFINE_WAIT(wait); + + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { + if (dev->power.runtime_status == RPM_SUSPENDING) + dev->power.deferred_resume = true; + else + retval = -EINPROGRESS; + goto out; + } + + if (dev->power.irq_safe) { + spin_unlock(&dev->power.lock); + + cpu_relax(); + + spin_lock(&dev->power.lock); + goto repeat; + } + + /* Wait for the operation carried out in parallel with us. */ + for (;;) { + prepare_to_wait(&dev->power.wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (dev->power.runtime_status != RPM_RESUMING + && dev->power.runtime_status != RPM_SUSPENDING) + break; + + spin_unlock_irq(&dev->power.lock); + + schedule(); + + spin_lock_irq(&dev->power.lock); + } + finish_wait(&dev->power.wait_queue, &wait); + goto repeat; + } + + /* + * See if we can skip waking up the parent. This is safe only if + * power.no_callbacks is set, because otherwise we don't know whether + * the resume will actually succeed. + */ + if (dev->power.no_callbacks && !parent && dev->parent) { + spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); + if (dev->parent->power.disable_depth > 0 + || dev->parent->power.ignore_children + || dev->parent->power.runtime_status == RPM_ACTIVE) { + atomic_inc(&dev->parent->power.child_count); + spin_unlock(&dev->parent->power.lock); + retval = 1; + goto no_callback; /* Assume success. */ + } + spin_unlock(&dev->parent->power.lock); + } + + /* Carry out an asynchronous or a synchronous resume. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = RPM_REQ_RESUME; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + retval = 0; + goto out; + } + + if (!parent && dev->parent) { + /* + * Increment the parent's usage counter and resume it if + * necessary. 
Not needed if dev is irq-safe; then the + * parent is permanently resumed. + */ + parent = dev->parent; + if (dev->power.irq_safe) + goto skip_parent; + spin_unlock(&dev->power.lock); + + pm_runtime_get_noresume(parent); + + spin_lock(&parent->power.lock); + /* + * Resume the parent if it has runtime PM enabled and not been + * set to ignore its children. + */ + if (!parent->power.disable_depth + && !parent->power.ignore_children) { + rpm_resume(parent, 0); + if (parent->power.runtime_status != RPM_ACTIVE) + retval = -EBUSY; + } + spin_unlock(&parent->power.lock); + + spin_lock(&dev->power.lock); + if (retval) + goto out; + goto repeat; + } + skip_parent: + + if (dev->power.no_callbacks) + goto no_callback; /* Assume success. */ + + __update_runtime_status(dev, RPM_RESUMING); + + callback = RPM_GET_CALLBACK(dev, runtime_resume); + + dev_pm_disable_wake_irq_check(dev, false); + retval = rpm_callback(callback, dev); + if (retval) { + __update_runtime_status(dev, RPM_SUSPENDED); + pm_runtime_cancel_pending(dev); + dev_pm_enable_wake_irq_check(dev, false); + } else { + no_callback: + __update_runtime_status(dev, RPM_ACTIVE); + pm_runtime_mark_last_busy(dev); + if (parent) + atomic_inc(&parent->power.child_count); + } + wake_up_all(&dev->power.wait_queue); + + if (retval >= 0) + rpm_idle(dev, RPM_ASYNC); + + out: + if (parent && !dev->power.irq_safe) { + spin_unlock_irq(&dev->power.lock); + + pm_runtime_put(parent); + + spin_lock_irq(&dev->power.lock); + } + + trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); + + return retval; +} + +/** + * pm_runtime_work - Universal runtime PM work function. + * @work: Work structure used for scheduling the execution of this function. + * + * Use @work to get the device object the work is to be done for, determine what + * is to be done and execute the appropriate runtime PM function. + */ +static void pm_runtime_work(struct work_struct *work) +{ + struct device *dev = container_of(work, struct device, power.work); + enum rpm_request req; + + spin_lock_irq(&dev->power.lock); + + if (!dev->power.request_pending) + goto out; + + req = dev->power.request; + dev->power.request = RPM_REQ_NONE; + dev->power.request_pending = false; + + switch (req) { + case RPM_REQ_NONE: + break; + case RPM_REQ_IDLE: + rpm_idle(dev, RPM_NOWAIT); + break; + case RPM_REQ_SUSPEND: + rpm_suspend(dev, RPM_NOWAIT); + break; + case RPM_REQ_AUTOSUSPEND: + rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); + break; + case RPM_REQ_RESUME: + rpm_resume(dev, RPM_NOWAIT); + break; + } + + out: + spin_unlock_irq(&dev->power.lock); +} + +/** + * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). + * @data: Device pointer passed by pm_schedule_suspend(). + * + * Check if the time is right and queue a suspend request. + */ +static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) +{ + struct device *dev = container_of(timer, struct device, power.suspend_timer); + unsigned long flags; + u64 expires; + + spin_lock_irqsave(&dev->power.lock, flags); + + expires = dev->power.timer_expires; + /* + * If 'expires' is after the current time, we've been called + * too early. + */ + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { + dev->power.timer_expires = 0; + rpm_suspend(dev, dev->power.timer_autosuspends ? + (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); + } + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return HRTIMER_NORESTART; +} + +/** + * pm_schedule_suspend - Set up a timer to submit a suspend request in future. + * @dev: Device to suspend. 
+ * @delay: Time to wait before submitting a suspend request, in milliseconds. + */ +int pm_schedule_suspend(struct device *dev, unsigned int delay) +{ + unsigned long flags; + u64 expires; + int retval; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (!delay) { + retval = rpm_suspend(dev, RPM_ASYNC); + goto out; + } + + retval = rpm_check_suspend_allowed(dev); + if (retval) + goto out; + + /* Other scheduled or pending requests need to be canceled. */ + pm_runtime_cancel_pending(dev); + + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; + dev->power.timer_expires = expires; + dev->power.timer_autosuspends = 0; + hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); + + out: + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(pm_schedule_suspend); + +/** + * __pm_runtime_idle - Entry point for runtime idle operations. + * @dev: Device to send idle notification for. + * @rpmflags: Flag bits. + * + * If the RPM_GET_PUT flag is set, decrement the device's usage count and + * return immediately if it is larger than zero. Then carry out an idle + * notification, either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. + */ +int __pm_runtime_idle(struct device *dev, int rpmflags) +{ + unsigned long flags; + int retval; + + if (rpmflags & RPM_GET_PUT) { + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); + return 0; + } + } + + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_idle(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(__pm_runtime_idle); + +/** + * __pm_runtime_suspend - Entry point for runtime put/suspend operations. + * @dev: Device to suspend. + * @rpmflags: Flag bits. + * + * If the RPM_GET_PUT flag is set, decrement the device's usage count and + * return immediately if it is larger than zero. Then carry out a suspend, + * either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. + */ +int __pm_runtime_suspend(struct device *dev, int rpmflags) +{ + unsigned long flags; + int retval; + + if (rpmflags & RPM_GET_PUT) { + if (!atomic_dec_and_test(&dev->power.usage_count)) { + trace_rpm_usage_rcuidle(dev, rpmflags); + return 0; + } + } + + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_suspend(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(__pm_runtime_suspend); + +/** + * __pm_runtime_resume - Entry point for runtime resume operations. + * @dev: Device to resume. + * @rpmflags: Flag bits. + * + * If the RPM_GET_PUT flag is set, increment the device's usage count. Then + * carry out a resume, either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set, + * or if pm_runtime_irq_safe() has been called. 
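 *
 * Editor's illustrative sketch, not part of this file: the usual pattern a
 * hypothetical driver builds on these entry points via the wrappers in
 * <linux/pm_runtime.h>:
 *
 *	pm_runtime_get_sync(dev);
 *	... access the hardware ...
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);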
+ */ +int __pm_runtime_resume(struct device *dev, int rpmflags) +{ + unsigned long flags; + int retval; + + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && + dev->power.runtime_status != RPM_ACTIVE); + + if (rpmflags & RPM_GET_PUT) + atomic_inc(&dev->power.usage_count); + + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_resume(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(__pm_runtime_resume); + +/** + * pm_runtime_get_if_active - Conditionally bump up device usage counter. + * @dev: Device to handle. + * @ign_usage_count: Whether or not to look at the current usage counter value. + * + * Return -EINVAL if runtime PM is disabled for @dev. + * + * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either + * @ign_usage_count is %true or the runtime PM usage counter of @dev is not + * zero, increment the usage counter of @dev and return 1. Otherwise, return 0 + * without changing the usage counter. + * + * If @ign_usage_count is %true, this function can be used to prevent suspending + * the device when its runtime PM status is %RPM_ACTIVE. + * + * If @ign_usage_count is %false, this function can be used to prevent + * suspending the device when both its runtime PM status is %RPM_ACTIVE and its + * runtime PM usage counter is not zero. + * + * The caller is resposible for decrementing the runtime PM usage counter of + * @dev after this function has returned a positive value for it. + */ +int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) +{ + unsigned long flags; + int retval; + + spin_lock_irqsave(&dev->power.lock, flags); + if (dev->power.disable_depth > 0) { + retval = -EINVAL; + } else if (dev->power.runtime_status != RPM_ACTIVE) { + retval = 0; + } else if (ign_usage_count) { + retval = 1; + atomic_inc(&dev->power.usage_count); + } else { + retval = atomic_inc_not_zero(&dev->power.usage_count); + } + trace_rpm_usage_rcuidle(dev, 0); + spin_unlock_irqrestore(&dev->power.lock, flags); + + return retval; +} +EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); + +/** + * __pm_runtime_set_status - Set runtime PM status of a device. + * @dev: Device to handle. + * @status: New runtime PM status of the device. + * + * If runtime PM of the device is disabled or its power.runtime_error field is + * different from zero, the status may be changed either to RPM_ACTIVE, or to + * RPM_SUSPENDED, as long as that reflects the actual state of the device. + * However, if the device has a parent and the parent is not active, and the + * parent's power.ignore_children flag is unset, the device's status cannot be + * set to RPM_ACTIVE, so -EBUSY is returned in that case. + * + * If successful, __pm_runtime_set_status() clears the power.runtime_error field + * and the device parent's counter of unsuspended children is modified to + * reflect the new status. If the new status is RPM_SUSPENDED, an idle + * notification request for the parent is submitted. + * + * If @dev has any suppliers (as reflected by device links to them), and @status + * is RPM_ACTIVE, they will be activated upfront and if the activation of one + * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead + * of the @status value) and the suppliers will be deacticated on exit. The + * error returned by the failing supplier activation will be returned in that + * case. 
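 *
 * Editor's illustrative sketch, not part of this file: a hypothetical probe()
 * that finds the hardware already powered up typically reaches this function
 * through the pm_runtime_set_active() wrapper before enabling runtime PM:
 *
 *	pm_runtime_set_active(dev);
 *	pm_runtime_enable(dev);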
+ */ +int __pm_runtime_set_status(struct device *dev, unsigned int status) +{ + struct device *parent = dev->parent; + bool notify_parent = false; + int error = 0; + + if (status != RPM_ACTIVE && status != RPM_SUSPENDED) + return -EINVAL; + + spin_lock_irq(&dev->power.lock); + + /* + * Prevent PM-runtime from being enabled for the device or return an + * error if it is enabled already and working. + */ + if (dev->power.runtime_error || dev->power.disable_depth) + dev->power.disable_depth++; + else + error = -EAGAIN; + + spin_unlock_irq(&dev->power.lock); + + if (error) + return error; + + /* + * If the new status is RPM_ACTIVE, the suppliers can be activated + * upfront regardless of the current status, because next time + * rpm_put_suppliers() runs, the rpm_active refcounts of the links + * involved will be dropped down to one anyway. + */ + if (status == RPM_ACTIVE) { + int idx = device_links_read_lock(); + + error = rpm_get_suppliers(dev); + if (error) + status = RPM_SUSPENDED; + + device_links_read_unlock(idx); + } + + spin_lock_irq(&dev->power.lock); + + if (dev->power.runtime_status == status || !parent) + goto out_set; + + if (status == RPM_SUSPENDED) { + atomic_add_unless(&parent->power.child_count, -1, 0); + notify_parent = !parent->power.ignore_children; + } else { + spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); + + /* + * It is invalid to put an active child under a parent that is + * not active, has runtime PM enabled and the + * 'power.ignore_children' flag unset. + */ + if (!parent->power.disable_depth + && !parent->power.ignore_children + && parent->power.runtime_status != RPM_ACTIVE) { + dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", + dev_name(dev), + dev_name(parent)); + error = -EBUSY; + } else if (dev->power.runtime_status == RPM_SUSPENDED) { + atomic_inc(&parent->power.child_count); + } + + spin_unlock(&parent->power.lock); + + if (error) { + status = RPM_SUSPENDED; + goto out; + } + } + + out_set: + __update_runtime_status(dev, status); + if (!error) + dev->power.runtime_error = 0; + + out: + spin_unlock_irq(&dev->power.lock); + + if (notify_parent) + pm_request_idle(parent); + + if (status == RPM_SUSPENDED) { + int idx = device_links_read_lock(); + + rpm_put_suppliers(dev); + + device_links_read_unlock(idx); + } + + pm_runtime_enable(dev); + + return error; +} +EXPORT_SYMBOL_GPL(__pm_runtime_set_status); + +/** + * __pm_runtime_barrier - Cancel pending requests and wait for completions. + * @dev: Device to handle. + * + * Flush all pending requests for the device from pm_wq and wait for all + * runtime PM operations involving the device in progress to complete. + * + * Should be called under dev->power.lock with interrupts disabled. + */ +static void __pm_runtime_barrier(struct device *dev) +{ + pm_runtime_deactivate_timer(dev); + + if (dev->power.request_pending) { + dev->power.request = RPM_REQ_NONE; + spin_unlock_irq(&dev->power.lock); + + cancel_work_sync(&dev->power.work); + + spin_lock_irq(&dev->power.lock); + dev->power.request_pending = false; + } + + if (dev->power.runtime_status == RPM_SUSPENDING + || dev->power.runtime_status == RPM_RESUMING + || dev->power.idle_notification) { + DEFINE_WAIT(wait); + + /* Suspend, wake-up or idle notification in progress. 
*/ + for (;;) { + prepare_to_wait(&dev->power.wait_queue, &wait, + TASK_UNINTERRUPTIBLE); + if (dev->power.runtime_status != RPM_SUSPENDING + && dev->power.runtime_status != RPM_RESUMING + && !dev->power.idle_notification) + break; + spin_unlock_irq(&dev->power.lock); + + schedule(); + + spin_lock_irq(&dev->power.lock); + } + finish_wait(&dev->power.wait_queue, &wait); + } +} + +/** + * pm_runtime_barrier - Flush pending requests and wait for completions. + * @dev: Device to handle. + * + * Prevent the device from being suspended by incrementing its usage counter and + * if there's a pending resume request for the device, wake the device up. + * Next, make sure that all pending requests for the device have been flushed + * from pm_wq and wait for all runtime PM operations involving the device in + * progress to complete. + * + * Return value: + * 1, if there was a resume request pending and the device had to be woken up, + * 0, otherwise + */ +int pm_runtime_barrier(struct device *dev) +{ + int retval = 0; + + pm_runtime_get_noresume(dev); + spin_lock_irq(&dev->power.lock); + + if (dev->power.request_pending + && dev->power.request == RPM_REQ_RESUME) { + rpm_resume(dev, 0); + retval = 1; + } + + __pm_runtime_barrier(dev); + + spin_unlock_irq(&dev->power.lock); + pm_runtime_put_noidle(dev); + + return retval; +} +EXPORT_SYMBOL_GPL(pm_runtime_barrier); + +/** + * __pm_runtime_disable - Disable runtime PM of a device. + * @dev: Device to handle. + * @check_resume: If set, check if there's a resume request for the device. + * + * Increment power.disable_depth for the device and if it was zero previously, + * cancel all pending runtime PM requests for the device and wait for all + * operations in progress to complete. The device can be either active or + * suspended after its runtime PM has been disabled. + * + * If @check_resume is set and there's a resume request pending when + * __pm_runtime_disable() is called and power.disable_depth is zero, the + * function will wake up the device before disabling its runtime PM. + */ +void __pm_runtime_disable(struct device *dev, bool check_resume) +{ + spin_lock_irq(&dev->power.lock); + + if (dev->power.disable_depth > 0) { + dev->power.disable_depth++; + goto out; + } + + /* + * Wake up the device if there's a resume request pending, because that + * means there probably is some I/O to process and disabling runtime PM + * shouldn't prevent the device from processing the I/O. + */ + if (check_resume && dev->power.request_pending + && dev->power.request == RPM_REQ_RESUME) { + /* + * Prevent suspends and idle notifications from being carried + * out after we have woken up the device. + */ + pm_runtime_get_noresume(dev); + + rpm_resume(dev, 0); + + pm_runtime_put_noidle(dev); + } + + /* Update time accounting before disabling PM-runtime. */ + update_pm_runtime_accounting(dev); + + if (!dev->power.disable_depth++) + __pm_runtime_barrier(dev); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(__pm_runtime_disable); + +/** + * pm_runtime_enable - Enable runtime PM of a device. + * @dev: Device to handle. 
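 *
 * Editor's illustrative sketch, not part of this file: enable/disable calls
 * nest (via power.disable_depth) and must stay balanced, typically across
 * probe() and remove() of a hypothetical driver:
 *
 *	probe:	pm_runtime_enable(dev);
 *	remove:	pm_runtime_disable(dev);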
+ */ +void pm_runtime_enable(struct device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (dev->power.disable_depth > 0) { + dev->power.disable_depth--; + + /* About to enable runtime pm, set accounting_timestamp to now */ + if (!dev->power.disable_depth) + dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); + } else { + dev_warn(dev, "Unbalanced %s!\n", __func__); + } + + WARN(!dev->power.disable_depth && + dev->power.runtime_status == RPM_SUSPENDED && + !dev->power.ignore_children && + atomic_read(&dev->power.child_count) > 0, + "Enabling runtime PM for inactive device (%s) with active children\n", + dev_name(dev)); + + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_runtime_enable); + +/** + * pm_runtime_forbid - Block runtime PM of a device. + * @dev: Device to handle. + * + * Increase the device's usage count and clear its power.runtime_auto flag, + * so that it cannot be suspended at run time until pm_runtime_allow() is called + * for it. + */ +void pm_runtime_forbid(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + if (!dev->power.runtime_auto) + goto out; + + dev->power.runtime_auto = false; + atomic_inc(&dev->power.usage_count); + rpm_resume(dev, 0); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_forbid); + +/** + * pm_runtime_allow - Unblock runtime PM of a device. + * @dev: Device to handle. + * + * Decrease the device's usage count and set its power.runtime_auto flag. + */ +void pm_runtime_allow(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + if (dev->power.runtime_auto) + goto out; + + dev->power.runtime_auto = true; + if (atomic_dec_and_test(&dev->power.usage_count)) + rpm_idle(dev, RPM_AUTO | RPM_ASYNC); + else + trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_allow); + +/** + * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device. + * @dev: Device to handle. + * + * Set the power.no_callbacks flag, which tells the PM core that this + * device is power-managed through its parent and has no runtime PM + * callbacks of its own. The runtime sysfs attributes will be removed. + */ +void pm_runtime_no_callbacks(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + dev->power.no_callbacks = 1; + spin_unlock_irq(&dev->power.lock); + if (device_is_registered(dev)) + rpm_sysfs_remove(dev); +} +EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); + +/** + * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. + * @dev: Device to handle + * + * Set the power.irq_safe flag, which tells the PM core that the + * ->runtime_suspend() and ->runtime_resume() callbacks for this device should + * always be invoked with the spinlock held and interrupts disabled. It also + * causes the parent's usage counter to be permanently incremented, preventing + * the parent from runtime suspending -- otherwise an irq-safe child might have + * to wait for a non-irq-safe parent. + */ +void pm_runtime_irq_safe(struct device *dev) +{ + if (dev->parent) + pm_runtime_get_sync(dev->parent); + spin_lock_irq(&dev->power.lock); + dev->power.irq_safe = 1; + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); + +/** + * update_autosuspend - Handle a change to a device's autosuspend settings. + * @dev: Device to handle. + * @old_delay: The former autosuspend_delay value. + * @old_use: The former use_autosuspend value. 
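 *
 * Illustrative sketch (editorial addition, not part of this file): the
 * autosuspend machinery handled here is normally driven through
 * pm_runtime_set_autosuspend_delay() and pm_runtime_use_autosuspend() (the
 * usual wrapper around __pm_runtime_use_autosuspend(), both documented
 * below). A hypothetical "foo" driver that wants its device suspended two
 * seconds after the last I/O might do:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_set_autosuspend_delay(dev, 2000);
 *		pm_runtime_use_autosuspend(dev);
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *
 *	static int foo_do_io(struct device *dev)
 *	{
 *		int error = pm_runtime_resume_and_get(dev);
 *
 *		if (error)
 *			return error;
 *
 *		... perform the I/O ...
 *
 *		pm_runtime_mark_last_busy(dev);
 *		pm_runtime_put_autosuspend(dev);
 *		return 0;
 *	}
 *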
+ * + * Prevent runtime suspend if the new delay is negative and use_autosuspend is + * set; otherwise allow it. Send an idle notification if suspends are allowed. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static void update_autosuspend(struct device *dev, int old_delay, int old_use) +{ + int delay = dev->power.autosuspend_delay; + + /* Should runtime suspend be prevented now? */ + if (dev->power.use_autosuspend && delay < 0) { + + /* If it used to be allowed then prevent it. */ + if (!old_use || old_delay >= 0) { + atomic_inc(&dev->power.usage_count); + rpm_resume(dev, 0); + } else { + trace_rpm_usage_rcuidle(dev, 0); + } + } + + /* Runtime suspend should be allowed now. */ + else { + + /* If it used to be prevented then allow it. */ + if (old_use && old_delay < 0) + atomic_dec(&dev->power.usage_count); + + /* Maybe we can autosuspend now. */ + rpm_idle(dev, RPM_AUTO); + } +} + +/** + * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. + * @dev: Device to handle. + * @delay: Value of the new delay in milliseconds. + * + * Set the device's power.autosuspend_delay value. If it changes to negative + * and the power.use_autosuspend flag is set, prevent runtime suspends. If it + * changes the other way, allow runtime suspends. + */ +void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) +{ + int old_delay, old_use; + + spin_lock_irq(&dev->power.lock); + old_delay = dev->power.autosuspend_delay; + old_use = dev->power.use_autosuspend; + dev->power.autosuspend_delay = delay; + update_autosuspend(dev, old_delay, old_use); + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); + +/** + * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. + * @dev: Device to handle. + * @use: New value for use_autosuspend. + * + * Set the device's power.use_autosuspend flag, and allow or prevent runtime + * suspends as needed. + */ +void __pm_runtime_use_autosuspend(struct device *dev, bool use) +{ + int old_delay, old_use; + + spin_lock_irq(&dev->power.lock); + old_delay = dev->power.autosuspend_delay; + old_use = dev->power.use_autosuspend; + dev->power.use_autosuspend = use; + update_autosuspend(dev, old_delay, old_use); + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); + +/** + * pm_runtime_init - Initialize runtime PM fields in given device object. + * @dev: Device object to initialize. + */ +void pm_runtime_init(struct device *dev) +{ + dev->power.runtime_status = RPM_SUSPENDED; + dev->power.idle_notification = false; + + dev->power.disable_depth = 1; + atomic_set(&dev->power.usage_count, 0); + + dev->power.runtime_error = 0; + + atomic_set(&dev->power.child_count, 0); + pm_suspend_ignore_children(dev, false); + dev->power.runtime_auto = true; + + dev->power.request_pending = false; + dev->power.request = RPM_REQ_NONE; + dev->power.deferred_resume = false; + dev->power.needs_force_resume = 0; + INIT_WORK(&dev->power.work, pm_runtime_work); + + dev->power.timer_expires = 0; + hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + dev->power.suspend_timer.function = pm_suspend_timer_fn; + + init_waitqueue_head(&dev->power.wait_queue); +} + +/** + * pm_runtime_reinit - Re-initialize runtime PM fields in given device object. + * @dev: Device object to re-initialize. 
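 *
 * Illustrative sketch (editorial addition, not part of this file, referring
 * back to pm_runtime_forbid() and pm_runtime_allow() above): a driver that
 * wants its device to stay active until user space explicitly opts in can
 * forbid runtime PM at probe time; writing "auto" to the device's
 * power/control attribute (see control_store() in sysfs.c below) then calls
 * pm_runtime_allow() and lets runtime suspend happen again. Hypothetical
 * "foo" driver:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		pm_runtime_forbid(dev);
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		return 0;
 *	}
 *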
+ */ +void pm_runtime_reinit(struct device *dev) +{ + if (!pm_runtime_enabled(dev)) { + if (dev->power.runtime_status == RPM_ACTIVE) + pm_runtime_set_suspended(dev); + if (dev->power.irq_safe) { + spin_lock_irq(&dev->power.lock); + dev->power.irq_safe = 0; + spin_unlock_irq(&dev->power.lock); + if (dev->parent) + pm_runtime_put(dev->parent); + } + } +} + +/** + * pm_runtime_remove - Prepare for removing a device from device hierarchy. + * @dev: Device object being removed from device hierarchy. + */ +void pm_runtime_remove(struct device *dev) +{ + __pm_runtime_disable(dev, false); + pm_runtime_reinit(dev); +} + +/** + * pm_runtime_get_suppliers - Resume and reference-count supplier devices. + * @dev: Consumer device. + */ +void pm_runtime_get_suppliers(struct device *dev) +{ + struct device_link *link; + int idx; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) + if (link->flags & DL_FLAG_PM_RUNTIME) { + link->supplier_preactivated = true; + pm_runtime_get_sync(link->supplier); + refcount_inc(&link->rpm_active); + } + + device_links_read_unlock(idx); +} + +/** + * pm_runtime_put_suppliers - Drop references to supplier devices. + * @dev: Consumer device. + */ +void pm_runtime_put_suppliers(struct device *dev) +{ + struct device_link *link; + unsigned long flags; + bool put; + int idx; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, + device_links_read_lock_held()) + if (link->supplier_preactivated) { + link->supplier_preactivated = false; + spin_lock_irqsave(&dev->power.lock, flags); + put = pm_runtime_status_suspended(dev) && + refcount_dec_not_one(&link->rpm_active); + spin_unlock_irqrestore(&dev->power.lock, flags); + if (put) + pm_runtime_put(link->supplier); + } + + device_links_read_unlock(idx); +} + +void pm_runtime_new_link(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + dev->power.links_count++; + spin_unlock_irq(&dev->power.lock); +} + +static void pm_runtime_drop_link_count(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + WARN_ON(dev->power.links_count == 0); + dev->power.links_count--; + spin_unlock_irq(&dev->power.lock); +} + +/** + * pm_runtime_drop_link - Prepare for device link removal. + * @link: Device link going away. + * + * Drop the link count of the consumer end of @link and decrement the supplier + * device's runtime PM usage counter as many times as needed to drop all of the + * PM runtime reference to it from the consumer. + */ +void pm_runtime_drop_link(struct device_link *link) +{ + if (!(link->flags & DL_FLAG_PM_RUNTIME)) + return; + + pm_runtime_drop_link_count(link->consumer); + pm_runtime_release_supplier(link); + pm_request_idle(link->supplier); +} + +static bool pm_runtime_need_not_resume(struct device *dev) +{ + return atomic_read(&dev->power.usage_count) <= 1 && + (atomic_read(&dev->power.child_count) == 0 || + dev->power.ignore_children); +} + +/** + * pm_runtime_force_suspend - Force a device into suspend state if needed. + * @dev: Device to suspend. + * + * Disable runtime PM so we safely can check the device's runtime PM status and + * if it is active, invoke its ->runtime_suspend callback to suspend it and + * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's + * usage and children counters don't indicate that the device was in use before + * the system-wide transition under way, decrement its parent's children counter + * (if there is a parent). 
Keep runtime PM disabled to preserve the state + * unless we encounter errors. + * + * Typically this function may be invoked from a system suspend callback to make + * sure the device is put into low power state and it should only be used during + * system-wide PM transitions to sleep states. It assumes that the analogous + * pm_runtime_force_resume() will be used to resume the device. + */ +int pm_runtime_force_suspend(struct device *dev) +{ + int (*callback)(struct device *); + int ret; + + pm_runtime_disable(dev); + if (pm_runtime_status_suspended(dev)) + return 0; + + callback = RPM_GET_CALLBACK(dev, runtime_suspend); + + ret = callback ? callback(dev) : 0; + if (ret) + goto err; + + /* + * If the device can stay in suspend after the system-wide transition + * to the working state that will follow, drop the children counter of + * its parent, but set its status to RPM_SUSPENDED anyway in case this + * function will be called again for it in the meantime. + */ + if (pm_runtime_need_not_resume(dev)) { + pm_runtime_set_suspended(dev); + } else { + __update_runtime_status(dev, RPM_SUSPENDED); + dev->power.needs_force_resume = 1; + } + + return 0; + +err: + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); + +/** + * pm_runtime_force_resume - Force a device into resume state if needed. + * @dev: Device to resume. + * + * Prior invoking this function we expect the user to have brought the device + * into low power state by a call to pm_runtime_force_suspend(). Here we reverse + * those actions and bring the device into full power, if it is expected to be + * used on system resume. In the other case, we defer the resume to be managed + * via runtime PM. + * + * Typically this function may be invoked from a system resume callback. + */ +int pm_runtime_force_resume(struct device *dev) +{ + int (*callback)(struct device *); + int ret = 0; + + if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) + goto out; + + /* + * The value of the parent's children counter is correct already, so + * just update the status of the device. + */ + __update_runtime_status(dev, RPM_ACTIVE); + + callback = RPM_GET_CALLBACK(dev, runtime_resume); + + ret = callback ? callback(dev) : 0; + if (ret) { + pm_runtime_set_suspended(dev); + goto out; + } + + pm_runtime_mark_last_busy(dev); +out: + dev->power.needs_force_resume = 0; + pm_runtime_enable(dev); + return ret; +} +EXPORT_SYMBOL_GPL(pm_runtime_force_resume); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c new file mode 100644 index 000000000..a1474fb67 --- /dev/null +++ b/drivers/base/power/sysfs.c @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: GPL-2.0 +/* sysfs entries for device PM */ +#include <linux/device.h> +#include <linux/kobject.h> +#include <linux/string.h> +#include <linux/export.h> +#include <linux/pm_qos.h> +#include <linux/pm_runtime.h> +#include <linux/pm_wakeup.h> +#include <linux/atomic.h> +#include <linux/jiffies.h> +#include "power.h" + +/* + * control - Report/change current runtime PM setting of the device + * + * Runtime power management of a device can be blocked with the help of + * this attribute. 
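 *
 * Illustrative sketch (editorial addition, not part of this file, referring
 * back to pm_runtime_force_suspend() and pm_runtime_force_resume() in
 * runtime.c above): runtime-PM-centric drivers frequently reuse that pair as
 * their system sleep callbacks, so the same ->runtime_suspend() and
 * ->runtime_resume() code covers both cases. Hypothetical "foo" driver,
 * assuming <linux/pm_runtime.h>:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *	};
 *
 * Back to the power/control attribute itself:
 *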
All devices have one of the following two values for + * the power/control file: + * + * + "auto\n" to allow the device to be power managed at run time; + * + "on\n" to prevent the device from being power managed at run time; + * + * The default for all devices is "auto", which means that devices may be + * subject to automatic power management, depending on their drivers. + * Changing this attribute to "on" prevents the driver from power managing + * the device at run time. Doing that while the device is suspended causes + * it to be woken up. + * + * wakeup - Report/change current wakeup option for device + * + * Some devices support "wakeup" events, which are hardware signals + * used to activate devices from suspended or low power states. Such + * devices have one of three values for the sysfs power/wakeup file: + * + * + "enabled\n" to issue the events; + * + "disabled\n" not to do so; or + * + "\n" for temporary or permanent inability to issue wakeup. + * + * (For example, unconfigured USB devices can't issue wakeups.) + * + * Familiar examples of devices that can issue wakeup events include + * keyboards and mice (both PS2 and USB styles), power buttons, modems, + * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events + * will wake the entire system from a suspend state; others may just + * wake up the device (if the system as a whole is already active). + * Some wakeup events use normal IRQ lines; other use special out + * of band signaling. + * + * It is the responsibility of device drivers to enable (or disable) + * wakeup signaling as part of changing device power states, respecting + * the policy choices provided through the driver model. + * + * Devices may not be able to generate wakeup events from all power + * states. Also, the events may be ignored in some configurations; + * for example, they might need help from other devices that aren't + * active, or which may have wakeup disabled. Some drivers rely on + * wakeup events internally (unless they are disabled), keeping + * their hardware in low power modes whenever they're unused. This + * saves runtime power, without requiring system-wide sleep states. + * + * async - Report/change current async suspend setting for the device + * + * Asynchronous suspend and resume of the device during system-wide power + * state transitions can be enabled by writing "enabled" to this file. + * Analogously, if "disabled" is written to this file, the device will be + * suspended and resumed synchronously. + * + * All devices have one of the following two values for power/async: + * + * + "enabled\n" to permit the asynchronous suspend/resume of the device; + * + "disabled\n" to forbid it; + * + * NOTE: It generally is unsafe to permit the asynchronous suspend/resume + * of a device unless it is certain that all of the PM dependencies of the + * device are known to the PM core. However, for some devices this + * attribute is set to "enabled" by bus type code or device drivers and in + * that cases it should be safe to leave the default value. + * + * autosuspend_delay_ms - Report/change a device's autosuspend_delay value + * + * Some drivers don't want to carry out a runtime suspend as soon as a + * device becomes idle; they want it always to remain idle for some period + * of time before suspending it. This period is the autosuspend_delay + * value (expressed in milliseconds) and it can be controlled by the user. + * If the value is negative then the device will never be runtime + * suspended. 
+ * + * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay + * value are used only if the driver calls pm_runtime_use_autosuspend(). + * + * wakeup_count - Report the number of wakeup events related to the device + */ + +const char power_group_name[] = "power"; +EXPORT_SYMBOL_GPL(power_group_name); + +static const char ctrl_auto[] = "auto"; +static const char ctrl_on[] = "on"; + +static ssize_t control_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", + dev->power.runtime_auto ? ctrl_auto : ctrl_on); +} + +static ssize_t control_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + device_lock(dev); + if (sysfs_streq(buf, ctrl_auto)) + pm_runtime_allow(dev); + else if (sysfs_streq(buf, ctrl_on)) + pm_runtime_forbid(dev); + else + n = -EINVAL; + device_unlock(dev); + return n; +} + +static DEVICE_ATTR_RW(control); + +static ssize_t runtime_active_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 tmp = pm_runtime_active_time(dev); + + do_div(tmp, NSEC_PER_MSEC); + + return sysfs_emit(buf, "%llu\n", tmp); +} + +static DEVICE_ATTR_RO(runtime_active_time); + +static ssize_t runtime_suspended_time_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + u64 tmp = pm_runtime_suspended_time(dev); + + do_div(tmp, NSEC_PER_MSEC); + + return sysfs_emit(buf, "%llu\n", tmp); +} + +static DEVICE_ATTR_RO(runtime_suspended_time); + +static ssize_t runtime_status_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *output; + + if (dev->power.runtime_error) { + output = "error"; + } else if (dev->power.disable_depth) { + output = "unsupported"; + } else { + switch (dev->power.runtime_status) { + case RPM_SUSPENDED: + output = "suspended"; + break; + case RPM_SUSPENDING: + output = "suspending"; + break; + case RPM_RESUMING: + output = "resuming"; + break; + case RPM_ACTIVE: + output = "active"; + break; + default: + return -EIO; + } + } + return sysfs_emit(buf, "%s\n", output); +} + +static DEVICE_ATTR_RO(runtime_status); + +static ssize_t autosuspend_delay_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + if (!dev->power.use_autosuspend) + return -EIO; + + return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay); +} + +static ssize_t autosuspend_delay_ms_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t n) +{ + long delay; + + if (!dev->power.use_autosuspend) + return -EIO; + + if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay) + return -EINVAL; + + device_lock(dev); + pm_runtime_set_autosuspend_delay(dev, delay); + device_unlock(dev); + return n; +} + +static DEVICE_ATTR_RW(autosuspend_delay_ms); + +static ssize_t pm_qos_resume_latency_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_requested_resume_latency(dev); + + if (value == 0) + return sysfs_emit(buf, "n/a\n"); + if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + value = 0; + + return sysfs_emit(buf, "%d\n", value); +} + +static ssize_t pm_qos_resume_latency_us_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (!kstrtos32(buf, 0, &value)) { + /* + * Prevent users from writing negative or "no constraint" values + * directly. 
+ */ + if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + return -EINVAL; + + if (value == 0) + value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; + } else if (sysfs_streq(buf, "n/a")) { + value = 0; + } else { + return -EINVAL; + } + + ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, + value); + return ret < 0 ? ret : n; +} + +static DEVICE_ATTR_RW(pm_qos_resume_latency_us); + +static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s32 value = dev_pm_qos_get_user_latency_tolerance(dev); + + if (value < 0) + return sysfs_emit(buf, "%s\n", "auto"); + if (value == PM_QOS_LATENCY_ANY) + return sysfs_emit(buf, "%s\n", "any"); + + return sysfs_emit(buf, "%d\n", value); +} + +static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + s32 value; + int ret; + + if (kstrtos32(buf, 0, &value) == 0) { + /* Users can't write negative values directly */ + if (value < 0) + return -EINVAL; + } else { + if (sysfs_streq(buf, "auto")) + value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; + else if (sysfs_streq(buf, "any")) + value = PM_QOS_LATENCY_ANY; + else + return -EINVAL; + } + ret = dev_pm_qos_update_user_latency_tolerance(dev, value); + return ret < 0 ? ret : n; +} + +static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); + +static ssize_t pm_qos_no_power_off_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev) + & PM_QOS_FLAG_NO_POWER_OFF)); +} + +static ssize_t pm_qos_no_power_off_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) +{ + int ret; + + if (kstrtoint(buf, 0, &ret)) + return -EINVAL; + + if (ret != 0 && ret != 1) + return -EINVAL; + + ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret); + return ret < 0 ? ret : n; +} + +static DEVICE_ATTR_RW(pm_qos_no_power_off); + +#ifdef CONFIG_PM_SLEEP +static const char _enabled[] = "enabled"; +static const char _disabled[] = "disabled"; + +static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", device_can_wakeup(dev) + ? (device_may_wakeup(dev) ? 
_enabled : _disabled) + : ""); +} + +static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + if (!device_can_wakeup(dev)) + return -EINVAL; + + if (sysfs_streq(buf, _enabled)) + device_set_wakeup_enable(dev, 1); + else if (sysfs_streq(buf, _disabled)) + device_set_wakeup_enable(dev, 0); + else + return -EINVAL; + return n; +} + +static DEVICE_ATTR_RW(wakeup); + +static ssize_t wakeup_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->wakeup_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); +} + +static DEVICE_ATTR_RO(wakeup_count); + +static ssize_t wakeup_active_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long count; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->active_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); +} + +static DEVICE_ATTR_RO(wakeup_active_count); + +static ssize_t wakeup_abort_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long count; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->wakeup_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); +} + +static DEVICE_ATTR_RO(wakeup_abort_count); + +static ssize_t wakeup_expire_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long count; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->expire_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lu\n", count); +} + +static DEVICE_ATTR_RO(wakeup_expire_count); + +static ssize_t wakeup_active_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int active; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + active = dev->power.wakeup->active; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%u\n", active); +} + +static DEVICE_ATTR_RO(wakeup_active); + +static ssize_t wakeup_total_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s64 msec; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->total_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); +} + +static DEVICE_ATTR_RO(wakeup_total_time_ms); + +static ssize_t wakeup_max_time_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->max_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); 
+ return sysfs_emit(buf, "%lld\n", msec); +} + +static DEVICE_ATTR_RO(wakeup_max_time_ms); + +static ssize_t wakeup_last_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s64 msec; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->last_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); +} + +static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, + kgid_t kgid) +{ + if (dev->power.wakeup && dev->power.wakeup->dev) + return device_change_owner(dev->power.wakeup->dev, kuid, kgid); + return 0; +} + +static DEVICE_ATTR_RO(wakeup_last_time_ms); + +#ifdef CONFIG_PM_AUTOSLEEP +static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + s64 msec; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + + if (!enabled) + return sysfs_emit(buf, "\n"); + return sysfs_emit(buf, "%lld\n", msec); +} + +static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); +#endif /* CONFIG_PM_AUTOSLEEP */ +#else /* CONFIG_PM_SLEEP */ +static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid, + kgid_t kgid) +{ + return 0; +} +#endif + +#ifdef CONFIG_PM_ADVANCED_DEBUG +static ssize_t runtime_usage_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count)); +} +static DEVICE_ATTR_RO(runtime_usage); + +static ssize_t runtime_active_kids_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", dev->power.ignore_children ? + 0 : atomic_read(&dev->power.child_count)); +} +static DEVICE_ATTR_RO(runtime_active_kids); + +static ssize_t runtime_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + const char *output; + + if (dev->power.disable_depth && !dev->power.runtime_auto) + output = "disabled & forbidden"; + else if (dev->power.disable_depth) + output = "disabled"; + else if (!dev->power.runtime_auto) + output = "forbidden"; + else + output = "enabled"; + + return sysfs_emit(buf, "%s\n", output); +} +static DEVICE_ATTR_RO(runtime_enabled); + +#ifdef CONFIG_PM_SLEEP +static ssize_t async_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", + device_async_suspend_enabled(dev) ? 
+ _enabled : _disabled); +} + +static ssize_t async_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + if (sysfs_streq(buf, _enabled)) + device_enable_async_suspend(dev); + else if (sysfs_streq(buf, _disabled)) + device_disable_async_suspend(dev); + else + return -EINVAL; + return n; +} + +static DEVICE_ATTR_RW(async); + +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM_ADVANCED_DEBUG */ + +static struct attribute *power_attrs[] = { +#ifdef CONFIG_PM_ADVANCED_DEBUG +#ifdef CONFIG_PM_SLEEP + &dev_attr_async.attr, +#endif + &dev_attr_runtime_status.attr, + &dev_attr_runtime_usage.attr, + &dev_attr_runtime_active_kids.attr, + &dev_attr_runtime_enabled.attr, +#endif /* CONFIG_PM_ADVANCED_DEBUG */ + NULL, +}; +static const struct attribute_group pm_attr_group = { + .name = power_group_name, + .attrs = power_attrs, +}; + +static struct attribute *wakeup_attrs[] = { +#ifdef CONFIG_PM_SLEEP + &dev_attr_wakeup.attr, + &dev_attr_wakeup_count.attr, + &dev_attr_wakeup_active_count.attr, + &dev_attr_wakeup_abort_count.attr, + &dev_attr_wakeup_expire_count.attr, + &dev_attr_wakeup_active.attr, + &dev_attr_wakeup_total_time_ms.attr, + &dev_attr_wakeup_max_time_ms.attr, + &dev_attr_wakeup_last_time_ms.attr, +#ifdef CONFIG_PM_AUTOSLEEP + &dev_attr_wakeup_prevent_sleep_time_ms.attr, +#endif +#endif + NULL, +}; +static const struct attribute_group pm_wakeup_attr_group = { + .name = power_group_name, + .attrs = wakeup_attrs, +}; + +static struct attribute *runtime_attrs[] = { +#ifndef CONFIG_PM_ADVANCED_DEBUG + &dev_attr_runtime_status.attr, +#endif + &dev_attr_control.attr, + &dev_attr_runtime_suspended_time.attr, + &dev_attr_runtime_active_time.attr, + &dev_attr_autosuspend_delay_ms.attr, + NULL, +}; +static const struct attribute_group pm_runtime_attr_group = { + .name = power_group_name, + .attrs = runtime_attrs, +}; + +static struct attribute *pm_qos_resume_latency_attrs[] = { + &dev_attr_pm_qos_resume_latency_us.attr, + NULL, +}; +static const struct attribute_group pm_qos_resume_latency_attr_group = { + .name = power_group_name, + .attrs = pm_qos_resume_latency_attrs, +}; + +static struct attribute *pm_qos_latency_tolerance_attrs[] = { + &dev_attr_pm_qos_latency_tolerance_us.attr, + NULL, +}; +static const struct attribute_group pm_qos_latency_tolerance_attr_group = { + .name = power_group_name, + .attrs = pm_qos_latency_tolerance_attrs, +}; + +static struct attribute *pm_qos_flags_attrs[] = { + &dev_attr_pm_qos_no_power_off.attr, + NULL, +}; +static const struct attribute_group pm_qos_flags_attr_group = { + .name = power_group_name, + .attrs = pm_qos_flags_attrs, +}; + +int dpm_sysfs_add(struct device *dev) +{ + int rc; + + /* No need to create PM sysfs if explicitly disabled. 
*/ + if (device_pm_not_required(dev)) + return 0; + + rc = sysfs_create_group(&dev->kobj, &pm_attr_group); + if (rc) + return rc; + + if (!pm_runtime_has_no_callbacks(dev)) { + rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); + if (rc) + goto err_out; + } + if (device_can_wakeup(dev)) { + rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); + if (rc) + goto err_runtime; + } + if (dev->power.set_latency_tolerance) { + rc = sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); + if (rc) + goto err_wakeup; + } + rc = pm_wakeup_source_sysfs_add(dev); + if (rc) + goto err_latency; + return 0; + + err_latency: + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); + err_wakeup: + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + err_runtime: + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); + err_out: + sysfs_remove_group(&dev->kobj, &pm_attr_group); + return rc; +} + +int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid) +{ + int rc; + + if (device_pm_not_required(dev)) + return 0; + + rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid); + if (rc) + return rc; + + if (!pm_runtime_has_no_callbacks(dev)) { + rc = sysfs_group_change_owner( + &dev->kobj, &pm_runtime_attr_group, kuid, kgid); + if (rc) + return rc; + } + + if (device_can_wakeup(dev)) { + rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group, + kuid, kgid); + if (rc) + return rc; + + rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid); + if (rc) + return rc; + } + + if (dev->power.set_latency_tolerance) { + rc = sysfs_group_change_owner( + &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid, + kgid); + if (rc) + return rc; + } + return 0; +} + +int wakeup_sysfs_add(struct device *dev) +{ + int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); + + if (!ret) + kobject_uevent(&dev->kobj, KOBJ_CHANGE); + + return ret; +} + +void wakeup_sysfs_remove(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + kobject_uevent(&dev->kobj, KOBJ_CHANGE); +} + +int pm_qos_sysfs_add_resume_latency(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); +} + +void pm_qos_sysfs_remove_resume_latency(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group); +} + +int pm_qos_sysfs_add_flags(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group); +} + +void pm_qos_sysfs_remove_flags(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); +} + +int pm_qos_sysfs_add_latency_tolerance(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); +} + +void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); +} + +void rpm_sysfs_remove(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); +} + +void dpm_sysfs_remove(struct device *dev) +{ + if (device_pm_not_required(dev)) + return; + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); + dev_pm_qos_constraints_destroy(dev); + rpm_sysfs_remove(dev); + sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); + sysfs_remove_group(&dev->kobj, &pm_attr_group); +} diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c new file mode 100644 index 000000000..72b7a9233 --- /dev/null +++ b/drivers/base/power/trace.c @@ -0,0 +1,307 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/trace.c + * + * Copyright (C) 2006 Linus Torvalds + * + * Trace facility for suspend/resume problems, when none of the + * devices may be working. + */ +#define pr_fmt(fmt) "PM: " fmt + +#include <linux/pm-trace.h> +#include <linux/export.h> +#include <linux/rtc.h> +#include <linux/suspend.h> +#include <linux/init.h> + +#include <linux/mc146818rtc.h> + +#include "power.h" + +/* + * Horrid, horrid, horrid. + * + * It turns out that the _only_ piece of hardware that actually + * keeps its value across a hard boot (and, more importantly, the + * POST init sequence) is literally the realtime clock. + * + * Never mind that an RTC chip has 114 bytes (and often a whole + * other bank of an additional 128 bytes) of nice SRAM that is + * _designed_ to keep data - the POST will clear it. So we literally + * can just use the few bytes of actual time data, which means that + * we're really limited. + * + * It means, for example, that we can't use the seconds at all + * (since the time between the hang and the boot might be more + * than a minute), and we'd better not depend on the low bits of + * the minutes either. + * + * There are the wday fields etc, but I wouldn't guarantee those + * are dependable either. And if the date isn't valid, either the + * hw or POST will do strange things. + * + * So we're left with: + * - year: 0-99 + * - month: 0-11 + * - day-of-month: 1-28 + * - hour: 0-23 + * - min: (0-30)*2 + * + * Giving us a total range of 0-16128000 (0xf61800), ie less + * than 24 bits of actual data we can save across reboots. + * + * And if your box can't boot in less than three minutes, + * you're screwed. + * + * Now, almost 24 bits of data is pitifully small, so we need + * to be pretty dense if we want to use it for anything nice. + * What we do is that instead of saving off nice readable info, + * we save off _hashes_ of information that we can hopefully + * regenerate after the reboot. + * + * In particular, this means that we might be unlucky, and hit + * a case where we have a hash collision, and we end up not + * being able to tell for certain exactly which case happened. + * But that's hopefully unlikely. + * + * What we do is to take the bits we can fit, and split them + * into three parts (16*997*1009 = 16095568), and use the values + * for: + * - 0-15: user-settable + * - 0-996: file + line number + * - 0-1008: device + */ +#define USERHASH (16) +#define FILEHASH (997) +#define DEVHASH (1009) + +#define DEVSEED (7919) + +bool pm_trace_rtc_abused __read_mostly; +EXPORT_SYMBOL_GPL(pm_trace_rtc_abused); + +static unsigned int dev_hash_value; + +static int set_magic_time(unsigned int user, unsigned int file, unsigned int device) +{ + unsigned int n = user + USERHASH*(file + FILEHASH*device); + + // June 7th, 2006 + static struct rtc_time time = { + .tm_sec = 0, + .tm_min = 0, + .tm_hour = 0, + .tm_mday = 7, + .tm_mon = 5, // June - counting from zero + .tm_year = 106, + .tm_wday = 3, + .tm_yday = 160, + .tm_isdst = 1 + }; + + time.tm_year = (n % 100); + n /= 100; + time.tm_mon = (n % 12); + n /= 12; + time.tm_mday = (n % 28) + 1; + n /= 28; + time.tm_hour = (n % 24); + n /= 24; + time.tm_min = (n % 20) * 3; + n /= 20; + mc146818_set_time(&time); + pm_trace_rtc_abused = true; + return n ? 
-1 : 0; +} + +static unsigned int read_magic_time(void) +{ + struct rtc_time time; + unsigned int val; + + if (mc146818_get_time(&time) < 0) { + pr_err("Unable to read current time from RTC\n"); + return 0; + } + + pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time); + val = time.tm_year; /* 100 years */ + if (val > 100) + val -= 100; + val += time.tm_mon * 100; /* 12 months */ + val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */ + val += time.tm_hour * 100 * 12 * 28; /* 24 hours */ + val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */ + return val; +} + +/* + * This is just the sdbm hash function with a user-supplied + * seed and final size parameter. + */ +static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod) +{ + unsigned char c; + while ((c = *data++) != 0) { + seed = (seed << 16) + (seed << 6) - seed + c; + } + return seed % mod; +} + +void set_trace_device(struct device *dev) +{ + dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH); +} +EXPORT_SYMBOL(set_trace_device); + +/* + * We could just take the "tracedata" index into the .tracedata + * section instead. Generating a hash of the data gives us a + * chance to work across kernel versions, and perhaps more + * importantly it also gives us valid/invalid check (ie we will + * likely not give totally bogus reports - if the hash matches, + * it's not any guarantee, but it's a high _likelihood_ that + * the match is valid). + */ +void generate_pm_trace(const void *tracedata, unsigned int user) +{ + unsigned short lineno = *(unsigned short *)tracedata; + const char *file = *(const char **)(tracedata + 2); + unsigned int user_hash_value, file_hash_value; + + if (!x86_platform.legacy.rtc) + return; + + user_hash_value = user % USERHASH; + file_hash_value = hash_string(lineno, file, FILEHASH); + set_magic_time(user_hash_value, file_hash_value, dev_hash_value); +} +EXPORT_SYMBOL(generate_pm_trace); + +extern char __tracedata_start[], __tracedata_end[]; +static int show_file_hash(unsigned int value) +{ + int match; + char *tracedata; + + match = 0; + for (tracedata = __tracedata_start ; tracedata < __tracedata_end ; + tracedata += 2 + sizeof(unsigned long)) { + unsigned short lineno = *(unsigned short *)tracedata; + const char *file = *(const char **)(tracedata + 2); + unsigned int hash = hash_string(lineno, file, FILEHASH); + if (hash != value) + continue; + pr_info(" hash matches %s:%u\n", file, lineno); + match++; + } + return match; +} + +static int show_dev_hash(unsigned int value) +{ + int match = 0; + struct list_head *entry; + + device_pm_lock(); + entry = dpm_list.prev; + while (entry != &dpm_list) { + struct device * dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); + if (hash == value) { + dev_info(dev, "hash matches\n"); + match++; + } + entry = entry->prev; + } + device_pm_unlock(); + return match; +} + +static unsigned int hash_value_early_read; + +int show_trace_dev_match(char *buf, size_t size) +{ + unsigned int value = hash_value_early_read / (USERHASH * FILEHASH); + int ret = 0; + struct list_head *entry; + + /* + * It's possible that multiple devices will match the hash and we can't + * tell which is the culprit, so it's best to output them all. 
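 *
 * Illustrative sketch (editorial addition): the device hash compared here is
 * the top component of the mixed-radix encoding produced by set_magic_time()
 * above, i.e.
 *
 *	n = user + USERHASH * (file + FILEHASH * dev);
 *
 * which late_resume_init() below reverses with successive modulo/divide
 * steps:
 *
 *	user = n % USERHASH;	n /= USERHASH;
 *	file = n % FILEHASH;	n /= FILEHASH;
 *	dev  = n;		(always below DEVHASH)
 *
 * That is why the whole range, 16 * 997 * 1009 = 16095568 values, has to
 * fit into the handful of RTC fields that survive a reboot.
 *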
+ */ + device_pm_lock(); + entry = dpm_list.prev; + while (size && entry != &dpm_list) { + struct device *dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev_name(dev), + DEVHASH); + if (hash == value) { + int len = snprintf(buf, size, "%s\n", + dev_driver_string(dev)); + if (len > size) + len = size; + buf += len; + ret += len; + size -= len; + } + entry = entry->prev; + } + device_pm_unlock(); + return ret; +} + +static int +pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused) +{ + switch (mode) { + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + if (pm_trace_rtc_abused) { + pm_trace_rtc_abused = false; + pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n"); + } + break; + default: + break; + } + return 0; +} + +static struct notifier_block pm_trace_nb = { + .notifier_call = pm_trace_notify, +}; + +static int __init early_resume_init(void) +{ + if (!x86_platform.legacy.rtc) + return 0; + + hash_value_early_read = read_magic_time(); + register_pm_notifier(&pm_trace_nb); + return 0; +} + +static int __init late_resume_init(void) +{ + unsigned int val = hash_value_early_read; + unsigned int user, file, dev; + + if (!x86_platform.legacy.rtc) + return 0; + + user = val % USERHASH; + val = val / USERHASH; + file = val % FILEHASH; + val = val / FILEHASH; + dev = val /* % DEVHASH */; + + pr_info(" Magic number: %d:%d:%d\n", user, file, dev); + show_file_hash(file); + show_dev_hash(dev); + return 0; +} + +core_initcall(early_resume_init); +late_initcall(late_resume_init); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c new file mode 100644 index 000000000..aea690c64 --- /dev/null +++ b/drivers/base/power/wakeirq.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Device wakeirq helper functions */ +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> + +#include "power.h" + +/** + * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ + * @dev: Device entry + * @irq: Device wake-up capable interrupt + * @wirq: Wake irq specific data + * + * Internal function to attach either a device IO interrupt or a + * dedicated wake-up interrupt as a wake IRQ. + */ +static int dev_pm_attach_wake_irq(struct device *dev, int irq, + struct wake_irq *wirq) +{ + unsigned long flags; + + if (!dev || !wirq) + return -EINVAL; + + spin_lock_irqsave(&dev->power.lock, flags); + if (dev_WARN_ONCE(dev, dev->power.wakeirq, + "wake irq already initialized\n")) { + spin_unlock_irqrestore(&dev->power.lock, flags); + return -EEXIST; + } + + dev->power.wakeirq = wirq; + device_wakeup_attach_irq(dev, wirq); + + spin_unlock_irqrestore(&dev->power.lock, flags); + return 0; +} + +/** + * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ + * @dev: Device entry + * @irq: Device IO interrupt + * + * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets + * automatically configured for wake-up from suspend based + * on the device specific sysfs wakeup entry. Typically called + * during driver probe after calling device_init_wakeup(). 
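 *
 * Illustrative sketch (editorial addition, not part of this file): a
 * hypothetical "foo" platform driver wiring its IO interrupt up as a wake
 * IRQ, assuming <linux/pm_wakeirq.h>, <linux/pm_wakeup.h> and
 * <linux/platform_device.h>:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *		int error;
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *
 *		error = dev_pm_set_wake_irq(&pdev->dev, irq);
 *		if (error)
 *			device_init_wakeup(&pdev->dev, false);
 *
 *		return error;
 *	}
 *
 * dev_pm_clear_wake_irq(), below, undoes this in the remove path.
 *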
+ */ +int dev_pm_set_wake_irq(struct device *dev, int irq) +{ + struct wake_irq *wirq; + int err; + + if (irq < 0) + return -EINVAL; + + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); + if (!wirq) + return -ENOMEM; + + wirq->dev = dev; + wirq->irq = irq; + + err = dev_pm_attach_wake_irq(dev, irq, wirq); + if (err) + kfree(wirq); + + return err; +} +EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq); + +/** + * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ + * @dev: Device entry + * + * Detach a device wake IRQ and free resources. + * + * Note that it's OK for drivers to call this without calling + * dev_pm_set_wake_irq() as all the driver instances may not have + * a wake IRQ configured. This avoid adding wake IRQ specific + * checks into the drivers. + */ +void dev_pm_clear_wake_irq(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + unsigned long flags; + + if (!wirq) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + device_wakeup_detach_irq(dev); + dev->power.wakeirq = NULL; + spin_unlock_irqrestore(&dev->power.lock, flags); + + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) { + free_irq(wirq->irq, wirq); + wirq->status &= ~WAKE_IRQ_DEDICATED_MASK; + } + kfree(wirq->name); + kfree(wirq); +} +EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq); + +/** + * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts + * @irq: Device specific dedicated wake-up interrupt + * @_wirq: Wake IRQ data + * + * Some devices have a separate wake-up interrupt in addition to the + * device IO interrupt. The wake-up interrupt signals that a device + * should be woken up from it's idle state. This handler uses device + * specific pm_runtime functions to wake the device, and then it's + * up to the device to do whatever it needs to. Note that as the + * device may need to restore context and start up regulators, we + * use a threaded IRQ. + * + * Also note that we are not resending the lost device interrupts. + * We assume that the wake-up interrupt just needs to wake-up the + * device, and then device's pm_runtime_resume() can deal with the + * situation. + */ +static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) +{ + struct wake_irq *wirq = _wirq; + int res; + + /* Maybe abort suspend? */ + if (irqd_is_wakeup_set(irq_get_irq_data(irq))) { + pm_wakeup_event(wirq->dev, 0); + + return IRQ_HANDLED; + } + + /* We don't want RPM_ASYNC or RPM_NOWAIT here */ + res = pm_runtime_resume(wirq->dev); + if (res < 0) + dev_warn(wirq->dev, + "wake IRQ with no resume: %i\n", res); + + return IRQ_HANDLED; +} + +static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag) +{ + struct wake_irq *wirq; + int err; + + if (irq < 0) + return -EINVAL; + + wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); + if (!wirq) + return -ENOMEM; + + wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev)); + if (!wirq->name) { + err = -ENOMEM; + goto err_free; + } + + wirq->dev = dev; + wirq->irq = irq; + irq_set_status_flags(irq, IRQ_NOAUTOEN); + + /* Prevent deferred spurious wakeirqs with disable_irq_nosync() */ + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + + /* + * Consumer device may need to power up and restore state + * so we use a threaded irq. 
+ */ + err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq, + IRQF_ONESHOT, wirq->name, wirq); + if (err) + goto err_free_name; + + err = dev_pm_attach_wake_irq(dev, irq, wirq); + if (err) + goto err_free_irq; + + wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag; + + return err; + +err_free_irq: + free_irq(irq, wirq); +err_free_name: + kfree(wirq->name); +err_free: + kfree(wirq); + + return err; +} + + +/** + * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has + * a dedicated wake-up interrupt in addition to the device IO + * interrupt. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, 0); +} +EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); + +/** + * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt + * with reverse enable ordering + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has a dedicated + * wake-up interrupt in addition to the device IO interrupt. It sets + * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend() + * to enable dedicated wake-up interrupt after running the runtime suspend + * callback for @dev. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE); +} +EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse); + +/** + * dev_pm_enable_wake_irq - Enable device wake-up interrupt + * @dev: Device + * + * Optionally called from the bus code or the device driver for + * runtime_resume() to override the PM runtime core managed wake-up + * interrupt handling to enable the wake-up interrupt. + * + * Note that for runtime_suspend()) the wake-up interrupts + * should be unconditionally enabled unlike for suspend() + * that is conditional. + */ +void dev_pm_enable_wake_irq(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) + enable_irq(wirq->irq); +} +EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq); + +/** + * dev_pm_disable_wake_irq - Disable device wake-up interrupt + * @dev: Device + * + * Optionally called from the bus code or the device driver for + * runtime_suspend() to override the PM runtime core managed wake-up + * interrupt handling to disable the wake-up interrupt. 
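 *
 * Illustrative sketch (editorial addition, not part of this file): a device
 * with a separate wake-up line in addition to its IO interrupt can request
 * it with dev_pm_set_dedicated_wake_irq() above; the wake IRQ is then
 * normally managed by the runtime PM core around rpm_suspend()/rpm_resume()
 * (see dev_pm_enable_wake_irq_check() below), and dev_pm_enable_wake_irq()
 * and dev_pm_disable_wake_irq() are only needed when the bus code or driver
 * takes over that handling. Hypothetical names, assuming a second interrupt
 * resource called "wakeup":
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int wakeirq = platform_get_irq_byname(pdev, "wakeup");
 *
 *		if (wakeirq < 0)
 *			return wakeirq;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		return dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
 *	}
 *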
+ */ +void dev_pm_disable_wake_irq(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)) + disable_irq_nosync(wirq->irq); +} +EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq); + +/** + * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt + * @dev: Device + * @can_change_status: Can change wake-up interrupt status + * + * Enables wakeirq conditionally. We need to enable wake-up interrupt + * lazily on the first rpm_suspend(). This is needed as the consumer device + * starts in RPM_SUSPENDED state, and the the first pm_runtime_get() would + * otherwise try to disable already disabled wakeirq. The wake-up interrupt + * starts disabled with IRQ_NOAUTOEN set. + * + * Should be only called from rpm_suspend() and rpm_resume() path. + * Caller must hold &dev->power.lock to change wirq->status + */ +void dev_pm_enable_wake_irq_check(struct device *dev, + bool can_change_status) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) + return; + + if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) { + goto enable; + } else if (can_change_status) { + wirq->status |= WAKE_IRQ_DEDICATED_MANAGED; + goto enable; + } + + return; + +enable: + if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) { + enable_irq(wirq->irq); + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; + } +} + +/** + * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt + * @dev: Device + * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE + * + * Disables wake-up interrupt conditionally based on status. + * Should be only called from rpm_suspend() and rpm_resume() path. + */ +void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) + return; + + if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) { + wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED; + disable_irq_nosync(wirq->irq); + } +} + +/** + * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before + * @dev: Device using the wake IRQ + * + * Enable wake IRQ conditionally based on status, mainly used if want to + * enable wake IRQ after running ->runtime_suspend() which depends on + * WAKE_IRQ_DEDICATED_REVERSE. + * + * Should be only called from rpm_suspend() path. + */ +void dev_pm_enable_wake_irq_complete(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && + wirq->status & WAKE_IRQ_DEDICATED_REVERSE) + enable_irq(wirq->irq); +} + +/** + * dev_pm_arm_wake_irq - Arm device wake-up + * @wirq: Device wake-up interrupt + * + * Sets up the wake-up event conditionally based on the + * device_may_wake(). + */ +void dev_pm_arm_wake_irq(struct wake_irq *wirq) +{ + if (!wirq) + return; + + if (device_may_wakeup(wirq->dev)) { + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) + enable_irq(wirq->irq); + + enable_irq_wake(wirq->irq); + } +} + +/** + * dev_pm_disarm_wake_irq - Disarm device wake-up + * @wirq: Device wake-up interrupt + * + * Clears up the wake-up event conditionally based on the + * device_may_wake(). 
+ */ +void dev_pm_disarm_wake_irq(struct wake_irq *wirq) +{ + if (!wirq) + return; + + if (device_may_wakeup(wirq->dev)) { + disable_irq_wake(wirq->irq); + + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) + disable_irq_nosync(wirq->irq); + } +} diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c new file mode 100644 index 000000000..8997e0227 --- /dev/null +++ b/drivers/base/power/wakeup.c @@ -0,0 +1,1214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drivers/base/power/wakeup.c - System wakeup events framework + * + * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + */ +#define pr_fmt(fmt) "PM: " fmt + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/sched/signal.h> +#include <linux/capability.h> +#include <linux/export.h> +#include <linux/suspend.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include <linux/pm_wakeirq.h> +#include <trace/events/power.h> + +#include "power.h" + +#ifndef CONFIG_SUSPEND +suspend_state_t pm_suspend_target_state; +#define pm_suspend_target_state (PM_SUSPEND_ON) +#endif + +#define list_for_each_entry_rcu_locked(pos, head, member) \ + list_for_each_entry_rcu(pos, head, member, \ + srcu_read_lock_held(&wakeup_srcu)) +/* + * If set, the suspend/hibernate code will abort transitions to a sleep state + * if wakeup events are registered during or immediately before the transition. + */ +bool events_check_enabled __read_mostly; + +/* First wakeup IRQ seen by the kernel in the last cycle. */ +static unsigned int wakeup_irq[2] __read_mostly; +static DEFINE_RAW_SPINLOCK(wakeup_irq_lock); + +/* If greater than 0 and the system is suspending, terminate the suspend. */ +static atomic_t pm_abort_suspend __read_mostly; + +/* + * Combined counters of registered wakeup events and wakeup events in progress. + * They need to be modified together atomically, so it's better to use one + * atomic variable to hold them both. + */ +static atomic_t combined_event_count = ATOMIC_INIT(0); + +#define IN_PROGRESS_BITS (sizeof(int) * 4) +#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) + +static void split_counters(unsigned int *cnt, unsigned int *inpr) +{ + unsigned int comb = atomic_read(&combined_event_count); + + *cnt = (comb >> IN_PROGRESS_BITS); + *inpr = comb & MAX_IN_PROGRESS; +} + +/* A preserved old value of the events counter. */ +static unsigned int saved_count; + +static DEFINE_RAW_SPINLOCK(events_lock); + +static void pm_wakeup_timer_fn(struct timer_list *t); + +static LIST_HEAD(wakeup_sources); + +static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); + +DEFINE_STATIC_SRCU(wakeup_srcu); + +static struct wakeup_source deleted_ws = { + .name = "deleted", + .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), +}; + +static DEFINE_IDA(wakeup_ida); + +/** + * wakeup_source_create - Create a struct wakeup_source object. + * @name: Name of the new wakeup source. 
+ */ +struct wakeup_source *wakeup_source_create(const char *name) +{ + struct wakeup_source *ws; + const char *ws_name; + int id; + + ws = kzalloc(sizeof(*ws), GFP_KERNEL); + if (!ws) + goto err_ws; + + ws_name = kstrdup_const(name, GFP_KERNEL); + if (!ws_name) + goto err_name; + ws->name = ws_name; + + id = ida_alloc(&wakeup_ida, GFP_KERNEL); + if (id < 0) + goto err_id; + ws->id = id; + + return ws; + +err_id: + kfree_const(ws->name); +err_name: + kfree(ws); +err_ws: + return NULL; +} +EXPORT_SYMBOL_GPL(wakeup_source_create); + +/* + * Record wakeup_source statistics being deleted into a dummy wakeup_source. + */ +static void wakeup_source_record(struct wakeup_source *ws) +{ + unsigned long flags; + + spin_lock_irqsave(&deleted_ws.lock, flags); + + if (ws->event_count) { + deleted_ws.total_time = + ktime_add(deleted_ws.total_time, ws->total_time); + deleted_ws.prevent_sleep_time = + ktime_add(deleted_ws.prevent_sleep_time, + ws->prevent_sleep_time); + deleted_ws.max_time = + ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ? + deleted_ws.max_time : ws->max_time; + deleted_ws.event_count += ws->event_count; + deleted_ws.active_count += ws->active_count; + deleted_ws.relax_count += ws->relax_count; + deleted_ws.expire_count += ws->expire_count; + deleted_ws.wakeup_count += ws->wakeup_count; + } + + spin_unlock_irqrestore(&deleted_ws.lock, flags); +} + +static void wakeup_source_free(struct wakeup_source *ws) +{ + ida_free(&wakeup_ida, ws->id); + kfree_const(ws->name); + kfree(ws); +} + +/** + * wakeup_source_destroy - Destroy a struct wakeup_source object. + * @ws: Wakeup source to destroy. + * + * Use only for wakeup source objects created with wakeup_source_create(). + */ +void wakeup_source_destroy(struct wakeup_source *ws) +{ + if (!ws) + return; + + __pm_relax(ws); + wakeup_source_record(ws); + wakeup_source_free(ws); +} +EXPORT_SYMBOL_GPL(wakeup_source_destroy); + +/** + * wakeup_source_add - Add given object to the list of wakeup sources. + * @ws: Wakeup source object to add to the list. + */ +void wakeup_source_add(struct wakeup_source *ws) +{ + unsigned long flags; + + if (WARN_ON(!ws)) + return; + + spin_lock_init(&ws->lock); + timer_setup(&ws->timer, pm_wakeup_timer_fn, 0); + ws->active = false; + + raw_spin_lock_irqsave(&events_lock, flags); + list_add_rcu(&ws->entry, &wakeup_sources); + raw_spin_unlock_irqrestore(&events_lock, flags); +} +EXPORT_SYMBOL_GPL(wakeup_source_add); + +/** + * wakeup_source_remove - Remove given object from the wakeup sources list. + * @ws: Wakeup source object to remove from the list. + */ +void wakeup_source_remove(struct wakeup_source *ws) +{ + unsigned long flags; + + if (WARN_ON(!ws)) + return; + + raw_spin_lock_irqsave(&events_lock, flags); + list_del_rcu(&ws->entry); + raw_spin_unlock_irqrestore(&events_lock, flags); + synchronize_srcu(&wakeup_srcu); + + del_timer_sync(&ws->timer); + /* + * Clear timer.function to make wakeup_source_not_registered() treat + * this wakeup source as not registered. + */ + ws->timer.function = NULL; +} +EXPORT_SYMBOL_GPL(wakeup_source_remove); + +/** + * wakeup_source_register - Create wakeup source and add it to the list. + * @dev: Device this wakeup source is associated with (or NULL if virtual). + * @name: Name of the wakeup source to register. 
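+ *
+ * A minimal sketch for a virtual (device-less) wakeup source; the source
+ * name is an illustrative assumption:
+ *
+ *	struct wakeup_source *ws;
+ *
+ *	ws = wakeup_source_register(NULL, "example-virtual");
+ *	if (!ws)
+ *		return -ENOMEM;
+ *
+ *	__pm_stay_awake(ws);
+ *	... process the event that must block suspend ...
+ *	__pm_relax(ws);
+ *
+ *	wakeup_source_unregister(ws);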
+ */ +struct wakeup_source *wakeup_source_register(struct device *dev, + const char *name) +{ + struct wakeup_source *ws; + int ret; + + ws = wakeup_source_create(name); + if (ws) { + if (!dev || device_is_registered(dev)) { + ret = wakeup_source_sysfs_add(dev, ws); + if (ret) { + wakeup_source_free(ws); + return NULL; + } + } + wakeup_source_add(ws); + } + return ws; +} +EXPORT_SYMBOL_GPL(wakeup_source_register); + +/** + * wakeup_source_unregister - Remove wakeup source from the list and remove it. + * @ws: Wakeup source object to unregister. + */ +void wakeup_source_unregister(struct wakeup_source *ws) +{ + if (ws) { + wakeup_source_remove(ws); + if (ws->dev) + wakeup_source_sysfs_remove(ws); + + wakeup_source_destroy(ws); + } +} +EXPORT_SYMBOL_GPL(wakeup_source_unregister); + +/** + * wakeup_sources_read_lock - Lock wakeup source list for read. + * + * Returns an index of srcu lock for struct wakeup_srcu. + * This index must be passed to the matching wakeup_sources_read_unlock(). + */ +int wakeup_sources_read_lock(void) +{ + return srcu_read_lock(&wakeup_srcu); +} +EXPORT_SYMBOL_GPL(wakeup_sources_read_lock); + +/** + * wakeup_sources_read_unlock - Unlock wakeup source list. + * @idx: return value from corresponding wakeup_sources_read_lock() + */ +void wakeup_sources_read_unlock(int idx) +{ + srcu_read_unlock(&wakeup_srcu, idx); +} +EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock); + +/** + * wakeup_sources_walk_start - Begin a walk on wakeup source list + * + * Returns first object of the list of wakeup sources. + * + * Note that to be safe, wakeup sources list needs to be locked by calling + * wakeup_source_read_lock() for this. + */ +struct wakeup_source *wakeup_sources_walk_start(void) +{ + struct list_head *ws_head = &wakeup_sources; + + return list_entry_rcu(ws_head->next, struct wakeup_source, entry); +} +EXPORT_SYMBOL_GPL(wakeup_sources_walk_start); + +/** + * wakeup_sources_walk_next - Get next wakeup source from the list + * @ws: Previous wakeup source object + * + * Note that to be safe, wakeup sources list needs to be locked by calling + * wakeup_source_read_lock() for this. + */ +struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws) +{ + struct list_head *ws_head = &wakeup_sources; + + return list_next_or_null_rcu(ws_head, &ws->entry, + struct wakeup_source, entry); +} +EXPORT_SYMBOL_GPL(wakeup_sources_walk_next); + +/** + * device_wakeup_attach - Attach a wakeup source object to a device object. + * @dev: Device to handle. + * @ws: Wakeup source object to attach to @dev. + * + * This causes @dev to be treated as a wakeup device. + */ +static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) +{ + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + spin_unlock_irq(&dev->power.lock); + return -EEXIST; + } + dev->power.wakeup = ws; + if (dev->power.wakeirq) + device_wakeup_attach_irq(dev, dev->power.wakeirq); + spin_unlock_irq(&dev->power.lock); + return 0; +} + +/** + * device_wakeup_enable - Enable given device to be a wakeup source. + * @dev: Device to handle. + * + * Create a wakeup source object, register it and attach it to @dev. 
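+ *
+ * A minimal sketch, assuming a hypothetical probe path that first marks the
+ * device wakeup-capable and then enables it as a wakeup source:
+ *
+ *	int ret;
+ *
+ *	device_set_wakeup_capable(dev, true);
+ *	ret = device_wakeup_enable(dev);
+ *	if (ret)
+ *		dev_warn(dev, "cannot enable wakeup: %d\n", ret);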
+ */ +int device_wakeup_enable(struct device *dev) +{ + struct wakeup_source *ws; + int ret; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + if (pm_suspend_target_state != PM_SUSPEND_ON) + dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); + + ws = wakeup_source_register(dev, dev_name(dev)); + if (!ws) + return -ENOMEM; + + ret = device_wakeup_attach(dev, ws); + if (ret) + wakeup_source_unregister(ws); + + return ret; +} +EXPORT_SYMBOL_GPL(device_wakeup_enable); + +/** + * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source + * @dev: Device to handle + * @wakeirq: Device specific wakeirq entry + * + * Attach a device wakeirq to the wakeup source so the device + * wake IRQ can be configured automatically for suspend and + * resume. + * + * Call under the device's power.lock lock. + */ +void device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq) +{ + struct wakeup_source *ws; + + ws = dev->power.wakeup; + if (!ws) + return; + + if (ws->wakeirq) + dev_err(dev, "Leftover wakeup IRQ found, overriding\n"); + + ws->wakeirq = wakeirq; +} + +/** + * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source + * @dev: Device to handle + * + * Removes a device wakeirq from the wakeup source. + * + * Call under the device's power.lock lock. + */ +void device_wakeup_detach_irq(struct device *dev) +{ + struct wakeup_source *ws; + + ws = dev->power.wakeup; + if (ws) + ws->wakeirq = NULL; +} + +/** + * device_wakeup_arm_wake_irqs(void) + * + * Itereates over the list of device wakeirqs to arm them. + */ +void device_wakeup_arm_wake_irqs(void) +{ + struct wakeup_source *ws; + int srcuidx; + + srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) + dev_pm_arm_wake_irq(ws->wakeirq); + srcu_read_unlock(&wakeup_srcu, srcuidx); +} + +/** + * device_wakeup_disarm_wake_irqs(void) + * + * Itereates over the list of device wakeirqs to disarm them. + */ +void device_wakeup_disarm_wake_irqs(void) +{ + struct wakeup_source *ws; + int srcuidx; + + srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) + dev_pm_disarm_wake_irq(ws->wakeirq); + srcu_read_unlock(&wakeup_srcu, srcuidx); +} + +/** + * device_wakeup_detach - Detach a device's wakeup source object from it. + * @dev: Device to detach the wakeup source object from. + * + * After it returns, @dev will not be treated as a wakeup device any more. + */ +static struct wakeup_source *device_wakeup_detach(struct device *dev) +{ + struct wakeup_source *ws; + + spin_lock_irq(&dev->power.lock); + ws = dev->power.wakeup; + dev->power.wakeup = NULL; + spin_unlock_irq(&dev->power.lock); + return ws; +} + +/** + * device_wakeup_disable - Do not regard a device as a wakeup source any more. + * @dev: Device to handle. + * + * Detach the @dev's wakeup source object from it, unregister this wakeup source + * object and destroy it. + */ +int device_wakeup_disable(struct device *dev) +{ + struct wakeup_source *ws; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = device_wakeup_detach(dev); + wakeup_source_unregister(ws); + return 0; +} +EXPORT_SYMBOL_GPL(device_wakeup_disable); + +/** + * device_set_wakeup_capable - Set/reset device wakeup capability flag. + * @dev: Device to handle. + * @capable: Whether or not @dev is capable of waking up the system from sleep. + * + * If @capable is set, set the @dev's power.can_wakeup flag and add its + * wakeup-related attributes to sysfs. 
Otherwise, unset the @dev's + * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. + * + * This function may sleep and it can't be called from any context where + * sleeping is not allowed. + */ +void device_set_wakeup_capable(struct device *dev, bool capable) +{ + if (!!dev->power.can_wakeup == !!capable) + return; + + dev->power.can_wakeup = capable; + if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { + if (capable) { + int ret = wakeup_sysfs_add(dev); + + if (ret) + dev_info(dev, "Wakeup sysfs attributes not added\n"); + } else { + wakeup_sysfs_remove(dev); + } + } +} +EXPORT_SYMBOL_GPL(device_set_wakeup_capable); + +/** + * device_init_wakeup - Device wakeup initialization. + * @dev: Device to handle. + * @enable: Whether or not to enable @dev as a wakeup device. + * + * By default, most devices should leave wakeup disabled. The exceptions are + * devices that everyone expects to be wakeup sources: keyboards, power buttons, + * possibly network interfaces, etc. Also, devices that don't generate their + * own wakeup requests but merely forward requests from one bus to another + * (like PCI bridges) should have wakeup enabled by default. + */ +int device_init_wakeup(struct device *dev, bool enable) +{ + int ret = 0; + + if (!dev) + return -EINVAL; + + if (enable) { + device_set_wakeup_capable(dev, true); + ret = device_wakeup_enable(dev); + } else { + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(device_init_wakeup); + +/** + * device_set_wakeup_enable - Enable or disable a device to wake up the system. + * @dev: Device to handle. + */ +int device_set_wakeup_enable(struct device *dev, bool enable) +{ + return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); +} +EXPORT_SYMBOL_GPL(device_set_wakeup_enable); + +/** + * wakeup_source_not_registered - validate the given wakeup source. + * @ws: Wakeup source to be validated. + */ +static bool wakeup_source_not_registered(struct wakeup_source *ws) +{ + /* + * Use timer struct to check if the given source is initialized + * by wakeup_source_add. + */ + return ws->timer.function != pm_wakeup_timer_fn; +} + +/* + * The functions below use the observation that each wakeup event starts a + * period in which the system should not be suspended. The moment this period + * will end depends on how the wakeup event is going to be processed after being + * detected and all of the possible cases can be divided into two distinct + * groups. + * + * First, a wakeup event may be detected by the same functional unit that will + * carry out the entire processing of it and possibly will pass it to user space + * for further processing. In that case the functional unit that has detected + * the event may later "close" the "no suspend" period associated with it + * directly as soon as it has been dealt with. The pair of pm_stay_awake() and + * pm_relax(), balanced with each other, is supposed to be used in such + * situations. + * + * Second, a wakeup event may be detected by one functional unit and processed + * by another one. In that case the unit that has detected it cannot really + * "close" the "no suspend" period associated with it, unless it knows in + * advance what's going to happen to the event during processing. This + * knowledge, however, may not be available to it, so it can simply specify time + * to wait before the system can be suspended and pass it as the second + * argument of pm_wakeup_event(). 
+ * + * It is valid to call pm_relax() after pm_wakeup_event(), in which case the + * "no suspend" period will be ended either by the pm_relax(), or by the timer + * function executed when the timer expires, whichever comes first. + */ + +/** + * wakup_source_activate - Mark given wakeup source as active. + * @ws: Wakeup source to handle. + * + * Update the @ws' statistics and, if @ws has just been activated, notify the PM + * core of the event by incrementing the counter of of wakeup events being + * processed. + */ +static void wakeup_source_activate(struct wakeup_source *ws) +{ + unsigned int cec; + + if (WARN_ONCE(wakeup_source_not_registered(ws), + "unregistered wakeup source\n")) + return; + + ws->active = true; + ws->active_count++; + ws->last_time = ktime_get(); + if (ws->autosleep_enabled) + ws->start_prevent_time = ws->last_time; + + /* Increment the counter of events in progress. */ + cec = atomic_inc_return(&combined_event_count); + + trace_wakeup_source_activate(ws->name, cec); +} + +/** + * wakeup_source_report_event - Report wakeup event using the given source. + * @ws: Wakeup source to report the event for. + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. + */ +static void wakeup_source_report_event(struct wakeup_source *ws, bool hard) +{ + ws->event_count++; + /* This is racy, but the counter is approximate anyway. */ + if (events_check_enabled) + ws->wakeup_count++; + + if (!ws->active) + wakeup_source_activate(ws); + + if (hard) + pm_system_wakeup(); +} + +/** + * __pm_stay_awake - Notify the PM core of a wakeup event. + * @ws: Wakeup source object associated with the source of the event. + * + * It is safe to call this function from interrupt context. + */ +void __pm_stay_awake(struct wakeup_source *ws) +{ + unsigned long flags; + + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + + wakeup_source_report_event(ws, false); + del_timer(&ws->timer); + ws->timer_expires = 0; + + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(__pm_stay_awake); + +/** + * pm_stay_awake - Notify the PM core that a wakeup event is being processed. + * @dev: Device the wakeup event is related to. + * + * Notify the PM core of a wakeup event (signaled by @dev) by calling + * __pm_stay_awake for the @dev's wakeup source object. + * + * Call this function after detecting of a wakeup event if pm_relax() is going + * to be called directly after processing the event (and possibly passing it to + * user space for further processing). + */ +void pm_stay_awake(struct device *dev) +{ + unsigned long flags; + + if (!dev) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + __pm_stay_awake(dev->power.wakeup); + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_stay_awake); + +#ifdef CONFIG_PM_AUTOSLEEP +static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now) +{ + ktime_t delta = ktime_sub(now, ws->start_prevent_time); + ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta); +} +#else +static inline void update_prevent_sleep_time(struct wakeup_source *ws, + ktime_t now) {} +#endif + +/** + * wakup_source_deactivate - Mark given wakeup source as inactive. + * @ws: Wakeup source to handle. + * + * Update the @ws' statistics and notify the PM core that the wakeup source has + * become inactive by decrementing the counter of wakeup events being processed + * and incrementing the counter of registered wakeup events. 
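+ *
+ * An illustrative pairing that ends up here via __pm_relax(); the driver
+ * structure, handler and work function names are hypothetical:
+ *
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct foo *foo = data;
+ *
+ *		__pm_stay_awake(foo->ws);
+ *		schedule_work(&foo->work);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	static void foo_work(struct work_struct *work)
+ *	{
+ *		struct foo *foo = container_of(work, struct foo, work);
+ *
+ *		... handle the event ...
+ *		__pm_relax(foo->ws);
+ *	}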
+ */ +static void wakeup_source_deactivate(struct wakeup_source *ws) +{ + unsigned int cnt, inpr, cec; + ktime_t duration; + ktime_t now; + + ws->relax_count++; + /* + * __pm_relax() may be called directly or from a timer function. + * If it is called directly right after the timer function has been + * started, but before the timer function calls __pm_relax(), it is + * possible that __pm_stay_awake() will be called in the meantime and + * will set ws->active. Then, ws->active may be cleared immediately + * by the __pm_relax() called from the timer function, but in such a + * case ws->relax_count will be different from ws->active_count. + */ + if (ws->relax_count != ws->active_count) { + ws->relax_count--; + return; + } + + ws->active = false; + + now = ktime_get(); + duration = ktime_sub(now, ws->last_time); + ws->total_time = ktime_add(ws->total_time, duration); + if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) + ws->max_time = duration; + + ws->last_time = now; + del_timer(&ws->timer); + ws->timer_expires = 0; + + if (ws->autosleep_enabled) + update_prevent_sleep_time(ws, now); + + /* + * Increment the counter of registered wakeup events and decrement the + * couter of wakeup events in progress simultaneously. + */ + cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count); + trace_wakeup_source_deactivate(ws->name, cec); + + split_counters(&cnt, &inpr); + if (!inpr && waitqueue_active(&wakeup_count_wait_queue)) + wake_up(&wakeup_count_wait_queue); +} + +/** + * __pm_relax - Notify the PM core that processing of a wakeup event has ended. + * @ws: Wakeup source object associated with the source of the event. + * + * Call this function for wakeup events whose processing started with calling + * __pm_stay_awake(). + * + * It is safe to call it from interrupt context. + */ +void __pm_relax(struct wakeup_source *ws) +{ + unsigned long flags; + + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + if (ws->active) + wakeup_source_deactivate(ws); + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(__pm_relax); + +/** + * pm_relax - Notify the PM core that processing of a wakeup event has ended. + * @dev: Device that signaled the event. + * + * Execute __pm_relax() for the @dev's wakeup source object. + */ +void pm_relax(struct device *dev) +{ + unsigned long flags; + + if (!dev) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + __pm_relax(dev->power.wakeup); + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_relax); + +/** + * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. + * @data: Address of the wakeup source object associated with the event source. + * + * Call wakeup_source_deactivate() for the wakeup source whose address is stored + * in @data if it is currently active and its timer has not been canceled and + * the expiration time of the timer is not in future. + */ +static void pm_wakeup_timer_fn(struct timer_list *t) +{ + struct wakeup_source *ws = from_timer(ws, t, timer); + unsigned long flags; + + spin_lock_irqsave(&ws->lock, flags); + + if (ws->active && ws->timer_expires + && time_after_eq(jiffies, ws->timer_expires)) { + wakeup_source_deactivate(ws); + ws->expire_count++; + } + + spin_unlock_irqrestore(&ws->lock, flags); +} + +/** + * pm_wakeup_ws_event - Notify the PM core of a wakeup event. + * @ws: Wakeup source object associated with the event source. + * @msec: Anticipated event processing time (in milliseconds). 
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. + * + * Notify the PM core of a wakeup event whose source is @ws that will take + * approximately @msec milliseconds to be processed by the kernel. If @ws is + * not active, activate it. If @msec is nonzero, set up the @ws' timer to + * execute pm_wakeup_timer_fn() in future. + * + * It is safe to call this function from interrupt context. + */ +void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) +{ + unsigned long flags; + unsigned long expires; + + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + + wakeup_source_report_event(ws, hard); + + if (!msec) { + wakeup_source_deactivate(ws); + goto unlock; + } + + expires = jiffies + msecs_to_jiffies(msec); + if (!expires) + expires = 1; + + if (!ws->timer_expires || time_after(expires, ws->timer_expires)) { + mod_timer(&ws->timer, expires); + ws->timer_expires = expires; + } + + unlock: + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); + +/** + * pm_wakeup_dev_event - Notify the PM core of a wakeup event. + * @dev: Device the wakeup event is related to. + * @msec: Anticipated event processing time (in milliseconds). + * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. + * + * Call pm_wakeup_ws_event() for the @dev's wakeup source object. + */ +void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) +{ + unsigned long flags; + + if (!dev) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + pm_wakeup_ws_event(dev->power.wakeup, msec, hard); + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); + +void pm_print_active_wakeup_sources(void) +{ + struct wakeup_source *ws; + int srcuidx, active = 0; + struct wakeup_source *last_activity_ws = NULL; + + srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { + if (ws->active) { + pm_pr_dbg("active wakeup source: %s\n", ws->name); + active = 1; + } else if (!active && + (!last_activity_ws || + ktime_to_ns(ws->last_time) > + ktime_to_ns(last_activity_ws->last_time))) { + last_activity_ws = ws; + } + } + + if (!active && last_activity_ws) + pm_pr_dbg("last active wakeup source: %s\n", + last_activity_ws->name); + srcu_read_unlock(&wakeup_srcu, srcuidx); +} +EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); + +/** + * pm_wakeup_pending - Check if power transition in progress should be aborted. + * + * Compare the current number of registered wakeup events with its preserved + * value from the past and return true if new wakeup events have been registered + * since the old value was stored. Also return true if the current number of + * wakeup events being processed is different from zero. 
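+ *
+ * A simplified sketch of how a suspend path can use this check; it is not
+ * the actual suspend core code, only an illustration of the intended use:
+ *
+ *	if (pm_wakeup_pending()) {
+ *		pr_info("Wakeup event detected, aborting suspend\n");
+ *		return -EBUSY;
+ *	}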
+ */ +bool pm_wakeup_pending(void) +{ + unsigned long flags; + bool ret = false; + + raw_spin_lock_irqsave(&events_lock, flags); + if (events_check_enabled) { + unsigned int cnt, inpr; + + split_counters(&cnt, &inpr); + ret = (cnt != saved_count || inpr > 0); + events_check_enabled = !ret; + } + raw_spin_unlock_irqrestore(&events_lock, flags); + + if (ret) { + pm_pr_dbg("Wakeup pending, aborting suspend\n"); + pm_print_active_wakeup_sources(); + } + + return ret || atomic_read(&pm_abort_suspend) > 0; +} + +void pm_system_wakeup(void) +{ + atomic_inc(&pm_abort_suspend); + s2idle_wake(); +} +EXPORT_SYMBOL_GPL(pm_system_wakeup); + +void pm_system_cancel_wakeup(void) +{ + atomic_dec_if_positive(&pm_abort_suspend); +} + +void pm_wakeup_clear(unsigned int irq_number) +{ + raw_spin_lock_irq(&wakeup_irq_lock); + + if (irq_number && wakeup_irq[0] == irq_number) + wakeup_irq[0] = wakeup_irq[1]; + else + wakeup_irq[0] = 0; + + wakeup_irq[1] = 0; + + raw_spin_unlock_irq(&wakeup_irq_lock); + + if (!irq_number) + atomic_set(&pm_abort_suspend, 0); +} + +void pm_system_irq_wakeup(unsigned int irq_number) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&wakeup_irq_lock, flags); + + if (wakeup_irq[0] == 0) + wakeup_irq[0] = irq_number; + else if (wakeup_irq[1] == 0) + wakeup_irq[1] = irq_number; + else + irq_number = 0; + + raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags); + + if (irq_number) + pm_system_wakeup(); +} + +unsigned int pm_wakeup_irq(void) +{ + return wakeup_irq[0]; +} + +/** + * pm_get_wakeup_count - Read the number of registered wakeup events. + * @count: Address to store the value at. + * @block: Whether or not to block. + * + * Store the number of registered wakeup events at the address in @count. If + * @block is set, block until the current number of wakeup events being + * processed is zero. + * + * Return 'false' if the current number of wakeup events being processed is + * nonzero. Otherwise return 'true'. + */ +bool pm_get_wakeup_count(unsigned int *count, bool block) +{ + unsigned int cnt, inpr; + + if (block) { + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait(&wakeup_count_wait_queue, &wait, + TASK_INTERRUPTIBLE); + split_counters(&cnt, &inpr); + if (inpr == 0 || signal_pending(current)) + break; + pm_print_active_wakeup_sources(); + schedule(); + } + finish_wait(&wakeup_count_wait_queue, &wait); + } + + split_counters(&cnt, &inpr); + *count = cnt; + return !inpr; +} + +/** + * pm_save_wakeup_count - Save the current number of registered wakeup events. + * @count: Value to compare with the current number of registered wakeup events. + * + * If @count is equal to the current number of registered wakeup events and the + * current number of wakeup events being processed is zero, store @count as the + * old number of registered wakeup events for pm_check_wakeup_events(), enable + * wakeup events detection and return 'true'. Otherwise disable wakeup events + * detection and return 'false'. + */ +bool pm_save_wakeup_count(unsigned int count) +{ + unsigned int cnt, inpr; + unsigned long flags; + + events_check_enabled = false; + raw_spin_lock_irqsave(&events_lock, flags); + split_counters(&cnt, &inpr); + if (cnt == count && inpr == 0) { + saved_count = count; + events_check_enabled = true; + } + raw_spin_unlock_irqrestore(&events_lock, flags); + return events_check_enabled; +} + +#ifdef CONFIG_PM_AUTOSLEEP +/** + * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources. + * @enabled: Whether to set or to clear the autosleep_enabled flags. 
+ */ +void pm_wakep_autosleep_enabled(bool set) +{ + struct wakeup_source *ws; + ktime_t now = ktime_get(); + int srcuidx; + + srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { + spin_lock_irq(&ws->lock); + if (ws->autosleep_enabled != set) { + ws->autosleep_enabled = set; + if (ws->active) { + if (set) + ws->start_prevent_time = now; + else + update_prevent_sleep_time(ws, now); + } + } + spin_unlock_irq(&ws->lock); + } + srcu_read_unlock(&wakeup_srcu, srcuidx); +} +#endif /* CONFIG_PM_AUTOSLEEP */ + +/** + * print_wakeup_source_stats - Print wakeup source statistics information. + * @m: seq_file to print the statistics into. + * @ws: Wakeup source object to print the statistics for. + */ +static int print_wakeup_source_stats(struct seq_file *m, + struct wakeup_source *ws) +{ + unsigned long flags; + ktime_t total_time; + ktime_t max_time; + unsigned long active_count; + ktime_t active_time; + ktime_t prevent_sleep_time; + + spin_lock_irqsave(&ws->lock, flags); + + total_time = ws->total_time; + max_time = ws->max_time; + prevent_sleep_time = ws->prevent_sleep_time; + active_count = ws->active_count; + if (ws->active) { + ktime_t now = ktime_get(); + + active_time = ktime_sub(now, ws->last_time); + total_time = ktime_add(total_time, active_time); + if (active_time > max_time) + max_time = active_time; + + if (ws->autosleep_enabled) + prevent_sleep_time = ktime_add(prevent_sleep_time, + ktime_sub(now, ws->start_prevent_time)); + } else { + active_time = 0; + } + + seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n", + ws->name, active_count, ws->event_count, + ws->wakeup_count, ws->expire_count, + ktime_to_ms(active_time), ktime_to_ms(total_time), + ktime_to_ms(max_time), ktime_to_ms(ws->last_time), + ktime_to_ms(prevent_sleep_time)); + + spin_unlock_irqrestore(&ws->lock, flags); + + return 0; +} + +static void *wakeup_sources_stats_seq_start(struct seq_file *m, + loff_t *pos) +{ + struct wakeup_source *ws; + loff_t n = *pos; + int *srcuidx = m->private; + + if (n == 0) { + seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t" + "expire_count\tactive_since\ttotal_time\tmax_time\t" + "last_change\tprevent_suspend_time\n"); + } + + *srcuidx = srcu_read_lock(&wakeup_srcu); + list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) { + if (n-- <= 0) + return ws; + } + + return NULL; +} + +static void *wakeup_sources_stats_seq_next(struct seq_file *m, + void *v, loff_t *pos) +{ + struct wakeup_source *ws = v; + struct wakeup_source *next_ws = NULL; + + ++(*pos); + + list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) { + next_ws = ws; + break; + } + + if (!next_ws) + print_wakeup_source_stats(m, &deleted_ws); + + return next_ws; +} + +static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v) +{ + int *srcuidx = m->private; + + srcu_read_unlock(&wakeup_srcu, *srcuidx); +} + +/** + * wakeup_sources_stats_seq_show - Print wakeup sources statistics information. + * @m: seq_file to print the statistics into. 
+ * @v: wakeup_source of each iteration + */ +static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v) +{ + struct wakeup_source *ws = v; + + print_wakeup_source_stats(m, ws); + + return 0; +} + +static const struct seq_operations wakeup_sources_stats_seq_ops = { + .start = wakeup_sources_stats_seq_start, + .next = wakeup_sources_stats_seq_next, + .stop = wakeup_sources_stats_seq_stop, + .show = wakeup_sources_stats_seq_show, +}; + +static int wakeup_sources_stats_open(struct inode *inode, struct file *file) +{ + return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int)); +} + +static const struct file_operations wakeup_sources_stats_fops = { + .owner = THIS_MODULE, + .open = wakeup_sources_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +static int __init wakeup_sources_debugfs_init(void) +{ + debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL, + &wakeup_sources_stats_fops); + return 0; +} + +postcore_initcall(wakeup_sources_debugfs_init); diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c new file mode 100644 index 000000000..d638259b8 --- /dev/null +++ b/drivers/base/power/wakeup_stats.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Wakeup statistics in sysfs + * + * Copyright (c) 2019 Linux Foundation + * Copyright (c) 2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org> + * Copyright (c) 2019 Google Inc. + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/kdev_t.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/slab.h> +#include <linux/timekeeping.h> + +#include "power.h" + +static struct class *wakeup_class; + +#define wakeup_attr(_name) \ +static ssize_t _name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct wakeup_source *ws = dev_get_drvdata(dev); \ + \ + return sysfs_emit(buf, "%lu\n", ws->_name); \ +} \ +static DEVICE_ATTR_RO(_name) + +wakeup_attr(active_count); +wakeup_attr(event_count); +wakeup_attr(wakeup_count); +wakeup_attr(expire_count); + +static ssize_t active_time_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wakeup_source *ws = dev_get_drvdata(dev); + ktime_t active_time = + ws->active ? 
ktime_sub(ktime_get(), ws->last_time) : 0; + + return sysfs_emit(buf, "%lld\n", ktime_to_ms(active_time)); +} +static DEVICE_ATTR_RO(active_time_ms); + +static ssize_t total_time_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wakeup_source *ws = dev_get_drvdata(dev); + ktime_t active_time; + ktime_t total_time = ws->total_time; + + if (ws->active) { + active_time = ktime_sub(ktime_get(), ws->last_time); + total_time = ktime_add(total_time, active_time); + } + + return sysfs_emit(buf, "%lld\n", ktime_to_ms(total_time)); +} +static DEVICE_ATTR_RO(total_time_ms); + +static ssize_t max_time_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wakeup_source *ws = dev_get_drvdata(dev); + ktime_t active_time; + ktime_t max_time = ws->max_time; + + if (ws->active) { + active_time = ktime_sub(ktime_get(), ws->last_time); + if (active_time > max_time) + max_time = active_time; + } + + return sysfs_emit(buf, "%lld\n", ktime_to_ms(max_time)); +} +static DEVICE_ATTR_RO(max_time_ms); + +static ssize_t last_change_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct wakeup_source *ws = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lld\n", ktime_to_ms(ws->last_time)); +} +static DEVICE_ATTR_RO(last_change_ms); + +static ssize_t name_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct wakeup_source *ws = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%s\n", ws->name); +} +static DEVICE_ATTR_RO(name); + +static ssize_t prevent_suspend_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct wakeup_source *ws = dev_get_drvdata(dev); + ktime_t prevent_sleep_time = ws->prevent_sleep_time; + + if (ws->active && ws->autosleep_enabled) { + prevent_sleep_time = ktime_add(prevent_sleep_time, + ktime_sub(ktime_get(), ws->start_prevent_time)); + } + + return sysfs_emit(buf, "%lld\n", ktime_to_ms(prevent_sleep_time)); +} +static DEVICE_ATTR_RO(prevent_suspend_time_ms); + +static struct attribute *wakeup_source_attrs[] = { + &dev_attr_name.attr, + &dev_attr_active_count.attr, + &dev_attr_event_count.attr, + &dev_attr_wakeup_count.attr, + &dev_attr_expire_count.attr, + &dev_attr_active_time_ms.attr, + &dev_attr_total_time_ms.attr, + &dev_attr_max_time_ms.attr, + &dev_attr_last_change_ms.attr, + &dev_attr_prevent_suspend_time_ms.attr, + NULL, +}; +ATTRIBUTE_GROUPS(wakeup_source); + +static void device_create_release(struct device *dev) +{ + kfree(dev); +} + +static struct device *wakeup_source_device_create(struct device *parent, + struct wakeup_source *ws) +{ + struct device *dev = NULL; + int retval = -ENODEV; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + retval = -ENOMEM; + goto error; + } + + device_initialize(dev); + dev->devt = MKDEV(0, 0); + dev->class = wakeup_class; + dev->parent = parent; + dev->groups = wakeup_source_groups; + dev->release = device_create_release; + dev_set_drvdata(dev, ws); + device_set_pm_not_required(dev); + + retval = kobject_set_name(&dev->kobj, "wakeup%d", ws->id); + if (retval) + goto error; + + retval = device_add(dev); + if (retval) + goto error; + + return dev; + +error: + put_device(dev); + return ERR_PTR(retval); +} + +/** + * wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs. + * @parent: Device given wakeup source is associated with (or NULL if virtual). + * @ws: Wakeup source to be added in sysfs. 
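+ *
+ * For reference, the attributes are created under the "wakeup" class, for
+ * example for a source with id 3 (the id value is illustrative):
+ *
+ *	/sys/class/wakeup/wakeup3/name
+ *	/sys/class/wakeup/wakeup3/active_count
+ *	/sys/class/wakeup/wakeup3/event_count
+ *	/sys/class/wakeup/wakeup3/wakeup_count
+ *	/sys/class/wakeup/wakeup3/expire_count
+ *	/sys/class/wakeup/wakeup3/active_time_ms
+ *	/sys/class/wakeup/wakeup3/total_time_ms
+ *	/sys/class/wakeup/wakeup3/max_time_ms
+ *	/sys/class/wakeup/wakeup3/last_change_ms
+ *	/sys/class/wakeup/wakeup3/prevent_suspend_time_ms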
+ */ +int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws) +{ + struct device *dev; + + dev = wakeup_source_device_create(parent, ws); + if (IS_ERR(dev)) + return PTR_ERR(dev); + ws->dev = dev; + + return 0; +} + +/** + * pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs + * for a device if they're missing. + * @parent: Device given wakeup source is associated with + */ +int pm_wakeup_source_sysfs_add(struct device *parent) +{ + if (!parent->power.wakeup || parent->power.wakeup->dev) + return 0; + + return wakeup_source_sysfs_add(parent, parent->power.wakeup); +} + +/** + * wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs. + * @ws: Wakeup source to be removed from sysfs. + */ +void wakeup_source_sysfs_remove(struct wakeup_source *ws) +{ + device_unregister(ws->dev); +} + +static int __init wakeup_sources_sysfs_init(void) +{ + wakeup_class = class_create(THIS_MODULE, "wakeup"); + + return PTR_ERR_OR_ZERO(wakeup_class); +} +postcore_initcall(wakeup_sources_sysfs_init); diff --git a/drivers/base/property.c b/drivers/base/property.c new file mode 100644 index 000000000..cf88a5554 --- /dev/null +++ b/drivers/base/property.c @@ -0,0 +1,1261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * property.c - Unified device property interface. + * + * Copyright (C) 2014, Intel Corporation + * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/acpi.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_graph.h> +#include <linux/of_irq.h> +#include <linux/property.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> + +struct fwnode_handle *dev_fwnode(struct device *dev) +{ + return IS_ENABLED(CONFIG_OF) && dev->of_node ? + &dev->of_node->fwnode : dev->fwnode; +} +EXPORT_SYMBOL_GPL(dev_fwnode); + +/** + * device_property_present - check if a property of a device is present + * @dev: Device whose property is being checked + * @propname: Name of the property + * + * Check if property @propname is present in the device firmware description. + */ +bool device_property_present(struct device *dev, const char *propname) +{ + return fwnode_property_present(dev_fwnode(dev), propname); +} +EXPORT_SYMBOL_GPL(device_property_present); + +/** + * fwnode_property_present - check if a property of a firmware node is present + * @fwnode: Firmware node whose property to check + * @propname: Name of the property + */ +bool fwnode_property_present(const struct fwnode_handle *fwnode, + const char *propname) +{ + bool ret; + + ret = fwnode_call_bool_op(fwnode, property_present, propname); + if (ret == false && !IS_ERR_OR_NULL(fwnode) && + !IS_ERR_OR_NULL(fwnode->secondary)) + ret = fwnode_call_bool_op(fwnode->secondary, property_present, + propname); + return ret; +} +EXPORT_SYMBOL_GPL(fwnode_property_present); + +/** + * device_property_read_u8_array - return a u8 array property of a device + * @dev: Device to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Function reads an array of u8 properties with @propname from the device + * firmware description and stores them to @val if found. 
+ * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected. + * %-ENXIO if no suitable firmware interface is present. + */ +int device_property_read_u8_array(struct device *dev, const char *propname, + u8 *val, size_t nval) +{ + return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval); +} +EXPORT_SYMBOL_GPL(device_property_read_u8_array); + +/** + * device_property_read_u16_array - return a u16 array property of a device + * @dev: Device to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Function reads an array of u16 properties with @propname from the device + * firmware description and stores them to @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected. + * %-ENXIO if no suitable firmware interface is present. + */ +int device_property_read_u16_array(struct device *dev, const char *propname, + u16 *val, size_t nval) +{ + return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval); +} +EXPORT_SYMBOL_GPL(device_property_read_u16_array); + +/** + * device_property_read_u32_array - return a u32 array property of a device + * @dev: Device to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Function reads an array of u32 properties with @propname from the device + * firmware description and stores them to @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected. + * %-ENXIO if no suitable firmware interface is present. + */ +int device_property_read_u32_array(struct device *dev, const char *propname, + u32 *val, size_t nval) +{ + return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval); +} +EXPORT_SYMBOL_GPL(device_property_read_u32_array); + +/** + * device_property_read_u64_array - return a u64 array property of a device + * @dev: Device to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Function reads an array of u64 properties with @propname from the device + * firmware description and stores them to @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected. + * %-ENXIO if no suitable firmware interface is present. 
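+ *
+ * A minimal probe-time sketch, assuming a hypothetical "example,levels"
+ * property; the %NULL/0 call is used first to query the number of values:
+ *
+ *	u64 levels[8];
+ *	int n, ret;
+ *
+ *	n = device_property_read_u64_array(dev, "example,levels", NULL, 0);
+ *	if (n <= 0 || n > ARRAY_SIZE(levels))
+ *		return -EINVAL;
+ *
+ *	ret = device_property_read_u64_array(dev, "example,levels", levels, n);
+ *	if (ret)
+ *		return ret;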
+ */ +int device_property_read_u64_array(struct device *dev, const char *propname, + u64 *val, size_t nval) +{ + return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval); +} +EXPORT_SYMBOL_GPL(device_property_read_u64_array); + +/** + * device_property_read_string_array - return a string array property of device + * @dev: Device to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Function reads an array of string properties with @propname from the device + * firmware description and stores them to @val if found. + * + * Return: number of values read on success if @val is non-NULL, + * number of values available on success if @val is NULL, + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO or %-EILSEQ if the property is not an array of strings, + * %-EOVERFLOW if the size of the property is not as expected. + * %-ENXIO if no suitable firmware interface is present. + */ +int device_property_read_string_array(struct device *dev, const char *propname, + const char **val, size_t nval) +{ + return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval); +} +EXPORT_SYMBOL_GPL(device_property_read_string_array); + +/** + * device_property_read_string - return a string property of a device + * @dev: Device to get the property of + * @propname: Name of the property + * @val: The value is stored here + * + * Function reads property @propname from the device firmware description and + * stores the value into @val if found. The value is checked to be a string. + * + * Return: %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO or %-EILSEQ if the property type is not a string. + * %-ENXIO if no suitable firmware interface is present. + */ +int device_property_read_string(struct device *dev, const char *propname, + const char **val) +{ + return fwnode_property_read_string(dev_fwnode(dev), propname, val); +} +EXPORT_SYMBOL_GPL(device_property_read_string); + +/** + * device_property_match_string - find a string in an array and return index + * @dev: Device to get the property of + * @propname: Name of the property holding the array + * @string: String to look for + * + * Find a given string in a string array and if it is found return the + * index back. + * + * Return: %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of strings, + * %-ENXIO if no suitable firmware interface is present. 
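+ *
+ * A minimal sketch, assuming a hypothetical "example,mode" string-array
+ * property containing an entry "fast":
+ *
+ *	int idx;
+ *
+ *	idx = device_property_match_string(dev, "example,mode", "fast");
+ *	if (idx < 0)
+ *		return idx;
+ *	... idx is the position of "fast" within the array ...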
+ */ +int device_property_match_string(struct device *dev, const char *propname, + const char *string) +{ + return fwnode_property_match_string(dev_fwnode(dev), propname, string); +} +EXPORT_SYMBOL_GPL(device_property_match_string); + +static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + int ret; + + ret = fwnode_call_int_op(fwnode, property_read_int_array, propname, + elem_size, val, nval); + if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && + !IS_ERR_OR_NULL(fwnode->secondary)) + ret = fwnode_call_int_op( + fwnode->secondary, property_read_int_array, propname, + elem_size, val, nval); + + return ret; +} + +/** + * fwnode_property_read_u8_array - return a u8 array property of firmware node + * @fwnode: Firmware node to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Read an array of u8 properties with @propname from @fwnode and stores them to + * @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, + const char *propname, u8 *val, size_t nval) +{ + return fwnode_property_read_int_array(fwnode, propname, sizeof(u8), + val, nval); +} +EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); + +/** + * fwnode_property_read_u16_array - return a u16 array property of firmware node + * @fwnode: Firmware node to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Read an array of u16 properties with @propname from @fwnode and store them to + * @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, + const char *propname, u16 *val, size_t nval) +{ + return fwnode_property_read_int_array(fwnode, propname, sizeof(u16), + val, nval); +} +EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); + +/** + * fwnode_property_read_u32_array - return a u32 array property of firmware node + * @fwnode: Firmware node to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Read an array of u32 properties with @propname from @fwnode store them to + * @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected, + * %-ENXIO if no suitable firmware interface is present. 
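+ *
+ * A minimal sketch, assuming a hypothetical child node handle @child with an
+ * "example,delays" property of three cells:
+ *
+ *	u32 delays[3];
+ *	int ret;
+ *
+ *	ret = fwnode_property_read_u32_array(child, "example,delays",
+ *					     delays, ARRAY_SIZE(delays));
+ *	if (ret)
+ *		return ret;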
+ */ +int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, + const char *propname, u32 *val, size_t nval) +{ + return fwnode_property_read_int_array(fwnode, propname, sizeof(u32), + val, nval); +} +EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); + +/** + * fwnode_property_read_u64_array - return a u64 array property firmware node + * @fwnode: Firmware node to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Read an array of u64 properties with @propname from @fwnode and store them to + * @val if found. + * + * Return: number of values if @val was %NULL, + * %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of numbers, + * %-EOVERFLOW if the size of the property is not as expected, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, + const char *propname, u64 *val, size_t nval) +{ + return fwnode_property_read_int_array(fwnode, propname, sizeof(u64), + val, nval); +} +EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); + +/** + * fwnode_property_read_string_array - return string array property of a node + * @fwnode: Firmware node to get the property of + * @propname: Name of the property + * @val: The values are stored here or %NULL to return the number of values + * @nval: Size of the @val array + * + * Read an string list property @propname from the given firmware node and store + * them to @val if found. + * + * Return: number of values read on success if @val is non-NULL, + * number of values available on success if @val is NULL, + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO or %-EILSEQ if the property is not an array of strings, + * %-EOVERFLOW if the size of the property is not as expected, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval) +{ + int ret; + + ret = fwnode_call_int_op(fwnode, property_read_string_array, propname, + val, nval); + if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && + !IS_ERR_OR_NULL(fwnode->secondary)) + ret = fwnode_call_int_op(fwnode->secondary, + property_read_string_array, propname, + val, nval); + return ret; +} +EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); + +/** + * fwnode_property_read_string - return a string property of a firmware node + * @fwnode: Firmware node to get the property of + * @propname: Name of the property + * @val: The value is stored here + * + * Read property @propname from the given firmware node and store the value into + * @val if found. The value is checked to be a string. + * + * Return: %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO or %-EILSEQ if the property is not a string, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_read_string(const struct fwnode_handle *fwnode, + const char *propname, const char **val) +{ + int ret = fwnode_property_read_string_array(fwnode, propname, val, 1); + + return ret < 0 ? 
ret : 0; +} +EXPORT_SYMBOL_GPL(fwnode_property_read_string); + +/** + * fwnode_property_match_string - find a string in an array and return index + * @fwnode: Firmware node to get the property of + * @propname: Name of the property holding the array + * @string: String to look for + * + * Find a given string in a string array and if it is found return the + * index back. + * + * Return: %0 if the property was found (success), + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO if the property is not an array of strings, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_match_string(const struct fwnode_handle *fwnode, + const char *propname, const char *string) +{ + const char **values; + int nval, ret; + + nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0); + if (nval < 0) + return nval; + + if (nval == 0) + return -ENODATA; + + values = kcalloc(nval, sizeof(*values), GFP_KERNEL); + if (!values) + return -ENOMEM; + + ret = fwnode_property_read_string_array(fwnode, propname, values, nval); + if (ret < 0) + goto out; + + ret = match_string(values, nval, string); + if (ret < 0) + ret = -ENODATA; +out: + kfree(values); + return ret; +} +EXPORT_SYMBOL_GPL(fwnode_property_match_string); + +/** + * fwnode_property_get_reference_args() - Find a reference with arguments + * @fwnode: Firmware node where to look for the reference + * @prop: The name of the property + * @nargs_prop: The name of the property telling the number of + * arguments in the referred node. NULL if @nargs is known, + * otherwise @nargs is ignored. Only relevant on OF. + * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL. + * @index: Index of the reference, from zero onwards. + * @args: Result structure with reference and integer arguments. + * + * Obtain a reference based on a named property in an fwnode, with + * integer arguments. + * + * Caller is responsible to call fwnode_handle_put() on the returned + * args->fwnode pointer. + * + * Returns: %0 on success + * %-ENOENT when the index is out of bounds, the index has an empty + * reference or the property was not found + * %-EINVAL on parse error + */ +int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args) +{ + return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop, + nargs, index, args); +} +EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); + +/** + * fwnode_find_reference - Find named reference to a fwnode_handle + * @fwnode: Firmware node where to look for the reference + * @name: The name of the reference + * @index: Index of the reference + * + * @index can be used when the named reference holds a table of references. + * + * Returns pointer to the reference fwnode, or ERR_PTR. Caller is responsible to + * call fwnode_handle_put() on the returned fwnode pointer. + */ +struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode, + const char *name, + unsigned int index) +{ + struct fwnode_reference_args args; + int ret; + + ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index, + &args); + return ret ? ERR_PTR(ret) : args.fwnode; +} +EXPORT_SYMBOL_GPL(fwnode_find_reference); + +/** + * device_remove_properties - Remove properties from a device object. + * @dev: Device whose properties to remove. 
+ * + * The function removes properties previously associated to the device + * firmware node with device_add_properties(). Memory allocated to the + * properties will also be released. + */ +void device_remove_properties(struct device *dev) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + + if (!fwnode) + return; + + if (is_software_node(fwnode->secondary)) { + fwnode_remove_software_node(fwnode->secondary); + set_secondary_fwnode(dev, NULL); + } +} +EXPORT_SYMBOL_GPL(device_remove_properties); + +/** + * device_add_properties - Add a collection of properties to a device object. + * @dev: Device to add properties to. + * @properties: Collection of properties to add. + * + * Associate a collection of device properties represented by @properties with + * @dev. The function takes a copy of @properties. + * + * WARNING: The callers should not use this function if it is known that there + * is no real firmware node associated with @dev! In that case the callers + * should create a software node and assign it to @dev directly. + */ +int device_add_properties(struct device *dev, + const struct property_entry *properties) +{ + struct fwnode_handle *fwnode; + + fwnode = fwnode_create_software_node(properties, NULL); + if (IS_ERR(fwnode)) + return PTR_ERR(fwnode); + + set_secondary_fwnode(dev, fwnode); + return 0; +} +EXPORT_SYMBOL_GPL(device_add_properties); + +/** + * fwnode_get_name - Return the name of a node + * @fwnode: The firmware node + * + * Returns a pointer to the node name. + */ +const char *fwnode_get_name(const struct fwnode_handle *fwnode) +{ + return fwnode_call_ptr_op(fwnode, get_name); +} +EXPORT_SYMBOL_GPL(fwnode_get_name); + +/** + * fwnode_get_name_prefix - Return the prefix of node for printing purposes + * @fwnode: The firmware node + * + * Returns the prefix of a node, intended to be printed right before the node. + * The prefix works also as a separator between the nodes. + */ +const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode) +{ + return fwnode_call_ptr_op(fwnode, get_name_prefix); +} + +/** + * fwnode_get_parent - Return parent firwmare node + * @fwnode: Firmware whose parent is retrieved + * + * Return parent firmware node of the given node if possible or %NULL if no + * parent was available. + */ +struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode) +{ + return fwnode_call_ptr_op(fwnode, get_parent); +} +EXPORT_SYMBOL_GPL(fwnode_get_parent); + +/** + * fwnode_get_next_parent - Iterate to the node's parent + * @fwnode: Firmware whose parent is retrieved + * + * This is like fwnode_get_parent() except that it drops the refcount + * on the passed node, making it suitable for iterating through a + * node's parents. + * + * Returns a node pointer with refcount incremented, use + * fwnode_handle_node() on it when done. + */ +struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent = fwnode_get_parent(fwnode); + + fwnode_handle_put(fwnode); + + return parent; +} +EXPORT_SYMBOL_GPL(fwnode_get_next_parent); + +/** + * fwnode_count_parents - Return the number of parents a node has + * @fwnode: The node the parents of which are to be counted + * + * Returns the number of parents a node has. 
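+ *
+ * A minimal sketch that walks from the topmost ancestor down to @fwnode,
+ * dropping each temporary reference (illustrative only):
+ *
+ *	unsigned int depth = fwnode_count_parents(fwnode);
+ *
+ *	while (depth) {
+ *		struct fwnode_handle *p = fwnode_get_nth_parent(fwnode, depth);
+ *
+ *		pr_info("ancestor: %s\n", fwnode_get_name(p));
+ *		fwnode_handle_put(p);
+ *		depth--;
+ *	}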
+ */ +unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode) +{ + struct fwnode_handle *__fwnode; + unsigned int count; + + __fwnode = fwnode_get_parent(fwnode); + + for (count = 0; __fwnode; count++) + __fwnode = fwnode_get_next_parent(__fwnode); + + return count; +} +EXPORT_SYMBOL_GPL(fwnode_count_parents); + +/** + * fwnode_get_nth_parent - Return an nth parent of a node + * @fwnode: The node the parent of which is requested + * @depth: Distance of the parent from the node + * + * Returns the nth parent of a node. If there is no parent at the requested + * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to + * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on. + * + * The caller is responsible for calling fwnode_handle_put() for the returned + * node. + */ +struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, + unsigned int depth) +{ + unsigned int i; + + fwnode_handle_get(fwnode); + + for (i = 0; i < depth && fwnode; i++) + fwnode = fwnode_get_next_parent(fwnode); + + return fwnode; +} +EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); + +/** + * fwnode_get_next_child_node - Return the next child node handle for a node + * @fwnode: Firmware node to find the next child node for. + * @child: Handle to one of the node's child nodes or a %NULL handle. + */ +struct fwnode_handle * +fwnode_get_next_child_node(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + return fwnode_call_ptr_op(fwnode, get_next_child_node, child); +} +EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); + +/** + * fwnode_get_next_available_child_node - Return the next + * available child node handle for a node + * @fwnode: Firmware node to find the next child node for. + * @child: Handle to one of the node's child nodes or a %NULL handle. + */ +struct fwnode_handle * +fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + struct fwnode_handle *next_child = child; + + if (!fwnode) + return NULL; + + do { + next_child = fwnode_get_next_child_node(fwnode, next_child); + + if (!next_child || fwnode_device_is_available(next_child)) + break; + } while (next_child); + + return next_child; +} +EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node); + +/** + * device_get_next_child_node - Return the next child node handle for a device + * @dev: Device to find the next child node for. + * @child: Handle to one of the device's child nodes or a null handle. + */ +struct fwnode_handle *device_get_next_child_node(struct device *dev, + struct fwnode_handle *child) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + struct fwnode_handle *fwnode = NULL, *next; + + if (dev->of_node) + fwnode = &dev->of_node->fwnode; + else if (adev) + fwnode = acpi_fwnode_handle(adev); + + /* Try to find a child in primary fwnode */ + next = fwnode_get_next_child_node(fwnode, child); + if (next) + return next; + + /* When no more children in primary, continue with secondary */ + if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) + next = fwnode_get_next_child_node(fwnode->secondary, child); + + return next; +} +EXPORT_SYMBOL_GPL(device_get_next_child_node); + +/** + * fwnode_get_named_child_node - Return first matching named child node handle + * @fwnode: Firmware node to find the named child node for. + * @childname: String to match child node name against. 
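+ *
+ * A short illustrative sketch; the node and property names ("sensor",
+ * "sample-rate") are made-up examples:
+ *
+ *	struct fwnode_handle *child;
+ *	u32 rate;
+ *
+ *	child = fwnode_get_named_child_node(dev_fwnode(dev), "sensor");
+ *	if (child) {
+ *		fwnode_property_read_u32(child, "sample-rate", &rate);
+ *		fwnode_handle_put(child);
+ *	}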
+ */ +struct fwnode_handle * +fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) +{ + return fwnode_call_ptr_op(fwnode, get_named_child_node, childname); +} +EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); + +/** + * device_get_named_child_node - Return first matching named child node handle + * @dev: Device to find the named child node for. + * @childname: String to match child node name against. + */ +struct fwnode_handle *device_get_named_child_node(struct device *dev, + const char *childname) +{ + return fwnode_get_named_child_node(dev_fwnode(dev), childname); +} +EXPORT_SYMBOL_GPL(device_get_named_child_node); + +/** + * fwnode_handle_get - Obtain a reference to a device node + * @fwnode: Pointer to the device node to obtain the reference to. + * + * Returns the fwnode handle. + */ +struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode) +{ + if (!fwnode_has_op(fwnode, get)) + return fwnode; + + return fwnode_call_ptr_op(fwnode, get); +} +EXPORT_SYMBOL_GPL(fwnode_handle_get); + +/** + * fwnode_handle_put - Drop reference to a device node + * @fwnode: Pointer to the device node to drop the reference to. + * + * This has to be used when terminating device_for_each_child_node() iteration + * with break or return to prevent stale device node references from being left + * behind. + */ +void fwnode_handle_put(struct fwnode_handle *fwnode) +{ + fwnode_call_void_op(fwnode, put); +} +EXPORT_SYMBOL_GPL(fwnode_handle_put); + +/** + * fwnode_device_is_available - check if a device is available for use + * @fwnode: Pointer to the fwnode of the device. + */ +bool fwnode_device_is_available(const struct fwnode_handle *fwnode) +{ + return fwnode_call_bool_op(fwnode, device_is_available); +} +EXPORT_SYMBOL_GPL(fwnode_device_is_available); + +/** + * device_get_child_node_count - return the number of child nodes for device + * @dev: Device to cound the child nodes for + */ +unsigned int device_get_child_node_count(struct device *dev) +{ + struct fwnode_handle *child; + unsigned int count = 0; + + device_for_each_child_node(dev, child) + count++; + + return count; +} +EXPORT_SYMBOL_GPL(device_get_child_node_count); + +bool device_dma_supported(struct device *dev) +{ + /* For DT, this is always supported. + * For ACPI, this depends on CCA, which + * is determined by the acpi_dma_supported(). + */ + if (IS_ENABLED(CONFIG_OF) && dev->of_node) + return true; + + return acpi_dma_supported(ACPI_COMPANION(dev)); +} +EXPORT_SYMBOL_GPL(device_dma_supported); + +enum dev_dma_attr device_get_dma_attr(struct device *dev) +{ + enum dev_dma_attr attr = DEV_DMA_NOT_SUPPORTED; + + if (IS_ENABLED(CONFIG_OF) && dev->of_node) { + if (of_dma_is_coherent(dev->of_node)) + attr = DEV_DMA_COHERENT; + else + attr = DEV_DMA_NON_COHERENT; + } else + attr = acpi_get_dma_attr(ACPI_COMPANION(dev)); + + return attr; +} +EXPORT_SYMBOL_GPL(device_get_dma_attr); + +/** + * fwnode_get_phy_mode - Get phy mode for given firmware node + * @fwnode: Pointer to the given node + * + * The function gets phy interface string from property 'phy-mode' or + * 'phy-connection-type', and return its index in phy_modes table, or errno in + * error case. 
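+ *
+ * Illustrative use in a network driver probe path (the RGMII fallback is
+ * only an example choice):
+ *
+ *	int phy_mode = fwnode_get_phy_mode(dev_fwnode(dev));
+ *
+ *	if (phy_mode < 0)
+ *		phy_mode = PHY_INTERFACE_MODE_RGMII;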
+ */ +int fwnode_get_phy_mode(struct fwnode_handle *fwnode) +{ + const char *pm; + int err, i; + + err = fwnode_property_read_string(fwnode, "phy-mode", &pm); + if (err < 0) + err = fwnode_property_read_string(fwnode, + "phy-connection-type", &pm); + if (err < 0) + return err; + + for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) + if (!strcasecmp(pm, phy_modes(i))) + return i; + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(fwnode_get_phy_mode); + +/** + * device_get_phy_mode - Get phy mode for given device + * @dev: Pointer to the given device + * + * The function gets phy interface string from property 'phy-mode' or + * 'phy-connection-type', and return its index in phy_modes table, or errno in + * error case. + */ +int device_get_phy_mode(struct device *dev) +{ + return fwnode_get_phy_mode(dev_fwnode(dev)); +} +EXPORT_SYMBOL_GPL(device_get_phy_mode); + +static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode, + const char *name, char *addr, + int alen) +{ + int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen); + + if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr)) + return addr; + return NULL; +} + +/** + * fwnode_get_mac_address - Get the MAC from the firmware node + * @fwnode: Pointer to the firmware node + * @addr: Address of buffer to store the MAC in + * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN + * + * Search the firmware node for the best MAC address to use. 'mac-address' is + * checked first, because that is supposed to contain to "most recent" MAC + * address. If that isn't set, then 'local-mac-address' is checked next, + * because that is the default address. If that isn't set, then the obsolete + * 'address' is checked, just in case we're using an old device tree. + * + * Note that the 'address' property is supposed to contain a virtual address of + * the register set, but some DTS files have redefined that property to be the + * MAC address. + * + * All-zero MAC addresses are rejected, because those could be properties that + * exist in the firmware tables, but were not updated by the firmware. For + * example, the DTS could define 'mac-address' and 'local-mac-address', with + * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'. + * In this case, the real MAC is in 'local-mac-address', and 'mac-address' + * exists but is all zeros. +*/ +void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen) +{ + char *res; + + res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen); + if (res) + return res; + + res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen); + if (res) + return res; + + return fwnode_get_mac_addr(fwnode, "address", addr, alen); +} +EXPORT_SYMBOL(fwnode_get_mac_address); + +/** + * device_get_mac_address - Get the MAC for a given device + * @dev: Pointer to the device + * @addr: Address of buffer to store the MAC in + * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN + */ +void *device_get_mac_address(struct device *dev, char *addr, int alen) +{ + return fwnode_get_mac_address(dev_fwnode(dev), addr, alen); +} +EXPORT_SYMBOL(device_get_mac_address); + +/** + * fwnode_irq_get - Get IRQ directly from a fwnode + * @fwnode: Pointer to the firmware node + * @index: Zero-based index of the IRQ + * + * Returns Linux IRQ number on success. Other values are determined + * accordingly to acpi_/of_ irq_get() operation. 
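+ *
+ * A minimal usage sketch; my_irq_handler and priv are placeholders for the
+ * caller's own handler and context:
+ *
+ *	int irq = fwnode_irq_get(dev_fwnode(dev), 0);
+ *
+ *	if (irq < 0)
+ *		return irq;
+ *	ret = devm_request_irq(dev, irq, my_irq_handler, 0, dev_name(dev), priv);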
+ */
+int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
+{
+	struct device_node *of_node = to_of_node(fwnode);
+	struct resource res;
+	int ret;
+
+	if (IS_ENABLED(CONFIG_OF) && of_node)
+		return of_irq_get(of_node, index);
+
+	ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), index, &res);
+	if (ret)
+		return ret;
+
+	return res.start;
+}
+EXPORT_SYMBOL(fwnode_irq_get);
+
+/**
+ * fwnode_graph_get_next_endpoint - Get next endpoint firmware node
+ * @fwnode: Pointer to the parent firmware node
+ * @prev: Previous endpoint node or %NULL to get the first
+ *
+ * Returns an endpoint firmware node pointer or %NULL if no more endpoints
+ * are available.
+ */
+struct fwnode_handle *
+fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+			       struct fwnode_handle *prev)
+{
+	return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev);
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
+
+/**
+ * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
+ * @endpoint: Endpoint firmware node of the port
+ *
+ * Return: the firmware node of the device the @endpoint belongs to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
+{
+	struct fwnode_handle *port, *parent;
+
+	port = fwnode_get_parent(endpoint);
+	parent = fwnode_call_ptr_op(port, graph_get_port_parent);
+
+	fwnode_handle_put(port);
+
+	return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
+
+/**
+ * fwnode_graph_get_remote_port_parent - Return fwnode of a remote device
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote device the @fwnode points to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
+{
+	struct fwnode_handle *endpoint, *parent;
+
+	endpoint = fwnode_graph_get_remote_endpoint(fwnode);
+	parent = fwnode_graph_get_port_parent(endpoint);
+
+	fwnode_handle_put(endpoint);
+
+	return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
+
+/**
+ * fwnode_graph_get_remote_port - Return fwnode of a remote port
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote port the @fwnode points to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
+{
+	return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
+
+/**
+ * fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote endpoint the @fwnode points to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+	return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
+
+/**
+ * fwnode_graph_get_remote_node - get remote parent node for given port/endpoint
+ * @fwnode: pointer to parent fwnode_handle containing graph port/endpoint
+ * @port_id: identifier of the parent port node
+ * @endpoint_id: identifier of the endpoint node
+ *
+ * Return: Remote fwnode handle associated with remote endpoint node linked
+ * to @fwnode. Use fwnode_handle_put() on it when done.
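+ *
+ * Illustrative sketch: look up the device connected to port 0, endpoint 0
+ * of a device's own node:
+ *
+ *	struct fwnode_handle *remote;
+ *
+ *	remote = fwnode_graph_get_remote_node(dev_fwnode(dev), 0, 0);
+ *	if (!remote)
+ *		return -ENODEV;
+ *	dev_info(dev, "connected to %s\n", fwnode_get_name(remote));
+ *	fwnode_handle_put(remote);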
+ */ +struct fwnode_handle * +fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id, + u32 endpoint_id) +{ + struct fwnode_handle *endpoint = NULL; + + while ((endpoint = fwnode_graph_get_next_endpoint(fwnode, endpoint))) { + struct fwnode_endpoint fwnode_ep; + struct fwnode_handle *remote; + int ret; + + ret = fwnode_graph_parse_endpoint(endpoint, &fwnode_ep); + if (ret < 0) + continue; + + if (fwnode_ep.port != port_id || fwnode_ep.id != endpoint_id) + continue; + + remote = fwnode_graph_get_remote_port_parent(endpoint); + if (!remote) + return NULL; + + return fwnode_device_is_available(remote) ? remote : NULL; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node); + +/** + * fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers + * @fwnode: parent fwnode_handle containing the graph + * @port: identifier of the port node + * @endpoint: identifier of the endpoint node under the port node + * @flags: fwnode lookup flags + * + * Return the fwnode handle of the local endpoint corresponding the port and + * endpoint IDs or NULL if not found. + * + * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint + * has not been found, look for the closest endpoint ID greater than the + * specified one and return the endpoint that corresponds to it, if present. + * + * Do not return endpoints that belong to disabled devices, unless + * FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags. + * + * The returned endpoint needs to be released by calling fwnode_handle_put() on + * it when it is not needed any more. + */ +struct fwnode_handle * +fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, + u32 port, u32 endpoint, unsigned long flags) +{ + struct fwnode_handle *ep = NULL, *best_ep = NULL; + unsigned int best_ep_id = 0; + bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT; + bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED); + + while ((ep = fwnode_graph_get_next_endpoint(fwnode, ep))) { + struct fwnode_endpoint fwnode_ep = { 0 }; + int ret; + + if (enabled_only) { + struct fwnode_handle *dev_node; + bool available; + + dev_node = fwnode_graph_get_remote_port_parent(ep); + available = fwnode_device_is_available(dev_node); + fwnode_handle_put(dev_node); + if (!available) + continue; + } + + ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep); + if (ret < 0) + continue; + + if (fwnode_ep.port != port) + continue; + + if (fwnode_ep.id == endpoint) + return ep; + + if (!endpoint_next) + continue; + + /* + * If the endpoint that has just been found is not the first + * matching one and the ID of the one found previously is closer + * to the requested endpoint ID, skip it. + */ + if (fwnode_ep.id < endpoint || + (best_ep && best_ep_id < fwnode_ep.id)) + continue; + + fwnode_handle_put(best_ep); + best_ep = fwnode_handle_get(ep); + best_ep_id = fwnode_ep.id; + } + + return best_ep; +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id); + +/** + * fwnode_graph_parse_endpoint - parse common endpoint node properties + * @fwnode: pointer to endpoint fwnode_handle + * @endpoint: pointer to the fwnode endpoint data structure + * + * Parse @fwnode representing a graph endpoint node and store the + * information in @endpoint. The caller must hold a reference to + * @fwnode. 
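+ *
+ * Typical illustrative iteration over all endpoints of a device node:
+ *
+ *	struct fwnode_handle *ep;
+ *	struct fwnode_endpoint fwnode_ep;
+ *
+ *	fwnode_graph_for_each_endpoint(dev_fwnode(dev), ep) {
+ *		if (fwnode_graph_parse_endpoint(ep, &fwnode_ep))
+ *			continue;
+ *		dev_info(dev, "port %u, endpoint %u\n",
+ *			 fwnode_ep.port, fwnode_ep.id);
+ *	}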
+ */ +int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint) +{ + memset(endpoint, 0, sizeof(*endpoint)); + + return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint); +} +EXPORT_SYMBOL(fwnode_graph_parse_endpoint); + +const void *device_get_match_data(struct device *dev) +{ + return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev); +} +EXPORT_SYMBOL_GPL(device_get_match_data); + +static void * +fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, + void *data, devcon_match_fn_t match) +{ + struct fwnode_handle *node; + struct fwnode_handle *ep; + void *ret; + + fwnode_graph_for_each_endpoint(fwnode, ep) { + node = fwnode_graph_get_remote_port_parent(ep); + if (!fwnode_device_is_available(node)) { + fwnode_handle_put(node); + continue; + } + + ret = match(node, con_id, data); + fwnode_handle_put(node); + if (ret) { + fwnode_handle_put(ep); + return ret; + } + } + return NULL; +} + +static void * +fwnode_devcon_match(struct fwnode_handle *fwnode, const char *con_id, + void *data, devcon_match_fn_t match) +{ + struct fwnode_handle *node; + void *ret; + int i; + + for (i = 0; ; i++) { + node = fwnode_find_reference(fwnode, con_id, i); + if (IS_ERR(node)) + break; + + ret = match(node, NULL, data); + fwnode_handle_put(node); + if (ret) + return ret; + } + + return NULL; +} + +/** + * fwnode_connection_find_match - Find connection from a device node + * @fwnode: Device node with the connection + * @con_id: Identifier for the connection + * @data: Data for the match function + * @match: Function to check and convert the connection description + * + * Find a connection with unique identifier @con_id between @fwnode and another + * device node. @match will be used to convert the connection description to + * data the caller is expecting to be returned. + */ +void *fwnode_connection_find_match(struct fwnode_handle *fwnode, + const char *con_id, void *data, + devcon_match_fn_t match) +{ + void *ret; + + if (!fwnode || !match) + return NULL; + + ret = fwnode_graph_devcon_match(fwnode, con_id, data, match); + if (ret) + return ret; + + return fwnode_devcon_match(fwnode, con_id, data, match); +} +EXPORT_SYMBOL_GPL(fwnode_connection_find_match); diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig new file mode 100644 index 000000000..bcb90d8c3 --- /dev/null +++ b/drivers/base/regmap/Kconfig @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: GPL-2.0 +# Generic register map support. There are no user servicable options here, +# this is an API intended to be used by other kernel subsystems. These +# subsystems should select the appropriate symbols. 
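+#
+# For example, a driver whose device sits on an I2C bus would typically add
+# something like the following to its own Kconfig entry (MY_DRIVER is just a
+# placeholder name):
+#
+#	config MY_DRIVER
+#		tristate "My driver"
+#		depends on I2C
+#		select REGMAP_I2C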
+ +config REGMAP + default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM) + select IRQ_DOMAIN if REGMAP_IRQ + bool + +config REGCACHE_COMPRESSED + select LZO_COMPRESS + select LZO_DECOMPRESS + bool + +config REGMAP_AC97 + tristate + +config REGMAP_I2C + tristate + depends on I2C + +config REGMAP_SLIMBUS + tristate + depends on SLIMBUS + +config REGMAP_SPI + tristate + depends on SPI + +config REGMAP_SPMI + tristate + depends on SPMI + +config REGMAP_W1 + tristate + depends on W1 + +config REGMAP_MMIO + tristate + +config REGMAP_IRQ + bool + +config REGMAP_SOUNDWIRE + tristate + depends on SOUNDWIRE + +config REGMAP_SCCB + tristate + depends on I2C + +config REGMAP_I3C + tristate + depends on I3C + +config REGMAP_SPI_AVMM + tristate + depends on SPI diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile new file mode 100644 index 000000000..ac1b69ee4 --- /dev/null +++ b/drivers/base/regmap/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# For include/trace/define_trace.h to include trace.h +CFLAGS_regmap.o := -I$(src) + +obj-$(CONFIG_REGMAP) += regmap.o regcache.o +obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o +obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o +obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o +obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o +obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o +obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o +obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o +obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o +obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o +obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o +obj-$(CONFIG_REGMAP_W1) += regmap-w1.o +obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o +obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o +obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o +obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h new file mode 100644 index 000000000..0097696c3 --- /dev/null +++ b/drivers/base/regmap/internal.h @@ -0,0 +1,300 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Register map access API internal header + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + */ + +#ifndef _REGMAP_INTERNAL_H +#define _REGMAP_INTERNAL_H + +#include <linux/device.h> +#include <linux/regmap.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/wait.h> + +struct regmap; +struct regcache_ops; + +struct regmap_debugfs_off_cache { + struct list_head list; + off_t min; + off_t max; + unsigned int base_reg; + unsigned int max_reg; +}; + +struct regmap_format { + size_t buf_size; + size_t reg_bytes; + size_t pad_bytes; + size_t val_bytes; + void (*format_write)(struct regmap *map, + unsigned int reg, unsigned int val); + void (*format_reg)(void *buf, unsigned int reg, unsigned int shift); + void (*format_val)(void *buf, unsigned int val, unsigned int shift); + unsigned int (*parse_val)(const void *buf); + void (*parse_inplace)(void *buf); +}; + +struct regmap_async { + struct list_head list; + struct regmap *map; + void *work_buf; +}; + +struct regmap { + union { + struct mutex mutex; + struct { + spinlock_t spinlock; + unsigned long spinlock_flags; + }; + }; + regmap_lock lock; + regmap_unlock unlock; + void *lock_arg; /* This is passed to lock/unlock functions */ + gfp_t alloc_flags; + + struct device *dev; /* Device we do I/O on */ + void *work_buf; /* Scratch buffer 
used to format I/O */ + struct regmap_format format; /* Buffer format */ + const struct regmap_bus *bus; + void *bus_context; + const char *name; + + bool async; + spinlock_t async_lock; + wait_queue_head_t async_waitq; + struct list_head async_list; + struct list_head async_free; + int async_ret; + +#ifdef CONFIG_DEBUG_FS + bool debugfs_disable; + struct dentry *debugfs; + const char *debugfs_name; + + unsigned int debugfs_reg_len; + unsigned int debugfs_val_len; + unsigned int debugfs_tot_len; + + struct list_head debugfs_off_cache; + struct mutex cache_lock; +#endif + + unsigned int max_register; + bool (*writeable_reg)(struct device *dev, unsigned int reg); + bool (*readable_reg)(struct device *dev, unsigned int reg); + bool (*volatile_reg)(struct device *dev, unsigned int reg); + bool (*precious_reg)(struct device *dev, unsigned int reg); + bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg); + bool (*readable_noinc_reg)(struct device *dev, unsigned int reg); + const struct regmap_access_table *wr_table; + const struct regmap_access_table *rd_table; + const struct regmap_access_table *volatile_table; + const struct regmap_access_table *precious_table; + const struct regmap_access_table *wr_noinc_table; + const struct regmap_access_table *rd_noinc_table; + + int (*reg_read)(void *context, unsigned int reg, unsigned int *val); + int (*reg_write)(void *context, unsigned int reg, unsigned int val); + int (*reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); + + bool defer_caching; + + unsigned long read_flag_mask; + unsigned long write_flag_mask; + + /* number of bits to (left) shift the reg value when formatting*/ + int reg_shift; + int reg_stride; + int reg_stride_order; + + /* regcache specific members */ + const struct regcache_ops *cache_ops; + enum regcache_type cache_type; + + /* number of bytes in reg_defaults_raw */ + unsigned int cache_size_raw; + /* number of bytes per word in reg_defaults_raw */ + unsigned int cache_word_size; + /* number of entries in reg_defaults */ + unsigned int num_reg_defaults; + /* number of entries in reg_defaults_raw */ + unsigned int num_reg_defaults_raw; + + /* if set, only the cache is modified not the HW */ + bool cache_only; + /* if set, only the HW is modified not the cache */ + bool cache_bypass; + /* if set, remember to free reg_defaults_raw */ + bool cache_free; + + struct reg_default *reg_defaults; + const void *reg_defaults_raw; + void *cache; + /* if set, the cache contains newer data than the HW */ + bool cache_dirty; + /* if set, the HW registers are known to match map->reg_defaults */ + bool no_sync_defaults; + + struct reg_sequence *patch; + int patch_regs; + + /* if set, converts bulk read to single read */ + bool use_single_read; + /* if set, converts bulk write to single write */ + bool use_single_write; + /* if set, the device supports multi write mode */ + bool can_multi_write; + + /* if set, raw reads/writes are limited to this size */ + size_t max_raw_read; + size_t max_raw_write; + + struct rb_root range_tree; + void *selector_work_buf; /* Scratch buffer used for selector */ + + struct hwspinlock *hwlock; + + /* if set, the regmap core can sleep */ + bool can_sleep; +}; + +struct regcache_ops { + const char *name; + enum regcache_type type; + int (*init)(struct regmap *map); + int (*exit)(struct regmap *map); +#ifdef CONFIG_DEBUG_FS + void (*debugfs_init)(struct regmap *map); +#endif + int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); + int 
(*write)(struct regmap *map, unsigned int reg, unsigned int value); + int (*sync)(struct regmap *map, unsigned int min, unsigned int max); + int (*drop)(struct regmap *map, unsigned int min, unsigned int max); +}; + +bool regmap_cached(struct regmap *map, unsigned int reg); +bool regmap_writeable(struct regmap *map, unsigned int reg); +bool regmap_readable(struct regmap *map, unsigned int reg); +bool regmap_volatile(struct regmap *map, unsigned int reg); +bool regmap_precious(struct regmap *map, unsigned int reg); +bool regmap_writeable_noinc(struct regmap *map, unsigned int reg); +bool regmap_readable_noinc(struct regmap *map, unsigned int reg); + +int _regmap_write(struct regmap *map, unsigned int reg, + unsigned int val); + +struct regmap_range_node { + struct rb_node node; + const char *name; + struct regmap *map; + + unsigned int range_min; + unsigned int range_max; + + unsigned int selector_reg; + unsigned int selector_mask; + int selector_shift; + + unsigned int window_start; + unsigned int window_len; +}; + +struct regmap_field { + struct regmap *regmap; + unsigned int mask; + /* lsb */ + unsigned int shift; + unsigned int reg; + + unsigned int id_size; + unsigned int id_offset; +}; + +#ifdef CONFIG_DEBUG_FS +extern void regmap_debugfs_initcall(void); +extern void regmap_debugfs_init(struct regmap *map); +extern void regmap_debugfs_exit(struct regmap *map); + +static inline void regmap_debugfs_disable(struct regmap *map) +{ + map->debugfs_disable = true; +} + +#else +static inline void regmap_debugfs_initcall(void) { } +static inline void regmap_debugfs_init(struct regmap *map) { } +static inline void regmap_debugfs_exit(struct regmap *map) { } +static inline void regmap_debugfs_disable(struct regmap *map) { } +#endif + +/* regcache core declarations */ +int regcache_init(struct regmap *map, const struct regmap_config *config); +void regcache_exit(struct regmap *map); +int regcache_read(struct regmap *map, + unsigned int reg, unsigned int *value); +int regcache_write(struct regmap *map, + unsigned int reg, unsigned int value); +int regcache_sync(struct regmap *map); +int regcache_sync_block(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end); + +static inline const void *regcache_get_val_addr(struct regmap *map, + const void *base, + unsigned int idx) +{ + return base + (map->cache_word_size * idx); +} + +unsigned int regcache_get_val(struct regmap *map, const void *base, + unsigned int idx); +bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, + unsigned int val); +int regcache_lookup_reg(struct regmap *map, unsigned int reg); + +int _regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len, bool noinc); + +void regmap_async_complete_cb(struct regmap_async *async, int ret); + +enum regmap_endian regmap_get_val_endian(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config); + +extern struct regcache_ops regcache_rbtree_ops; +extern struct regcache_ops regcache_lzo_ops; +extern struct regcache_ops regcache_flat_ops; + +static inline const char *regmap_name(const struct regmap *map) +{ + if (map->dev) + return dev_name(map->dev); + + return map->name; +} + +static inline unsigned int regmap_get_offset(const struct regmap *map, + unsigned int index) +{ + if (map->reg_stride_order >= 0) + return index << map->reg_stride_order; + else + return index * map->reg_stride; +} + +static inline unsigned int 
regcache_get_index_by_order(const struct regmap *map, + unsigned int reg) +{ + return reg >> map->reg_stride_order; +} + +#endif diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c new file mode 100644 index 000000000..b7e4b2464 --- /dev/null +++ b/drivers/base/regmap/regcache-flat.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register cache access API - flat caching support +// +// Copyright 2012 Wolfson Microelectronics plc +// +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/device.h> +#include <linux/seq_file.h> +#include <linux/slab.h> + +#include "internal.h" + +static inline unsigned int regcache_flat_get_index(const struct regmap *map, + unsigned int reg) +{ + return regcache_get_index_by_order(map, reg); +} + +static int regcache_flat_init(struct regmap *map) +{ + int i; + unsigned int *cache; + + if (!map || map->reg_stride_order < 0 || !map->max_register) + return -EINVAL; + + map->cache = kcalloc(regcache_flat_get_index(map, map->max_register) + + 1, sizeof(unsigned int), GFP_KERNEL); + if (!map->cache) + return -ENOMEM; + + cache = map->cache; + + for (i = 0; i < map->num_reg_defaults; i++) { + unsigned int reg = map->reg_defaults[i].reg; + unsigned int index = regcache_flat_get_index(map, reg); + + cache[index] = map->reg_defaults[i].def; + } + + return 0; +} + +static int regcache_flat_exit(struct regmap *map) +{ + kfree(map->cache); + map->cache = NULL; + + return 0; +} + +static int regcache_flat_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + unsigned int *cache = map->cache; + unsigned int index = regcache_flat_get_index(map, reg); + + *value = cache[index]; + + return 0; +} + +static int regcache_flat_write(struct regmap *map, unsigned int reg, + unsigned int value) +{ + unsigned int *cache = map->cache; + unsigned int index = regcache_flat_get_index(map, reg); + + cache[index] = value; + + return 0; +} + +struct regcache_ops regcache_flat_ops = { + .type = REGCACHE_FLAT, + .name = "flat", + .init = regcache_flat_init, + .exit = regcache_flat_exit, + .read = regcache_flat_read, + .write = regcache_flat_write, +}; diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c new file mode 100644 index 000000000..7886303eb --- /dev/null +++ b/drivers/base/regmap/regcache-lzo.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register cache access API - LZO caching support +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> + +#include <linux/device.h> +#include <linux/lzo.h> +#include <linux/slab.h> + +#include "internal.h" + +static int regcache_lzo_exit(struct regmap *map); + +struct regcache_lzo_ctx { + void *wmem; + void *dst; + const void *src; + size_t src_len; + size_t dst_len; + size_t decompressed_size; + unsigned long *sync_bmp; + int sync_bmp_nbits; +}; + +#define LZO_BLOCK_NUM 8 +static int regcache_lzo_block_count(struct regmap *map) +{ + return LZO_BLOCK_NUM; +} + +static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx) +{ + lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + if (!lzo_ctx->wmem) + return -ENOMEM; + return 0; +} + +static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx) +{ + size_t compress_size; + int ret; + + ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, + lzo_ctx->dst, &compress_size, lzo_ctx->wmem); + if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) + return -EINVAL; + lzo_ctx->dst_len 
= compress_size; + return 0; +} + +static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx) +{ + size_t dst_len; + int ret; + + dst_len = lzo_ctx->dst_len; + ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, + lzo_ctx->dst, &dst_len); + if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) + return -EINVAL; + return 0; +} + +static int regcache_lzo_compress_cache_block(struct regmap *map, + struct regcache_lzo_ctx *lzo_ctx) +{ + int ret; + + lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); + lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); + if (!lzo_ctx->dst) { + lzo_ctx->dst_len = 0; + return -ENOMEM; + } + + ret = regcache_lzo_compress(lzo_ctx); + if (ret < 0) + return ret; + return 0; +} + +static int regcache_lzo_decompress_cache_block(struct regmap *map, + struct regcache_lzo_ctx *lzo_ctx) +{ + int ret; + + lzo_ctx->dst_len = lzo_ctx->decompressed_size; + lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); + if (!lzo_ctx->dst) { + lzo_ctx->dst_len = 0; + return -ENOMEM; + } + + ret = regcache_lzo_decompress(lzo_ctx); + if (ret < 0) + return ret; + return 0; +} + +static inline int regcache_lzo_get_blkindex(struct regmap *map, + unsigned int reg) +{ + return ((reg / map->reg_stride) * map->cache_word_size) / + DIV_ROUND_UP(map->cache_size_raw, + regcache_lzo_block_count(map)); +} + +static inline int regcache_lzo_get_blkpos(struct regmap *map, + unsigned int reg) +{ + return (reg / map->reg_stride) % + (DIV_ROUND_UP(map->cache_size_raw, + regcache_lzo_block_count(map)) / + map->cache_word_size); +} + +static inline int regcache_lzo_get_blksize(struct regmap *map) +{ + return DIV_ROUND_UP(map->cache_size_raw, + regcache_lzo_block_count(map)); +} + +static int regcache_lzo_init(struct regmap *map) +{ + struct regcache_lzo_ctx **lzo_blocks; + size_t bmp_size; + int ret, i, blksize, blkcount; + const char *p, *end; + unsigned long *sync_bmp; + + ret = 0; + + blkcount = regcache_lzo_block_count(map); + map->cache = kcalloc(blkcount, sizeof(*lzo_blocks), + GFP_KERNEL); + if (!map->cache) + return -ENOMEM; + lzo_blocks = map->cache; + + /* + * allocate a bitmap to be used when syncing the cache with + * the hardware. Each time a register is modified, the corresponding + * bit is set in the bitmap, so we know that we have to sync + * that register. 
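+	 * For example, with reg_stride == 1 a write to register 0x10 through
+	 * regcache_lzo_write() sets bit 0x10 in sync_bmp, and
+	 * regcache_lzo_sync() later walks only the bits that are set.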
+ */ + bmp_size = map->num_reg_defaults_raw; + sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL); + if (!sync_bmp) { + ret = -ENOMEM; + goto err; + } + + /* allocate the lzo blocks and initialize them */ + for (i = 0; i < blkcount; i++) { + lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, + GFP_KERNEL); + if (!lzo_blocks[i]) { + bitmap_free(sync_bmp); + ret = -ENOMEM; + goto err; + } + lzo_blocks[i]->sync_bmp = sync_bmp; + lzo_blocks[i]->sync_bmp_nbits = bmp_size; + /* alloc the working space for the compressed block */ + ret = regcache_lzo_prepare(lzo_blocks[i]); + if (ret < 0) + goto err; + } + + blksize = regcache_lzo_get_blksize(map); + p = map->reg_defaults_raw; + end = map->reg_defaults_raw + map->cache_size_raw; + /* compress the register map and fill the lzo blocks */ + for (i = 0; i < blkcount; i++, p += blksize) { + lzo_blocks[i]->src = p; + if (p + blksize > end) + lzo_blocks[i]->src_len = end - p; + else + lzo_blocks[i]->src_len = blksize; + ret = regcache_lzo_compress_cache_block(map, + lzo_blocks[i]); + if (ret < 0) + goto err; + lzo_blocks[i]->decompressed_size = + lzo_blocks[i]->src_len; + } + + return 0; +err: + regcache_lzo_exit(map); + return ret; +} + +static int regcache_lzo_exit(struct regmap *map) +{ + struct regcache_lzo_ctx **lzo_blocks; + int i, blkcount; + + lzo_blocks = map->cache; + if (!lzo_blocks) + return 0; + + blkcount = regcache_lzo_block_count(map); + /* + * the pointer to the bitmap used for syncing the cache + * is shared amongst all lzo_blocks. Ensure it is freed + * only once. + */ + if (lzo_blocks[0]) + bitmap_free(lzo_blocks[0]->sync_bmp); + for (i = 0; i < blkcount; i++) { + if (lzo_blocks[i]) { + kfree(lzo_blocks[i]->wmem); + kfree(lzo_blocks[i]->dst); + } + /* each lzo_block is a pointer returned by kmalloc or NULL */ + kfree(lzo_blocks[i]); + } + kfree(lzo_blocks); + map->cache = NULL; + return 0; +} + +static int regcache_lzo_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + struct regcache_lzo_ctx *lzo_block, **lzo_blocks; + int ret, blkindex, blkpos; + size_t tmp_dst_len; + void *tmp_dst; + + /* index of the compressed lzo block */ + blkindex = regcache_lzo_get_blkindex(map, reg); + /* register index within the decompressed block */ + blkpos = regcache_lzo_get_blkpos(map, reg); + lzo_blocks = map->cache; + lzo_block = lzo_blocks[blkindex]; + + /* save the pointer and length of the compressed block */ + tmp_dst = lzo_block->dst; + tmp_dst_len = lzo_block->dst_len; + + /* prepare the source to be the compressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* decompress the block */ + ret = regcache_lzo_decompress_cache_block(map, lzo_block); + if (ret >= 0) + /* fetch the value from the cache */ + *value = regcache_get_val(map, lzo_block->dst, blkpos); + + kfree(lzo_block->dst); + /* restore the pointer and length of the compressed block */ + lzo_block->dst = tmp_dst; + lzo_block->dst_len = tmp_dst_len; + + return ret; +} + +static int regcache_lzo_write(struct regmap *map, + unsigned int reg, unsigned int value) +{ + struct regcache_lzo_ctx *lzo_block, **lzo_blocks; + int ret, blkindex, blkpos; + size_t tmp_dst_len; + void *tmp_dst; + + /* index of the compressed lzo block */ + blkindex = regcache_lzo_get_blkindex(map, reg); + /* register index within the decompressed block */ + blkpos = regcache_lzo_get_blkpos(map, reg); + lzo_blocks = map->cache; + lzo_block = lzo_blocks[blkindex]; + + /* save the pointer and length of the compressed block */ + tmp_dst = lzo_block->dst; + 
tmp_dst_len = lzo_block->dst_len; + + /* prepare the source to be the compressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* decompress the block */ + ret = regcache_lzo_decompress_cache_block(map, lzo_block); + if (ret < 0) { + kfree(lzo_block->dst); + goto out; + } + + /* write the new value to the cache */ + if (regcache_set_val(map, lzo_block->dst, blkpos, value)) { + kfree(lzo_block->dst); + goto out; + } + + /* prepare the source to be the decompressed block */ + lzo_block->src = lzo_block->dst; + lzo_block->src_len = lzo_block->dst_len; + + /* compress the block */ + ret = regcache_lzo_compress_cache_block(map, lzo_block); + if (ret < 0) { + kfree(lzo_block->dst); + kfree(lzo_block->src); + goto out; + } + + /* set the bit so we know we have to sync this register */ + set_bit(reg / map->reg_stride, lzo_block->sync_bmp); + kfree(tmp_dst); + kfree(lzo_block->src); + return 0; +out: + lzo_block->dst = tmp_dst; + lzo_block->dst_len = tmp_dst_len; + return ret; +} + +static int regcache_lzo_sync(struct regmap *map, unsigned int min, + unsigned int max) +{ + struct regcache_lzo_ctx **lzo_blocks; + unsigned int val; + int i; + int ret; + + lzo_blocks = map->cache; + i = min; + for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp, + lzo_blocks[0]->sync_bmp_nbits) { + if (i > max) + continue; + + ret = regcache_read(map, i, &val); + if (ret) + return ret; + + /* Is this the hardware default? If so skip. */ + ret = regcache_lookup_reg(map, i); + if (ret > 0 && val == map->reg_defaults[ret].def) + continue; + + map->cache_bypass = true; + ret = _regmap_write(map, i, val); + map->cache_bypass = false; + if (ret) + return ret; + dev_dbg(map->dev, "Synced register %#x, value %#x\n", + i, val); + } + + return 0; +} + +struct regcache_ops regcache_lzo_ops = { + .type = REGCACHE_COMPRESSED, + .name = "lzo", + .init = regcache_lzo_init, + .exit = regcache_lzo_exit, + .read = regcache_lzo_read, + .write = regcache_lzo_write, + .sync = regcache_lzo_sync +}; diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c new file mode 100644 index 000000000..d65715b9e --- /dev/null +++ b/drivers/base/regmap/regcache-rbtree.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register cache access API - rbtree caching support +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> + +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/rbtree.h> +#include <linux/seq_file.h> +#include <linux/slab.h> + +#include "internal.h" + +static int regcache_rbtree_write(struct regmap *map, unsigned int reg, + unsigned int value); +static int regcache_rbtree_exit(struct regmap *map); + +struct regcache_rbtree_node { + /* block of adjacent registers */ + void *block; + /* Which registers are present */ + long *cache_present; + /* base register handled by this block */ + unsigned int base_reg; + /* number of registers available in the block */ + unsigned int blklen; + /* the actual rbtree node holding this block */ + struct rb_node node; +}; + +struct regcache_rbtree_ctx { + struct rb_root root; + struct regcache_rbtree_node *cached_rbnode; +}; + +static inline void regcache_rbtree_get_base_top_reg( + struct regmap *map, + struct regcache_rbtree_node *rbnode, + unsigned int *base, unsigned int *top) +{ + *base = rbnode->base_reg; + *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride); +} + +static unsigned int 
regcache_rbtree_get_register(struct regmap *map, + struct regcache_rbtree_node *rbnode, unsigned int idx) +{ + return regcache_get_val(map, rbnode->block, idx); +} + +static void regcache_rbtree_set_register(struct regmap *map, + struct regcache_rbtree_node *rbnode, + unsigned int idx, unsigned int val) +{ + set_bit(idx, rbnode->cache_present); + regcache_set_val(map, rbnode->block, idx, val); +} + +static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, + unsigned int reg) +{ + struct regcache_rbtree_ctx *rbtree_ctx = map->cache; + struct rb_node *node; + struct regcache_rbtree_node *rbnode; + unsigned int base_reg, top_reg; + + rbnode = rbtree_ctx->cached_rbnode; + if (rbnode) { + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, + &top_reg); + if (reg >= base_reg && reg <= top_reg) + return rbnode; + } + + node = rbtree_ctx->root.rb_node; + while (node) { + rbnode = rb_entry(node, struct regcache_rbtree_node, node); + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, + &top_reg); + if (reg >= base_reg && reg <= top_reg) { + rbtree_ctx->cached_rbnode = rbnode; + return rbnode; + } else if (reg > top_reg) { + node = node->rb_right; + } else if (reg < base_reg) { + node = node->rb_left; + } + } + + return NULL; +} + +static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root, + struct regcache_rbtree_node *rbnode) +{ + struct rb_node **new, *parent; + struct regcache_rbtree_node *rbnode_tmp; + unsigned int base_reg_tmp, top_reg_tmp; + unsigned int base_reg; + + parent = NULL; + new = &root->rb_node; + while (*new) { + rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node); + /* base and top registers of the current rbnode */ + regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp, + &top_reg_tmp); + /* base register of the rbnode to be added */ + base_reg = rbnode->base_reg; + parent = *new; + /* if this register has already been inserted, just return */ + if (base_reg >= base_reg_tmp && + base_reg <= top_reg_tmp) + return 0; + else if (base_reg > top_reg_tmp) + new = &((*new)->rb_right); + else if (base_reg < base_reg_tmp) + new = &((*new)->rb_left); + } + + /* insert the node into the rbtree */ + rb_link_node(&rbnode->node, parent, new); + rb_insert_color(&rbnode->node, root); + + return 1; +} + +#ifdef CONFIG_DEBUG_FS +static int rbtree_show(struct seq_file *s, void *ignored) +{ + struct regmap *map = s->private; + struct regcache_rbtree_ctx *rbtree_ctx = map->cache; + struct regcache_rbtree_node *n; + struct rb_node *node; + unsigned int base, top; + size_t mem_size; + int nodes = 0; + int registers = 0; + int this_registers, average; + + map->lock(map->lock_arg); + + mem_size = sizeof(*rbtree_ctx); + + for (node = rb_first(&rbtree_ctx->root); node != NULL; + node = rb_next(node)) { + n = rb_entry(node, struct regcache_rbtree_node, node); + mem_size += sizeof(*n); + mem_size += (n->blklen * map->cache_word_size); + mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long); + + regcache_rbtree_get_base_top_reg(map, n, &base, &top); + this_registers = ((top - base) / map->reg_stride) + 1; + seq_printf(s, "%x-%x (%d)\n", base, top, this_registers); + + nodes++; + registers += this_registers; + } + + if (nodes) + average = registers / nodes; + else + average = 0; + + seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n", + nodes, registers, average, mem_size); + + map->unlock(map->lock_arg); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(rbtree); + +static void rbtree_debugfs_init(struct regmap *map) +{ + 
debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); +} +#endif + +static int regcache_rbtree_init(struct regmap *map) +{ + struct regcache_rbtree_ctx *rbtree_ctx; + int i; + int ret; + + map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); + if (!map->cache) + return -ENOMEM; + + rbtree_ctx = map->cache; + rbtree_ctx->root = RB_ROOT; + rbtree_ctx->cached_rbnode = NULL; + + for (i = 0; i < map->num_reg_defaults; i++) { + ret = regcache_rbtree_write(map, + map->reg_defaults[i].reg, + map->reg_defaults[i].def); + if (ret) + goto err; + } + + return 0; + +err: + regcache_rbtree_exit(map); + return ret; +} + +static int regcache_rbtree_exit(struct regmap *map) +{ + struct rb_node *next; + struct regcache_rbtree_ctx *rbtree_ctx; + struct regcache_rbtree_node *rbtree_node; + + /* if we've already been called then just return */ + rbtree_ctx = map->cache; + if (!rbtree_ctx) + return 0; + + /* free up the rbtree */ + next = rb_first(&rbtree_ctx->root); + while (next) { + rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); + next = rb_next(&rbtree_node->node); + rb_erase(&rbtree_node->node, &rbtree_ctx->root); + kfree(rbtree_node->cache_present); + kfree(rbtree_node->block); + kfree(rbtree_node); + } + + /* release the resources */ + kfree(map->cache); + map->cache = NULL; + + return 0; +} + +static int regcache_rbtree_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + struct regcache_rbtree_node *rbnode; + unsigned int reg_tmp; + + rbnode = regcache_rbtree_lookup(map, reg); + if (rbnode) { + reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; + if (!test_bit(reg_tmp, rbnode->cache_present)) + return -ENOENT; + *value = regcache_rbtree_get_register(map, rbnode, reg_tmp); + } else { + return -ENOENT; + } + + return 0; +} + + +static int regcache_rbtree_insert_to_block(struct regmap *map, + struct regcache_rbtree_node *rbnode, + unsigned int base_reg, + unsigned int top_reg, + unsigned int reg, + unsigned int value) +{ + unsigned int blklen; + unsigned int pos, offset; + unsigned long *present; + u8 *blk; + + blklen = (top_reg - base_reg) / map->reg_stride + 1; + pos = (reg - base_reg) / map->reg_stride; + offset = (rbnode->base_reg - base_reg) / map->reg_stride; + + blk = krealloc(rbnode->block, + blklen * map->cache_word_size, + map->alloc_flags); + if (!blk) + return -ENOMEM; + + rbnode->block = blk; + + if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) { + present = krealloc(rbnode->cache_present, + BITS_TO_LONGS(blklen) * sizeof(*present), + map->alloc_flags); + if (!present) + return -ENOMEM; + + memset(present + BITS_TO_LONGS(rbnode->blklen), 0, + (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen)) + * sizeof(*present)); + } else { + present = rbnode->cache_present; + } + + /* insert the register value in the correct place in the rbnode block */ + if (pos == 0) { + memmove(blk + offset * map->cache_word_size, + blk, rbnode->blklen * map->cache_word_size); + bitmap_shift_left(present, present, offset, blklen); + } + + /* update the rbnode block, its size and the base register */ + rbnode->blklen = blklen; + rbnode->base_reg = base_reg; + rbnode->cache_present = present; + + regcache_rbtree_set_register(map, rbnode, pos, value); + return 0; +} + +static struct regcache_rbtree_node * +regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) +{ + struct regcache_rbtree_node *rbnode; + const struct regmap_range *range; + int i; + + rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags); + if (!rbnode) + return NULL; + + /* If 
there is a read table then use it to guess at an allocation */ + if (map->rd_table) { + for (i = 0; i < map->rd_table->n_yes_ranges; i++) { + if (regmap_reg_in_range(reg, + &map->rd_table->yes_ranges[i])) + break; + } + + if (i != map->rd_table->n_yes_ranges) { + range = &map->rd_table->yes_ranges[i]; + rbnode->blklen = (range->range_max - range->range_min) / + map->reg_stride + 1; + rbnode->base_reg = range->range_min; + } + } + + if (!rbnode->blklen) { + rbnode->blklen = 1; + rbnode->base_reg = reg; + } + + rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, + map->alloc_flags); + if (!rbnode->block) + goto err_free; + + rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen), + sizeof(*rbnode->cache_present), + map->alloc_flags); + if (!rbnode->cache_present) + goto err_free_block; + + return rbnode; + +err_free_block: + kfree(rbnode->block); +err_free: + kfree(rbnode); + return NULL; +} + +static int regcache_rbtree_write(struct regmap *map, unsigned int reg, + unsigned int value) +{ + struct regcache_rbtree_ctx *rbtree_ctx; + struct regcache_rbtree_node *rbnode, *rbnode_tmp; + struct rb_node *node; + unsigned int reg_tmp; + int ret; + + rbtree_ctx = map->cache; + + /* if we can't locate it in the cached rbnode we'll have + * to traverse the rbtree looking for it. + */ + rbnode = regcache_rbtree_lookup(map, reg); + if (rbnode) { + reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; + regcache_rbtree_set_register(map, rbnode, reg_tmp, value); + } else { + unsigned int base_reg, top_reg; + unsigned int new_base_reg, new_top_reg; + unsigned int min, max; + unsigned int max_dist; + unsigned int dist, best_dist = UINT_MAX; + + max_dist = map->reg_stride * sizeof(*rbnode_tmp) / + map->cache_word_size; + if (reg < max_dist) + min = 0; + else + min = reg - max_dist; + max = reg + max_dist; + + /* look for an adjacent register to the one we are about to add */ + node = rbtree_ctx->root.rb_node; + while (node) { + rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, + node); + + regcache_rbtree_get_base_top_reg(map, rbnode_tmp, + &base_reg, &top_reg); + + if (base_reg <= max && top_reg >= min) { + if (reg < base_reg) + dist = base_reg - reg; + else if (reg > top_reg) + dist = reg - top_reg; + else + dist = 0; + if (dist < best_dist) { + rbnode = rbnode_tmp; + best_dist = dist; + new_base_reg = min(reg, base_reg); + new_top_reg = max(reg, top_reg); + } + } + + /* + * Keep looking, we want to choose the closest block, + * otherwise we might end up creating overlapping + * blocks, which breaks the rbtree. + */ + if (reg < base_reg) + node = node->rb_left; + else if (reg > top_reg) + node = node->rb_right; + else + break; + } + + if (rbnode) { + ret = regcache_rbtree_insert_to_block(map, rbnode, + new_base_reg, + new_top_reg, reg, + value); + if (ret) + return ret; + rbtree_ctx->cached_rbnode = rbnode; + return 0; + } + + /* We did not manage to find a place to insert it in + * an existing block so create a new rbnode. 
+ */ + rbnode = regcache_rbtree_node_alloc(map, reg); + if (!rbnode) + return -ENOMEM; + regcache_rbtree_set_register(map, rbnode, + (reg - rbnode->base_reg) / map->reg_stride, + value); + regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); + rbtree_ctx->cached_rbnode = rbnode; + } + + return 0; +} + +static int regcache_rbtree_sync(struct regmap *map, unsigned int min, + unsigned int max) +{ + struct regcache_rbtree_ctx *rbtree_ctx; + struct rb_node *node; + struct regcache_rbtree_node *rbnode; + unsigned int base_reg, top_reg; + unsigned int start, end; + int ret; + + rbtree_ctx = map->cache; + for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { + rbnode = rb_entry(node, struct regcache_rbtree_node, node); + + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, + &top_reg); + if (base_reg > max) + break; + if (top_reg < min) + continue; + + if (min > base_reg) + start = (min - base_reg) / map->reg_stride; + else + start = 0; + + if (max < top_reg) + end = (max - base_reg) / map->reg_stride + 1; + else + end = rbnode->blklen; + + ret = regcache_sync_block(map, rbnode->block, + rbnode->cache_present, + rbnode->base_reg, start, end); + if (ret != 0) + return ret; + } + + return regmap_async_complete(map); +} + +static int regcache_rbtree_drop(struct regmap *map, unsigned int min, + unsigned int max) +{ + struct regcache_rbtree_ctx *rbtree_ctx; + struct regcache_rbtree_node *rbnode; + struct rb_node *node; + unsigned int base_reg, top_reg; + unsigned int start, end; + + rbtree_ctx = map->cache; + for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { + rbnode = rb_entry(node, struct regcache_rbtree_node, node); + + regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, + &top_reg); + if (base_reg > max) + break; + if (top_reg < min) + continue; + + if (min > base_reg) + start = (min - base_reg) / map->reg_stride; + else + start = 0; + + if (max < top_reg) + end = (max - base_reg) / map->reg_stride + 1; + else + end = rbnode->blklen; + + bitmap_clear(rbnode->cache_present, start, end - start); + } + + return 0; +} + +struct regcache_ops regcache_rbtree_ops = { + .type = REGCACHE_RBTREE, + .name = "rbtree", + .init = regcache_rbtree_init, + .exit = regcache_rbtree_exit, +#ifdef CONFIG_DEBUG_FS + .debugfs_init = rbtree_debugfs_init, +#endif + .read = regcache_rbtree_read, + .write = regcache_rbtree_write, + .sync = regcache_rbtree_sync, + .drop = regcache_rbtree_drop, +}; diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c new file mode 100644 index 000000000..7fdd702e5 --- /dev/null +++ b/drivers/base/regmap/regcache.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register cache access API +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> + +#include <linux/bsearch.h> +#include <linux/device.h> +#include <linux/export.h> +#include <linux/slab.h> +#include <linux/sort.h> + +#include "trace.h" +#include "internal.h" + +static const struct regcache_ops *cache_types[] = { + ®cache_rbtree_ops, +#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED) + ®cache_lzo_ops, +#endif + ®cache_flat_ops, +}; + +static int regcache_hw_init(struct regmap *map) +{ + int i, j; + int ret; + int count; + unsigned int reg, val; + void *tmp_buf; + + if (!map->num_reg_defaults_raw) + return -EINVAL; + + /* calculate the size of reg_defaults */ + for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) + if (regmap_readable(map, i * map->reg_stride) && + 
!regmap_volatile(map, i * map->reg_stride)) + count++; + + /* all registers are unreadable or volatile, so just bypass */ + if (!count) { + map->cache_bypass = true; + return 0; + } + + map->num_reg_defaults = count; + map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default), + GFP_KERNEL); + if (!map->reg_defaults) + return -ENOMEM; + + if (!map->reg_defaults_raw) { + bool cache_bypass = map->cache_bypass; + dev_warn(map->dev, "No cache defaults, reading back from HW\n"); + + /* Bypass the cache access till data read from HW */ + map->cache_bypass = true; + tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); + if (!tmp_buf) { + ret = -ENOMEM; + goto err_free; + } + ret = regmap_raw_read(map, 0, tmp_buf, + map->cache_size_raw); + map->cache_bypass = cache_bypass; + if (ret == 0) { + map->reg_defaults_raw = tmp_buf; + map->cache_free = 1; + } else { + kfree(tmp_buf); + } + } + + /* fill the reg_defaults */ + for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { + reg = i * map->reg_stride; + + if (!regmap_readable(map, reg)) + continue; + + if (regmap_volatile(map, reg)) + continue; + + if (map->reg_defaults_raw) { + val = regcache_get_val(map, map->reg_defaults_raw, i); + } else { + bool cache_bypass = map->cache_bypass; + + map->cache_bypass = true; + ret = regmap_read(map, reg, &val); + map->cache_bypass = cache_bypass; + if (ret != 0) { + dev_err(map->dev, "Failed to read %d: %d\n", + reg, ret); + goto err_free; + } + } + + map->reg_defaults[j].reg = reg; + map->reg_defaults[j].def = val; + j++; + } + + return 0; + +err_free: + kfree(map->reg_defaults); + + return ret; +} + +int regcache_init(struct regmap *map, const struct regmap_config *config) +{ + int ret; + int i; + void *tmp_buf; + + if (map->cache_type == REGCACHE_NONE) { + if (config->reg_defaults || config->num_reg_defaults_raw) + dev_warn(map->dev, + "No cache used with register defaults set!\n"); + + map->cache_bypass = true; + return 0; + } + + if (config->reg_defaults && !config->num_reg_defaults) { + dev_err(map->dev, + "Register defaults are set without the number!\n"); + return -EINVAL; + } + + for (i = 0; i < config->num_reg_defaults; i++) + if (config->reg_defaults[i].reg % map->reg_stride) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(cache_types); i++) + if (cache_types[i]->type == map->cache_type) + break; + + if (i == ARRAY_SIZE(cache_types)) { + dev_err(map->dev, "Could not match compress type: %d\n", + map->cache_type); + return -EINVAL; + } + + map->num_reg_defaults = config->num_reg_defaults; + map->num_reg_defaults_raw = config->num_reg_defaults_raw; + map->reg_defaults_raw = config->reg_defaults_raw; + map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); + map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; + + map->cache = NULL; + map->cache_ops = cache_types[i]; + + if (!map->cache_ops->read || + !map->cache_ops->write || + !map->cache_ops->name) + return -EINVAL; + + /* We still need to ensure that the reg_defaults + * won't vanish from under us. We'll need to make + * a copy of it. + */ + if (config->reg_defaults) { + tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * + sizeof(struct reg_default), GFP_KERNEL); + if (!tmp_buf) + return -ENOMEM; + map->reg_defaults = tmp_buf; + } else if (map->num_reg_defaults_raw) { + /* Some devices such as PMICs don't have cache defaults, + * we cope with this by reading back the HW registers and + * crafting the cache defaults by hand. 
+ */ + ret = regcache_hw_init(map); + if (ret < 0) + return ret; + if (map->cache_bypass) + return 0; + } + + if (!map->max_register) + map->max_register = map->num_reg_defaults_raw; + + if (map->cache_ops->init) { + dev_dbg(map->dev, "Initializing %s cache\n", + map->cache_ops->name); + ret = map->cache_ops->init(map); + if (ret) + goto err_free; + } + return 0; + +err_free: + kfree(map->reg_defaults); + if (map->cache_free) + kfree(map->reg_defaults_raw); + + return ret; +} + +void regcache_exit(struct regmap *map) +{ + if (map->cache_type == REGCACHE_NONE) + return; + + BUG_ON(!map->cache_ops); + + kfree(map->reg_defaults); + if (map->cache_free) + kfree(map->reg_defaults_raw); + + if (map->cache_ops->exit) { + dev_dbg(map->dev, "Destroying %s cache\n", + map->cache_ops->name); + map->cache_ops->exit(map); + } +} + +/** + * regcache_read - Fetch the value of a given register from the cache. + * + * @map: map to configure. + * @reg: The register index. + * @value: The value to be returned. + * + * Return a negative value on failure, 0 on success. + */ +int regcache_read(struct regmap *map, + unsigned int reg, unsigned int *value) +{ + int ret; + + if (map->cache_type == REGCACHE_NONE) + return -ENOSYS; + + BUG_ON(!map->cache_ops); + + if (!regmap_volatile(map, reg)) { + ret = map->cache_ops->read(map, reg, value); + + if (ret == 0) + trace_regmap_reg_read_cache(map, reg, *value); + + return ret; + } + + return -EINVAL; +} + +/** + * regcache_write - Set the value of a given register in the cache. + * + * @map: map to configure. + * @reg: The register index. + * @value: The new register value. + * + * Return a negative value on failure, 0 on success. + */ +int regcache_write(struct regmap *map, + unsigned int reg, unsigned int value) +{ + if (map->cache_type == REGCACHE_NONE) + return 0; + + BUG_ON(!map->cache_ops); + + if (!regmap_volatile(map, reg)) + return map->cache_ops->write(map, reg, value); + + return 0; +} + +static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, + unsigned int val) +{ + int ret; + + /* If we don't know the chip just got reset, then sync everything. */ + if (!map->no_sync_defaults) + return true; + + /* Is this the hardware default? If so skip. */ + ret = regcache_lookup_reg(map, reg); + if (ret >= 0 && val == map->reg_defaults[ret].def) + return false; + return true; +} + +static int regcache_default_sync(struct regmap *map, unsigned int min, + unsigned int max) +{ + unsigned int reg; + + for (reg = min; reg <= max; reg += map->reg_stride) { + unsigned int val; + int ret; + + if (regmap_volatile(map, reg) || + !regmap_writeable(map, reg)) + continue; + + ret = regcache_read(map, reg, &val); + if (ret) + return ret; + + if (!regcache_reg_needs_sync(map, reg, val)) + continue; + + map->cache_bypass = true; + ret = _regmap_write(map, reg, val); + map->cache_bypass = false; + if (ret) { + dev_err(map->dev, "Unable to sync register %#x. %d\n", + reg, ret); + return ret; + } + dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); + } + + return 0; +} + +/** + * regcache_sync - Sync the register cache with the hardware. + * + * @map: map to configure. + * + * Any registers that should not be synced should be marked as + * volatile. In general drivers can choose not to use the provided + * syncing functionality if they so require. + * + * Return a negative value on failure, 0 on success. 
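+ *
+ * Illustrative usage (a minimal sketch, not taken from a real driver):
+ * around a power-down a driver would typically switch to cache-only
+ * mode, mark the cache dirty, and replay it once the device is back:
+ *
+ *   regcache_cache_only(map, true);
+ *   regcache_mark_dirty(map);
+ *   ... device powered off and on again ...
+ *   regcache_cache_only(map, false);
+ *   ret = regcache_sync(map);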
+ */ +int regcache_sync(struct regmap *map) +{ + int ret = 0; + unsigned int i; + const char *name; + bool bypass; + + if (WARN_ON(map->cache_type == REGCACHE_NONE)) + return -EINVAL; + + BUG_ON(!map->cache_ops); + + map->lock(map->lock_arg); + /* Remember the initial bypass state */ + bypass = map->cache_bypass; + dev_dbg(map->dev, "Syncing %s cache\n", + map->cache_ops->name); + name = map->cache_ops->name; + trace_regcache_sync(map, name, "start"); + + if (!map->cache_dirty) + goto out; + + map->async = true; + + /* Apply any patch first */ + map->cache_bypass = true; + for (i = 0; i < map->patch_regs; i++) { + ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); + if (ret != 0) { + dev_err(map->dev, "Failed to write %x = %x: %d\n", + map->patch[i].reg, map->patch[i].def, ret); + goto out; + } + } + map->cache_bypass = false; + + if (map->cache_ops->sync) + ret = map->cache_ops->sync(map, 0, map->max_register); + else + ret = regcache_default_sync(map, 0, map->max_register); + + if (ret == 0) + map->cache_dirty = false; + +out: + /* Restore the bypass state */ + map->async = false; + map->cache_bypass = bypass; + map->no_sync_defaults = false; + map->unlock(map->lock_arg); + + regmap_async_complete(map); + + trace_regcache_sync(map, name, "stop"); + + return ret; +} +EXPORT_SYMBOL_GPL(regcache_sync); + +/** + * regcache_sync_region - Sync part of the register cache with the hardware. + * + * @map: map to sync. + * @min: first register to sync + * @max: last register to sync + * + * Write all non-default register values in the specified region to + * the hardware. + * + * Return a negative value on failure, 0 on success. + */ +int regcache_sync_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + int ret = 0; + const char *name; + bool bypass; + + if (WARN_ON(map->cache_type == REGCACHE_NONE)) + return -EINVAL; + + BUG_ON(!map->cache_ops); + + map->lock(map->lock_arg); + + /* Remember the initial bypass state */ + bypass = map->cache_bypass; + + name = map->cache_ops->name; + dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); + + trace_regcache_sync(map, name, "start region"); + + if (!map->cache_dirty) + goto out; + + map->async = true; + + if (map->cache_ops->sync) + ret = map->cache_ops->sync(map, min, max); + else + ret = regcache_default_sync(map, min, max); + +out: + /* Restore the bypass state */ + map->cache_bypass = bypass; + map->async = false; + map->no_sync_defaults = false; + map->unlock(map->lock_arg); + + regmap_async_complete(map); + + trace_regcache_sync(map, name, "stop region"); + + return ret; +} +EXPORT_SYMBOL_GPL(regcache_sync_region); + +/** + * regcache_drop_region - Discard part of the register cache + * + * @map: map to operate on + * @min: first register to discard + * @max: last register to discard + * + * Discard part of the register cache. + * + * Return a negative value on failure, 0 on success. 
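+ *
+ * Illustrative example (register numbers are hypothetical): discard a
+ * block of cached coefficient registers so a later sync will not write
+ * stale values back to the hardware:
+ *
+ *   ret = regcache_drop_region(map, 0x40, 0x4f);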
+ */ +int regcache_drop_region(struct regmap *map, unsigned int min, + unsigned int max) +{ + int ret = 0; + + if (!map->cache_ops || !map->cache_ops->drop) + return -EINVAL; + + map->lock(map->lock_arg); + + trace_regcache_drop_region(map, min, max); + + ret = map->cache_ops->drop(map, min, max); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regcache_drop_region); + +/** + * regcache_cache_only - Put a register map into cache only mode + * + * @map: map to configure + * @enable: flag if changes should be written to the hardware + * + * When a register map is marked as cache only writes to the register + * map API will only update the register cache, they will not cause + * any hardware changes. This is useful for allowing portions of + * drivers to act as though the device were functioning as normal when + * it is disabled for power saving reasons. + */ +void regcache_cache_only(struct regmap *map, bool enable) +{ + map->lock(map->lock_arg); + WARN_ON(map->cache_bypass && enable); + map->cache_only = enable; + trace_regmap_cache_only(map, enable); + map->unlock(map->lock_arg); +} +EXPORT_SYMBOL_GPL(regcache_cache_only); + +/** + * regcache_mark_dirty - Indicate that HW registers were reset to default values + * + * @map: map to mark + * + * Inform regcache that the device has been powered down or reset, so that + * on resume, regcache_sync() knows to write out all non-default values + * stored in the cache. + * + * If this function is not called, regcache_sync() will assume that + * the hardware state still matches the cache state, modulo any writes that + * happened when cache_only was true. + */ +void regcache_mark_dirty(struct regmap *map) +{ + map->lock(map->lock_arg); + map->cache_dirty = true; + map->no_sync_defaults = true; + map->unlock(map->lock_arg); +} +EXPORT_SYMBOL_GPL(regcache_mark_dirty); + +/** + * regcache_cache_bypass - Put a register map into cache bypass mode + * + * @map: map to configure + * @enable: flag if changes should not be written to the cache + * + * When a register map is marked with the cache bypass option, writes + * to the register map API will only update the hardware and not the + * the cache directly. This is useful when syncing the cache back to + * the hardware. 
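+ *
+ * Illustrative sketch (REG_SOFT_RESET is a hypothetical register name):
+ * write straight to the hardware without disturbing the cached value,
+ * for example when issuing a self-clearing reset command:
+ *
+ *   regcache_cache_bypass(map, true);
+ *   regmap_write(map, REG_SOFT_RESET, 0x1);
+ *   regcache_cache_bypass(map, false);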
+ */ +void regcache_cache_bypass(struct regmap *map, bool enable) +{ + map->lock(map->lock_arg); + WARN_ON(map->cache_only && enable); + map->cache_bypass = enable; + trace_regmap_cache_bypass(map, enable); + map->unlock(map->lock_arg); +} +EXPORT_SYMBOL_GPL(regcache_cache_bypass); + +bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, + unsigned int val) +{ + if (regcache_get_val(map, base, idx) == val) + return true; + + /* Use device native format if possible */ + if (map->format.format_val) { + map->format.format_val(base + (map->cache_word_size * idx), + val, 0); + return false; + } + + switch (map->cache_word_size) { + case 1: { + u8 *cache = base; + + cache[idx] = val; + break; + } + case 2: { + u16 *cache = base; + + cache[idx] = val; + break; + } + case 4: { + u32 *cache = base; + + cache[idx] = val; + break; + } +#ifdef CONFIG_64BIT + case 8: { + u64 *cache = base; + + cache[idx] = val; + break; + } +#endif + default: + BUG(); + } + return false; +} + +unsigned int regcache_get_val(struct regmap *map, const void *base, + unsigned int idx) +{ + if (!base) + return -EINVAL; + + /* Use device native format if possible */ + if (map->format.parse_val) + return map->format.parse_val(regcache_get_val_addr(map, base, + idx)); + + switch (map->cache_word_size) { + case 1: { + const u8 *cache = base; + + return cache[idx]; + } + case 2: { + const u16 *cache = base; + + return cache[idx]; + } + case 4: { + const u32 *cache = base; + + return cache[idx]; + } +#ifdef CONFIG_64BIT + case 8: { + const u64 *cache = base; + + return cache[idx]; + } +#endif + default: + BUG(); + } + /* unreachable */ + return -1; +} + +static int regcache_default_cmp(const void *a, const void *b) +{ + const struct reg_default *_a = a; + const struct reg_default *_b = b; + + return _a->reg - _b->reg; +} + +int regcache_lookup_reg(struct regmap *map, unsigned int reg) +{ + struct reg_default key; + struct reg_default *r; + + key.reg = reg; + key.def = 0; + + r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, + sizeof(struct reg_default), regcache_default_cmp); + + if (r) + return r - map->reg_defaults; + else + return -ENOENT; +} + +static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) +{ + if (!cache_present) + return true; + + return test_bit(idx, cache_present); +} + +static int regcache_sync_block_single(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, + unsigned int start, unsigned int end) +{ + unsigned int i, regtmp, val; + int ret; + + for (i = start; i < end; i++) { + regtmp = block_base + (i * map->reg_stride); + + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) + continue; + + val = regcache_get_val(map, block, i); + if (!regcache_reg_needs_sync(map, regtmp, val)) + continue; + + map->cache_bypass = true; + + ret = _regmap_write(map, regtmp, val); + + map->cache_bypass = false; + if (ret != 0) { + dev_err(map->dev, "Unable to sync register %#x. 
%d\n", + regtmp, ret); + return ret; + } + dev_dbg(map->dev, "Synced register %#x, value %#x\n", + regtmp, val); + } + + return 0; +} + +static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, + unsigned int base, unsigned int cur) +{ + size_t val_bytes = map->format.val_bytes; + int ret, count; + + if (*data == NULL) + return 0; + + count = (cur - base) / map->reg_stride; + + dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", + count * val_bytes, count, base, cur - map->reg_stride); + + map->cache_bypass = true; + + ret = _regmap_raw_write(map, base, *data, count * val_bytes, false); + if (ret) + dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", + base, cur - map->reg_stride, ret); + + map->cache_bypass = false; + + *data = NULL; + + return ret; +} + +static int regcache_sync_block_raw(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end) +{ + unsigned int i, val; + unsigned int regtmp = 0; + unsigned int base = 0; + const void *data = NULL; + int ret; + + for (i = start; i < end; i++) { + regtmp = block_base + (i * map->reg_stride); + + if (!regcache_reg_present(cache_present, i) || + !regmap_writeable(map, regtmp)) { + ret = regcache_sync_block_raw_flush(map, &data, + base, regtmp); + if (ret != 0) + return ret; + continue; + } + + val = regcache_get_val(map, block, i); + if (!regcache_reg_needs_sync(map, regtmp, val)) { + ret = regcache_sync_block_raw_flush(map, &data, + base, regtmp); + if (ret != 0) + return ret; + continue; + } + + if (!data) { + data = regcache_get_val_addr(map, block, i); + base = regtmp; + } + } + + return regcache_sync_block_raw_flush(map, &data, base, regtmp + + map->reg_stride); +} + +int regcache_sync_block(struct regmap *map, void *block, + unsigned long *cache_present, + unsigned int block_base, unsigned int start, + unsigned int end) +{ + if (regmap_can_raw_write(map) && !map->use_single_write) + return regcache_sync_block_raw(map, block, cache_present, + block_base, start, end); + else + return regcache_sync_block_single(map, block, cache_present, + block_base, start, end); +} diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c new file mode 100644 index 000000000..b9f76bdf7 --- /dev/null +++ b/drivers/base/regmap/regmap-ac97.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - AC'97 support +// +// Copyright 2013 Linaro Ltd. All rights reserved. 
+ +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include <sound/ac97_codec.h> + +bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case AC97_RESET: + case AC97_POWERDOWN: + case AC97_INT_PAGING: + case AC97_EXTENDED_ID: + case AC97_EXTENDED_STATUS: + case AC97_EXTENDED_MID: + case AC97_EXTENDED_MSTATUS: + case AC97_GPIO_STATUS: + case AC97_MISC_AFE: + case AC97_VENDOR_ID1: + case AC97_VENDOR_ID2: + case AC97_CODEC_CLASS_REV: + case AC97_PCI_SVID: + case AC97_PCI_SID: + case AC97_FUNC_SELECT: + case AC97_FUNC_INFO: + case AC97_SENSE_INFO: + return true; + default: + return false; + } +} +EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile); + +static int regmap_ac97_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct snd_ac97 *ac97 = context; + + *val = ac97->bus->ops->read(ac97, reg); + + return 0; +} + +static int regmap_ac97_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct snd_ac97 *ac97 = context; + + ac97->bus->ops->write(ac97, reg, val); + + return 0; +} + +static const struct regmap_bus ac97_regmap_bus = { + .reg_write = regmap_ac97_reg_write, + .reg_read = regmap_ac97_reg_read, +}; + +struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_ac97); + +struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c new file mode 100644 index 000000000..ed54dc31e --- /dev/null +++ b/drivers/base/regmap/regmap-debugfs.c @@ -0,0 +1,692 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - debugfs +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/device.h> +#include <linux/list.h> + +#include "internal.h" + +struct regmap_debugfs_node { + struct regmap *map; + struct list_head link; +}; + +static unsigned int dummy_index; +static struct dentry *regmap_debugfs_root; +static LIST_HEAD(regmap_debugfs_early_list); +static DEFINE_MUTEX(regmap_debugfs_early_lock); + +/* Calculate the length of a fixed format */ +static size_t regmap_calc_reg_len(int max_val) +{ + return snprintf(NULL, 0, "%x", max_val); +} + +static ssize_t regmap_name_read_file(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct regmap *map = file->private_data; + const char *name = "nodev"; + int ret; + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (map->dev && map->dev->driver) + name = map->dev->driver->name; + + ret = snprintf(buf, PAGE_SIZE, "%s\n", name); + if (ret >= PAGE_SIZE) { + kfree(buf); + return ret; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); + kfree(buf); + return ret; +} + +static const struct 
file_operations regmap_name_fops = { + .open = simple_open, + .read = regmap_name_read_file, + .llseek = default_llseek, +}; + +static void regmap_debugfs_free_dump_cache(struct regmap *map) +{ + struct regmap_debugfs_off_cache *c; + + while (!list_empty(&map->debugfs_off_cache)) { + c = list_first_entry(&map->debugfs_off_cache, + struct regmap_debugfs_off_cache, + list); + list_del(&c->list); + kfree(c); + } +} + +static bool regmap_printable(struct regmap *map, unsigned int reg) +{ + if (regmap_precious(map, reg)) + return false; + + if (!regmap_readable(map, reg) && !regmap_cached(map, reg)) + return false; + + return true; +} + +/* + * Work out where the start offset maps into register numbers, bearing + * in mind that we suppress hidden registers. + */ +static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, + unsigned int base, + loff_t from, + loff_t *pos) +{ + struct regmap_debugfs_off_cache *c = NULL; + loff_t p = 0; + unsigned int i, ret; + unsigned int fpos_offset; + unsigned int reg_offset; + + /* Suppress the cache if we're using a subrange */ + if (base) + return base; + + /* + * If we don't have a cache build one so we don't have to do a + * linear scan each time. + */ + mutex_lock(&map->cache_lock); + i = base; + if (list_empty(&map->debugfs_off_cache)) { + for (; i <= map->max_register; i += map->reg_stride) { + /* Skip unprinted registers, closing off cache entry */ + if (!regmap_printable(map, i)) { + if (c) { + c->max = p - 1; + c->max_reg = i - map->reg_stride; + list_add_tail(&c->list, + &map->debugfs_off_cache); + c = NULL; + } + + continue; + } + + /* No cache entry? Start a new one */ + if (!c) { + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) { + regmap_debugfs_free_dump_cache(map); + mutex_unlock(&map->cache_lock); + return base; + } + c->min = p; + c->base_reg = i; + } + + p += map->debugfs_tot_len; + } + } + + /* Close the last entry off if we didn't scan beyond it */ + if (c) { + c->max = p - 1; + c->max_reg = i - map->reg_stride; + list_add_tail(&c->list, + &map->debugfs_off_cache); + } + + /* + * This should never happen; we return above if we fail to + * allocate and we should never be in this code if there are + * no registers at all. 
+ */ + WARN_ON(list_empty(&map->debugfs_off_cache)); + ret = base; + + /* Find the relevant block:offset */ + list_for_each_entry(c, &map->debugfs_off_cache, list) { + if (from >= c->min && from <= c->max) { + fpos_offset = from - c->min; + reg_offset = fpos_offset / map->debugfs_tot_len; + *pos = c->min + (reg_offset * map->debugfs_tot_len); + mutex_unlock(&map->cache_lock); + return c->base_reg + (reg_offset * map->reg_stride); + } + + *pos = c->max; + ret = c->max_reg; + } + mutex_unlock(&map->cache_lock); + + return ret; +} + +static inline void regmap_calc_tot_len(struct regmap *map, + void *buf, size_t count) +{ + /* Calculate the length of a fixed format */ + if (!map->debugfs_tot_len) { + map->debugfs_reg_len = regmap_calc_reg_len(map->max_register); + map->debugfs_val_len = 2 * map->format.val_bytes; + map->debugfs_tot_len = map->debugfs_reg_len + + map->debugfs_val_len + 3; /* : \n */ + } +} + +static int regmap_next_readable_reg(struct regmap *map, int reg) +{ + struct regmap_debugfs_off_cache *c; + int ret = -EINVAL; + + if (regmap_printable(map, reg + map->reg_stride)) { + ret = reg + map->reg_stride; + } else { + mutex_lock(&map->cache_lock); + list_for_each_entry(c, &map->debugfs_off_cache, list) { + if (reg > c->max_reg) + continue; + if (reg < c->base_reg) { + ret = c->base_reg; + break; + } + } + mutex_unlock(&map->cache_lock); + } + return ret; +} + +static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, + unsigned int to, char __user *user_buf, + size_t count, loff_t *ppos) +{ + size_t buf_pos = 0; + loff_t p = *ppos; + ssize_t ret; + int i; + char *buf; + unsigned int val, start_reg; + + if (*ppos < 0 || !count) + return -EINVAL; + + if (count > (PAGE_SIZE << (MAX_ORDER - 1))) + count = PAGE_SIZE << (MAX_ORDER - 1); + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + regmap_calc_tot_len(map, buf, count); + + /* Work out which register we're starting at */ + start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p); + + for (i = start_reg; i >= 0 && i <= to; + i = regmap_next_readable_reg(map, i)) { + + /* If we're in the region the user is trying to read */ + if (p >= *ppos) { + /* ...but not beyond it */ + if (buf_pos + map->debugfs_tot_len > count) + break; + + /* Format the register */ + snprintf(buf + buf_pos, count - buf_pos, "%.*x: ", + map->debugfs_reg_len, i - from); + buf_pos += map->debugfs_reg_len + 2; + + /* Format the value, write all X if we can't read */ + ret = regmap_read(map, i, &val); + if (ret == 0) + snprintf(buf + buf_pos, count - buf_pos, + "%.*x", map->debugfs_val_len, val); + else + memset(buf + buf_pos, 'X', + map->debugfs_val_len); + buf_pos += 2 * map->format.val_bytes; + + buf[buf_pos++] = '\n'; + } + p += map->debugfs_tot_len; + } + + ret = buf_pos; + + if (copy_to_user(user_buf, buf, buf_pos)) { + ret = -EFAULT; + goto out; + } + + *ppos += buf_pos; + +out: + kfree(buf); + return ret; +} + +static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct regmap *map = file->private_data; + + return regmap_read_debugfs(map, 0, map->max_register, user_buf, + count, ppos); +} + +#undef REGMAP_ALLOW_WRITE_DEBUGFS +#ifdef REGMAP_ALLOW_WRITE_DEBUGFS +/* + * This can be dangerous especially when we have clients such as + * PMICs, therefore don't provide any real compile time configuration option + * for this feature, people who want to use this will need to modify + * the source code directly. 
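+ *
+ * Illustrative only (not part of the original comment): to use it, turn
+ * the #undef above into a #define and rebuild; a register can then be
+ * poked from userspace with something like
+ *
+ *   echo "0x1b 0x40" > /sys/kernel/debug/regmap/<device>/registers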
+ */ +static ssize_t regmap_map_write_file(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[32]; + size_t buf_size; + char *start = buf; + unsigned long reg, value; + struct regmap *map = file->private_data; + int ret; + + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + while (*start == ' ') + start++; + reg = simple_strtoul(start, &start, 16); + while (*start == ' ') + start++; + if (kstrtoul(start, 16, &value)) + return -EINVAL; + + /* Userspace has been fiddling around behind the kernel's back */ + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + + ret = regmap_write(map, reg, value); + if (ret < 0) + return ret; + return buf_size; +} +#else +#define regmap_map_write_file NULL +#endif + +static const struct file_operations regmap_map_fops = { + .open = simple_open, + .read = regmap_map_read_file, + .write = regmap_map_write_file, + .llseek = default_llseek, +}; + +static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct regmap_range_node *range = file->private_data; + struct regmap *map = range->map; + + return regmap_read_debugfs(map, range->range_min, range->range_max, + user_buf, count, ppos); +} + +static const struct file_operations regmap_range_fops = { + .open = simple_open, + .read = regmap_range_read_file, + .llseek = default_llseek, +}; + +static ssize_t regmap_reg_ranges_read_file(struct file *file, + char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct regmap *map = file->private_data; + struct regmap_debugfs_off_cache *c; + loff_t p = 0; + size_t buf_pos = 0; + char *buf; + char *entry; + int ret; + unsigned entry_len; + + if (*ppos < 0 || !count) + return -EINVAL; + + if (count > (PAGE_SIZE << (MAX_ORDER - 1))) + count = PAGE_SIZE << (MAX_ORDER - 1); + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + entry = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!entry) { + kfree(buf); + return -ENOMEM; + } + + /* While we are at it, build the register dump cache + * now so the read() operation on the `registers' file + * can benefit from using the cache. 
We do not care + * about the file position information that is contained + * in the cache, just about the actual register blocks */ + regmap_calc_tot_len(map, buf, count); + regmap_debugfs_get_dump_start(map, 0, *ppos, &p); + + /* Reset file pointer as the fixed-format of the `registers' + * file is not compatible with the `range' file */ + p = 0; + mutex_lock(&map->cache_lock); + list_for_each_entry(c, &map->debugfs_off_cache, list) { + entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n", + c->base_reg, c->max_reg); + if (p >= *ppos) { + if (buf_pos + entry_len > count) + break; + memcpy(buf + buf_pos, entry, entry_len); + buf_pos += entry_len; + } + p += entry_len; + } + mutex_unlock(&map->cache_lock); + + kfree(entry); + ret = buf_pos; + + if (copy_to_user(user_buf, buf, buf_pos)) { + ret = -EFAULT; + goto out_buf; + } + + *ppos += buf_pos; +out_buf: + kfree(buf); + return ret; +} + +static const struct file_operations regmap_reg_ranges_fops = { + .open = simple_open, + .read = regmap_reg_ranges_read_file, + .llseek = default_llseek, +}; + +static int regmap_access_show(struct seq_file *s, void *ignored) +{ + struct regmap *map = s->private; + int i, reg_len; + + reg_len = regmap_calc_reg_len(map->max_register); + + for (i = 0; i <= map->max_register; i += map->reg_stride) { + /* Ignore registers which are neither readable nor writable */ + if (!regmap_readable(map, i) && !regmap_writeable(map, i)) + continue; + + /* Format the register */ + seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i, + regmap_readable(map, i) ? 'y' : 'n', + regmap_writeable(map, i) ? 'y' : 'n', + regmap_volatile(map, i) ? 'y' : 'n', + regmap_precious(map, i) ? 'y' : 'n'); + } + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(regmap_access); + +static ssize_t regmap_cache_only_write_file(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct regmap *map = container_of(file->private_data, + struct regmap, cache_only); + bool new_val, require_sync = false; + int err; + + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; + + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; + + map->lock(map->lock_arg); + + if (new_val && !map->cache_only) { + dev_warn(map->dev, "debugfs cache_only=Y forced\n"); + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + } else if (!new_val && map->cache_only) { + dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); + require_sync = true; + } + map->cache_only = new_val; + + map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); + + if (require_sync) { + err = regcache_sync(map); + if (err) + dev_err(map->dev, "Failed to sync cache %d\n", err); + } + + return count; +} + +static const struct file_operations regmap_cache_only_fops = { + .open = simple_open, + .read = debugfs_read_file_bool, + .write = regmap_cache_only_write_file, +}; + +static ssize_t regmap_cache_bypass_write_file(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct regmap *map = container_of(file->private_data, + struct regmap, cache_bypass); + bool new_val; + int err; + + err = kstrtobool_from_user(user_buf, count, &new_val); + /* Ignore malforned data like debugfs_write_file_bool() */ + if (err) + return count; + + err = debugfs_file_get(file->f_path.dentry); + if (err) + return err; + + map->lock(map->lock_arg); + + if (new_val && !map->cache_bypass) { + dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); + 
add_taint(TAINT_USER, LOCKDEP_STILL_OK); + } else if (!new_val && map->cache_bypass) { + dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); + } + map->cache_bypass = new_val; + + map->unlock(map->lock_arg); + debugfs_file_put(file->f_path.dentry); + + return count; +} + +static const struct file_operations regmap_cache_bypass_fops = { + .open = simple_open, + .read = debugfs_read_file_bool, + .write = regmap_cache_bypass_write_file, +}; + +void regmap_debugfs_init(struct regmap *map) +{ + struct rb_node *next; + struct regmap_range_node *range_node; + const char *devname = "dummy"; + const char *name = map->name; + + /* + * Userspace can initiate reads from the hardware over debugfs. + * Normally internal regmap structures and buffers are protected with + * a mutex or a spinlock, but if the regmap owner decided to disable + * all locking mechanisms, this is no longer the case. For safety: + * don't create the debugfs entries if locking is disabled. + */ + if (map->debugfs_disable) { + dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n"); + return; + } + + /* If we don't have the debugfs root yet, postpone init */ + if (!regmap_debugfs_root) { + struct regmap_debugfs_node *node; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return; + node->map = map; + mutex_lock(®map_debugfs_early_lock); + list_add(&node->link, ®map_debugfs_early_list); + mutex_unlock(®map_debugfs_early_lock); + return; + } + + INIT_LIST_HEAD(&map->debugfs_off_cache); + mutex_init(&map->cache_lock); + + if (map->dev) + devname = dev_name(map->dev); + + if (name) { + if (!map->debugfs_name) { + map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", + devname, name); + if (!map->debugfs_name) + return; + } + name = map->debugfs_name; + } else { + name = devname; + } + + if (!strcmp(name, "dummy")) { + kfree(map->debugfs_name); + map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", + dummy_index); + if (!map->debugfs_name) + return; + name = map->debugfs_name; + dummy_index++; + } + + map->debugfs = debugfs_create_dir(name, regmap_debugfs_root); + + debugfs_create_file("name", 0400, map->debugfs, + map, ®map_name_fops); + + debugfs_create_file("range", 0400, map->debugfs, + map, ®map_reg_ranges_fops); + + if (map->max_register || regmap_readable(map, 0)) { + umode_t registers_mode; + +#if defined(REGMAP_ALLOW_WRITE_DEBUGFS) + registers_mode = 0600; +#else + registers_mode = 0400; +#endif + + debugfs_create_file("registers", registers_mode, map->debugfs, + map, ®map_map_fops); + debugfs_create_file("access", 0400, map->debugfs, + map, ®map_access_fops); + } + + if (map->cache_type) { + debugfs_create_file("cache_only", 0600, map->debugfs, + &map->cache_only, ®map_cache_only_fops); + debugfs_create_bool("cache_dirty", 0400, map->debugfs, + &map->cache_dirty); + debugfs_create_file("cache_bypass", 0600, map->debugfs, + &map->cache_bypass, + ®map_cache_bypass_fops); + } + + next = rb_first(&map->range_tree); + while (next) { + range_node = rb_entry(next, struct regmap_range_node, node); + + if (range_node->name) + debugfs_create_file(range_node->name, 0400, + map->debugfs, range_node, + ®map_range_fops); + + next = rb_next(&range_node->node); + } + + if (map->cache_ops && map->cache_ops->debugfs_init) + map->cache_ops->debugfs_init(map); +} + +void regmap_debugfs_exit(struct regmap *map) +{ + if (map->debugfs) { + debugfs_remove_recursive(map->debugfs); + mutex_lock(&map->cache_lock); + regmap_debugfs_free_dump_cache(map); + mutex_unlock(&map->cache_lock); + kfree(map->debugfs_name); + 
map->debugfs_name = NULL; + } else { + struct regmap_debugfs_node *node, *tmp; + + mutex_lock(®map_debugfs_early_lock); + list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, + link) { + if (node->map == map) { + list_del(&node->link); + kfree(node); + } + } + mutex_unlock(®map_debugfs_early_lock); + } +} + +void regmap_debugfs_initcall(void) +{ + struct regmap_debugfs_node *node, *tmp; + + regmap_debugfs_root = debugfs_create_dir("regmap", NULL); + + mutex_lock(®map_debugfs_early_lock); + list_for_each_entry_safe(node, tmp, ®map_debugfs_early_list, link) { + regmap_debugfs_init(node->map); + list_del(&node->link); + kfree(node); + } + mutex_unlock(®map_debugfs_early_lock); +} diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c new file mode 100644 index 000000000..051c10e73 --- /dev/null +++ b/drivers/base/regmap/regmap-i2c.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - I2C support +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/regmap.h> +#include <linux/i2c.h> +#include <linux/module.h> + +#include "internal.h" + +static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg > 0xff) + return -EINVAL; + + ret = i2c_smbus_read_byte_data(i2c, reg); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (val > 0xff || reg > 0xff) + return -EINVAL; + + return i2c_smbus_write_byte_data(i2c, reg, val); +} + +static const struct regmap_bus regmap_smbus_byte = { + .reg_write = regmap_smbus_byte_reg_write, + .reg_read = regmap_smbus_byte_reg_read, +}; + +static int regmap_smbus_word_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg > 0xff) + return -EINVAL; + + ret = i2c_smbus_read_word_data(i2c, reg); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int regmap_smbus_word_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (val > 0xffff || reg > 0xff) + return -EINVAL; + + return i2c_smbus_write_word_data(i2c, reg, val); +} + +static const struct regmap_bus regmap_smbus_word = { + .reg_write = regmap_smbus_word_reg_write, + .reg_read = regmap_smbus_word_reg_read, +}; + +static int regmap_smbus_word_read_swapped(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg > 0xff) + return -EINVAL; + + ret = i2c_smbus_read_word_swapped(i2c, reg); + if (ret < 0) + return ret; + + *val = ret; + + return 0; +} + +static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (val > 0xffff || reg > 0xff) + return -EINVAL; + + return i2c_smbus_write_word_swapped(i2c, reg, val); +} + +static const struct regmap_bus regmap_smbus_word_swapped = { + .reg_write = regmap_smbus_word_write_swapped, + .reg_read = regmap_smbus_word_read_swapped, +}; + +static int 
regmap_i2c_write(void *context, const void *data, size_t count) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + ret = i2c_master_send(i2c, data, count); + if (ret == count) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static int regmap_i2c_gather_write(void *context, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + struct i2c_msg xfer[2]; + int ret; + + /* If the I2C controller can't do a gather tell the core, it + * will substitute in a linear write for us. + */ + if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART)) + return -ENOTSUPP; + + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = reg_size; + xfer[0].buf = (void *)reg; + + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_NOSTART; + xfer[1].len = val_size; + xfer[1].buf = (void *)val; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret == 2) + return 0; + if (ret < 0) + return ret; + else + return -EIO; +} + +static int regmap_i2c_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + struct i2c_msg xfer[2]; + int ret; + + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = reg_size; + xfer[0].buf = (void *)reg; + + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = val_size; + xfer[1].buf = val; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret == 2) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static const struct regmap_bus regmap_i2c = { + .write = regmap_i2c_write, + .gather_write = regmap_i2c_gather_write, + .read = regmap_i2c_read, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, +}; + +static int regmap_i2c_smbus_i2c_write(void *context, const void *data, + size_t count) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (count < 1) + return -EINVAL; + + --count; + return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count, + ((u8 *)data + 1)); +} + +static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, + size_t reg_size, void *val, + size_t val_size) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + + if (reg_size != 1 || val_size < 1) + return -EINVAL; + + ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val); + if (ret == val_size) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static const struct regmap_bus regmap_i2c_smbus_i2c_block = { + .write = regmap_i2c_smbus_i2c_write, + .read = regmap_i2c_smbus_i2c_read, + .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1, + .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1, +}; + +static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data, + size_t count) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + if (count < 2) + return -EINVAL; + + count--; + return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count, + (u8 *)data + 1); +} + +static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg, + size_t reg_size, void *val, + size_t val_size) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret, count, len = val_size; + + if (reg_size != 2) + return -EINVAL; + + ret = i2c_smbus_write_byte_data(i2c, 
((u16 *)reg)[0] & 0xff, + ((u16 *)reg)[0] >> 8); + if (ret < 0) + return ret; + + count = 0; + do { + /* Current Address Read */ + ret = i2c_smbus_read_byte(i2c); + if (ret < 0) + break; + + *((u8 *)val++) = ret; + count++; + len--; + } while (len > 0); + + if (count == val_size) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = { + .write = regmap_i2c_smbus_i2c_write_reg16, + .read = regmap_i2c_smbus_i2c_read_reg16, + .max_raw_read = I2C_SMBUS_BLOCK_MAX - 2, + .max_raw_write = I2C_SMBUS_BLOCK_MAX - 2, +}; + +static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, + const struct regmap_config *config) +{ + if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) + return ®map_i2c; + else if (config->val_bits == 8 && config->reg_bits == 8 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK)) + return ®map_i2c_smbus_i2c_block; + else if (config->val_bits == 8 && config->reg_bits == 16 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_I2C_BLOCK)) + return ®map_i2c_smbus_i2c_block_reg16; + else if (config->val_bits == 16 && config->reg_bits == 8 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) + switch (regmap_get_val_endian(&i2c->dev, NULL, config)) { + case REGMAP_ENDIAN_LITTLE: + return ®map_smbus_word; + case REGMAP_ENDIAN_BIG: + return ®map_smbus_word_swapped; + default: /* everything else is not supported */ + break; + } + else if (config->val_bits == 8 && config->reg_bits == 8 && + i2c_check_functionality(i2c->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) + return ®map_smbus_byte; + + return ERR_PTR(-ENOTSUPP); +} + +struct regmap *__regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __regmap_init(&i2c->dev, bus, &i2c->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_i2c); + +struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c new file mode 100644 index 000000000..1578fb506 --- /dev/null +++ b/drivers/base/regmap/regmap-i3c.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 
+ +#include <linux/regmap.h> +#include <linux/i3c/device.h> +#include <linux/i3c/master.h> +#include <linux/module.h> + +static int regmap_i3c_write(void *context, const void *data, size_t count) +{ + struct device *dev = context; + struct i3c_device *i3c = dev_to_i3cdev(dev); + struct i3c_priv_xfer xfers[] = { + { + .rnw = false, + .len = count, + .data.out = data, + }, + }; + + return i3c_device_do_priv_xfers(i3c, xfers, 1); +} + +static int regmap_i3c_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct i3c_device *i3c = dev_to_i3cdev(dev); + struct i3c_priv_xfer xfers[2]; + + xfers[0].rnw = false; + xfers[0].len = reg_size; + xfers[0].data.out = reg; + + xfers[1].rnw = true; + xfers[1].len = val_size; + xfers[1].data.in = val; + + return i3c_device_do_priv_xfers(i3c, xfers, 2); +} + +static struct regmap_bus regmap_i3c = { + .write = regmap_i3c_write, + .read = regmap_i3c_read, +}; + +struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __devm_regmap_init(&i3c->dev, ®map_i3c, &i3c->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c); + +MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>"); +MODULE_DESCRIPTION("Regmap I3C Module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c new file mode 100644 index 000000000..4466f8bda --- /dev/null +++ b/drivers/base/regmap/regmap-irq.c @@ -0,0 +1,1073 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// regmap based irq_chip +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/device.h> +#include <linux/export.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include "internal.h" + +struct regmap_irq_chip_data { + struct mutex lock; + struct irq_chip irq_chip; + + struct regmap *map; + const struct regmap_irq_chip *chip; + + int irq_base; + struct irq_domain *domain; + + int irq; + int wake_count; + + void *status_reg_buf; + unsigned int *main_status_buf; + unsigned int *status_buf; + unsigned int *mask_buf; + unsigned int *mask_buf_def; + unsigned int *wake_buf; + unsigned int *type_buf; + unsigned int *type_buf_def; + + unsigned int irq_reg_stride; + unsigned int type_reg_stride; + + bool clear_status:1; +}; + +static inline const +struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, + int irq) +{ + return &data->chip->irqs[irq]; +} + +static void regmap_irq_lock(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + + mutex_lock(&d->lock); +} + +static int regmap_irq_update_bits(struct regmap_irq_chip_data *d, + unsigned int reg, unsigned int mask, + unsigned int val) +{ + if (d->chip->mask_writeonly) + return regmap_write_bits(d->map, reg, mask, val); + else + return regmap_update_bits(d->map, reg, mask, val); +} + +static void regmap_irq_sync_unlock(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + int i, ret; + u32 reg; + u32 unmask_offset; + u32 val; + + if (d->chip->runtime_pm) { + ret = pm_runtime_get_sync(map->dev); + if (ret < 0) + dev_err(map->dev, "IRQ sync failed to resume: %d\n", + ret); + } + + if 
(d->clear_status) { + for (i = 0; i < d->chip->num_regs; i++) { + reg = d->chip->status_base + + (i * map->reg_stride * d->irq_reg_stride); + + ret = regmap_read(map, reg, &val); + if (ret) + dev_err(d->map->dev, + "Failed to clear the interrupt status bits\n"); + } + + d->clear_status = false; + } + + /* + * If there's been a change in the mask write it back to the + * hardware. We rely on the use of the regmap core cache to + * suppress pointless writes. + */ + for (i = 0; i < d->chip->num_regs; i++) { + if (!d->chip->mask_base) + continue; + + reg = d->chip->mask_base + + (i * map->reg_stride * d->irq_reg_stride); + if (d->chip->mask_invert) { + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], ~d->mask_buf[i]); + } else if (d->chip->unmask_base) { + /* set mask with mask_base register */ + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], ~d->mask_buf[i]); + if (ret < 0) + dev_err(d->map->dev, + "Failed to sync unmasks in %x\n", + reg); + unmask_offset = d->chip->unmask_base - + d->chip->mask_base; + /* clear mask with unmask_base register */ + ret = regmap_irq_update_bits(d, + reg + unmask_offset, + d->mask_buf_def[i], + d->mask_buf[i]); + } else { + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], d->mask_buf[i]); + } + if (ret != 0) + dev_err(d->map->dev, "Failed to sync masks in %x\n", + reg); + + reg = d->chip->wake_base + + (i * map->reg_stride * d->irq_reg_stride); + if (d->wake_buf) { + if (d->chip->wake_invert) + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], + ~d->wake_buf[i]); + else + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], + d->wake_buf[i]); + if (ret != 0) + dev_err(d->map->dev, + "Failed to sync wakes in %x: %d\n", + reg, ret); + } + + if (!d->chip->init_ack_masked) + continue; + /* + * Ack all the masked interrupts unconditionally, + * OR if there is masked interrupt which hasn't been Acked, + * it'll be ignored in irq handler, then may introduce irq storm + */ + if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) { + reg = d->chip->ack_base + + (i * map->reg_stride * d->irq_reg_stride); + /* some chips ack by write 0 */ + if (d->chip->ack_invert) + ret = regmap_write(map, reg, ~d->mask_buf[i]); + else + ret = regmap_write(map, reg, d->mask_buf[i]); + if (d->chip->clear_ack) { + if (d->chip->ack_invert && !ret) + ret = regmap_write(map, reg, UINT_MAX); + else if (!ret) + ret = regmap_write(map, reg, 0); + } + if (ret != 0) + dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", + reg, ret); + } + } + + /* Don't update the type bits if we're using mask bits for irq type. 
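+ * With type_in_mask the edge selection has already been folded into the
+ * ordinary mask registers by regmap_irq_enable(), so there is no
+ * separate type register to write here.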
*/ + if (!d->chip->type_in_mask) { + for (i = 0; i < d->chip->num_type_reg; i++) { + if (!d->type_buf_def[i]) + continue; + reg = d->chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + if (d->chip->type_invert) + ret = regmap_irq_update_bits(d, reg, + d->type_buf_def[i], ~d->type_buf[i]); + else + ret = regmap_irq_update_bits(d, reg, + d->type_buf_def[i], d->type_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to sync type in %x\n", + reg); + } + } + + if (d->chip->runtime_pm) + pm_runtime_put(map->dev); + + /* If we've changed our wakeup count propagate it to the parent */ + if (d->wake_count < 0) + for (i = d->wake_count; i < 0; i++) + irq_set_irq_wake(d->irq, 0); + else if (d->wake_count > 0) + for (i = 0; i < d->wake_count; i++) + irq_set_irq_wake(d->irq, 1); + + d->wake_count = 0; + + mutex_unlock(&d->lock); +} + +static void regmap_irq_enable(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + unsigned int reg = irq_data->reg_offset / map->reg_stride; + unsigned int mask, type; + + type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; + + /* + * The type_in_mask flag means that the underlying hardware uses + * separate mask bits for rising and falling edge interrupts, but + * we want to make them into a single virtual interrupt with + * configurable edge. + * + * If the interrupt we're enabling defines the falling or rising + * masks then instead of using the regular mask bits for this + * interrupt, use the value previously written to the type buffer + * at the corresponding offset in regmap_irq_set_type(). + */ + if (d->chip->type_in_mask && type) + mask = d->type_buf[reg] & irq_data->mask; + else + mask = irq_data->mask; + + if (d->chip->clear_on_unmask) + d->clear_status = true; + + d->mask_buf[reg] &= ~mask; +} + +static void regmap_irq_disable(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + + d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; +} + +static int regmap_irq_set_type(struct irq_data *data, unsigned int type) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + int reg; + const struct regmap_irq_type *t = &irq_data->type; + + if ((t->types_supported & type) != type) + return 0; + + reg = t->type_reg_offset / map->reg_stride; + + if (t->type_reg_mask) + d->type_buf[reg] &= ~t->type_reg_mask; + else + d->type_buf[reg] &= ~(t->type_falling_val | + t->type_rising_val | + t->type_level_low_val | + t->type_level_high_val); + switch (type) { + case IRQ_TYPE_EDGE_FALLING: + d->type_buf[reg] |= t->type_falling_val; + break; + + case IRQ_TYPE_EDGE_RISING: + d->type_buf[reg] |= t->type_rising_val; + break; + + case IRQ_TYPE_EDGE_BOTH: + d->type_buf[reg] |= (t->type_falling_val | + t->type_rising_val); + break; + + case IRQ_TYPE_LEVEL_HIGH: + d->type_buf[reg] |= t->type_level_high_val; + break; + + case IRQ_TYPE_LEVEL_LOW: + d->type_buf[reg] |= t->type_level_low_val; + break; + default: + return -EINVAL; + } + return 0; +} + +static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + struct regmap *map = d->map; + 
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + + if (on) { + if (d->wake_buf) + d->wake_buf[irq_data->reg_offset / map->reg_stride] + &= ~irq_data->mask; + d->wake_count++; + } else { + if (d->wake_buf) + d->wake_buf[irq_data->reg_offset / map->reg_stride] + |= irq_data->mask; + d->wake_count--; + } + + return 0; +} + +static const struct irq_chip regmap_irq_chip = { + .irq_bus_lock = regmap_irq_lock, + .irq_bus_sync_unlock = regmap_irq_sync_unlock, + .irq_disable = regmap_irq_disable, + .irq_enable = regmap_irq_enable, + .irq_set_type = regmap_irq_set_type, + .irq_set_wake = regmap_irq_set_wake, +}; + +static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, + unsigned int b) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + struct regmap_irq_sub_irq_map *subreg; + int i, ret = 0; + + if (!chip->sub_reg_offsets) { + /* Assume linear mapping */ + ret = regmap_read(map, chip->status_base + + (b * map->reg_stride * data->irq_reg_stride), + &data->status_buf[b]); + } else { + subreg = &chip->sub_reg_offsets[b]; + for (i = 0; i < subreg->num_regs; i++) { + unsigned int offset = subreg->offset[i]; + + ret = regmap_read(map, chip->status_base + offset, + &data->status_buf[offset]); + if (ret) + break; + } + } + return ret; +} + +static irqreturn_t regmap_irq_thread(int irq, void *d) +{ + struct regmap_irq_chip_data *data = d; + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + int ret, i; + bool handled = false; + u32 reg; + + if (chip->handle_pre_irq) + chip->handle_pre_irq(chip->irq_drv_data); + + if (chip->runtime_pm) { + ret = pm_runtime_get_sync(map->dev); + if (ret < 0) { + dev_err(map->dev, "IRQ thread failed to resume: %d\n", + ret); + goto exit; + } + } + + /* + * Read only registers with active IRQs if the chip has 'main status + * register'. Else read in the statuses, using a single bulk read if + * possible in order to reduce the I/O overheads. + */ + + if (chip->num_main_regs) { + unsigned int max_main_bits; + unsigned long size; + + size = chip->num_regs * sizeof(unsigned int); + + max_main_bits = (chip->num_main_status_bits) ? + chip->num_main_status_bits : chip->num_regs; + /* Clear the status buf as we don't read all status regs */ + memset(data->status_buf, 0, size); + + /* We could support bulk read for main status registers + * but I don't expect to see devices with really many main + * status registers so let's only support single reads for the + * sake of simplicity. 
and add bulk reads only if needed + */ + for (i = 0; i < chip->num_main_regs; i++) { + ret = regmap_read(map, chip->main_status + + (i * map->reg_stride + * data->irq_reg_stride), + &data->main_status_buf[i]); + if (ret) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + goto exit; + } + } + + /* Read sub registers with active IRQs */ + for (i = 0; i < chip->num_main_regs; i++) { + unsigned int b; + const unsigned long mreg = data->main_status_buf[i]; + + for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { + if (i * map->format.val_bytes * 8 + b > + max_main_bits) + break; + ret = read_sub_irq_data(data, b); + + if (ret != 0) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + goto exit; + } + } + + } + } else if (!map->use_single_read && map->reg_stride == 1 && + data->irq_reg_stride == 1) { + + u8 *buf8 = data->status_reg_buf; + u16 *buf16 = data->status_reg_buf; + u32 *buf32 = data->status_reg_buf; + + BUG_ON(!data->status_reg_buf); + + ret = regmap_bulk_read(map, chip->status_base, + data->status_reg_buf, + chip->num_regs); + if (ret != 0) { + dev_err(map->dev, "Failed to read IRQ status: %d\n", + ret); + goto exit; + } + + for (i = 0; i < data->chip->num_regs; i++) { + switch (map->format.val_bytes) { + case 1: + data->status_buf[i] = buf8[i]; + break; + case 2: + data->status_buf[i] = buf16[i]; + break; + case 4: + data->status_buf[i] = buf32[i]; + break; + default: + BUG(); + goto exit; + } + } + + } else { + for (i = 0; i < data->chip->num_regs; i++) { + ret = regmap_read(map, chip->status_base + + (i * map->reg_stride + * data->irq_reg_stride), + &data->status_buf[i]); + + if (ret != 0) { + dev_err(map->dev, + "Failed to read IRQ status: %d\n", + ret); + goto exit; + } + } + } + + /* + * Ignore masked IRQs and ack if we need to; we ack early so + * there is no race between handling and acknowleding the + * interrupt. We assume that typically few of the interrupts + * will fire simultaneously so don't worry about overhead from + * doing a write per register. 
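+ * Chips with clear_ack set additionally get a follow-up write of zero
+ * (or all ones when ack_invert is set) below, so the ack bits are
+ * released again after the acknowledgement.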
+ */ + for (i = 0; i < data->chip->num_regs; i++) { + data->status_buf[i] &= ~data->mask_buf[i]; + + if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) { + reg = chip->ack_base + + (i * map->reg_stride * data->irq_reg_stride); + if (chip->ack_invert) + ret = regmap_write(map, reg, + ~data->status_buf[i]); + else + ret = regmap_write(map, reg, + data->status_buf[i]); + if (chip->clear_ack) { + if (chip->ack_invert && !ret) + ret = regmap_write(map, reg, UINT_MAX); + else if (!ret) + ret = regmap_write(map, reg, 0); + } + if (ret != 0) + dev_err(map->dev, "Failed to ack 0x%x: %d\n", + reg, ret); + } + } + + for (i = 0; i < chip->num_irqs; i++) { + if (data->status_buf[chip->irqs[i].reg_offset / + map->reg_stride] & chip->irqs[i].mask) { + handle_nested_irq(irq_find_mapping(data->domain, i)); + handled = true; + } + } + +exit: + if (chip->runtime_pm) + pm_runtime_put(map->dev); + + if (chip->handle_post_irq) + chip->handle_post_irq(chip->irq_drv_data); + + if (handled) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +static int regmap_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw) +{ + struct regmap_irq_chip_data *data = h->host_data; + + irq_set_chip_data(virq, data); + irq_set_chip(virq, &data->irq_chip); + irq_set_nested_thread(virq, 1); + irq_set_parent(virq, data->irq); + irq_set_noprobe(virq); + + return 0; +} + +static const struct irq_domain_ops regmap_domain_ops = { + .map = regmap_irq_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +/** + * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling + * + * @fwnode: The firmware node where the IRQ domain should be added to. + * @map: The regmap for the device. + * @irq: The IRQ the device uses to signal interrupts. + * @irq_flags: The IRQF_ flags to use for the primary interrupt. + * @irq_base: Allocate at specific IRQ number if irq_base > 0. + * @chip: Configuration for the interrupt controller. + * @data: Runtime data structure for the controller, allocated on success. + * + * Returns 0 on success or an errno on failure. + * + * In order for this to be efficient the chip really should use a + * register cache. The chip driver is responsible for restoring the + * register values used by the IRQ controller over suspend and resume. 
+ */ +int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) +{ + struct regmap_irq_chip_data *d; + int i; + int ret = -ENOMEM; + int num_type_reg; + u32 reg; + u32 unmask_offset; + + if (chip->num_regs <= 0) + return -EINVAL; + + if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack)) + return -EINVAL; + + for (i = 0; i < chip->num_irqs; i++) { + if (chip->irqs[i].reg_offset % map->reg_stride) + return -EINVAL; + if (chip->irqs[i].reg_offset / map->reg_stride >= + chip->num_regs) + return -EINVAL; + } + + if (irq_base) { + irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); + if (irq_base < 0) { + dev_warn(map->dev, "Failed to allocate IRQs: %d\n", + irq_base); + return irq_base; + } + } + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + if (chip->num_main_regs) { + d->main_status_buf = kcalloc(chip->num_main_regs, + sizeof(unsigned int), + GFP_KERNEL); + + if (!d->main_status_buf) + goto err_alloc; + } + + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + GFP_KERNEL); + if (!d->status_buf) + goto err_alloc; + + d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + GFP_KERNEL); + if (!d->mask_buf) + goto err_alloc; + + d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int), + GFP_KERNEL); + if (!d->mask_buf_def) + goto err_alloc; + + if (chip->wake_base) { + d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int), + GFP_KERNEL); + if (!d->wake_buf) + goto err_alloc; + } + + num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg; + if (num_type_reg) { + d->type_buf_def = kcalloc(num_type_reg, + sizeof(unsigned int), GFP_KERNEL); + if (!d->type_buf_def) + goto err_alloc; + + d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int), + GFP_KERNEL); + if (!d->type_buf) + goto err_alloc; + } + + d->irq_chip = regmap_irq_chip; + d->irq_chip.name = chip->name; + d->irq = irq; + d->map = map; + d->chip = chip; + d->irq_base = irq_base; + + if (chip->irq_reg_stride) + d->irq_reg_stride = chip->irq_reg_stride; + else + d->irq_reg_stride = 1; + + if (chip->type_reg_stride) + d->type_reg_stride = chip->type_reg_stride; + else + d->type_reg_stride = 1; + + if (!map->use_single_read && map->reg_stride == 1 && + d->irq_reg_stride == 1) { + d->status_reg_buf = kmalloc_array(chip->num_regs, + map->format.val_bytes, + GFP_KERNEL); + if (!d->status_reg_buf) + goto err_alloc; + } + + mutex_init(&d->lock); + + for (i = 0; i < chip->num_irqs; i++) + d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] + |= chip->irqs[i].mask; + + /* Mask all the interrupts by default */ + for (i = 0; i < chip->num_regs; i++) { + d->mask_buf[i] = d->mask_buf_def[i]; + if (!chip->mask_base) + continue; + + reg = chip->mask_base + + (i * map->reg_stride * d->irq_reg_stride); + if (chip->mask_invert) + ret = regmap_irq_update_bits(d, reg, + d->mask_buf[i], ~d->mask_buf[i]); + else if (d->chip->unmask_base) { + unmask_offset = d->chip->unmask_base - + d->chip->mask_base; + ret = regmap_irq_update_bits(d, + reg + unmask_offset, + d->mask_buf[i], + d->mask_buf[i]); + } else + ret = regmap_irq_update_bits(d, reg, + d->mask_buf[i], d->mask_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + + if (!chip->init_ack_masked) + continue; + + /* Ack masked but set interrupts */ + reg = chip->status_base + + (i * map->reg_stride * 
d->irq_reg_stride); + ret = regmap_read(map, reg, &d->status_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to read IRQ status: %d\n", + ret); + goto err_alloc; + } + + if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) { + reg = chip->ack_base + + (i * map->reg_stride * d->irq_reg_stride); + if (chip->ack_invert) + ret = regmap_write(map, reg, + ~(d->status_buf[i] & d->mask_buf[i])); + else + ret = regmap_write(map, reg, + d->status_buf[i] & d->mask_buf[i]); + if (chip->clear_ack) { + if (chip->ack_invert && !ret) + ret = regmap_write(map, reg, UINT_MAX); + else if (!ret) + ret = regmap_write(map, reg, 0); + } + if (ret != 0) { + dev_err(map->dev, "Failed to ack 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } + } + + /* Wake is disabled by default */ + if (d->wake_buf) { + for (i = 0; i < chip->num_regs; i++) { + d->wake_buf[i] = d->mask_buf_def[i]; + reg = chip->wake_base + + (i * map->reg_stride * d->irq_reg_stride); + + if (chip->wake_invert) + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], + 0); + else + ret = regmap_irq_update_bits(d, reg, + d->mask_buf_def[i], + d->wake_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } + } + + if (chip->num_type_reg && !chip->type_in_mask) { + for (i = 0; i < chip->num_type_reg; ++i) { + reg = chip->type_base + + (i * map->reg_stride * d->type_reg_stride); + + ret = regmap_read(map, reg, &d->type_buf_def[i]); + + if (d->chip->type_invert) + d->type_buf_def[i] = ~d->type_buf_def[i]; + + if (ret) { + dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } + } + + if (irq_base) + d->domain = irq_domain_add_legacy(to_of_node(fwnode), + chip->num_irqs, irq_base, + 0, ®map_domain_ops, d); + else + d->domain = irq_domain_add_linear(to_of_node(fwnode), + chip->num_irqs, + ®map_domain_ops, d); + if (!d->domain) { + dev_err(map->dev, "Failed to create IRQ domain\n"); + ret = -ENOMEM; + goto err_alloc; + } + + ret = request_threaded_irq(irq, NULL, regmap_irq_thread, + irq_flags | IRQF_ONESHOT, + chip->name, d); + if (ret != 0) { + dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", + irq, chip->name, ret); + goto err_domain; + } + + *data = d; + + return 0; + +err_domain: + /* Should really dispose of the domain but... */ +err_alloc: + kfree(d->type_buf); + kfree(d->type_buf_def); + kfree(d->wake_buf); + kfree(d->mask_buf_def); + kfree(d->mask_buf); + kfree(d->status_buf); + kfree(d->status_reg_buf); + kfree(d); + return ret; +} +EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode); + +/** + * regmap_add_irq_chip() - Use standard regmap IRQ controller handling + * + * @map: The regmap for the device. + * @irq: The IRQ the device uses to signal interrupts. + * @irq_flags: The IRQF_ flags to use for the primary interrupt. + * @irq_base: Allocate at specific IRQ number if irq_base > 0. + * @chip: Configuration for the interrupt controller. + * @data: Runtime data structure for the controller, allocated on success. + * + * Returns 0 on success or an errno on failure. + * + * This is the same as regmap_add_irq_chip_fwnode, except that the firmware + * node of the regmap is used. 
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+			int irq_base, const struct regmap_irq_chip *chip,
+			struct regmap_irq_chip_data **data)
+{
+	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
+					  irq_flags, irq_base, chip, data);
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
+
+/**
+ * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all mapped IRQs on the chip.
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+	unsigned int virq;
+	int hwirq;
+
+	if (!d)
+		return;
+
+	free_irq(irq, d);
+
+	/* Dispose all virtual irq from irq domain before removing it */
+	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+		/* Ignore hwirq if holes in the IRQ list */
+		if (!d->chip->irqs[hwirq].mask)
+			continue;
+
+		/*
+		 * Find the virtual irq of hwirq on chip and if it is
+		 * there then dispose it
+		 */
+		virq = irq_find_mapping(d->domain, hwirq);
+		if (virq)
+			irq_dispose_mapping(virq);
+	}
+
+	irq_domain_remove(d->domain);
+	kfree(d->type_buf);
+	kfree(d->type_buf_def);
+	kfree(d->wake_buf);
+	kfree(d->mask_buf_def);
+	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
+	kfree(d->status_buf);
+	kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+static void devm_regmap_irq_chip_release(struct device *dev, void *res)
+{
+	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
+
+	regmap_del_irq_chip(d->irq, d);
+}
+
+static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
+
+{
+	struct regmap_irq_chip_data **r = res;
+
+	if (!r || !*r) {
+		WARN_ON(!r || !*r);
+		return 0;
+	}
+	return *r == data;
+}
+
+/**
+ * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
+ *
+ * @dev: The device to which the irq_chip belongs.
+ * @fwnode: The firmware node where the IRQ domain should be added to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+				    struct fwnode_handle *fwnode,
+				    struct regmap *map, int irq,
+				    int irq_flags, int irq_base,
+				    const struct regmap_irq_chip *chip,
+				    struct regmap_irq_chip_data **data)
+{
+	struct regmap_irq_chip_data **ptr, *d;
+	int ret;
+
+	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
+					 chip, &d);
+	if (ret < 0) {
+		devres_free(ptr);
+		return ret;
+	}
+
+	*ptr = d;
+	devres_add(dev, ptr);
+	*data = d;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
+
+/**
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
+ *
+ * @dev: The device to which the irq_chip belongs.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+			     int irq_flags, int irq_base,
+			     const struct regmap_irq_chip *chip,
+			     struct regmap_irq_chip_data **data)
+{
+	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
+					       irq, irq_flags, irq_base, chip,
+					       data);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
+
+/**
+ * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
+ *
+ * @dev: Device for which the resource was allocated.
+ * @irq: Primary IRQ for the device.
+ * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
+ *
+ * A resource managed version of regmap_del_irq_chip().
+ */
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+			      struct regmap_irq_chip_data *data)
+{
+	int rc;
+
+	WARN_ON(irq != data->irq);
+	rc = devres_release(dev, devm_regmap_irq_chip_release,
+			    devm_regmap_irq_chip_match, data);
+
+	if (rc != 0)
+		WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
+ *
+ * @data: regmap irq controller to operate on.
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+	WARN_ON(!data->irq_base);
+	return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
+
+/**
+ * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
+ *
+ * @data: regmap irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs.
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
+{
+	/* Handle holes in the IRQ list */
+	if (!data->chip->irqs[irq].mask)
+		return -EINVAL;
+
+	return irq_create_mapping(data->domain, irq);
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
+
+/**
+ * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
+ *
+ * @data: regmap_irq controller to operate on.
+ *
+ * Useful for drivers to request their own IRQs and for integration
+ * with subsystems. For ease of integration NULL is accepted as a
+ * domain, allowing devices to just call this even if no domain is
+ * allocated.
+ */
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
+{
+	if (data)
+		return data->domain;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
new file mode 100644
index 000000000..af967d8f9
--- /dev/null
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
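To make the kernel-doc above concrete, here is a minimal usage sketch of the regmap IRQ helpers documented in regmap-irq.c; it is illustrative only and not part of this patch. The my_chip_* names, the 0x10/0x14/0x18 register layout and the event handler are assumptions, not taken from any real driver; only the regmap_irq structures and the devm_regmap_add_irq_chip()/regmap_irq_get_virq() calls follow the API shown above.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

/* One status/mask/ack register, two interrupt sources (assumed layout). */
static const struct regmap_irq my_chip_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },	/* hwirq 0 */
	{ .reg_offset = 0, .mask = BIT(1) },	/* hwirq 1 */
};

static const struct regmap_irq_chip my_chip_irq_chip = {
	.name		= "my_chip",
	.status_base	= 0x10,		/* hypothetical addresses */
	.mask_base	= 0x14,
	.ack_base	= 0x18,
	.num_regs	= 1,
	.irqs		= my_chip_irqs,
	.num_irqs	= ARRAY_SIZE(my_chip_irqs),
};

static irqreturn_t my_chip_event_handler(int irq, void *dev_id)
{
	/* device specific handling of hwirq 1 would go here */
	return IRQ_HANDLED;
}

static int my_chip_setup_irq(struct device *dev, struct regmap *map, int irq)
{
	struct regmap_irq_chip_data *irq_data;
	int ret, virq;

	/* Managed registration: torn down automatically on driver unbind. */
	ret = devm_regmap_add_irq_chip(dev, map, irq, IRQF_ONESHOT,
				       0, &my_chip_irq_chip, &irq_data);
	if (ret)
		return ret;

	/* Translate the chip-relative index 1 into a Linux virtual IRQ. */
	virq = regmap_irq_get_virq(irq_data, 1);
	if (virq < 0)
		return virq;

	return devm_request_threaded_irq(dev, virq, NULL,
					 my_chip_event_handler, IRQF_ONESHOT,
					 "my_chip-event", NULL);
}

No explicit teardown is needed in this sketch: the devres release hook shown above ends up in regmap_del_irq_chip(), which disposes of the mapped IRQs, removes the domain and frees the buffers.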
+ +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#include "internal.h" + +struct regmap_mmio_context { + void __iomem *regs; + unsigned val_bytes; + + bool attached_clk; + struct clk *clk; + + void (*reg_write)(struct regmap_mmio_context *ctx, + unsigned int reg, unsigned int val); + unsigned int (*reg_read)(struct regmap_mmio_context *ctx, + unsigned int reg); +}; + +static int regmap_mmio_regbits_check(size_t reg_bits) +{ + switch (reg_bits) { + case 8: + case 16: + case 32: +#ifdef CONFIG_64BIT + case 64: +#endif + return 0; + default: + return -EINVAL; + } +} + +static int regmap_mmio_get_min_stride(size_t val_bits) +{ + int min_stride; + + switch (val_bits) { + case 8: + /* The core treats 0 as 1 */ + min_stride = 0; + return 0; + case 16: + min_stride = 2; + break; + case 32: + min_stride = 4; + break; +#ifdef CONFIG_64BIT + case 64: + min_stride = 8; + break; +#endif + default: + return -EINVAL; + } + + return min_stride; +} + +static void regmap_mmio_write8(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writeb(val, ctx->regs + reg); +} + +static void regmap_mmio_write16le(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writew(val, ctx->regs + reg); +} + +static void regmap_mmio_write16be(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + iowrite16be(val, ctx->regs + reg); +} + +static void regmap_mmio_write32le(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writel(val, ctx->regs + reg); +} + +static void regmap_mmio_write32be(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + iowrite32be(val, ctx->regs + reg); +} + +#ifdef CONFIG_64BIT +static void regmap_mmio_write64le(struct regmap_mmio_context *ctx, + unsigned int reg, + unsigned int val) +{ + writeq(val, ctx->regs + reg); +} +#endif + +static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val) +{ + struct regmap_mmio_context *ctx = context; + int ret; + + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + ctx->reg_write(ctx, reg, val); + + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return 0; +} + +static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readb(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readw(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread16be(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readl(ctx->regs + reg); +} + +static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return ioread32be(ctx->regs + reg); +} + +#ifdef CONFIG_64BIT +static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx, + unsigned int reg) +{ + return readq(ctx->regs + reg); +} +#endif + +static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val) +{ + struct regmap_mmio_context *ctx = context; + int ret; + + if (!IS_ERR(ctx->clk)) { + ret = clk_enable(ctx->clk); + if (ret < 0) + return ret; + } + + *val = ctx->reg_read(ctx, reg); + + if (!IS_ERR(ctx->clk)) + clk_disable(ctx->clk); + + return 0; +} + +static void regmap_mmio_free_context(void 
*context) +{ + struct regmap_mmio_context *ctx = context; + + if (!IS_ERR(ctx->clk)) { + clk_unprepare(ctx->clk); + if (!ctx->attached_clk) + clk_put(ctx->clk); + } + kfree(context); +} + +static const struct regmap_bus regmap_mmio = { + .fast_io = true, + .reg_write = regmap_mmio_write, + .reg_read = regmap_mmio_read, + .free_context = regmap_mmio_free_context, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config) +{ + struct regmap_mmio_context *ctx; + int min_stride; + int ret; + + ret = regmap_mmio_regbits_check(config->reg_bits); + if (ret) + return ERR_PTR(ret); + + if (config->pad_bits) + return ERR_PTR(-EINVAL); + + min_stride = regmap_mmio_get_min_stride(config->val_bits); + if (min_stride < 0) + return ERR_PTR(min_stride); + + if (config->reg_stride < min_stride) + return ERR_PTR(-EINVAL); + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + ctx->regs = regs; + ctx->val_bytes = config->val_bits / 8; + ctx->clk = ERR_PTR(-ENODEV); + + switch (regmap_get_val_endian(dev, ®map_mmio, config)) { + case REGMAP_ENDIAN_DEFAULT: + case REGMAP_ENDIAN_LITTLE: +#ifdef __LITTLE_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + switch (config->val_bits) { + case 8: + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + break; + case 16: + ctx->reg_read = regmap_mmio_read16le; + ctx->reg_write = regmap_mmio_write16le; + break; + case 32: + ctx->reg_read = regmap_mmio_read32le; + ctx->reg_write = regmap_mmio_write32le; + break; +#ifdef CONFIG_64BIT + case 64: + ctx->reg_read = regmap_mmio_read64le; + ctx->reg_write = regmap_mmio_write64le; + break; +#endif + default: + ret = -EINVAL; + goto err_free; + } + break; + case REGMAP_ENDIAN_BIG: +#ifdef __BIG_ENDIAN + case REGMAP_ENDIAN_NATIVE: +#endif + switch (config->val_bits) { + case 8: + ctx->reg_read = regmap_mmio_read8; + ctx->reg_write = regmap_mmio_write8; + break; + case 16: + ctx->reg_read = regmap_mmio_read16be; + ctx->reg_write = regmap_mmio_write16be; + break; + case 32: + ctx->reg_read = regmap_mmio_read32be; + ctx->reg_write = regmap_mmio_write32be; + break; + default: + ret = -EINVAL; + goto err_free; + } + break; + default: + ret = -EINVAL; + goto err_free; + } + + if (clk_id == NULL) + return ctx; + + ctx->clk = clk_get(dev, clk_id); + if (IS_ERR(ctx->clk)) { + ret = PTR_ERR(ctx->clk); + goto err_free; + } + + ret = clk_prepare(ctx->clk); + if (ret < 0) { + clk_put(ctx->clk); + goto err_free; + } + + return ctx; + +err_free: + kfree(ctx); + + return ERR_PTR(ret); +} + +struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap_mmio_context *ctx; + + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __regmap_init(dev, ®map_mmio, ctx, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk); + +struct regmap *__devm_regmap_init_mmio_clk(struct device *dev, + const char *clk_id, + void __iomem *regs, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap_mmio_context *ctx; + + ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); + if (IS_ERR(ctx)) + return ERR_CAST(ctx); + + return __devm_regmap_init(dev, ®map_mmio, 
ctx, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk); + +int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk) +{ + struct regmap_mmio_context *ctx = map->bus_context; + + ctx->clk = clk; + ctx->attached_clk = true; + + return clk_prepare(ctx->clk); +} +EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk); + +void regmap_mmio_detach_clk(struct regmap *map) +{ + struct regmap_mmio_context *ctx = map->bus_context; + + clk_unprepare(ctx->clk); + + ctx->attached_clk = false; + ctx->clk = NULL; +} +EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c new file mode 100644 index 000000000..597042e2d --- /dev/null +++ b/drivers/base/regmap/regmap-sccb.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +// Register map access API - SCCB support + +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include "internal.h" + +/** + * sccb_is_available - Check if the adapter supports SCCB protocol + * @adap: I2C adapter + * + * Return true if the I2C adapter is capable of using SCCB helper functions, + * false otherwise. + */ +static bool sccb_is_available(struct i2c_adapter *adap) +{ + u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA; + + /* + * If we ever want support for hardware doing SCCB natively, we will + * introduce a sccb_xfer() callback to struct i2c_algorithm and check + * for it here. + */ + + return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs; +} + +/** + * regmap_sccb_read - Read data from SCCB slave device + * @context: Device that will be interacted with + * @reg: Register to be read from + * @val: Pointer to store read value + * + * This executes the 2-phase write transmission cycle that is followed by a + * 2-phase read transmission cycle, returning negative errno else zero on + * success. + */ +static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + union i2c_smbus_data data; + + i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT); + + ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags, + I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL); + if (ret < 0) + goto out; + + ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags, + I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data); + if (ret < 0) + goto out; + + *val = data.byte; +out: + i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT); + + return ret; +} + +/** + * regmap_sccb_write - Write data to SCCB slave device + * @context: Device that will be interacted with + * @reg: Register to write to + * @val: Value to be written + * + * This executes the SCCB 3-phase write transmission cycle, returning negative + * errno else zero on success. 
+ */ +static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + + return i2c_smbus_write_byte_data(i2c, reg, val); +} + +static struct regmap_bus regmap_sccb_bus = { + .reg_write = regmap_sccb_write, + .reg_read = regmap_sccb_read, +}; + +static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c, + const struct regmap_config *config) +{ + if (config->val_bits == 8 && config->reg_bits == 8 && + sccb_is_available(i2c->adapter)) + return ®map_sccb_bus; + + return ERR_PTR(-ENOTSUPP); +} + +struct regmap *__regmap_init_sccb(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __regmap_init(&i2c->dev, bus, &i2c->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_sccb); + +struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c new file mode 100644 index 000000000..4b8d2d010 --- /dev/null +++ b/drivers/base/regmap/regmap-sdw.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright(c) 2015-17 Intel Corporation. + +#include <linux/device.h> +#include <linux/module.h> +#include <linux/soundwire/sdw.h> +#include "internal.h" + +static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val) +{ + struct device *dev = context; + struct sdw_slave *slave = dev_to_sdw_dev(dev); + + return sdw_write_no_pm(slave, reg, val); +} + +static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val) +{ + struct device *dev = context; + struct sdw_slave *slave = dev_to_sdw_dev(dev); + int read; + + read = sdw_read_no_pm(slave, reg); + if (read < 0) + return read; + + *val = read; + return 0; +} + +static struct regmap_bus regmap_sdw = { + .reg_read = regmap_sdw_read, + .reg_write = regmap_sdw_write, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static int regmap_sdw_config_check(const struct regmap_config *config) +{ + /* All register are 8-bits wide as per MIPI Soundwire 1.0 Spec */ + if (config->val_bits != 8) + return -ENOTSUPP; + + /* Registers are 32 bits wide */ + if (config->reg_bits != 32) + return -ENOTSUPP; + + if (config->pad_bits != 0) + return -ENOTSUPP; + + return 0; +} + +struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return __regmap_init(&sdw->dev, ®map_sdw, + &sdw->dev, config, lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_sdw); + +struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + int ret; + + ret = regmap_sdw_config_check(config); + if (ret) + return ERR_PTR(ret); + + return 
__devm_regmap_init(&sdw->dev, ®map_sdw, + &sdw->dev, config, lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw); + +MODULE_DESCRIPTION("Regmap SoundWire Module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c new file mode 100644 index 000000000..0968059f1 --- /dev/null +++ b/drivers/base/regmap/regmap-slimbus.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017, Linaro Ltd. + +#include <linux/regmap.h> +#include <linux/slimbus.h> +#include <linux/module.h> + +#include "internal.h" + +static int regmap_slimbus_write(void *context, const void *data, size_t count) +{ + struct slim_device *sdev = context; + + return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2); +} + +static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct slim_device *sdev = context; + + return slim_read(sdev, *(u16 *)reg, val_size, val); +} + +static struct regmap_bus regmap_slimbus_bus = { + .write = regmap_slimbus_write, + .read = regmap_slimbus_read, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static const struct regmap_bus *regmap_get_slimbus(struct slim_device *slim, + const struct regmap_config *config) +{ + if (config->val_bits == 8 && config->reg_bits == 16) + return ®map_slimbus_bus; + + return ERR_PTR(-ENOTSUPP); +} + +struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_slimbus); + +struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c new file mode 100644 index 000000000..ad1da83e8 --- /dev/null +++ b/drivers/base/regmap/regmap-spi-avmm.c @@ -0,0 +1,719 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - SPI AVMM support +// +// Copyright (C) 2018-2020 Intel Corporation. All rights reserved. + +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> + +/* + * This driver implements the regmap operations for a generic SPI + * master to access the registers of the spi slave chip which has an + * Avalone bus in it. + * + * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated + * in the spi slave chip. The IP acts as a bridge to convert encoded streams of + * bytes from the host to the internal register read/write on Avalon bus. In + * order to issue register access requests to the slave chip, the host should + * send formatted bytes that conform to the transfer protocol. + * The transfer protocol contains 3 layers: transaction layer, packet layer + * and physical layer. 
+ * + * Reference Documents could be found at: + * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html + * + * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general + * introduction to the protocol. + * + * Chapter "Avalon Packets to Transactions Converter Core" describes + * the transaction layer. + * + * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores" + * describes the packet layer. + * + * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the + * physical layer. + * + * + * When host issues a regmap read/write, the driver will transform the request + * to byte stream layer by layer. It formats the register addr, value and + * length to the transaction layer request, then converts the request to packet + * layer bytes stream and then to physical layer bytes stream. Finally the + * driver sends the formatted byte stream over SPI bus to the slave chip. + * + * The spi-avmm IP on the slave chip decodes the byte stream and initiates + * register read/write on its internal Avalon bus, and then encodes the + * response to byte stream and sends back to host. + * + * The driver receives the byte stream, reverses the 3 layers transformation, + * and finally gets the response value (read out data for register read, + * successful written size for register write). + */ + +#define PKT_SOP 0x7a +#define PKT_EOP 0x7b +#define PKT_CHANNEL 0x7c +#define PKT_ESC 0x7d + +#define PHY_IDLE 0x4a +#define PHY_ESC 0x4d + +#define TRANS_CODE_WRITE 0x0 +#define TRANS_CODE_SEQ_WRITE 0x4 +#define TRANS_CODE_READ 0x10 +#define TRANS_CODE_SEQ_READ 0x14 +#define TRANS_CODE_NO_TRANS 0x7f + +#define SPI_AVMM_XFER_TIMEOUT (msecs_to_jiffies(200)) + +/* slave's register addr is 32 bits */ +#define SPI_AVMM_REG_SIZE 4UL +/* slave's register value is 32 bits */ +#define SPI_AVMM_VAL_SIZE 4UL + +/* + * max rx size could be larger. But considering the buffer consuming, + * it is proper that we limit 1KB xfer at max. + */ +#define MAX_READ_CNT 256UL +#define MAX_WRITE_CNT 1UL + +struct trans_req_header { + u8 code; + u8 rsvd; + __be16 size; + __be32 addr; +} __packed; + +struct trans_resp_header { + u8 r_code; + u8 rsvd; + __be16 size; +} __packed; + +#define TRANS_REQ_HD_SIZE (sizeof(struct trans_req_header)) +#define TRANS_RESP_HD_SIZE (sizeof(struct trans_resp_header)) + +/* + * In transaction layer, + * the write request format is: Transaction request header + data + * the read request format is: Transaction request header + * the write response format is: Transaction response header + * the read response format is: pure data, no Transaction response header + */ +#define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n)) +#define TRANS_RD_TX_SIZE TRANS_REQ_HD_SIZE +#define TRANS_TX_MAX TRANS_WR_TX_SIZE(MAX_WRITE_CNT) + +#define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n)) +#define TRANS_WR_RX_SIZE TRANS_RESP_HD_SIZE +#define TRANS_RX_MAX TRANS_RD_RX_SIZE(MAX_READ_CNT) + +/* tx & rx share one transaction layer buffer */ +#define TRANS_BUF_SIZE ((TRANS_TX_MAX > TRANS_RX_MAX) ? \ + TRANS_TX_MAX : TRANS_RX_MAX) + +/* + * In tx phase, the host prepares all the phy layer bytes of a request in the + * phy buffer and sends them in a batch. + * + * The packet layer and physical layer defines several special chars for + * various purpose, when a transaction layer byte hits one of these special + * chars, it should be escaped. The escape rule is, "Escape char first, + * following the byte XOR'ed with 0x20". 
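+ *
+ * For example, a transaction layer byte of 0x7a (the packet layer SOP
+ * char) is therefore sent as the two bytes 0x7d, 0x5a on the wire.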
+ * + * This macro defines the max possible length of the phy data. In the worst + * case, all transaction layer bytes need to be escaped (so the data length + * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally + * we should make sure the length is aligned to SPI BPW. + */ +#define PHY_TX_MAX ALIGN(2 * TRANS_TX_MAX + 4, 4) + +/* + * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from slave, the max + * length of the rx bit stream is unpredictable. So the driver reads the words + * one by one, and parses each word immediately into transaction layer buffer. + * Only one word length of phy buffer is used for rx. + */ +#define PHY_BUF_SIZE PHY_TX_MAX + +/** + * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge + * + * @spi: spi slave associated with this bridge. + * @word_len: bytes of word for spi transfer. + * @trans_len: length of valid data in trans_buf. + * @phy_len: length of valid data in phy_buf. + * @trans_buf: the bridge buffer for transaction layer data. + * @phy_buf: the bridge buffer for physical layer data. + * @swap_words: the word swapping cb for phy data. NULL if not needed. + * + * As a device's registers are implemented on the AVMM bus address space, it + * requires the driver to issue formatted requests to spi slave to AVMM bus + * master bridge to perform register access. + */ +struct spi_avmm_bridge { + struct spi_device *spi; + unsigned char word_len; + unsigned int trans_len; + unsigned int phy_len; + /* bridge buffer used in translation between protocol layers */ + char trans_buf[TRANS_BUF_SIZE]; + char phy_buf[PHY_BUF_SIZE]; + void (*swap_words)(char *buf, unsigned int len); +}; + +static void br_swap_words_32(char *buf, unsigned int len) +{ + u32 *p = (u32 *)buf; + unsigned int count; + + count = len / 4; + while (count--) { + *p = swab32p(p); + p++; + } +} + +/* + * Format transaction layer data in br->trans_buf according to the register + * access request, Store valid transaction layer data length in br->trans_len. + */ +static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg, + u32 *wr_val, u32 count) +{ + struct trans_req_header *header; + unsigned int trans_len; + u8 code; + __le32 *data; + int i; + + if (is_read) { + if (count == 1) + code = TRANS_CODE_READ; + else + code = TRANS_CODE_SEQ_READ; + } else { + if (count == 1) + code = TRANS_CODE_WRITE; + else + code = TRANS_CODE_SEQ_WRITE; + } + + header = (struct trans_req_header *)br->trans_buf; + header->code = code; + header->rsvd = 0; + header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE); + header->addr = cpu_to_be32(reg); + + trans_len = TRANS_REQ_HD_SIZE; + + if (!is_read) { + trans_len += SPI_AVMM_VAL_SIZE * count; + if (trans_len > sizeof(br->trans_buf)) + return -ENOMEM; + + data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE); + + for (i = 0; i < count; i++) + *data++ = cpu_to_le32(*wr_val++); + } + + /* Store valid trans data length for next layer */ + br->trans_len = trans_len; + + return 0; +} + +/* + * Convert transaction layer data (in br->trans_buf) to phy layer data, store + * them in br->phy_buf. Pad the phy_buf aligned with SPI's BPW. Store valid phy + * layer data length in br->phy_len. + * + * phy_buf len should be aligned with SPI's BPW. Spare bytes should be padded + * with PHY_IDLE, then the slave will just drop them. + * + * The driver will not simply pad 4a at the tail. 
The concern is that driver + * will not store MISO data during tx phase, if the driver pads 4a at the tail, + * it is possible that if the slave is fast enough to response at the padding + * time. As a result these rx bytes are lost. In the following case, 7a,7c,00 + * will lost. + * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|... + * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|... + * + * So the driver moves EOP and bytes after EOP to the end of the aligned size, + * then fill the hole with PHY_IDLE. As following: + * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40| + * after pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40| + * Then if the slave will not get the entire packet before the tx phase is + * over, it can't responsed to anything either. + */ +static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br) +{ + char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL; + unsigned int aligned_phy_len, move_size; + bool need_esc = false; + + tb = br->trans_buf; + tb_end = tb + br->trans_len; + pb = br->phy_buf; + pb_limit = pb + ARRAY_SIZE(br->phy_buf); + + *pb++ = PKT_SOP; + + /* + * The driver doesn't support multiple channels so the channel number + * is always 0. + */ + *pb++ = PKT_CHANNEL; + *pb++ = 0x0; + + for (; pb < pb_limit && tb < tb_end; pb++) { + if (need_esc) { + *pb = *tb++ ^ 0x20; + need_esc = false; + continue; + } + + /* EOP should be inserted before the last valid char */ + if (tb == tb_end - 1 && !pb_eop) { + *pb = PKT_EOP; + pb_eop = pb; + continue; + } + + /* + * insert an ESCAPE char if the data value equals any special + * char. + */ + switch (*tb) { + case PKT_SOP: + case PKT_EOP: + case PKT_CHANNEL: + case PKT_ESC: + *pb = PKT_ESC; + need_esc = true; + break; + case PHY_IDLE: + case PHY_ESC: + *pb = PHY_ESC; + need_esc = true; + break; + default: + *pb = *tb++; + break; + } + } + + /* The phy buffer is used out but transaction layer data remains */ + if (tb < tb_end) + return -ENOMEM; + + /* Store valid phy data length for spi transfer */ + br->phy_len = pb - br->phy_buf; + + if (br->word_len == 1) + return 0; + + /* Do phy buf padding if word_len > 1 byte. */ + aligned_phy_len = ALIGN(br->phy_len, br->word_len); + if (aligned_phy_len > sizeof(br->phy_buf)) + return -ENOMEM; + + if (aligned_phy_len == br->phy_len) + return 0; + + /* move EOP and bytes after EOP to the end of aligned size */ + move_size = pb - pb_eop; + memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size); + + /* fill the hole with PHY_IDLEs */ + memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len); + + /* update the phy data length */ + br->phy_len = aligned_phy_len; + + return 0; +} + +/* + * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will + * ignore rx in tx phase. + */ +static int br_do_tx(struct spi_avmm_bridge *br) +{ + /* reorder words for spi transfer */ + if (br->swap_words) + br->swap_words(br->phy_buf, br->phy_len); + + /* send all data in phy_buf */ + return spi_write(br->spi, br->phy_buf, br->phy_len); +} + +/* + * This function read the rx byte stream from SPI word by word and convert + * them to transaction layer data in br->trans_buf. It also stores the length + * of rx transaction layer data in br->trans_len + * + * The slave may send an unknown number of PHY_IDLEs in rx phase, so we cannot + * prepare a fixed length buffer to receive all of the rx data in a batch. We + * have to read word by word and convert them to transaction layer data at + * once. 
+ */ +static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br) +{ + bool eop_found = false, channel_found = false, esc_found = false; + bool valid_word = false, last_try = false; + struct device *dev = &br->spi->dev; + char *pb, *tb_limit, *tb = NULL; + unsigned long poll_timeout; + int ret, i; + + tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf); + pb = br->phy_buf; + poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT; + while (tb < tb_limit) { + ret = spi_read(br->spi, pb, br->word_len); + if (ret) + return ret; + + /* reorder the word back */ + if (br->swap_words) + br->swap_words(pb, br->word_len); + + valid_word = false; + for (i = 0; i < br->word_len; i++) { + /* drop everything before first SOP */ + if (!tb && pb[i] != PKT_SOP) + continue; + + /* drop PHY_IDLE */ + if (pb[i] == PHY_IDLE) + continue; + + valid_word = true; + + /* + * We don't support multiple channels, so error out if + * a non-zero channel number is found. + */ + if (channel_found) { + if (pb[i] != 0) { + dev_err(dev, "%s channel num != 0\n", + __func__); + return -EFAULT; + } + + channel_found = false; + continue; + } + + switch (pb[i]) { + case PKT_SOP: + /* + * reset the parsing if a second SOP appears. + */ + tb = br->trans_buf; + eop_found = false; + channel_found = false; + esc_found = false; + break; + case PKT_EOP: + /* + * No special char is expected after ESC char. + * No special char (except ESC & PHY_IDLE) is + * expected after EOP char. + * + * The special chars are all dropped. + */ + if (esc_found || eop_found) + return -EFAULT; + + eop_found = true; + break; + case PKT_CHANNEL: + if (esc_found || eop_found) + return -EFAULT; + + channel_found = true; + break; + case PKT_ESC: + case PHY_ESC: + if (esc_found) + return -EFAULT; + + esc_found = true; + break; + default: + /* Record the normal byte in trans_buf. */ + if (esc_found) { + *tb++ = pb[i] ^ 0x20; + esc_found = false; + } else { + *tb++ = pb[i]; + } + + /* + * We get the last normal byte after EOP, it is + * time we finish. Normally the function should + * return here. + */ + if (eop_found) { + br->trans_len = tb - br->trans_buf; + return 0; + } + } + } + + if (valid_word) { + /* update poll timeout when we get valid word */ + poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT; + last_try = false; + } else { + /* + * We timeout when rx keeps invalid for some time. But + * it is possible we are scheduled out for long time + * after a spi_read. So when we are scheduled in, a SW + * timeout happens. But actually HW may have worked fine and + * has been ready long time ago. So we need to do an extra + * read, if we get a valid word then we could continue rx, + * otherwise real a HW issue happens. + */ + if (last_try) + return -ETIMEDOUT; + + if (time_after(jiffies, poll_timeout)) + last_try = true; + } + } + + /* + * We have used out all transfer layer buffer but cannot find the end + * of the byte stream. + */ + dev_err(dev, "%s transfer buffer is full but rx doesn't end\n", + __func__); + + return -EFAULT; +} + +/* + * For read transactions, the avmm bus will directly return register values + * without transaction response header. 
+ */ +static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br, + u32 *val, unsigned int expected_count) +{ + unsigned int i, trans_len = br->trans_len; + __le32 *data; + + if (expected_count * SPI_AVMM_VAL_SIZE != trans_len) + return -EFAULT; + + data = (__le32 *)br->trans_buf; + for (i = 0; i < expected_count; i++) + *val++ = le32_to_cpu(*data++); + + return 0; +} + +/* + * For write transactions, the slave will return a transaction response + * header. + */ +static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br, + unsigned int expected_count) +{ + unsigned int trans_len = br->trans_len; + struct trans_resp_header *resp; + u8 code; + u16 val_len; + + if (trans_len != TRANS_RESP_HD_SIZE) + return -EFAULT; + + resp = (struct trans_resp_header *)br->trans_buf; + + code = resp->r_code ^ 0x80; + val_len = be16_to_cpu(resp->size); + if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE) + return -EFAULT; + + /* error out if the trans code doesn't align with the val size */ + if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) || + (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE)) + return -EFAULT; + + return 0; +} + +static int do_reg_access(void *context, bool is_read, unsigned int reg, + unsigned int *value, unsigned int count) +{ + struct spi_avmm_bridge *br = context; + int ret; + + /* invalidate bridge buffers first */ + br->trans_len = 0; + br->phy_len = 0; + + ret = br_trans_tx_prepare(br, is_read, reg, value, count); + if (ret) + return ret; + + ret = br_pkt_phy_tx_prepare(br); + if (ret) + return ret; + + ret = br_do_tx(br); + if (ret) + return ret; + + ret = br_do_rx_and_pkt_phy_parse(br); + if (ret) + return ret; + + if (is_read) + return br_rd_trans_rx_parse(br, value, count); + else + return br_wr_trans_rx_parse(br, count); +} + +static int regmap_spi_avmm_gather_write(void *context, + const void *reg_buf, size_t reg_len, + const void *val_buf, size_t val_len) +{ + if (reg_len != SPI_AVMM_REG_SIZE) + return -EINVAL; + + if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE)) + return -EINVAL; + + return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf, + val_len / SPI_AVMM_VAL_SIZE); +} + +static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes) +{ + if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE) + return -EINVAL; + + return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE, + data + SPI_AVMM_REG_SIZE, + bytes - SPI_AVMM_REG_SIZE); +} + +static int regmap_spi_avmm_read(void *context, + const void *reg_buf, size_t reg_len, + void *val_buf, size_t val_len) +{ + if (reg_len != SPI_AVMM_REG_SIZE) + return -EINVAL; + + if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE)) + return -EINVAL; + + return do_reg_access(context, true, *(u32 *)reg_buf, val_buf, + (val_len / SPI_AVMM_VAL_SIZE)); +} + +static struct spi_avmm_bridge * +spi_avmm_bridge_ctx_gen(struct spi_device *spi) +{ + struct spi_avmm_bridge *br; + + if (!spi) + return ERR_PTR(-ENODEV); + + /* Only support BPW == 8 or 32 now. Try 32 BPW first. */ + spi->mode = SPI_MODE_1; + spi->bits_per_word = 32; + if (spi_setup(spi)) { + spi->bits_per_word = 8; + if (spi_setup(spi)) + return ERR_PTR(-EINVAL); + } + + br = kzalloc(sizeof(*br), GFP_KERNEL); + if (!br) + return ERR_PTR(-ENOMEM); + + br->spi = spi; + br->word_len = spi->bits_per_word / 8; + if (br->word_len == 4) { + /* + * The protocol requires little endian byte order but MSB + * first. So driver needs to swap the byte order word by word + * if word length > 1. 
+ */ + br->swap_words = br_swap_words_32; + } + + return br; +} + +static void spi_avmm_bridge_ctx_free(void *context) +{ + kfree(context); +} + +static const struct regmap_bus regmap_spi_avmm_bus = { + .write = regmap_spi_avmm_write, + .gather_write = regmap_spi_avmm_gather_write, + .read = regmap_spi_avmm_read, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, + .max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT, + .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT, + .free_context = spi_avmm_bridge_ctx_free, +}; + +struct regmap *__regmap_init_spi_avmm(struct spi_device *spi, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct spi_avmm_bridge *bridge; + struct regmap *map; + + bridge = spi_avmm_bridge_ctx_gen(spi); + if (IS_ERR(bridge)) + return ERR_CAST(bridge); + + map = __regmap_init(&spi->dev, ®map_spi_avmm_bus, + bridge, config, lock_key, lock_name); + if (IS_ERR(map)) { + spi_avmm_bridge_ctx_free(bridge); + return ERR_CAST(map); + } + + return map; +} +EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm); + +struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct spi_avmm_bridge *bridge; + struct regmap *map; + + bridge = spi_avmm_bridge_ctx_gen(spi); + if (IS_ERR(bridge)) + return ERR_CAST(bridge); + + map = __devm_regmap_init(&spi->dev, ®map_spi_avmm_bus, + bridge, config, lock_key, lock_name); + if (IS_ERR(map)) { + spi_avmm_bridge_ctx_free(bridge); + return ERR_CAST(map); + } + + return map; +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c new file mode 100644 index 000000000..c1894e93c --- /dev/null +++ b/drivers/base/regmap/regmap-spi.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - SPI support +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/regmap.h> +#include <linux/spi/spi.h> +#include <linux/module.h> + +#include "internal.h" + +struct regmap_async_spi { + struct regmap_async core; + struct spi_message m; + struct spi_transfer t[2]; +}; + +static void regmap_spi_complete(void *data) +{ + struct regmap_async_spi *async = data; + + regmap_async_complete_cb(&async->core, async->m.status); +} + +static int regmap_spi_write(void *context, const void *data, size_t count) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + return spi_write(spi, data, count); +} + +static int regmap_spi_gather_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + struct spi_message m; + struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, }, + { .tx_buf = val, .len = val_len, }, }; + + spi_message_init(&m); + spi_message_add_tail(&t[0], &m); + spi_message_add_tail(&t[1], &m); + + return spi_sync(spi, &m); +} + +static int regmap_spi_async_write(void *context, + const void *reg, size_t reg_len, + const void *val, size_t val_len, + struct regmap_async *a) +{ + struct regmap_async_spi *async = container_of(a, + struct regmap_async_spi, + core); + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + async->t[0].tx_buf = reg; + async->t[0].len = reg_len; + 
async->t[1].tx_buf = val; + async->t[1].len = val_len; + + spi_message_init(&async->m); + spi_message_add_tail(&async->t[0], &async->m); + if (val) + spi_message_add_tail(&async->t[1], &async->m); + + async->m.complete = regmap_spi_complete; + async->m.context = async; + + return spi_async(spi, &async->m); +} + +static struct regmap_async *regmap_spi_async_alloc(void) +{ + struct regmap_async_spi *async_spi; + + async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL); + if (!async_spi) + return NULL; + + return &async_spi->core; +} + +static int regmap_spi_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + struct device *dev = context; + struct spi_device *spi = to_spi_device(dev); + + return spi_write_then_read(spi, reg, reg_size, val, val_size); +} + +static const struct regmap_bus regmap_spi = { + .write = regmap_spi_write, + .gather_write = regmap_spi_gather_write, + .async_write = regmap_spi_async_write, + .async_alloc = regmap_spi_async_alloc, + .read = regmap_spi_read, + .read_flag_mask = 0x80, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, +}; + +struct regmap *__regmap_init_spi(struct spi_device *spi, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __regmap_init(&spi->dev, ®map_spi, &spi->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_spi); + +struct regmap *__devm_regmap_init_spi(struct spi_device *spi, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __devm_regmap_init(&spi->dev, ®map_spi, &spi->dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_spi); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c new file mode 100644 index 000000000..cdf12d2aa --- /dev/null +++ b/drivers/base/regmap/regmap-spmi.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - SPMI support +// +// Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. +// +// Based on regmap-i2c.c: +// Copyright 2011 Wolfson Microelectronics plc +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/regmap.h> +#include <linux/spmi.h> +#include <linux/module.h> +#include <linux/init.h> + +static int regmap_spmi_base_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + u8 addr = *(u8 *)reg; + int err = 0; + + BUG_ON(reg_size != 1); + + while (val_size-- && !err) + err = spmi_register_read(context, addr++, val++); + + return err; +} + +static int regmap_spmi_base_gather_write(void *context, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + const u8 *data = val; + u8 addr = *(u8 *)reg; + int err = 0; + + BUG_ON(reg_size != 1); + + /* + * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence, + * use it when possible. 
+ */ + if (addr == 0 && val_size) { + err = spmi_register_zero_write(context, *data); + if (err) + goto err_out; + + data++; + addr++; + val_size--; + } + + while (val_size) { + err = spmi_register_write(context, addr, *data); + if (err) + goto err_out; + + data++; + addr++; + val_size--; + } + +err_out: + return err; +} + +static int regmap_spmi_base_write(void *context, const void *data, + size_t count) +{ + BUG_ON(count < 1); + return regmap_spmi_base_gather_write(context, data, 1, data + 1, + count - 1); +} + +static const struct regmap_bus regmap_spmi_base = { + .read = regmap_spmi_base_read, + .write = regmap_spmi_base_write, + .gather_write = regmap_spmi_base_gather_write, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __regmap_init(&sdev->dev, ®map_spmi_base, sdev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_spmi_base); + +struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __devm_regmap_init(&sdev->dev, ®map_spmi_base, sdev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base); + +static int regmap_spmi_ext_read(void *context, + const void *reg, size_t reg_size, + void *val, size_t val_size) +{ + int err = 0; + size_t len; + u16 addr; + + BUG_ON(reg_size != 2); + + addr = *(u16 *)reg; + + /* + * Split accesses into two to take advantage of the more + * bandwidth-efficient 'Extended Register Read' command when possible + */ + while (addr <= 0xFF && val_size) { + len = min_t(size_t, val_size, 16); + + err = spmi_ext_register_read(context, addr, val, len); + if (err) + goto err_out; + + addr += len; + val += len; + val_size -= len; + } + + while (val_size) { + len = min_t(size_t, val_size, 8); + + err = spmi_ext_register_readl(context, addr, val, len); + if (err) + goto err_out; + + addr += len; + val += len; + val_size -= len; + } + +err_out: + return err; +} + +static int regmap_spmi_ext_gather_write(void *context, + const void *reg, size_t reg_size, + const void *val, size_t val_size) +{ + int err = 0; + size_t len; + u16 addr; + + BUG_ON(reg_size != 2); + + addr = *(u16 *)reg; + + while (addr <= 0xFF && val_size) { + len = min_t(size_t, val_size, 16); + + err = spmi_ext_register_write(context, addr, val, len); + if (err) + goto err_out; + + addr += len; + val += len; + val_size -= len; + } + + while (val_size) { + len = min_t(size_t, val_size, 8); + + err = spmi_ext_register_writel(context, addr, val, len); + if (err) + goto err_out; + + addr += len; + val += len; + val_size -= len; + } + +err_out: + return err; +} + +static int regmap_spmi_ext_write(void *context, const void *data, + size_t count) +{ + BUG_ON(count < 2); + return regmap_spmi_ext_gather_write(context, data, 2, data + 2, + count - 2); +} + +static const struct regmap_bus regmap_spmi_ext = { + .read = regmap_spmi_ext_read, + .write = regmap_spmi_ext_write, + .gather_write = regmap_spmi_ext_gather_write, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, +}; + +struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return 
__regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext); + +struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + return __devm_regmap_init(&sdev->dev, ®map_spmi_ext, sdev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c new file mode 100644 index 000000000..1fbaaad71 --- /dev/null +++ b/drivers/base/regmap/regmap-w1.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API - W1 (1-Wire) support +// +// Copyright (c) 2017 Radioavionica Corporation +// Author: Alex A. Mihaylov <minimumlaw@rambler.ru> + +#include <linux/regmap.h> +#include <linux/module.h> +#include <linux/w1.h> + +#include "internal.h" + +#define W1_CMD_READ_DATA 0x69 +#define W1_CMD_WRITE_DATA 0x6C + +/* + * 1-Wire slaves registers with addess 8 bit and data 8 bit + */ + +static int w1_reg_a8_v8_read(void *context, unsigned int reg, unsigned int *val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_READ_DATA); + w1_write_8(sl->master, reg); + *val = w1_read_8(sl->master); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +static int w1_reg_a8_v8_write(void *context, unsigned int reg, unsigned int val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_WRITE_DATA); + w1_write_8(sl->master, reg); + w1_write_8(sl->master, val); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +/* + * 1-Wire slaves registers with addess 8 bit and data 16 bit + */ + +static int w1_reg_a8_v16_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_READ_DATA); + w1_write_8(sl->master, reg); + *val = w1_read_8(sl->master); + *val |= w1_read_8(sl->master)<<8; + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +static int w1_reg_a8_v16_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 255) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_WRITE_DATA); + w1_write_8(sl->master, reg); + w1_write_8(sl->master, val & 0x00FF); + w1_write_8(sl->master, val>>8 & 0x00FF); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +/* + * 1-Wire slaves registers with addess 16 bit and data 16 bit + */ + +static int w1_reg_a16_v16_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, 
struct w1_slave, dev); + int ret = 0; + + if (reg > 65535) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_READ_DATA); + w1_write_8(sl->master, reg & 0x00FF); + w1_write_8(sl->master, reg>>8 & 0x00FF); + *val = w1_read_8(sl->master); + *val |= w1_read_8(sl->master)<<8; + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +static int w1_reg_a16_v16_write(void *context, unsigned int reg, + unsigned int val) +{ + struct device *dev = context; + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + int ret = 0; + + if (reg > 65535) + return -EINVAL; + + mutex_lock(&sl->master->bus_mutex); + if (!w1_reset_select_slave(sl)) { + w1_write_8(sl->master, W1_CMD_WRITE_DATA); + w1_write_8(sl->master, reg & 0x00FF); + w1_write_8(sl->master, reg>>8 & 0x00FF); + w1_write_8(sl->master, val & 0x00FF); + w1_write_8(sl->master, val>>8 & 0x00FF); + } else { + ret = -ENODEV; + } + mutex_unlock(&sl->master->bus_mutex); + + return ret; +} + +/* + * Various types of supported bus addressing + */ + +static struct regmap_bus regmap_w1_bus_a8_v8 = { + .reg_read = w1_reg_a8_v8_read, + .reg_write = w1_reg_a8_v8_write, +}; + +static struct regmap_bus regmap_w1_bus_a8_v16 = { + .reg_read = w1_reg_a8_v16_read, + .reg_write = w1_reg_a8_v16_write, +}; + +static struct regmap_bus regmap_w1_bus_a16_v16 = { + .reg_read = w1_reg_a16_v16_read, + .reg_write = w1_reg_a16_v16_write, +}; + +static const struct regmap_bus *regmap_get_w1_bus(struct device *w1_dev, + const struct regmap_config *config) +{ + if (config->reg_bits == 8 && config->val_bits == 8) + return ®map_w1_bus_a8_v8; + + if (config->reg_bits == 8 && config->val_bits == 16) + return ®map_w1_bus_a8_v16; + + if (config->reg_bits == 16 && config->val_bits == 16) + return ®map_w1_bus_a16_v16; + + return ERR_PTR(-ENOTSUPP); +} + +struct regmap *__regmap_init_w1(struct device *w1_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + + const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __regmap_init(w1_dev, bus, w1_dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__regmap_init_w1); + +struct regmap *__devm_regmap_init_w1(struct device *w1_dev, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + + const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config); + + if (IS_ERR(bus)) + return ERR_CAST(bus); + + return __devm_regmap_init(w1_dev, bus, w1_dev, config, + lock_key, lock_name); +} +EXPORT_SYMBOL_GPL(__devm_regmap_init_w1); + +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c new file mode 100644 index 000000000..7bc603145 --- /dev/null +++ b/drivers/base/regmap/regmap.c @@ -0,0 +1,3301 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Register map access API +// +// Copyright 2011 Wolfson Microelectronics plc +// +// Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/err.h> +#include <linux/property.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/hwspinlock.h> +#include <asm/unaligned.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" + +#include "internal.h" + +/* + * Sometimes for failures during very 
early init the trace + * infrastructure isn't available early enough to be used. For this + * sort of problem defining LOG_DEVICE will add printks for basic + * register I/O on a specific device. + */ +#undef LOG_DEVICE + +#ifdef LOG_DEVICE +static inline bool regmap_should_log(struct regmap *map) +{ + return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0); +} +#else +static inline bool regmap_should_log(struct regmap *map) { return false; } +#endif + + +static int _regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool force_write); + +static int _regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val); +static int _regmap_bus_read(void *context, unsigned int reg, + unsigned int *val); +static int _regmap_bus_formatted_write(void *context, unsigned int reg, + unsigned int val); +static int _regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val); +static int _regmap_bus_raw_write(void *context, unsigned int reg, + unsigned int val); + +bool regmap_reg_in_ranges(unsigned int reg, + const struct regmap_range *ranges, + unsigned int nranges) +{ + const struct regmap_range *r; + int i; + + for (i = 0, r = ranges; i < nranges; i++, r++) + if (regmap_reg_in_range(reg, r)) + return true; + return false; +} +EXPORT_SYMBOL_GPL(regmap_reg_in_ranges); + +bool regmap_check_range_table(struct regmap *map, unsigned int reg, + const struct regmap_access_table *table) +{ + /* Check "no ranges" first */ + if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges)) + return false; + + /* In case zero "yes ranges" are supplied, any reg is OK */ + if (!table->n_yes_ranges) + return true; + + return regmap_reg_in_ranges(reg, table->yes_ranges, + table->n_yes_ranges); +} +EXPORT_SYMBOL_GPL(regmap_check_range_table); + +bool regmap_writeable(struct regmap *map, unsigned int reg) +{ + if (map->max_register && reg > map->max_register) + return false; + + if (map->writeable_reg) + return map->writeable_reg(map->dev, reg); + + if (map->wr_table) + return regmap_check_range_table(map, reg, map->wr_table); + + return true; +} + +bool regmap_cached(struct regmap *map, unsigned int reg) +{ + int ret; + unsigned int val; + + if (map->cache_type == REGCACHE_NONE) + return false; + + if (!map->cache_ops) + return false; + + if (map->max_register && reg > map->max_register) + return false; + + map->lock(map->lock_arg); + ret = regcache_read(map, reg, &val); + map->unlock(map->lock_arg); + if (ret) + return false; + + return true; +} + +bool regmap_readable(struct regmap *map, unsigned int reg) +{ + if (!map->reg_read) + return false; + + if (map->max_register && reg > map->max_register) + return false; + + if (map->format.format_write) + return false; + + if (map->readable_reg) + return map->readable_reg(map->dev, reg); + + if (map->rd_table) + return regmap_check_range_table(map, reg, map->rd_table); + + return true; +} + +bool regmap_volatile(struct regmap *map, unsigned int reg) +{ + if (!map->format.format_write && !regmap_readable(map, reg)) + return false; + + if (map->volatile_reg) + return map->volatile_reg(map->dev, reg); + + if (map->volatile_table) + return regmap_check_range_table(map, reg, map->volatile_table); + + if (map->cache_ops) + return false; + else + return true; +} + +bool regmap_precious(struct regmap *map, unsigned int reg) +{ + if (!regmap_readable(map, reg)) + return false; + + if (map->precious_reg) + return map->precious_reg(map->dev, reg); + + if (map->precious_table) + return 
regmap_check_range_table(map, reg, map->precious_table); + + return false; +} + +bool regmap_writeable_noinc(struct regmap *map, unsigned int reg) +{ + if (map->writeable_noinc_reg) + return map->writeable_noinc_reg(map->dev, reg); + + if (map->wr_noinc_table) + return regmap_check_range_table(map, reg, map->wr_noinc_table); + + return true; +} + +bool regmap_readable_noinc(struct regmap *map, unsigned int reg) +{ + if (map->readable_noinc_reg) + return map->readable_noinc_reg(map->dev, reg); + + if (map->rd_noinc_table) + return regmap_check_range_table(map, reg, map->rd_noinc_table); + + return true; +} + +static bool regmap_volatile_range(struct regmap *map, unsigned int reg, + size_t num) +{ + unsigned int i; + + for (i = 0; i < num; i++) + if (!regmap_volatile(map, reg + regmap_get_offset(map, i))) + return false; + + return true; +} + +static void regmap_format_12_20_write(struct regmap *map, + unsigned int reg, unsigned int val) +{ + u8 *out = map->work_buf; + + out[0] = reg >> 4; + out[1] = (reg << 4) | (val >> 16); + out[2] = val >> 8; + out[3] = val; +} + + +static void regmap_format_2_6_write(struct regmap *map, + unsigned int reg, unsigned int val) +{ + u8 *out = map->work_buf; + + *out = (reg << 6) | val; +} + +static void regmap_format_4_12_write(struct regmap *map, + unsigned int reg, unsigned int val) +{ + __be16 *out = map->work_buf; + *out = cpu_to_be16((reg << 12) | val); +} + +static void regmap_format_7_9_write(struct regmap *map, + unsigned int reg, unsigned int val) +{ + __be16 *out = map->work_buf; + *out = cpu_to_be16((reg << 9) | val); +} + +static void regmap_format_10_14_write(struct regmap *map, + unsigned int reg, unsigned int val) +{ + u8 *out = map->work_buf; + + out[2] = val; + out[1] = (val >> 8) | (reg << 6); + out[0] = reg >> 2; +} + +static void regmap_format_8(void *buf, unsigned int val, unsigned int shift) +{ + u8 *b = buf; + + b[0] = val << shift; +} + +static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) +{ + put_unaligned_be16(val << shift, buf); +} + +static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) +{ + put_unaligned_le16(val << shift, buf); +} + +static void regmap_format_16_native(void *buf, unsigned int val, + unsigned int shift) +{ + u16 v = val << shift; + + memcpy(buf, &v, sizeof(v)); +} + +static void regmap_format_24(void *buf, unsigned int val, unsigned int shift) +{ + u8 *b = buf; + + val <<= shift; + + b[0] = val >> 16; + b[1] = val >> 8; + b[2] = val; +} + +static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) +{ + put_unaligned_be32(val << shift, buf); +} + +static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) +{ + put_unaligned_le32(val << shift, buf); +} + +static void regmap_format_32_native(void *buf, unsigned int val, + unsigned int shift) +{ + u32 v = val << shift; + + memcpy(buf, &v, sizeof(v)); +} + +#ifdef CONFIG_64BIT +static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift) +{ + put_unaligned_be64((u64) val << shift, buf); +} + +static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift) +{ + put_unaligned_le64((u64) val << shift, buf); +} + +static void regmap_format_64_native(void *buf, unsigned int val, + unsigned int shift) +{ + u64 v = (u64) val << shift; + + memcpy(buf, &v, sizeof(v)); +} +#endif + +static void regmap_parse_inplace_noop(void *buf) +{ +} + +static unsigned int regmap_parse_8(const void *buf) +{ + const u8 *b = buf; + + return b[0]; +} + 
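/*
 * Editorial note, not part of the original patch: the regmap_format_* and
 * regmap_parse_* helpers above and below are paired inverses that pack a
 * value into, or unpack it from, the raw wire buffer. They are selected
 * once at map initialisation time based on val_bits and the requested
 * value endianness. A minimal, conceptual round-trip sketch for the
 * 16-bit big-endian pair (the value 0x1234 is arbitrary):
 *
 *	u8 buf[2];
 *	unsigned int val = 0x1234;
 *
 *	regmap_format_16_be(buf, val, 0);	// buf[0] == 0x12, buf[1] == 0x34
 *	val = regmap_parse_16_be(buf);		// val == 0x1234 again
 */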
+static unsigned int regmap_parse_16_be(const void *buf) +{ + return get_unaligned_be16(buf); +} + +static unsigned int regmap_parse_16_le(const void *buf) +{ + return get_unaligned_le16(buf); +} + +static void regmap_parse_16_be_inplace(void *buf) +{ + u16 v = get_unaligned_be16(buf); + + memcpy(buf, &v, sizeof(v)); +} + +static void regmap_parse_16_le_inplace(void *buf) +{ + u16 v = get_unaligned_le16(buf); + + memcpy(buf, &v, sizeof(v)); +} + +static unsigned int regmap_parse_16_native(const void *buf) +{ + u16 v; + + memcpy(&v, buf, sizeof(v)); + return v; +} + +static unsigned int regmap_parse_24(const void *buf) +{ + const u8 *b = buf; + unsigned int ret = b[2]; + ret |= ((unsigned int)b[1]) << 8; + ret |= ((unsigned int)b[0]) << 16; + + return ret; +} + +static unsigned int regmap_parse_32_be(const void *buf) +{ + return get_unaligned_be32(buf); +} + +static unsigned int regmap_parse_32_le(const void *buf) +{ + return get_unaligned_le32(buf); +} + +static void regmap_parse_32_be_inplace(void *buf) +{ + u32 v = get_unaligned_be32(buf); + + memcpy(buf, &v, sizeof(v)); +} + +static void regmap_parse_32_le_inplace(void *buf) +{ + u32 v = get_unaligned_le32(buf); + + memcpy(buf, &v, sizeof(v)); +} + +static unsigned int regmap_parse_32_native(const void *buf) +{ + u32 v; + + memcpy(&v, buf, sizeof(v)); + return v; +} + +#ifdef CONFIG_64BIT +static unsigned int regmap_parse_64_be(const void *buf) +{ + return get_unaligned_be64(buf); +} + +static unsigned int regmap_parse_64_le(const void *buf) +{ + return get_unaligned_le64(buf); +} + +static void regmap_parse_64_be_inplace(void *buf) +{ + u64 v = get_unaligned_be64(buf); + + memcpy(buf, &v, sizeof(v)); +} + +static void regmap_parse_64_le_inplace(void *buf) +{ + u64 v = get_unaligned_le64(buf); + + memcpy(buf, &v, sizeof(v)); +} + +static unsigned int regmap_parse_64_native(const void *buf) +{ + u64 v; + + memcpy(&v, buf, sizeof(v)); + return v; +} +#endif + +static void regmap_lock_hwlock(void *__map) +{ + struct regmap *map = __map; + + hwspin_lock_timeout(map->hwlock, UINT_MAX); +} + +static void regmap_lock_hwlock_irq(void *__map) +{ + struct regmap *map = __map; + + hwspin_lock_timeout_irq(map->hwlock, UINT_MAX); +} + +static void regmap_lock_hwlock_irqsave(void *__map) +{ + struct regmap *map = __map; + + hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX, + &map->spinlock_flags); +} + +static void regmap_unlock_hwlock(void *__map) +{ + struct regmap *map = __map; + + hwspin_unlock(map->hwlock); +} + +static void regmap_unlock_hwlock_irq(void *__map) +{ + struct regmap *map = __map; + + hwspin_unlock_irq(map->hwlock); +} + +static void regmap_unlock_hwlock_irqrestore(void *__map) +{ + struct regmap *map = __map; + + hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags); +} + +static void regmap_lock_unlock_none(void *__map) +{ + +} + +static void regmap_lock_mutex(void *__map) +{ + struct regmap *map = __map; + mutex_lock(&map->mutex); +} + +static void regmap_unlock_mutex(void *__map) +{ + struct regmap *map = __map; + mutex_unlock(&map->mutex); +} + +static void regmap_lock_spinlock(void *__map) +__acquires(&map->spinlock) +{ + struct regmap *map = __map; + unsigned long flags; + + spin_lock_irqsave(&map->spinlock, flags); + map->spinlock_flags = flags; +} + +static void regmap_unlock_spinlock(void *__map) +__releases(&map->spinlock) +{ + struct regmap *map = __map; + spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); +} + +static void dev_get_regmap_release(struct device *dev, void *res) +{ + /* + * We don't 
actually have anything to do here; the goal here + * is not to manage the regmap but to provide a simple way to + * get the regmap back given a struct device. + */ +} + +static bool _regmap_range_add(struct regmap *map, + struct regmap_range_node *data) +{ + struct rb_root *root = &map->range_tree; + struct rb_node **new = &(root->rb_node), *parent = NULL; + + while (*new) { + struct regmap_range_node *this = + rb_entry(*new, struct regmap_range_node, node); + + parent = *new; + if (data->range_max < this->range_min) + new = &((*new)->rb_left); + else if (data->range_min > this->range_max) + new = &((*new)->rb_right); + else + return false; + } + + rb_link_node(&data->node, parent, new); + rb_insert_color(&data->node, root); + + return true; +} + +static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, + unsigned int reg) +{ + struct rb_node *node = map->range_tree.rb_node; + + while (node) { + struct regmap_range_node *this = + rb_entry(node, struct regmap_range_node, node); + + if (reg < this->range_min) + node = node->rb_left; + else if (reg > this->range_max) + node = node->rb_right; + else + return this; + } + + return NULL; +} + +static void regmap_range_exit(struct regmap *map) +{ + struct rb_node *next; + struct regmap_range_node *range_node; + + next = rb_first(&map->range_tree); + while (next) { + range_node = rb_entry(next, struct regmap_range_node, node); + next = rb_next(&range_node->node); + rb_erase(&range_node->node, &map->range_tree); + kfree(range_node); + } + + kfree(map->selector_work_buf); +} + +static int regmap_set_name(struct regmap *map, const struct regmap_config *config) +{ + if (config->name) { + const char *name = kstrdup_const(config->name, GFP_KERNEL); + + if (!name) + return -ENOMEM; + + kfree_const(map->name); + map->name = name; + } + + return 0; +} + +int regmap_attach_dev(struct device *dev, struct regmap *map, + const struct regmap_config *config) +{ + struct regmap **m; + int ret; + + map->dev = dev; + + ret = regmap_set_name(map, config); + if (ret) + return ret; + + regmap_debugfs_exit(map); + regmap_debugfs_init(map); + + /* Add a devres resource for dev_get_regmap() */ + m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); + if (!m) { + regmap_debugfs_exit(map); + return -ENOMEM; + } + *m = map; + devres_add(dev, m); + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_attach_dev); + +static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, + const struct regmap_config *config) +{ + enum regmap_endian endian; + + /* Retrieve the endianness specification from the regmap config */ + endian = config->reg_format_endian; + + /* If the regmap config specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Retrieve the endianness specification from the bus config */ + if (bus && bus->reg_format_endian_default) + endian = bus->reg_format_endian_default; + + /* If the bus specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Use this if no other value was found */ + return REGMAP_ENDIAN_BIG; +} + +enum regmap_endian regmap_get_val_endian(struct device *dev, + const struct regmap_bus *bus, + const struct regmap_config *config) +{ + struct fwnode_handle *fwnode = dev ? 
dev_fwnode(dev) : NULL; + enum regmap_endian endian; + + /* Retrieve the endianness specification from the regmap config */ + endian = config->val_format_endian; + + /* If the regmap config specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* If the firmware node exist try to get endianness from it */ + if (fwnode_property_read_bool(fwnode, "big-endian")) + endian = REGMAP_ENDIAN_BIG; + else if (fwnode_property_read_bool(fwnode, "little-endian")) + endian = REGMAP_ENDIAN_LITTLE; + else if (fwnode_property_read_bool(fwnode, "native-endian")) + endian = REGMAP_ENDIAN_NATIVE; + + /* If the endianness was specified in fwnode, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Retrieve the endianness specification from the bus config */ + if (bus && bus->val_format_endian_default) + endian = bus->val_format_endian_default; + + /* If the bus specified a non-default value, use that */ + if (endian != REGMAP_ENDIAN_DEFAULT) + return endian; + + /* Use this if no other value was found */ + return REGMAP_ENDIAN_BIG; +} +EXPORT_SYMBOL_GPL(regmap_get_val_endian); + +struct regmap *__regmap_init(struct device *dev, + const struct regmap_bus *bus, + void *bus_context, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap *map; + int ret = -EINVAL; + enum regmap_endian reg_endian, val_endian; + int i, j; + + if (!config) + goto err; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) { + ret = -ENOMEM; + goto err; + } + + ret = regmap_set_name(map, config); + if (ret) + goto err_map; + + ret = -EINVAL; /* Later error paths rely on this */ + + if (config->disable_locking) { + map->lock = map->unlock = regmap_lock_unlock_none; + map->can_sleep = config->can_sleep; + regmap_debugfs_disable(map); + } else if (config->lock && config->unlock) { + map->lock = config->lock; + map->unlock = config->unlock; + map->lock_arg = config->lock_arg; + map->can_sleep = config->can_sleep; + } else if (config->use_hwlock) { + map->hwlock = hwspin_lock_request_specific(config->hwlock_id); + if (!map->hwlock) { + ret = -ENXIO; + goto err_name; + } + + switch (config->hwlock_mode) { + case HWLOCK_IRQSTATE: + map->lock = regmap_lock_hwlock_irqsave; + map->unlock = regmap_unlock_hwlock_irqrestore; + break; + case HWLOCK_IRQ: + map->lock = regmap_lock_hwlock_irq; + map->unlock = regmap_unlock_hwlock_irq; + break; + default: + map->lock = regmap_lock_hwlock; + map->unlock = regmap_unlock_hwlock; + break; + } + + map->lock_arg = map; + } else { + if ((bus && bus->fast_io) || + config->fast_io) { + spin_lock_init(&map->spinlock); + map->lock = regmap_lock_spinlock; + map->unlock = regmap_unlock_spinlock; + lockdep_set_class_and_name(&map->spinlock, + lock_key, lock_name); + } else { + mutex_init(&map->mutex); + map->lock = regmap_lock_mutex; + map->unlock = regmap_unlock_mutex; + map->can_sleep = true; + lockdep_set_class_and_name(&map->mutex, + lock_key, lock_name); + } + map->lock_arg = map; + } + + /* + * When we write in fast-paths with regmap_bulk_write() don't allocate + * scratch buffers with sleeping allocations. 
+ */ + if ((bus && bus->fast_io) || config->fast_io) + map->alloc_flags = GFP_ATOMIC; + else + map->alloc_flags = GFP_KERNEL; + + map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); + map->format.pad_bytes = config->pad_bits / 8; + map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); + map->format.buf_size = DIV_ROUND_UP(config->reg_bits + + config->val_bits + config->pad_bits, 8); + map->reg_shift = config->pad_bits % 8; + if (config->reg_stride) + map->reg_stride = config->reg_stride; + else + map->reg_stride = 1; + if (is_power_of_2(map->reg_stride)) + map->reg_stride_order = ilog2(map->reg_stride); + else + map->reg_stride_order = -1; + map->use_single_read = config->use_single_read || !bus || !bus->read; + map->use_single_write = config->use_single_write || !bus || !bus->write; + map->can_multi_write = config->can_multi_write && bus && bus->write; + if (bus) { + map->max_raw_read = bus->max_raw_read; + map->max_raw_write = bus->max_raw_write; + } + map->dev = dev; + map->bus = bus; + map->bus_context = bus_context; + map->max_register = config->max_register; + map->wr_table = config->wr_table; + map->rd_table = config->rd_table; + map->volatile_table = config->volatile_table; + map->precious_table = config->precious_table; + map->wr_noinc_table = config->wr_noinc_table; + map->rd_noinc_table = config->rd_noinc_table; + map->writeable_reg = config->writeable_reg; + map->readable_reg = config->readable_reg; + map->volatile_reg = config->volatile_reg; + map->precious_reg = config->precious_reg; + map->writeable_noinc_reg = config->writeable_noinc_reg; + map->readable_noinc_reg = config->readable_noinc_reg; + map->cache_type = config->cache_type; + + spin_lock_init(&map->async_lock); + INIT_LIST_HEAD(&map->async_list); + INIT_LIST_HEAD(&map->async_free); + init_waitqueue_head(&map->async_waitq); + + if (config->read_flag_mask || + config->write_flag_mask || + config->zero_flag_mask) { + map->read_flag_mask = config->read_flag_mask; + map->write_flag_mask = config->write_flag_mask; + } else if (bus) { + map->read_flag_mask = bus->read_flag_mask; + } + + if (!bus) { + map->reg_read = config->reg_read; + map->reg_write = config->reg_write; + + map->defer_caching = false; + goto skip_format_initialization; + } else if (!bus->read || !bus->write) { + map->reg_read = _regmap_bus_reg_read; + map->reg_write = _regmap_bus_reg_write; + map->reg_update_bits = bus->reg_update_bits; + + map->defer_caching = false; + goto skip_format_initialization; + } else { + map->reg_read = _regmap_bus_read; + map->reg_update_bits = bus->reg_update_bits; + } + + reg_endian = regmap_get_reg_endian(bus, config); + val_endian = regmap_get_val_endian(dev, bus, config); + + switch (config->reg_bits + map->reg_shift) { + case 2: + switch (config->val_bits) { + case 6: + map->format.format_write = regmap_format_2_6_write; + break; + default: + goto err_hwlock; + } + break; + + case 4: + switch (config->val_bits) { + case 12: + map->format.format_write = regmap_format_4_12_write; + break; + default: + goto err_hwlock; + } + break; + + case 7: + switch (config->val_bits) { + case 9: + map->format.format_write = regmap_format_7_9_write; + break; + default: + goto err_hwlock; + } + break; + + case 10: + switch (config->val_bits) { + case 14: + map->format.format_write = regmap_format_10_14_write; + break; + default: + goto err_hwlock; + } + break; + + case 12: + switch (config->val_bits) { + case 20: + map->format.format_write = regmap_format_12_20_write; + break; + default: + goto err_hwlock; + } + break; + + 
case 8: + map->format.format_reg = regmap_format_8; + break; + + case 16: + switch (reg_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_reg = regmap_format_16_be; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_reg = regmap_format_16_le; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_reg = regmap_format_16_native; + break; + default: + goto err_hwlock; + } + break; + + case 24: + if (reg_endian != REGMAP_ENDIAN_BIG) + goto err_hwlock; + map->format.format_reg = regmap_format_24; + break; + + case 32: + switch (reg_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_reg = regmap_format_32_be; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_reg = regmap_format_32_le; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_reg = regmap_format_32_native; + break; + default: + goto err_hwlock; + } + break; + +#ifdef CONFIG_64BIT + case 64: + switch (reg_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_reg = regmap_format_64_be; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_reg = regmap_format_64_le; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_reg = regmap_format_64_native; + break; + default: + goto err_hwlock; + } + break; +#endif + + default: + goto err_hwlock; + } + + if (val_endian == REGMAP_ENDIAN_NATIVE) + map->format.parse_inplace = regmap_parse_inplace_noop; + + switch (config->val_bits) { + case 8: + map->format.format_val = regmap_format_8; + map->format.parse_val = regmap_parse_8; + map->format.parse_inplace = regmap_parse_inplace_noop; + break; + case 16: + switch (val_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_val = regmap_format_16_be; + map->format.parse_val = regmap_parse_16_be; + map->format.parse_inplace = regmap_parse_16_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_16_le; + map->format.parse_val = regmap_parse_16_le; + map->format.parse_inplace = regmap_parse_16_le_inplace; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_val = regmap_format_16_native; + map->format.parse_val = regmap_parse_16_native; + break; + default: + goto err_hwlock; + } + break; + case 24: + if (val_endian != REGMAP_ENDIAN_BIG) + goto err_hwlock; + map->format.format_val = regmap_format_24; + map->format.parse_val = regmap_parse_24; + break; + case 32: + switch (val_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_val = regmap_format_32_be; + map->format.parse_val = regmap_parse_32_be; + map->format.parse_inplace = regmap_parse_32_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_32_le; + map->format.parse_val = regmap_parse_32_le; + map->format.parse_inplace = regmap_parse_32_le_inplace; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_val = regmap_format_32_native; + map->format.parse_val = regmap_parse_32_native; + break; + default: + goto err_hwlock; + } + break; +#ifdef CONFIG_64BIT + case 64: + switch (val_endian) { + case REGMAP_ENDIAN_BIG: + map->format.format_val = regmap_format_64_be; + map->format.parse_val = regmap_parse_64_be; + map->format.parse_inplace = regmap_parse_64_be_inplace; + break; + case REGMAP_ENDIAN_LITTLE: + map->format.format_val = regmap_format_64_le; + map->format.parse_val = regmap_parse_64_le; + map->format.parse_inplace = regmap_parse_64_le_inplace; + break; + case REGMAP_ENDIAN_NATIVE: + map->format.format_val = regmap_format_64_native; + map->format.parse_val = regmap_parse_64_native; + break; + default: + goto err_hwlock; + } + break; +#endif + } + + if 
(map->format.format_write) { + if ((reg_endian != REGMAP_ENDIAN_BIG) || + (val_endian != REGMAP_ENDIAN_BIG)) + goto err_hwlock; + map->use_single_write = true; + } + + if (!map->format.format_write && + !(map->format.format_reg && map->format.format_val)) + goto err_hwlock; + + map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); + if (map->work_buf == NULL) { + ret = -ENOMEM; + goto err_hwlock; + } + + if (map->format.format_write) { + map->defer_caching = false; + map->reg_write = _regmap_bus_formatted_write; + } else if (map->format.format_val) { + map->defer_caching = true; + map->reg_write = _regmap_bus_raw_write; + } + +skip_format_initialization: + + map->range_tree = RB_ROOT; + for (i = 0; i < config->num_ranges; i++) { + const struct regmap_range_cfg *range_cfg = &config->ranges[i]; + struct regmap_range_node *new; + + /* Sanity check */ + if (range_cfg->range_max < range_cfg->range_min) { + dev_err(map->dev, "Invalid range %d: %d < %d\n", i, + range_cfg->range_max, range_cfg->range_min); + goto err_range; + } + + if (range_cfg->range_max > map->max_register) { + dev_err(map->dev, "Invalid range %d: %d > %d\n", i, + range_cfg->range_max, map->max_register); + goto err_range; + } + + if (range_cfg->selector_reg > map->max_register) { + dev_err(map->dev, + "Invalid range %d: selector out of map\n", i); + goto err_range; + } + + if (range_cfg->window_len == 0) { + dev_err(map->dev, "Invalid range %d: window_len 0\n", + i); + goto err_range; + } + + /* Make sure, that this register range has no selector + or data window within its boundary */ + for (j = 0; j < config->num_ranges; j++) { + unsigned sel_reg = config->ranges[j].selector_reg; + unsigned win_min = config->ranges[j].window_start; + unsigned win_max = win_min + + config->ranges[j].window_len - 1; + + /* Allow data window inside its own virtual range */ + if (j == i) + continue; + + if (range_cfg->range_min <= sel_reg && + sel_reg <= range_cfg->range_max) { + dev_err(map->dev, + "Range %d: selector for %d in window\n", + i, j); + goto err_range; + } + + if (!(win_max < range_cfg->range_min || + win_min > range_cfg->range_max)) { + dev_err(map->dev, + "Range %d: window for %d in window\n", + i, j); + goto err_range; + } + } + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (new == NULL) { + ret = -ENOMEM; + goto err_range; + } + + new->map = map; + new->name = range_cfg->name; + new->range_min = range_cfg->range_min; + new->range_max = range_cfg->range_max; + new->selector_reg = range_cfg->selector_reg; + new->selector_mask = range_cfg->selector_mask; + new->selector_shift = range_cfg->selector_shift; + new->window_start = range_cfg->window_start; + new->window_len = range_cfg->window_len; + + if (!_regmap_range_add(map, new)) { + dev_err(map->dev, "Failed to add range %d\n", i); + kfree(new); + goto err_range; + } + + if (map->selector_work_buf == NULL) { + map->selector_work_buf = + kzalloc(map->format.buf_size, GFP_KERNEL); + if (map->selector_work_buf == NULL) { + ret = -ENOMEM; + goto err_range; + } + } + } + + ret = regcache_init(map, config); + if (ret != 0) + goto err_range; + + if (dev) { + ret = regmap_attach_dev(dev, map, config); + if (ret != 0) + goto err_regcache; + } else { + regmap_debugfs_init(map); + } + + return map; + +err_regcache: + regcache_exit(map); +err_range: + regmap_range_exit(map); + kfree(map->work_buf); +err_hwlock: + if (map->hwlock) + hwspin_lock_free(map->hwlock); +err_name: + kfree_const(map->name); +err_map: + kfree(map); +err: + return ERR_PTR(ret); +} 
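/*
 * Editorial sketch, not part of the original patch: drivers do not call
 * __regmap_init() directly; they go through the bus-specific wrappers
 * (regmap_init_spi(), devm_regmap_init_w1(), ...) declared in
 * <linux/regmap.h>, which supply the lockdep key/name and the bus ops
 * handled above. A hypothetical SPI client ("foo" is an invented example
 * device; register addresses and values are arbitrary) might look
 * roughly like this:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_spi(spi, &foo_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *
 *		return regmap_write(map, 0x00, 0x01);
 *	}
 */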
+EXPORT_SYMBOL_GPL(__regmap_init); + +static void devm_regmap_release(struct device *dev, void *res) +{ + regmap_exit(*(struct regmap **)res); +} + +struct regmap *__devm_regmap_init(struct device *dev, + const struct regmap_bus *bus, + void *bus_context, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct regmap **ptr, *regmap; + + ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + regmap = __regmap_init(dev, bus, bus_context, config, + lock_key, lock_name); + if (!IS_ERR(regmap)) { + *ptr = regmap; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return regmap; +} +EXPORT_SYMBOL_GPL(__devm_regmap_init); + +static void regmap_field_init(struct regmap_field *rm_field, + struct regmap *regmap, struct reg_field reg_field) +{ + rm_field->regmap = regmap; + rm_field->reg = reg_field.reg; + rm_field->shift = reg_field.lsb; + rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb); + rm_field->id_size = reg_field.id_size; + rm_field->id_offset = reg_field.id_offset; +} + +/** + * devm_regmap_field_alloc() - Allocate and initialise a register field. + * + * @dev: Device that will be interacted with + * @regmap: regmap bank in which this register field is located. + * @reg_field: Register field with in the bank. + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap_field. The regmap_field will be automatically freed + * by the device management code. + */ +struct regmap_field *devm_regmap_field_alloc(struct device *dev, + struct regmap *regmap, struct reg_field reg_field) +{ + struct regmap_field *rm_field = devm_kzalloc(dev, + sizeof(*rm_field), GFP_KERNEL); + if (!rm_field) + return ERR_PTR(-ENOMEM); + + regmap_field_init(rm_field, regmap, reg_field); + + return rm_field; + +} +EXPORT_SYMBOL_GPL(devm_regmap_field_alloc); + + +/** + * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field. + * + * @regmap: regmap bank in which this register field is located. + * @rm_field: regmap register fields within the bank. + * @reg_field: Register fields within the bank. + * @num_fields: Number of register fields. + * + * The return value will be an -ENOMEM on error or zero for success. + * Newly allocated regmap_fields should be freed by calling + * regmap_field_bulk_free() + */ +int regmap_field_bulk_alloc(struct regmap *regmap, + struct regmap_field **rm_field, + struct reg_field *reg_field, + int num_fields) +{ + struct regmap_field *rf; + int i; + + rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL); + if (!rf) + return -ENOMEM; + + for (i = 0; i < num_fields; i++) { + regmap_field_init(&rf[i], regmap, reg_field[i]); + rm_field[i] = &rf[i]; + } + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc); + +/** + * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register + * fields. + * + * @dev: Device that will be interacted with + * @regmap: regmap bank in which this register field is located. + * @rm_field: regmap register fields within the bank. + * @reg_field: Register fields within the bank. + * @num_fields: Number of register fields. + * + * The return value will be an -ENOMEM on error or zero for success. + * Newly allocated regmap_fields will be automatically freed by the + * device management code. 
+ */ +int devm_regmap_field_bulk_alloc(struct device *dev, + struct regmap *regmap, + struct regmap_field **rm_field, + struct reg_field *reg_field, + int num_fields) +{ + struct regmap_field *rf; + int i; + + rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL); + if (!rf) + return -ENOMEM; + + for (i = 0; i < num_fields; i++) { + regmap_field_init(&rf[i], regmap, reg_field[i]); + rm_field[i] = &rf[i]; + } + + return 0; +} +EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc); + +/** + * regmap_field_bulk_free() - Free register field allocated using + * regmap_field_bulk_alloc. + * + * @field: regmap fields which should be freed. + */ +void regmap_field_bulk_free(struct regmap_field *field) +{ + kfree(field); +} +EXPORT_SYMBOL_GPL(regmap_field_bulk_free); + +/** + * devm_regmap_field_bulk_free() - Free a bulk register field allocated using + * devm_regmap_field_bulk_alloc. + * + * @dev: Device that will be interacted with + * @field: regmap field which should be freed. + * + * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually + * drivers need not call this function, as the memory allocated via devm + * will be freed as per device-driver life-cycle. + */ +void devm_regmap_field_bulk_free(struct device *dev, + struct regmap_field *field) +{ + devm_kfree(dev, field); +} +EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free); + +/** + * devm_regmap_field_free() - Free a register field allocated using + * devm_regmap_field_alloc. + * + * @dev: Device that will be interacted with + * @field: regmap field which should be freed. + * + * Free register field allocated using devm_regmap_field_alloc(). Usually + * drivers need not call this function, as the memory allocated via devm + * will be freed as per device-driver life-cyle. + */ +void devm_regmap_field_free(struct device *dev, + struct regmap_field *field) +{ + devm_kfree(dev, field); +} +EXPORT_SYMBOL_GPL(devm_regmap_field_free); + +/** + * regmap_field_alloc() - Allocate and initialise a register field. + * + * @regmap: regmap bank in which this register field is located. + * @reg_field: Register field with in the bank. + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap_field. The regmap_field should be freed by the + * user once its finished working with it using regmap_field_free(). + */ +struct regmap_field *regmap_field_alloc(struct regmap *regmap, + struct reg_field reg_field) +{ + struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL); + + if (!rm_field) + return ERR_PTR(-ENOMEM); + + regmap_field_init(rm_field, regmap, reg_field); + + return rm_field; +} +EXPORT_SYMBOL_GPL(regmap_field_alloc); + +/** + * regmap_field_free() - Free register field allocated using + * regmap_field_alloc. + * + * @field: regmap field which should be freed. + */ +void regmap_field_free(struct regmap_field *field) +{ + kfree(field); +} +EXPORT_SYMBOL_GPL(regmap_field_free); + +/** + * regmap_reinit_cache() - Reinitialise the current register cache + * + * @map: Register map to operate on. + * @config: New configuration. Only the cache data will be used. + * + * Discard any existing register cache for the map and initialize a + * new cache. This can be used to restore the cache to defaults or to + * update the cache configuration to reflect runtime discovery of the + * hardware. + * + * No explicit locking is done here, the user needs to ensure that + * this function will not race with other calls to regmap. 
+ */ +int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) +{ + int ret; + + regcache_exit(map); + regmap_debugfs_exit(map); + + map->max_register = config->max_register; + map->writeable_reg = config->writeable_reg; + map->readable_reg = config->readable_reg; + map->volatile_reg = config->volatile_reg; + map->precious_reg = config->precious_reg; + map->writeable_noinc_reg = config->writeable_noinc_reg; + map->readable_noinc_reg = config->readable_noinc_reg; + map->cache_type = config->cache_type; + + ret = regmap_set_name(map, config); + if (ret) + return ret; + + regmap_debugfs_init(map); + + map->cache_bypass = false; + map->cache_only = false; + + return regcache_init(map, config); +} +EXPORT_SYMBOL_GPL(regmap_reinit_cache); + +/** + * regmap_exit() - Free a previously allocated register map + * + * @map: Register map to operate on. + */ +void regmap_exit(struct regmap *map) +{ + struct regmap_async *async; + + regcache_exit(map); + regmap_debugfs_exit(map); + regmap_range_exit(map); + if (map->bus && map->bus->free_context) + map->bus->free_context(map->bus_context); + kfree(map->work_buf); + while (!list_empty(&map->async_free)) { + async = list_first_entry_or_null(&map->async_free, + struct regmap_async, + list); + list_del(&async->list); + kfree(async->work_buf); + kfree(async); + } + if (map->hwlock) + hwspin_lock_free(map->hwlock); + if (map->lock == regmap_lock_mutex) + mutex_destroy(&map->mutex); + kfree_const(map->name); + kfree(map->patch); + kfree(map); +} +EXPORT_SYMBOL_GPL(regmap_exit); + +static int dev_get_regmap_match(struct device *dev, void *res, void *data) +{ + struct regmap **r = res; + if (!r || !*r) { + WARN_ON(!r || !*r); + return 0; + } + + /* If the user didn't specify a name match any */ + if (data) + return (*r)->name && !strcmp((*r)->name, data); + else + return 1; +} + +/** + * dev_get_regmap() - Obtain the regmap (if any) for a device + * + * @dev: Device to retrieve the map for + * @name: Optional name for the register map, usually NULL. + * + * Returns the regmap for the device if one is present, or NULL. If + * name is specified then it must match the name specified when + * registering the device, if it is NULL then the first regmap found + * will be used. Devices with multiple register maps are very rare, + * generic code should normally not need to specify a name. + */ +struct regmap *dev_get_regmap(struct device *dev, const char *name) +{ + struct regmap **r = devres_find(dev, dev_get_regmap_release, + dev_get_regmap_match, (void *)name); + + if (!r) + return NULL; + return *r; +} +EXPORT_SYMBOL_GPL(dev_get_regmap); + +/** + * regmap_get_device() - Obtain the device from a regmap + * + * @map: Register map to operate on. + * + * Returns the underlying device that the regmap has been created for. + */ +struct device *regmap_get_device(struct regmap *map) +{ + return map->dev; +} +EXPORT_SYMBOL_GPL(regmap_get_device); + +static int _regmap_select_page(struct regmap *map, unsigned int *reg, + struct regmap_range_node *range, + unsigned int val_num) +{ + void *orig_work_buf; + unsigned int win_offset; + unsigned int win_page; + bool page_chg; + int ret; + + win_offset = (*reg - range->range_min) % range->window_len; + win_page = (*reg - range->range_min) / range->window_len; + + if (val_num > 1) { + /* Bulk write shouldn't cross range boundary */ + if (*reg + val_num - 1 > range->range_max) + return -EINVAL; + + /* ... 
or single page boundary */ + if (val_num > range->window_len - win_offset) + return -EINVAL; + } + + /* It is possible to have selector register inside data window. + In that case, selector register is located on every page and + it needs no page switching, when accessed alone. */ + if (val_num > 1 || + range->window_start + win_offset != range->selector_reg) { + /* Use separate work_buf during page switching */ + orig_work_buf = map->work_buf; + map->work_buf = map->selector_work_buf; + + ret = _regmap_update_bits(map, range->selector_reg, + range->selector_mask, + win_page << range->selector_shift, + &page_chg, false); + + map->work_buf = orig_work_buf; + + if (ret != 0) + return ret; + } + + *reg = range->window_start + win_offset; + + return 0; +} + +static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, + unsigned long mask) +{ + u8 *buf; + int i; + + if (!mask || !map->work_buf) + return; + + buf = map->work_buf; + + for (i = 0; i < max_bytes; i++) + buf[i] |= (mask >> (8 * i)) & 0xff; +} + +static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, + const void *val, size_t val_len, bool noinc) +{ + struct regmap_range_node *range; + unsigned long flags; + void *work_val = map->work_buf + map->format.reg_bytes + + map->format.pad_bytes; + void *buf; + int ret = -ENOTSUPP; + size_t len; + int i; + + WARN_ON(!map->bus); + + /* Check for unwritable or noinc registers in range + * before we start + */ + if (!regmap_writeable_noinc(map, reg)) { + for (i = 0; i < val_len / map->format.val_bytes; i++) { + unsigned int element = + reg + regmap_get_offset(map, i); + if (!regmap_writeable(map, element) || + regmap_writeable_noinc(map, element)) + return -EINVAL; + } + } + + if (!map->cache_bypass && map->format.parse_val) { + unsigned int ival, offset; + int val_bytes = map->format.val_bytes; + + /* Cache the last written value for noinc writes */ + i = noinc ? val_len - val_bytes : 0; + for (; i < val_len; i += val_bytes) { + ival = map->format.parse_val(val + i); + offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes); + ret = regcache_write(map, reg + offset, ival); + if (ret) { + dev_err(map->dev, + "Error in caching of register: %x ret: %d\n", + reg + offset, ret); + return ret; + } + } + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + + range = _regmap_range_lookup(map, reg); + if (range) { + int val_num = val_len / map->format.val_bytes; + int win_offset = (reg - range->range_min) % range->window_len; + int win_residue = range->window_len - win_offset; + + /* If the write goes beyond the end of the window split it */ + while (val_num > win_residue) { + dev_dbg(map->dev, "Writing window %d/%zu\n", + win_residue, val_len / map->format.val_bytes); + ret = _regmap_raw_write_impl(map, reg, val, + win_residue * + map->format.val_bytes, noinc); + if (ret != 0) + return ret; + + reg += win_residue; + val_num -= win_residue; + val += win_residue * map->format.val_bytes; + val_len -= win_residue * map->format.val_bytes; + + win_offset = (reg - range->range_min) % + range->window_len; + win_residue = range->window_len - win_offset; + } + + ret = _regmap_select_page(map, ®, range, noinc ? 1 : val_num); + if (ret != 0) + return ret; + } + + map->format.format_reg(map->work_buf, reg, map->reg_shift); + regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, + map->write_flag_mask); + + /* + * Essentially all I/O mechanisms will be faster with a single + * buffer to write. 
Since register syncs often generate raw + * writes of single registers optimise that case. + */ + if (val != work_val && val_len == map->format.val_bytes) { + memcpy(work_val, val, map->format.val_bytes); + val = work_val; + } + + if (map->async && map->bus->async_write) { + struct regmap_async *async; + + trace_regmap_async_write_start(map, reg, val_len); + + spin_lock_irqsave(&map->async_lock, flags); + async = list_first_entry_or_null(&map->async_free, + struct regmap_async, + list); + if (async) + list_del(&async->list); + spin_unlock_irqrestore(&map->async_lock, flags); + + if (!async) { + async = map->bus->async_alloc(); + if (!async) + return -ENOMEM; + + async->work_buf = kzalloc(map->format.buf_size, + GFP_KERNEL | GFP_DMA); + if (!async->work_buf) { + kfree(async); + return -ENOMEM; + } + } + + async->map = map; + + /* If the caller supplied the value we can use it safely. */ + memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + + map->format.reg_bytes + map->format.val_bytes); + + spin_lock_irqsave(&map->async_lock, flags); + list_add_tail(&async->list, &map->async_list); + spin_unlock_irqrestore(&map->async_lock, flags); + + if (val != work_val) + ret = map->bus->async_write(map->bus_context, + async->work_buf, + map->format.reg_bytes + + map->format.pad_bytes, + val, val_len, async); + else + ret = map->bus->async_write(map->bus_context, + async->work_buf, + map->format.reg_bytes + + map->format.pad_bytes + + val_len, NULL, 0, async); + + if (ret != 0) { + dev_err(map->dev, "Failed to schedule write: %d\n", + ret); + + spin_lock_irqsave(&map->async_lock, flags); + list_move(&async->list, &map->async_free); + spin_unlock_irqrestore(&map->async_lock, flags); + } + + return ret; + } + + trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); + + /* If we're doing a single register write we can probably just + * send the work_buf directly, otherwise try to do a gather + * write. + */ + if (val == work_val) + ret = map->bus->write(map->bus_context, map->work_buf, + map->format.reg_bytes + + map->format.pad_bytes + + val_len); + else if (map->bus->gather_write) + ret = map->bus->gather_write(map->bus_context, map->work_buf, + map->format.reg_bytes + + map->format.pad_bytes, + val, val_len); + else + ret = -ENOTSUPP; + + /* If that didn't work fall back on linearising by hand. */ + if (ret == -ENOTSUPP) { + len = map->format.reg_bytes + map->format.pad_bytes + val_len; + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, map->work_buf, map->format.reg_bytes); + memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, + val, val_len); + ret = map->bus->write(map->bus_context, buf, len); + + kfree(buf); + } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { + /* regcache_drop_region() takes lock that we already have, + * thus call map->cache_ops->drop() directly + */ + if (map->cache_ops && map->cache_ops->drop) + map->cache_ops->drop(map, reg, reg + 1); + } + + trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); + + return ret; +} + +/** + * regmap_can_raw_write - Test if regmap_raw_write() is supported + * + * @map: Map to check. + */ +bool regmap_can_raw_write(struct regmap *map) +{ + return map->bus && map->bus->write && map->format.format_val && + map->format.format_reg; +} +EXPORT_SYMBOL_GPL(regmap_can_raw_write); + +/** + * regmap_get_raw_read_max - Get the maximum size we can read + * + * @map: Map to check. 
+ */ +size_t regmap_get_raw_read_max(struct regmap *map) +{ + return map->max_raw_read; +} +EXPORT_SYMBOL_GPL(regmap_get_raw_read_max); + +/** + * regmap_get_raw_write_max - Get the maximum size we can write + * + * @map: Map to check. + */ +size_t regmap_get_raw_write_max(struct regmap *map) +{ + return map->max_raw_write; +} +EXPORT_SYMBOL_GPL(regmap_get_raw_write_max); + +static int _regmap_bus_formatted_write(void *context, unsigned int reg, + unsigned int val) +{ + int ret; + struct regmap_range_node *range; + struct regmap *map = context; + + WARN_ON(!map->bus || !map->format.format_write); + + range = _regmap_range_lookup(map, reg); + if (range) { + ret = _regmap_select_page(map, &reg, range, 1); + if (ret != 0) + return ret; + } + + map->format.format_write(map, reg, val); + + trace_regmap_hw_write_start(map, reg, 1); + + ret = map->bus->write(map->bus_context, map->work_buf, + map->format.buf_size); + + trace_regmap_hw_write_done(map, reg, 1); + + return ret; +} + +static int _regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct regmap *map = context; + + return map->bus->reg_write(map->bus_context, reg, val); +} + +static int _regmap_bus_raw_write(void *context, unsigned int reg, + unsigned int val) +{ + struct regmap *map = context; + + WARN_ON(!map->bus || !map->format.format_val); + + map->format.format_val(map->work_buf + map->format.reg_bytes + + map->format.pad_bytes, val, 0); + return _regmap_raw_write_impl(map, reg, + map->work_buf + + map->format.reg_bytes + + map->format.pad_bytes, + map->format.val_bytes, + false); +} + +static inline void *_regmap_map_get_context(struct regmap *map) +{ + return (map->bus) ? map : map->bus_context; +} + +int _regmap_write(struct regmap *map, unsigned int reg, + unsigned int val) +{ + int ret; + void *context = _regmap_map_get_context(map); + + if (!regmap_writeable(map, reg)) + return -EIO; + + if (!map->cache_bypass && !map->defer_caching) { + ret = regcache_write(map, reg, val); + if (ret != 0) + return ret; + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + + if (regmap_should_log(map)) + dev_info(map->dev, "%x <= %x\n", reg, val); + + trace_regmap_reg_write(map, reg, val); + + return map->reg_write(context, reg, val); +} + +/** + * regmap_write() - Write a value to a single register + * + * @map: Register map to write to + * @reg: Register to write to + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) +{ + int ret; + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + map->lock(map->lock_arg); + + ret = _regmap_write(map, reg, val); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_write); + +/** + * regmap_write_async() - Write a value to a single register asynchronously + * + * @map: Register map to write to + * @reg: Register to write to + * @val: Value to be written + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. 
+ */ +int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) +{ + int ret; + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + map->lock(map->lock_arg); + + map->async = true; + + ret = _regmap_write(map, reg, val); + + map->async = false; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_write_async); + +int _regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len, bool noinc) +{ + size_t val_bytes = map->format.val_bytes; + size_t val_count = val_len / val_bytes; + size_t chunk_count, chunk_bytes; + size_t chunk_regs = val_count; + int ret, i; + + if (!val_count) + return -EINVAL; + + if (map->use_single_write) + chunk_regs = 1; + else if (map->max_raw_write && val_len > map->max_raw_write) + chunk_regs = map->max_raw_write / val_bytes; + + chunk_count = val_count / chunk_regs; + chunk_bytes = chunk_regs * val_bytes; + + /* Write as many bytes as possible with chunk_size */ + for (i = 0; i < chunk_count; i++) { + ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc); + if (ret) + return ret; + + reg += regmap_get_offset(map, chunk_regs); + val += chunk_bytes; + val_len -= chunk_bytes; + } + + /* Write remaining bytes */ + if (val_len) + ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc); + + return ret; +} + +/** + * regmap_raw_write() - Write raw values to one or more registers + * + * @map: Register map to write to + * @reg: Initial register to write to + * @val: Block of data to be written, laid out for direct transmission to the + * device + * @val_len: Length of data pointed to by val. + * + * This function is intended to be used for things like firmware + * download where a large block of data needs to be transferred to the + * device. No formatting will be done on the data provided. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_raw_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + int ret; + + if (!regmap_can_raw_write(map)) + return -EINVAL; + if (val_len % map->format.val_bytes) + return -EINVAL; + + map->lock(map->lock_arg); + + ret = _regmap_raw_write(map, reg, val, val_len, false); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_raw_write); + +/** + * regmap_noinc_write(): Write data from a register without incrementing the + * register number + * + * @map: Register map to write to + * @reg: Register to write to + * @val: Pointer to data buffer + * @val_len: Length of output buffer in bytes. + * + * The regmap API usually assumes that bulk bus write operations will write a + * range of registers. Some devices have certain registers for which a write + * operation can write to an internal FIFO. + * + * The target register must be volatile but registers after it can be + * completely unrelated cacheable registers. + * + * This will attempt multiple writes as required to write val_len bytes. + * + * A value of zero will be returned on success, a negative errno will be + * returned in error cases. 
+ */ +int regmap_noinc_write(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + size_t write_len; + int ret; + + if (!map->bus) + return -EINVAL; + if (!map->bus->write) + return -ENOTSUPP; + if (val_len % map->format.val_bytes) + return -EINVAL; + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + if (val_len == 0) + return -EINVAL; + + map->lock(map->lock_arg); + + if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) { + ret = -EINVAL; + goto out_unlock; + } + + while (val_len) { + if (map->max_raw_write && map->max_raw_write < val_len) + write_len = map->max_raw_write; + else + write_len = val_len; + ret = _regmap_raw_write(map, reg, val, write_len, true); + if (ret) + goto out_unlock; + val = ((u8 *)val) + write_len; + val_len -= write_len; + } + +out_unlock: + map->unlock(map->lock_arg); + return ret; +} +EXPORT_SYMBOL_GPL(regmap_noinc_write); + +/** + * regmap_field_update_bits_base() - Perform a read/modify/write cycle a + * register field. + * + * @field: Register field to write to + * @mask: Bitmask to change + * @val: Value to be written + * @change: Boolean indicating if a write was done + * @async: Boolean indicating asynchronously + * @force: Boolean indicating use force update + * + * Perform a read/modify/write cycle on the register field with change, + * async, force option. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_field_update_bits_base(struct regmap_field *field, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) +{ + mask = (mask << field->shift) & field->mask; + + return regmap_update_bits_base(field->regmap, field->reg, + mask, val << field->shift, + change, async, force); +} +EXPORT_SYMBOL_GPL(regmap_field_update_bits_base); + +/** + * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a + * register field with port ID + * + * @field: Register field to write to + * @id: port ID + * @mask: Bitmask to change + * @val: Value to be written + * @change: Boolean indicating if a write was done + * @async: Boolean indicating asynchronously + * @force: Boolean indicating use force update + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) +{ + if (id >= field->id_size) + return -EINVAL; + + mask = (mask << field->shift) & field->mask; + + return regmap_update_bits_base(field->regmap, + field->reg + (field->id_offset * id), + mask, val << field->shift, + change, async, force); +} +EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base); + +/** + * regmap_bulk_write() - Write multiple registers to the device + * + * @map: Register map to write to + * @reg: First register to be write from + * @val: Block of data to be written, in native register size for device + * @val_count: Number of registers to write + * + * This function is intended to be used for writing a large block of + * data to the device either in single transfer or multiple transfer. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. 
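+ *
+ * An illustrative sketch, not part of this change (REG_COEFF_BASE is a
+ * hypothetical register and a regmap with 16-bit values is assumed):
+ *
+ *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
+ *
+ *	ret = regmap_bulk_write(map, REG_COEFF_BASE, coeffs,
+ *				ARRAY_SIZE(coeffs));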
+ */ +int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, + size_t val_count) +{ + int ret = 0, i; + size_t val_bytes = map->format.val_bytes; + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + /* + * Some devices don't support bulk write, for them we have a series of + * single write operations. + */ + if (!map->bus || !map->format.parse_inplace) { + map->lock(map->lock_arg); + for (i = 0; i < val_count; i++) { + unsigned int ival; + + switch (val_bytes) { + case 1: + ival = *(u8 *)(val + (i * val_bytes)); + break; + case 2: + ival = *(u16 *)(val + (i * val_bytes)); + break; + case 4: + ival = *(u32 *)(val + (i * val_bytes)); + break; +#ifdef CONFIG_64BIT + case 8: + ival = *(u64 *)(val + (i * val_bytes)); + break; +#endif + default: + ret = -EINVAL; + goto out; + } + + ret = _regmap_write(map, + reg + regmap_get_offset(map, i), + ival); + if (ret != 0) + goto out; + } +out: + map->unlock(map->lock_arg); + } else { + void *wval; + + wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); + if (!wval) + return -ENOMEM; + + for (i = 0; i < val_count * val_bytes; i += val_bytes) + map->format.parse_inplace(wval + i); + + ret = regmap_raw_write(map, reg, wval, val_bytes * val_count); + + kfree(wval); + } + return ret; +} +EXPORT_SYMBOL_GPL(regmap_bulk_write); + +/* + * _regmap_raw_multi_reg_write() + * + * the (register,newvalue) pairs in regs have not been formatted, but + * they are all in the same page and have been changed to being page + * relative. The page register has been written if that was necessary. + */ +static int _regmap_raw_multi_reg_write(struct regmap *map, + const struct reg_sequence *regs, + size_t num_regs) +{ + int ret; + void *buf; + int i; + u8 *u8; + size_t val_bytes = map->format.val_bytes; + size_t reg_bytes = map->format.reg_bytes; + size_t pad_bytes = map->format.pad_bytes; + size_t pair_size = reg_bytes + pad_bytes + val_bytes; + size_t len = pair_size * num_regs; + + if (!len) + return -EINVAL; + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* We have to linearise by hand. */ + + u8 = buf; + + for (i = 0; i < num_regs; i++) { + unsigned int reg = regs[i].reg; + unsigned int val = regs[i].def; + trace_regmap_hw_write_start(map, reg, 1); + map->format.format_reg(u8, reg, map->reg_shift); + u8 += reg_bytes + pad_bytes; + map->format.format_val(u8, val, 0); + u8 += val_bytes; + } + u8 = buf; + *u8 |= map->write_flag_mask; + + ret = map->bus->write(map->bus_context, buf, len); + + kfree(buf); + + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + trace_regmap_hw_write_done(map, reg, 1); + } + return ret; +} + +static unsigned int _regmap_register_page(struct regmap *map, + unsigned int reg, + struct regmap_range_node *range) +{ + unsigned int win_page = (reg - range->range_min) / range->window_len; + + return win_page; +} + +static int _regmap_range_multi_paged_reg_write(struct regmap *map, + struct reg_sequence *regs, + size_t num_regs) +{ + int ret; + int i, n; + struct reg_sequence *base; + unsigned int this_page = 0; + unsigned int page_change = 0; + /* + * the set of registers are not neccessarily in order, but + * since the order of write must be preserved this algorithm + * chops the set each time the page changes. This also applies + * if there is a delay required at any point in the sequence. 
+ */ + base = regs; + for (i = 0, n = 0; i < num_regs; i++, n++) { + unsigned int reg = regs[i].reg; + struct regmap_range_node *range; + + range = _regmap_range_lookup(map, reg); + if (range) { + unsigned int win_page = _regmap_register_page(map, reg, + range); + + if (i == 0) + this_page = win_page; + if (win_page != this_page) { + this_page = win_page; + page_change = 1; + } + } + + /* If we have both a page change and a delay make sure to + * write the regs and apply the delay before we change the + * page. + */ + + if (page_change || regs[i].delay_us) { + + /* For situations where the first write requires + * a delay we need to make sure we don't call + * raw_multi_reg_write with n=0 + * This can't occur with page breaks as we + * never write on the first iteration + */ + if (regs[i].delay_us && i == 0) + n = 1; + + ret = _regmap_raw_multi_reg_write(map, base, n); + if (ret != 0) + return ret; + + if (regs[i].delay_us) { + if (map->can_sleep) + fsleep(regs[i].delay_us); + else + udelay(regs[i].delay_us); + } + + base += n; + n = 0; + + if (page_change) { + ret = _regmap_select_page(map, + &base[n].reg, + range, 1); + if (ret != 0) + return ret; + + page_change = 0; + } + + } + + } + if (n > 0) + return _regmap_raw_multi_reg_write(map, base, n); + return 0; +} + +static int _regmap_multi_reg_write(struct regmap *map, + const struct reg_sequence *regs, + size_t num_regs) +{ + int i; + int ret; + + if (!map->can_multi_write) { + for (i = 0; i < num_regs; i++) { + ret = _regmap_write(map, regs[i].reg, regs[i].def); + if (ret != 0) + return ret; + + if (regs[i].delay_us) { + if (map->can_sleep) + fsleep(regs[i].delay_us); + else + udelay(regs[i].delay_us); + } + } + return 0; + } + + if (!map->format.parse_inplace) + return -EINVAL; + + if (map->writeable_reg) + for (i = 0; i < num_regs; i++) { + int reg = regs[i].reg; + if (!map->writeable_reg(map->dev, reg)) + return -EINVAL; + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + } + + if (!map->cache_bypass) { + for (i = 0; i < num_regs; i++) { + unsigned int val = regs[i].def; + unsigned int reg = regs[i].reg; + ret = regcache_write(map, reg, val); + if (ret) { + dev_err(map->dev, + "Error in caching of register: %x ret: %d\n", + reg, ret); + return ret; + } + } + if (map->cache_only) { + map->cache_dirty = true; + return 0; + } + } + + WARN_ON(!map->bus); + + for (i = 0; i < num_regs; i++) { + unsigned int reg = regs[i].reg; + struct regmap_range_node *range; + + /* Coalesce all the writes between a page break or a delay + * in a sequence + */ + range = _regmap_range_lookup(map, reg); + if (range || regs[i].delay_us) { + size_t len = sizeof(struct reg_sequence)*num_regs; + struct reg_sequence *base = kmemdup(regs, len, + GFP_KERNEL); + if (!base) + return -ENOMEM; + ret = _regmap_range_multi_paged_reg_write(map, base, + num_regs); + kfree(base); + + return ret; + } + } + return _regmap_raw_multi_reg_write(map, regs, num_regs); +} + +/** + * regmap_multi_reg_write() - Write multiple registers to the device + * + * @map: Register map to write to + * @regs: Array of structures containing register,value to be written + * @num_regs: Number of registers to write + * + * Write multiple registers to the device where the set of register, value + * pairs are supplied in any order, possibly not all in a single range. + * + * The 'normal' block write mode will send ultimately send data on the + * target bus as R,V1,V2,V3,..,Vn where successively higher registers are + * addressed. 
However, this alternative block multi write mode will send + * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device + * must of course support the mode. + * + * A value of zero will be returned on success, a negative errno will be + * returned in error cases. + */ +int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, + int num_regs) +{ + int ret; + + map->lock(map->lock_arg); + + ret = _regmap_multi_reg_write(map, regs, num_regs); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_multi_reg_write); + +/** + * regmap_multi_reg_write_bypassed() - Write multiple registers to the + * device but not the cache + * + * @map: Register map to write to + * @regs: Array of structures containing register,value to be written + * @num_regs: Number of registers to write + * + * Write multiple registers to the device but not the cache where the set + * of register are supplied in any order. + * + * This function is intended to be used for writing a large block of data + * atomically to the device in single transfer for those I2C client devices + * that implement this alternative block write mode. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_multi_reg_write_bypassed(struct regmap *map, + const struct reg_sequence *regs, + int num_regs) +{ + int ret; + bool bypass; + + map->lock(map->lock_arg); + + bypass = map->cache_bypass; + map->cache_bypass = true; + + ret = _regmap_multi_reg_write(map, regs, num_regs); + + map->cache_bypass = bypass; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed); + +/** + * regmap_raw_write_async() - Write raw values to one or more registers + * asynchronously + * + * @map: Register map to write to + * @reg: Initial register to write to + * @val: Block of data to be written, laid out for direct transmission to the + * device. Must be valid until regmap_async_complete() is called. + * @val_len: Length of data pointed to by val. + * + * This function is intended to be used for things like firmware + * download where a large block of data needs to be transferred to the + * device. No formatting will be done on the data provided. + * + * If supported by the underlying bus the write will be scheduled + * asynchronously, helping maximise I/O speed on higher speed buses + * like SPI. regmap_async_complete() can be called to ensure that all + * asynchrnous writes have been completed. + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_raw_write_async(struct regmap *map, unsigned int reg, + const void *val, size_t val_len) +{ + int ret; + + if (val_len % map->format.val_bytes) + return -EINVAL; + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + map->lock(map->lock_arg); + + map->async = true; + + ret = _regmap_raw_write(map, reg, val, val_len, false); + + map->async = false; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_raw_write_async); + +static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, + unsigned int val_len, bool noinc) +{ + struct regmap_range_node *range; + int ret; + + WARN_ON(!map->bus); + + if (!map->bus || !map->bus->read) + return -EINVAL; + + range = _regmap_range_lookup(map, reg); + if (range) { + ret = _regmap_select_page(map, ®, range, + noinc ? 
1 : val_len / map->format.val_bytes); + if (ret != 0) + return ret; + } + + map->format.format_reg(map->work_buf, reg, map->reg_shift); + regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, + map->read_flag_mask); + trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); + + ret = map->bus->read(map->bus_context, map->work_buf, + map->format.reg_bytes + map->format.pad_bytes, + val, val_len); + + trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); + + return ret; +} + +static int _regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct regmap *map = context; + + return map->bus->reg_read(map->bus_context, reg, val); +} + +static int _regmap_bus_read(void *context, unsigned int reg, + unsigned int *val) +{ + int ret; + struct regmap *map = context; + void *work_val = map->work_buf + map->format.reg_bytes + + map->format.pad_bytes; + + if (!map->format.parse_val) + return -EINVAL; + + ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); + if (ret == 0) + *val = map->format.parse_val(work_val); + + return ret; +} + +static int _regmap_read(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + int ret; + void *context = _regmap_map_get_context(map); + + if (!map->cache_bypass) { + ret = regcache_read(map, reg, val); + if (ret == 0) + return 0; + } + + if (map->cache_only) + return -EBUSY; + + if (!regmap_readable(map, reg)) + return -EIO; + + ret = map->reg_read(context, reg, val); + if (ret == 0) { + if (regmap_should_log(map)) + dev_info(map->dev, "%x => %x\n", reg, *val); + + trace_regmap_reg_read(map, reg, *val); + + if (!map->cache_bypass) + regcache_write(map, reg, *val); + } + + return ret; +} + +/** + * regmap_read() - Read a value from a single register + * + * @map: Register map to read from + * @reg: Register to be read from + * @val: Pointer to store read value + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) +{ + int ret; + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + map->lock(map->lock_arg); + + ret = _regmap_read(map, reg, val); + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_read); + +/** + * regmap_raw_read() - Read raw data from the device + * + * @map: Register map to read from + * @reg: First register to be read from + * @val: Pointer to store read value + * @val_len: Size of data to read + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. 
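+ *
+ * An illustrative sketch, not part of this change (the start register is
+ * arbitrary and 8-bit register values are assumed, so the buffer length
+ * is a whole number of registers):
+ *
+ *	u8 buf[8];
+ *
+ *	ret = regmap_raw_read(map, 0x30, buf, sizeof(buf));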
+ */ +int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, + size_t val_len) +{ + size_t val_bytes = map->format.val_bytes; + size_t val_count = val_len / val_bytes; + unsigned int v; + int ret, i; + + if (!map->bus) + return -EINVAL; + if (val_len % map->format.val_bytes) + return -EINVAL; + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + if (val_count == 0) + return -EINVAL; + + map->lock(map->lock_arg); + + if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || + map->cache_type == REGCACHE_NONE) { + size_t chunk_count, chunk_bytes; + size_t chunk_regs = val_count; + + if (!map->bus->read) { + ret = -ENOTSUPP; + goto out; + } + + if (map->use_single_read) + chunk_regs = 1; + else if (map->max_raw_read && val_len > map->max_raw_read) + chunk_regs = map->max_raw_read / val_bytes; + + chunk_count = val_count / chunk_regs; + chunk_bytes = chunk_regs * val_bytes; + + /* Read bytes that fit into whole chunks */ + for (i = 0; i < chunk_count; i++) { + ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); + if (ret != 0) + goto out; + + reg += regmap_get_offset(map, chunk_regs); + val += chunk_bytes; + val_len -= chunk_bytes; + } + + /* Read remaining bytes */ + if (val_len) { + ret = _regmap_raw_read(map, reg, val, val_len, false); + if (ret != 0) + goto out; + } + } else { + /* Otherwise go word by word for the cache; should be low + * cost as we expect to hit the cache. + */ + for (i = 0; i < val_count; i++) { + ret = _regmap_read(map, reg + regmap_get_offset(map, i), + &v); + if (ret != 0) + goto out; + + map->format.format_val(val + (i * val_bytes), v, 0); + } + } + + out: + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_raw_read); + +/** + * regmap_noinc_read(): Read data from a register without incrementing the + * register number + * + * @map: Register map to read from + * @reg: Register to read from + * @val: Pointer to data buffer + * @val_len: Length of output buffer in bytes. + * + * The regmap API usually assumes that bulk bus read operations will read a + * range of registers. Some devices have certain registers for which a read + * operation read will read from an internal FIFO. + * + * The target register must be volatile but registers after it can be + * completely unrelated cacheable registers. + * + * This will attempt multiple reads as required to read val_len bytes. + * + * A value of zero will be returned on success, a negative errno will be + * returned in error cases. 
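+ *
+ * An illustrative sketch, not part of this change (REG_RX_FIFO is a
+ * hypothetical device-specific FIFO register):
+ *
+ *	u8 rx[32];
+ *
+ *	ret = regmap_noinc_read(map, REG_RX_FIFO, rx, sizeof(rx));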
+ */
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+		      void *val, size_t val_len)
+{
+	size_t read_len;
+	int ret;
+
+	if (!map->bus)
+		return -EINVAL;
+	if (!map->bus->read)
+		return -ENOTSUPP;
+	if (val_len % map->format.val_bytes)
+		return -EINVAL;
+	if (!IS_ALIGNED(reg, map->reg_stride))
+		return -EINVAL;
+	if (val_len == 0)
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	while (val_len) {
+		if (map->max_raw_read && map->max_raw_read < val_len)
+			read_len = map->max_raw_read;
+		else
+			read_len = val_len;
+		ret = _regmap_raw_read(map, reg, val, read_len, true);
+		if (ret)
+			goto out_unlock;
+		val = ((u8 *)val) + read_len;
+		val_len -= read_len;
+	}
+
+out_unlock:
+	map->unlock(map->lock_arg);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_read);
+
+/**
+ * regmap_field_read(): Read a value from a single register field
+ *
+ * @field: Register field to read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_read(struct regmap_field *field, unsigned int *val)
+{
+	int ret;
+	unsigned int reg_val;
+	ret = regmap_read(field->regmap, field->reg, &reg_val);
+	if (ret != 0)
+		return ret;
+
+	reg_val &= field->mask;
+	reg_val >>= field->shift;
+	*val = reg_val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_field_read);
+
+/**
+ * regmap_fields_read() - Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+		       unsigned int *val)
+{
+	int ret;
+	unsigned int reg_val;
+
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	ret = regmap_read(field->regmap,
+			  field->reg + (field->id_offset * id),
+			  &reg_val);
+	if (ret != 0)
+		return ret;
+
+	reg_val &= field->mask;
+	reg_val >>= field->shift;
+	*val = reg_val;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
+ * regmap_bulk_read() - Read multiple registers from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
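+ *
+ * An illustrative sketch, not part of this change (the start register is
+ * arbitrary and a regmap with 32-bit values is assumed):
+ *
+ *	u32 vals[3];
+ *
+ *	ret = regmap_bulk_read(map, 0x40, vals, ARRAY_SIZE(vals));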
+ */ +int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, + size_t val_count) +{ + int ret, i; + size_t val_bytes = map->format.val_bytes; + bool vol = regmap_volatile_range(map, reg, val_count); + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + if (val_count == 0) + return -EINVAL; + + if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { + ret = regmap_raw_read(map, reg, val, val_bytes * val_count); + if (ret != 0) + return ret; + + for (i = 0; i < val_count * val_bytes; i += val_bytes) + map->format.parse_inplace(val + i); + } else { +#ifdef CONFIG_64BIT + u64 *u64 = val; +#endif + u32 *u32 = val; + u16 *u16 = val; + u8 *u8 = val; + + map->lock(map->lock_arg); + + for (i = 0; i < val_count; i++) { + unsigned int ival; + + ret = _regmap_read(map, reg + regmap_get_offset(map, i), + &ival); + if (ret != 0) + goto out; + + switch (map->format.val_bytes) { +#ifdef CONFIG_64BIT + case 8: + u64[i] = ival; + break; +#endif + case 4: + u32[i] = ival; + break; + case 2: + u16[i] = ival; + break; + case 1: + u8[i] = ival; + break; + default: + ret = -EINVAL; + goto out; + } + } + +out: + map->unlock(map->lock_arg); + } + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_bulk_read); + +static int _regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool force_write) +{ + int ret; + unsigned int tmp, orig; + + if (change) + *change = false; + + if (regmap_volatile(map, reg) && map->reg_update_bits) { + ret = map->reg_update_bits(map->bus_context, reg, mask, val); + if (ret == 0 && change) + *change = true; + } else { + ret = _regmap_read(map, reg, &orig); + if (ret != 0) + return ret; + + tmp = orig & ~mask; + tmp |= val & mask; + + if (force_write || (tmp != orig)) { + ret = _regmap_write(map, reg, tmp); + if (ret == 0 && change) + *change = true; + } + } + + return ret; +} + +/** + * regmap_update_bits_base() - Perform a read/modify/write cycle on a register + * + * @map: Register map to update + * @reg: Register to update + * @mask: Bitmask to change + * @val: New value for bitmask + * @change: Boolean indicating if a write was done + * @async: Boolean indicating asynchronously + * @force: Boolean indicating use force update + * + * Perform a read/modify/write cycle on a register map with change, async, force + * options. + * + * If async is true: + * + * With most buses the read must be done synchronously so this is most useful + * for devices with a cache which do not need to interact with the hardware to + * determine the current register value. + * + * Returns zero for success, a negative number on error. + */ +int regmap_update_bits_base(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change, bool async, bool force) +{ + int ret; + + map->lock(map->lock_arg); + + map->async = async; + + ret = _regmap_update_bits(map, reg, mask, val, change, force); + + map->async = false; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_update_bits_base); + +/** + * regmap_test_bits() - Check if all specified bits are set in a register. + * + * @map: Register map to operate on + * @reg: Register to read from + * @bits: Bits to test + * + * Returns 0 if at least one of the tested bits is not set, 1 if all tested + * bits are set and a negative error number if the underlying regmap_read() + * fails. 
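+ *
+ * An illustrative sketch, not part of this change (REG_STATUS and
+ * STATUS_READY are hypothetical device-specific definitions):
+ *
+ *	ret = regmap_test_bits(map, REG_STATUS, STATUS_READY);
+ *	if (ret < 0)
+ *		return ret;
+ *	if (ret == 0)
+ *		return -EBUSY;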
+ */ +int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) +{ + unsigned int val, ret; + + ret = regmap_read(map, reg, &val); + if (ret) + return ret; + + return (val & bits) == bits; +} +EXPORT_SYMBOL_GPL(regmap_test_bits); + +void regmap_async_complete_cb(struct regmap_async *async, int ret) +{ + struct regmap *map = async->map; + bool wake; + + trace_regmap_async_io_complete(map); + + spin_lock(&map->async_lock); + list_move(&async->list, &map->async_free); + wake = list_empty(&map->async_list); + + if (ret != 0) + map->async_ret = ret; + + spin_unlock(&map->async_lock); + + if (wake) + wake_up(&map->async_waitq); +} +EXPORT_SYMBOL_GPL(regmap_async_complete_cb); + +static int regmap_async_is_done(struct regmap *map) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&map->async_lock, flags); + ret = list_empty(&map->async_list); + spin_unlock_irqrestore(&map->async_lock, flags); + + return ret; +} + +/** + * regmap_async_complete - Ensure all asynchronous I/O has completed. + * + * @map: Map to operate on. + * + * Blocks until any pending asynchronous I/O has completed. Returns + * an error code for any failed I/O operations. + */ +int regmap_async_complete(struct regmap *map) +{ + unsigned long flags; + int ret; + + /* Nothing to do with no async support */ + if (!map->bus || !map->bus->async_write) + return 0; + + trace_regmap_async_complete_start(map); + + wait_event(map->async_waitq, regmap_async_is_done(map)); + + spin_lock_irqsave(&map->async_lock, flags); + ret = map->async_ret; + map->async_ret = 0; + spin_unlock_irqrestore(&map->async_lock, flags); + + trace_regmap_async_complete_done(map); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_async_complete); + +/** + * regmap_register_patch - Register and apply register updates to be applied + * on device initialistion + * + * @map: Register map to apply updates to. + * @regs: Values to update. + * @num_regs: Number of entries in regs. + * + * Register a set of register updates to be applied to the device + * whenever the device registers are synchronised with the cache and + * apply them immediately. Typically this is used to apply + * corrections to be applied to the device defaults on startup, such + * as the updates some vendors provide to undocumented registers. + * + * The caller must ensure that this function cannot be called + * concurrently with either itself or regcache_sync(). + */ +int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, + int num_regs) +{ + struct reg_sequence *p; + int ret; + bool bypass; + + if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n", + num_regs)) + return 0; + + p = krealloc(map->patch, + sizeof(struct reg_sequence) * (map->patch_regs + num_regs), + GFP_KERNEL); + if (p) { + memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); + map->patch = p; + map->patch_regs += num_regs; + } else { + return -ENOMEM; + } + + map->lock(map->lock_arg); + + bypass = map->cache_bypass; + + map->cache_bypass = true; + map->async = true; + + ret = _regmap_multi_reg_write(map, regs, num_regs); + + map->async = false; + map->cache_bypass = bypass; + + map->unlock(map->lock_arg); + + regmap_async_complete(map); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_register_patch); + +/** + * regmap_get_val_bytes() - Report the size of a register value + * + * @map: Register map to operate on. + * + * Report the size of a register value, mainly intended to for use by + * generic infrastructure built on top of regmap. 
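+ *
+ * An illustrative sketch, not part of this change ("val_bytes" is a
+ * caller-side local variable):
+ *
+ *	int val_bytes = regmap_get_val_bytes(map);
+ *
+ *	if (val_bytes < 0)
+ *		return val_bytes;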
+ */ +int regmap_get_val_bytes(struct regmap *map) +{ + if (map->format.format_write) + return -EINVAL; + + return map->format.val_bytes; +} +EXPORT_SYMBOL_GPL(regmap_get_val_bytes); + +/** + * regmap_get_max_register() - Report the max register value + * + * @map: Register map to operate on. + * + * Report the max register value, mainly intended to for use by + * generic infrastructure built on top of regmap. + */ +int regmap_get_max_register(struct regmap *map) +{ + return map->max_register ? map->max_register : -EINVAL; +} +EXPORT_SYMBOL_GPL(regmap_get_max_register); + +/** + * regmap_get_reg_stride() - Report the register address stride + * + * @map: Register map to operate on. + * + * Report the register address stride, mainly intended to for use by + * generic infrastructure built on top of regmap. + */ +int regmap_get_reg_stride(struct regmap *map) +{ + return map->reg_stride; +} +EXPORT_SYMBOL_GPL(regmap_get_reg_stride); + +int regmap_parse_val(struct regmap *map, const void *buf, + unsigned int *val) +{ + if (!map->format.parse_val) + return -EINVAL; + + *val = map->format.parse_val(buf); + + return 0; +} +EXPORT_SYMBOL_GPL(regmap_parse_val); + +static int __init regmap_initcall(void) +{ + regmap_debugfs_initcall(); + + return 0; +} +postcore_initcall(regmap_initcall); diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h new file mode 100644 index 000000000..d4066fa07 --- /dev/null +++ b/drivers/base/regmap/trace.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM regmap + +#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_REGMAP_H + +#include <linux/ktime.h> +#include <linux/tracepoint.h> + +#include "internal.h" + +/* + * Log register events + */ +DECLARE_EVENT_CLASS(regmap_reg, + + TP_PROTO(struct regmap *map, unsigned int reg, + unsigned int val), + + TP_ARGS(map, reg, val), + + TP_STRUCT__entry( + __string( name, regmap_name(map) ) + __field( unsigned int, reg ) + __field( unsigned int, val ) + ), + + TP_fast_assign( + __assign_str(name, regmap_name(map)); + __entry->reg = reg; + __entry->val = val; + ), + + TP_printk("%s reg=%x val=%x", __get_str(name), + (unsigned int)__entry->reg, + (unsigned int)__entry->val) +); + +DEFINE_EVENT(regmap_reg, regmap_reg_write, + + TP_PROTO(struct regmap *map, unsigned int reg, + unsigned int val), + + TP_ARGS(map, reg, val) + +); + +DEFINE_EVENT(regmap_reg, regmap_reg_read, + + TP_PROTO(struct regmap *map, unsigned int reg, + unsigned int val), + + TP_ARGS(map, reg, val) + +); + +DEFINE_EVENT(regmap_reg, regmap_reg_read_cache, + + TP_PROTO(struct regmap *map, unsigned int reg, + unsigned int val), + + TP_ARGS(map, reg, val) + +); + +DECLARE_EVENT_CLASS(regmap_block, + + TP_PROTO(struct regmap *map, unsigned int reg, int count), + + TP_ARGS(map, reg, count), + + TP_STRUCT__entry( + __string( name, regmap_name(map) ) + __field( unsigned int, reg ) + __field( int, count ) + ), + + TP_fast_assign( + __assign_str(name, regmap_name(map)); + __entry->reg = reg; + __entry->count = count; + ), + + TP_printk("%s reg=%x count=%d", __get_str(name), + (unsigned int)__entry->reg, + (int)__entry->count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_read_start, + + TP_PROTO(struct regmap *map, unsigned int reg, int count), + + TP_ARGS(map, reg, count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_read_done, + + TP_PROTO(struct regmap *map, unsigned int reg, int count), + + TP_ARGS(map, reg, count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_write_start, + + 
TP_PROTO(struct regmap *map, unsigned int reg, int count), + + TP_ARGS(map, reg, count) +); + +DEFINE_EVENT(regmap_block, regmap_hw_write_done, + + TP_PROTO(struct regmap *map, unsigned int reg, int count), + + TP_ARGS(map, reg, count) +); + +TRACE_EVENT(regcache_sync, + + TP_PROTO(struct regmap *map, const char *type, + const char *status), + + TP_ARGS(map, type, status), + + TP_STRUCT__entry( + __string( name, regmap_name(map) ) + __string( status, status ) + __string( type, type ) + __field( int, type ) + ), + + TP_fast_assign( + __assign_str(name, regmap_name(map)); + __assign_str(status, status); + __assign_str(type, type); + ), + + TP_printk("%s type=%s status=%s", __get_str(name), + __get_str(type), __get_str(status)) +); + +DECLARE_EVENT_CLASS(regmap_bool, + + TP_PROTO(struct regmap *map, bool flag), + + TP_ARGS(map, flag), + + TP_STRUCT__entry( + __string( name, regmap_name(map) ) + __field( int, flag ) + ), + + TP_fast_assign( + __assign_str(name, regmap_name(map)); + __entry->flag = flag; + ), + + TP_printk("%s flag=%d", __get_str(name), + (int)__entry->flag) +); + +DEFINE_EVENT(regmap_bool, regmap_cache_only, + + TP_PROTO(struct regmap *map, bool flag), + + TP_ARGS(map, flag) + +); + +DEFINE_EVENT(regmap_bool, regmap_cache_bypass, + + TP_PROTO(struct regmap *map, bool flag), + + TP_ARGS(map, flag) + +); + +DECLARE_EVENT_CLASS(regmap_async, + + TP_PROTO(struct regmap *map), + + TP_ARGS(map), + + TP_STRUCT__entry( + __string( name, regmap_name(map) ) + ), + + TP_fast_assign( + __assign_str(name, regmap_name(map)); + ), + + TP_printk("%s", __get_str(name)) +); + +DEFINE_EVENT(regmap_block, regmap_async_write_start, + + TP_PROTO(struct regmap *map, unsigned int reg, int count), + + TP_ARGS(map, reg, count) +); + +DEFINE_EVENT(regmap_async, regmap_async_io_complete, + + TP_PROTO(struct regmap *map), + + TP_ARGS(map) + +); + +DEFINE_EVENT(regmap_async, regmap_async_complete_start, + + TP_PROTO(struct regmap *map), + + TP_ARGS(map) + +); + +DEFINE_EVENT(regmap_async, regmap_async_complete_done, + + TP_PROTO(struct regmap *map), + + TP_ARGS(map) + +); + +TRACE_EVENT(regcache_drop_region, + + TP_PROTO(struct regmap *map, unsigned int from, + unsigned int to), + + TP_ARGS(map, from, to), + + TP_STRUCT__entry( + __string( name, regmap_name(map) ) + __field( unsigned int, from ) + __field( unsigned int, to ) + ), + + TP_fast_assign( + __assign_str(name, regmap_name(map)); + __entry->from = from; + __entry->to = to; + ), + + TP_printk("%s %u-%u", __get_str(name), (unsigned int)__entry->from, + (unsigned int)__entry->to) +); + +#endif /* _TRACE_REGMAP_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . + +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/base/soc.c b/drivers/base/soc.c new file mode 100644 index 000000000..d34609bb7 --- /dev/null +++ b/drivers/base/soc.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson. 
+ */ + +#include <linux/sysfs.h> +#include <linux/init.h> +#include <linux/stat.h> +#include <linux/slab.h> +#include <linux/idr.h> +#include <linux/spinlock.h> +#include <linux/sys_soc.h> +#include <linux/err.h> +#include <linux/glob.h> + +static DEFINE_IDA(soc_ida); + +/* Prototype to allow declarations of DEVICE_ATTR(<foo>) before soc_info_show */ +static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr, + char *buf); + +struct soc_device { + struct device dev; + struct soc_device_attribute *attr; + int soc_dev_num; +}; + +static struct bus_type soc_bus_type = { + .name = "soc", +}; + +static DEVICE_ATTR(machine, 0444, soc_info_show, NULL); +static DEVICE_ATTR(family, 0444, soc_info_show, NULL); +static DEVICE_ATTR(serial_number, 0444, soc_info_show, NULL); +static DEVICE_ATTR(soc_id, 0444, soc_info_show, NULL); +static DEVICE_ATTR(revision, 0444, soc_info_show, NULL); + +struct device *soc_device_to_device(struct soc_device *soc_dev) +{ + return &soc_dev->dev; +} + +static umode_t soc_attribute_mode(struct kobject *kobj, + struct attribute *attr, + int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); + + if ((attr == &dev_attr_machine.attr) && soc_dev->attr->machine) + return attr->mode; + if ((attr == &dev_attr_family.attr) && soc_dev->attr->family) + return attr->mode; + if ((attr == &dev_attr_revision.attr) && soc_dev->attr->revision) + return attr->mode; + if ((attr == &dev_attr_serial_number.attr) && soc_dev->attr->serial_number) + return attr->mode; + if ((attr == &dev_attr_soc_id.attr) && soc_dev->attr->soc_id) + return attr->mode; + + /* Unknown or unfilled attribute */ + return 0; +} + +static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); + const char *output; + + if (attr == &dev_attr_machine) + output = soc_dev->attr->machine; + else if (attr == &dev_attr_family) + output = soc_dev->attr->family; + else if (attr == &dev_attr_revision) + output = soc_dev->attr->revision; + else if (attr == &dev_attr_serial_number) + output = soc_dev->attr->serial_number; + else if (attr == &dev_attr_soc_id) + output = soc_dev->attr->soc_id; + else + return -EINVAL; + + return sysfs_emit(buf, "%s\n", output); +} + +static struct attribute *soc_attr[] = { + &dev_attr_machine.attr, + &dev_attr_family.attr, + &dev_attr_serial_number.attr, + &dev_attr_soc_id.attr, + &dev_attr_revision.attr, + NULL, +}; + +static const struct attribute_group soc_attr_group = { + .attrs = soc_attr, + .is_visible = soc_attribute_mode, +}; + +static void soc_release(struct device *dev) +{ + struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); + + ida_simple_remove(&soc_ida, soc_dev->soc_dev_num); + kfree(soc_dev->dev.groups); + kfree(soc_dev); +} + +static struct soc_device_attribute *early_soc_dev_attr; + +struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr) +{ + struct soc_device *soc_dev; + const struct attribute_group **soc_attr_groups; + int ret; + + if (!soc_bus_type.p) { + if (early_soc_dev_attr) + return ERR_PTR(-EBUSY); + early_soc_dev_attr = soc_dev_attr; + return NULL; + } + + soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL); + if (!soc_dev) { + ret = -ENOMEM; + goto out1; + } + + soc_attr_groups = kcalloc(3, sizeof(*soc_attr_groups), GFP_KERNEL); + if (!soc_attr_groups) { + ret = -ENOMEM; + goto out2; + } + soc_attr_groups[0] = &soc_attr_group; + 
soc_attr_groups[1] = soc_dev_attr->custom_attr_group; + + /* Fetch a unique (reclaimable) SOC ID. */ + ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL); + if (ret < 0) + goto out3; + soc_dev->soc_dev_num = ret; + + soc_dev->attr = soc_dev_attr; + soc_dev->dev.bus = &soc_bus_type; + soc_dev->dev.groups = soc_attr_groups; + soc_dev->dev.release = soc_release; + + dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num); + + ret = device_register(&soc_dev->dev); + if (ret) { + put_device(&soc_dev->dev); + return ERR_PTR(ret); + } + + return soc_dev; + +out3: + kfree(soc_attr_groups); +out2: + kfree(soc_dev); +out1: + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(soc_device_register); + +/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */ +void soc_device_unregister(struct soc_device *soc_dev) +{ + device_unregister(&soc_dev->dev); + early_soc_dev_attr = NULL; +} +EXPORT_SYMBOL_GPL(soc_device_unregister); + +static int __init soc_bus_register(void) +{ + int ret; + + ret = bus_register(&soc_bus_type); + if (ret) + return ret; + + if (early_soc_dev_attr) + return PTR_ERR(soc_device_register(early_soc_dev_attr)); + + return 0; +} +core_initcall(soc_bus_register); + +static int soc_device_match_attr(const struct soc_device_attribute *attr, + const struct soc_device_attribute *match) +{ + if (match->machine && + (!attr->machine || !glob_match(match->machine, attr->machine))) + return 0; + + if (match->family && + (!attr->family || !glob_match(match->family, attr->family))) + return 0; + + if (match->revision && + (!attr->revision || !glob_match(match->revision, attr->revision))) + return 0; + + if (match->soc_id && + (!attr->soc_id || !glob_match(match->soc_id, attr->soc_id))) + return 0; + + return 1; +} + +static int soc_device_match_one(struct device *dev, void *arg) +{ + struct soc_device *soc_dev = container_of(dev, struct soc_device, dev); + + return soc_device_match_attr(soc_dev->attr, arg); +} + +/* + * soc_device_match - identify the SoC in the machine + * @matches: zero-terminated array of possible matches + * + * returns the first matching entry of the argument array, or NULL + * if none of them match. + * + * This function is meant as a helper in place of of_match_node() + * in cases where either no device tree is available or the information + * in a device node is insufficient to identify a particular variant + * by its compatible strings or other properties. For new devices, + * the DT binding should always provide unique compatible strings + * that allow the use of of_match_node() instead. + * + * The calling function can use the .data entry of the + * soc_device_attribute to pass a structure or function pointer for + * each entry. 
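+ *
+ * A usage sketch for illustration only, not part of this change (the
+ * machine glob pattern and the quirk flag are hypothetical):
+ *
+ *	static const struct soc_device_attribute quirk_socs[] = {
+ *		{ .machine = "Example-SoC*" },
+ *		{ }
+ *	};
+ *
+ *	if (soc_device_match(quirk_socs))
+ *		needs_workaround = true;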
+ */ +const struct soc_device_attribute *soc_device_match( + const struct soc_device_attribute *matches) +{ + int ret = 0; + + if (!matches) + return NULL; + + while (!ret) { + if (!(matches->machine || matches->family || + matches->revision || matches->soc_id)) + break; + ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches, + soc_device_match_one); + if (ret < 0 && early_soc_dev_attr) + ret = soc_device_match_attr(early_soc_dev_attr, + matches); + if (ret < 0) + return NULL; + if (!ret) + matches++; + else + return matches; + } + return NULL; +} +EXPORT_SYMBOL_GPL(soc_device_match); diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c new file mode 100644 index 000000000..b664c3638 --- /dev/null +++ b/drivers/base/swnode.c @@ -0,0 +1,913 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Software nodes for the firmware node framework. + * + * Copyright (C) 2018, Intel Corporation + * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com> + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/property.h> +#include <linux/slab.h> + +struct swnode { + int id; + struct kobject kobj; + struct fwnode_handle fwnode; + const struct software_node *node; + + /* hierarchy */ + struct ida child_ids; + struct list_head entry; + struct list_head children; + struct swnode *parent; + + unsigned int allocated:1; +}; + +static DEFINE_IDA(swnode_root_ids); +static struct kset *swnode_kset; + +#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct swnode, kobj) + +static const struct fwnode_operations software_node_ops; + +bool is_software_node(const struct fwnode_handle *fwnode) +{ + return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &software_node_ops; +} +EXPORT_SYMBOL_GPL(is_software_node); + +#define to_swnode(__fwnode) \ + ({ \ + typeof(__fwnode) __to_swnode_fwnode = __fwnode; \ + \ + is_software_node(__to_swnode_fwnode) ? \ + container_of(__to_swnode_fwnode, \ + struct swnode, fwnode) : NULL; \ + }) + +static struct swnode * +software_node_to_swnode(const struct software_node *node) +{ + struct swnode *swnode = NULL; + struct kobject *k; + + if (!node) + return NULL; + + spin_lock(&swnode_kset->list_lock); + + list_for_each_entry(k, &swnode_kset->list, entry) { + swnode = kobj_to_swnode(k); + if (swnode->node == node) + break; + swnode = NULL; + } + + spin_unlock(&swnode_kset->list_lock); + + return swnode; +} + +const struct software_node *to_software_node(const struct fwnode_handle *fwnode) +{ + const struct swnode *swnode = to_swnode(fwnode); + + return swnode ? swnode->node : NULL; +} +EXPORT_SYMBOL_GPL(to_software_node); + +struct fwnode_handle *software_node_fwnode(const struct software_node *node) +{ + struct swnode *swnode = software_node_to_swnode(node); + + return swnode ? &swnode->fwnode : NULL; +} +EXPORT_SYMBOL_GPL(software_node_fwnode); + +/* -------------------------------------------------------------------------- */ +/* property_entry processing */ + +static const struct property_entry * +property_entry_get(const struct property_entry *prop, const char *name) +{ + if (!prop) + return NULL; + + for (; prop->name; prop++) + if (!strcmp(name, prop->name)) + return prop; + + return NULL; +} + +static const void *property_get_pointer(const struct property_entry *prop) +{ + if (!prop->length) + return NULL; + + return prop->is_inline ? 
&prop->value : prop->pointer; +} + +static const void *property_entry_find(const struct property_entry *props, + const char *propname, size_t length) +{ + const struct property_entry *prop; + const void *pointer; + + prop = property_entry_get(props, propname); + if (!prop) + return ERR_PTR(-EINVAL); + pointer = property_get_pointer(prop); + if (!pointer) + return ERR_PTR(-ENODATA); + if (length > prop->length) + return ERR_PTR(-EOVERFLOW); + return pointer; +} + +static int +property_entry_count_elems_of_size(const struct property_entry *props, + const char *propname, size_t length) +{ + const struct property_entry *prop; + + prop = property_entry_get(props, propname); + if (!prop) + return -EINVAL; + + return prop->length / length; +} + +static int property_entry_read_int_array(const struct property_entry *props, + const char *name, + unsigned int elem_size, void *val, + size_t nval) +{ + const void *pointer; + size_t length; + + if (!val) + return property_entry_count_elems_of_size(props, name, + elem_size); + + if (!is_power_of_2(elem_size) || elem_size > sizeof(u64)) + return -ENXIO; + + length = nval * elem_size; + + pointer = property_entry_find(props, name, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(val, pointer, length); + return 0; +} + +static int property_entry_read_string_array(const struct property_entry *props, + const char *propname, + const char **strings, size_t nval) +{ + const void *pointer; + size_t length; + int array_len; + + /* Find out the array length. */ + array_len = property_entry_count_elems_of_size(props, propname, + sizeof(const char *)); + if (array_len < 0) + return array_len; + + /* Return how many there are if strings is NULL. */ + if (!strings) + return array_len; + + array_len = min_t(size_t, nval, array_len); + length = array_len * sizeof(*strings); + + pointer = property_entry_find(props, propname, length); + if (IS_ERR(pointer)) + return PTR_ERR(pointer); + + memcpy(strings, pointer, length); + + return array_len; +} + +static void property_entry_free_data(const struct property_entry *p) +{ + const char * const *src_str; + size_t i, nval; + + if (p->type == DEV_PROP_STRING) { + src_str = property_get_pointer(p); + nval = p->length / sizeof(*src_str); + for (i = 0; i < nval; i++) + kfree(src_str[i]); + } + + if (!p->is_inline) + kfree(p->pointer); + + kfree(p->name); +} + +static bool property_copy_string_array(const char **dst_ptr, + const char * const *src_ptr, + size_t nval) +{ + int i; + + for (i = 0; i < nval; i++) { + dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL); + if (!dst_ptr[i] && src_ptr[i]) { + while (--i >= 0) + kfree(dst_ptr[i]); + return false; + } + } + + return true; +} + +static int property_entry_copy_data(struct property_entry *dst, + const struct property_entry *src) +{ + const void *pointer = property_get_pointer(src); + void *dst_ptr; + size_t nval; + + /* + * Properties with no data should not be marked as stored + * out of line. + */ + if (!src->is_inline && !src->length) + return -ENODATA; + + /* + * Reference properties are never stored inline as + * they are too big. 
+ */ + if (src->type == DEV_PROP_REF && src->is_inline) + return -EINVAL; + + if (src->length <= sizeof(dst->value)) { + dst_ptr = &dst->value; + dst->is_inline = true; + } else { + dst_ptr = kmalloc(src->length, GFP_KERNEL); + if (!dst_ptr) + return -ENOMEM; + dst->pointer = dst_ptr; + } + + if (src->type == DEV_PROP_STRING) { + nval = src->length / sizeof(const char *); + if (!property_copy_string_array(dst_ptr, pointer, nval)) { + if (!dst->is_inline) + kfree(dst->pointer); + return -ENOMEM; + } + } else { + memcpy(dst_ptr, pointer, src->length); + } + + dst->length = src->length; + dst->type = src->type; + dst->name = kstrdup(src->name, GFP_KERNEL); + if (!dst->name) { + property_entry_free_data(dst); + return -ENOMEM; + } + + return 0; +} + +/** + * property_entries_dup - duplicate array of properties + * @properties: array of properties to copy + * + * This function creates a deep copy of the given NULL-terminated array + * of property entries. + */ +struct property_entry * +property_entries_dup(const struct property_entry *properties) +{ + struct property_entry *p; + int i, n = 0; + int ret; + + if (!properties) + return NULL; + + while (properties[n].name) + n++; + + p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < n; i++) { + ret = property_entry_copy_data(&p[i], &properties[i]); + if (ret) { + while (--i >= 0) + property_entry_free_data(&p[i]); + kfree(p); + return ERR_PTR(ret); + } + } + + return p; +} +EXPORT_SYMBOL_GPL(property_entries_dup); + +/** + * property_entries_free - free previously allocated array of properties + * @properties: array of properties to destroy + * + * This function frees given NULL-terminated array of property entries, + * along with their data. + */ +void property_entries_free(const struct property_entry *properties) +{ + const struct property_entry *p; + + if (!properties) + return; + + for (p = properties; p->name; p++) + property_entry_free_data(p); + + kfree(properties); +} +EXPORT_SYMBOL_GPL(property_entries_free); + +/* -------------------------------------------------------------------------- */ +/* fwnode operations */ + +static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode) +{ + struct swnode *swnode = to_swnode(fwnode); + + kobject_get(&swnode->kobj); + + return &swnode->fwnode; +} + +static void software_node_put(struct fwnode_handle *fwnode) +{ + struct swnode *swnode = to_swnode(fwnode); + + kobject_put(&swnode->kobj); +} + +static bool software_node_property_present(const struct fwnode_handle *fwnode, + const char *propname) +{ + struct swnode *swnode = to_swnode(fwnode); + + return !!property_entry_get(swnode->node->properties, propname); +} + +static int software_node_read_int_array(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + struct swnode *swnode = to_swnode(fwnode); + + return property_entry_read_int_array(swnode->node->properties, propname, + elem_size, val, nval); +} + +static int software_node_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + struct swnode *swnode = to_swnode(fwnode); + + return property_entry_read_string_array(swnode->node->properties, + propname, val, nval); +} + +static const char * +software_node_get_name(const struct fwnode_handle *fwnode) +{ + const struct swnode *swnode = to_swnode(fwnode); + + if (!swnode) + return "(null)"; + + return kobject_name(&swnode->kobj); +} + +static const char * 
+software_node_get_name_prefix(const struct fwnode_handle *fwnode) +{ + struct fwnode_handle *parent; + const char *prefix; + + parent = fwnode_get_parent(fwnode); + if (!parent) + return ""; + + /* Figure out the prefix from the parents. */ + while (is_software_node(parent)) + parent = fwnode_get_next_parent(parent); + + prefix = fwnode_get_name_prefix(parent); + fwnode_handle_put(parent); + + /* Guess something if prefix was NULL. */ + return prefix ?: "/"; +} + +static struct fwnode_handle * +software_node_get_parent(const struct fwnode_handle *fwnode) +{ + struct swnode *swnode = to_swnode(fwnode); + + if (!swnode || !swnode->parent) + return NULL; + + return fwnode_handle_get(&swnode->parent->fwnode); +} + +static struct fwnode_handle * +software_node_get_next_child(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + struct swnode *p = to_swnode(fwnode); + struct swnode *c = to_swnode(child); + + if (!p || list_empty(&p->children) || + (c && list_is_last(&c->entry, &p->children))) { + fwnode_handle_put(child); + return NULL; + } + + if (c) + c = list_next_entry(c, entry); + else + c = list_first_entry(&p->children, struct swnode, entry); + + fwnode_handle_put(child); + return fwnode_handle_get(&c->fwnode); +} + +static struct fwnode_handle * +software_node_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) +{ + struct swnode *swnode = to_swnode(fwnode); + struct swnode *child; + + if (!swnode || list_empty(&swnode->children)) + return NULL; + + list_for_each_entry(child, &swnode->children, entry) { + if (!strcmp(childname, kobject_name(&child->kobj))) { + kobject_get(&child->kobj); + return &child->fwnode; + } + } + return NULL; +} + +static int +software_node_get_reference_args(const struct fwnode_handle *fwnode, + const char *propname, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args) +{ + struct swnode *swnode = to_swnode(fwnode); + const struct software_node_ref_args *ref_array; + const struct software_node_ref_args *ref; + const struct property_entry *prop; + struct fwnode_handle *refnode; + u32 nargs_prop_val; + int error; + int i; + + if (!swnode) + return -ENOENT; + + prop = property_entry_get(swnode->node->properties, propname); + if (!prop) + return -ENOENT; + + if (prop->type != DEV_PROP_REF) + return -EINVAL; + + /* + * We expect that references are never stored inline, even + * single ones, as they are too big. 
+ */ + if (prop->is_inline) + return -EINVAL; + + if (index * sizeof(*ref) >= prop->length) + return -ENOENT; + + ref_array = prop->pointer; + ref = &ref_array[index]; + + refnode = software_node_fwnode(ref->node); + if (!refnode) + return -ENOENT; + + if (nargs_prop) { + error = property_entry_read_int_array(ref->node->properties, + nargs_prop, sizeof(u32), + &nargs_prop_val, 1); + if (error) + return error; + + nargs = nargs_prop_val; + } + + if (nargs > NR_FWNODE_REFERENCE_ARGS) + return -EINVAL; + + if (!args) + return 0; + + args->fwnode = software_node_get(refnode); + args->nargs = nargs; + + for (i = 0; i < nargs; i++) + args->args[i] = ref->args[i]; + + return 0; +} + +static const struct fwnode_operations software_node_ops = { + .get = software_node_get, + .put = software_node_put, + .property_present = software_node_property_present, + .property_read_int_array = software_node_read_int_array, + .property_read_string_array = software_node_read_string_array, + .get_name = software_node_get_name, + .get_name_prefix = software_node_get_name_prefix, + .get_parent = software_node_get_parent, + .get_next_child_node = software_node_get_next_child, + .get_named_child_node = software_node_get_named_child_node, + .get_reference_args = software_node_get_reference_args +}; + +/* -------------------------------------------------------------------------- */ + +/** + * software_node_find_by_name - Find software node by name + * @parent: Parent of the software node + * @name: Name of the software node + * + * The function will find a node that is child of @parent and that is named + * @name. If no node is found, the function returns NULL. + * + * NOTE: you will need to drop the reference with fwnode_handle_put() after use. + */ +const struct software_node * +software_node_find_by_name(const struct software_node *parent, const char *name) +{ + struct swnode *swnode = NULL; + struct kobject *k; + + if (!name) + return NULL; + + spin_lock(&swnode_kset->list_lock); + + list_for_each_entry(k, &swnode_kset->list, entry) { + swnode = kobj_to_swnode(k); + if (parent == swnode->node->parent && swnode->node->name && + !strcmp(name, swnode->node->name)) { + kobject_get(&swnode->kobj); + break; + } + swnode = NULL; + } + + spin_unlock(&swnode_kset->list_lock); + + return swnode ? swnode->node : NULL; +} +EXPORT_SYMBOL_GPL(software_node_find_by_name); + +static int +software_node_register_properties(struct software_node *node, + const struct property_entry *properties) +{ + struct property_entry *props; + + props = property_entries_dup(properties); + if (IS_ERR(props)) + return PTR_ERR(props); + + node->properties = props; + + return 0; +} + +static void software_node_release(struct kobject *kobj) +{ + struct swnode *swnode = kobj_to_swnode(kobj); + + if (swnode->parent) { + ida_simple_remove(&swnode->parent->child_ids, swnode->id); + list_del(&swnode->entry); + } else { + ida_simple_remove(&swnode_root_ids, swnode->id); + } + + if (swnode->allocated) { + property_entries_free(swnode->node->properties); + kfree(swnode->node); + } + ida_destroy(&swnode->child_ids); + kfree(swnode); +} + +static struct kobj_type software_node_type = { + .release = software_node_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +static struct fwnode_handle * +swnode_register(const struct software_node *node, struct swnode *parent, + unsigned int allocated) +{ + struct swnode *swnode; + int ret; + + swnode = kzalloc(sizeof(*swnode), GFP_KERNEL); + if (!swnode) { + ret = -ENOMEM; + goto out_err; + } + + ret = ida_simple_get(parent ? 
&parent->child_ids : &swnode_root_ids, + 0, 0, GFP_KERNEL); + if (ret < 0) { + kfree(swnode); + goto out_err; + } + + swnode->id = ret; + swnode->node = node; + swnode->parent = parent; + swnode->allocated = allocated; + swnode->kobj.kset = swnode_kset; + swnode->fwnode.ops = &software_node_ops; + + ida_init(&swnode->child_ids); + INIT_LIST_HEAD(&swnode->entry); + INIT_LIST_HEAD(&swnode->children); + + if (node->name) + ret = kobject_init_and_add(&swnode->kobj, &software_node_type, + parent ? &parent->kobj : NULL, + "%s", node->name); + else + ret = kobject_init_and_add(&swnode->kobj, &software_node_type, + parent ? &parent->kobj : NULL, + "node%d", swnode->id); + if (ret) { + kobject_put(&swnode->kobj); + return ERR_PTR(ret); + } + + if (parent) + list_add_tail(&swnode->entry, &parent->children); + + kobject_uevent(&swnode->kobj, KOBJ_ADD); + return &swnode->fwnode; + +out_err: + if (allocated) + property_entries_free(node->properties); + return ERR_PTR(ret); +} + +/** + * software_node_register_nodes - Register an array of software nodes + * @nodes: Zero terminated array of software nodes to be registered + * + * Register multiple software nodes at once. + */ +int software_node_register_nodes(const struct software_node *nodes) +{ + int ret; + int i; + + for (i = 0; nodes[i].name; i++) { + ret = software_node_register(&nodes[i]); + if (ret) { + software_node_unregister_nodes(nodes); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(software_node_register_nodes); + +/** + * software_node_unregister_nodes - Unregister an array of software nodes + * @nodes: Zero terminated array of software nodes to be unregistered + * + * Unregister multiple software nodes at once. + * + * NOTE: Be careful using this call if the nodes had parent pointers set up in + * them before registering. If so, it is wiser to remove the nodes + * individually, in the correct order (child before parent) instead of relying + * on the sequential order of the list of nodes in the array. + */ +void software_node_unregister_nodes(const struct software_node *nodes) +{ + int i; + + for (i = 0; nodes[i].name; i++) + software_node_unregister(&nodes[i]); +} +EXPORT_SYMBOL_GPL(software_node_unregister_nodes); + +/** + * software_node_register_node_group - Register a group of software nodes + * @node_group: NULL terminated array of software node pointers to be registered + * + * Register multiple software nodes at once. + */ +int software_node_register_node_group(const struct software_node **node_group) +{ + unsigned int i; + int ret; + + if (!node_group) + return 0; + + for (i = 0; node_group[i]; i++) { + ret = software_node_register(node_group[i]); + if (ret) { + software_node_unregister_node_group(node_group); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(software_node_register_node_group); + +/** + * software_node_unregister_node_group - Unregister a group of software nodes + * @node_group: NULL terminated array of software node pointers to be unregistered + * + * Unregister multiple software nodes at once. 
+ */ +void software_node_unregister_node_group(const struct software_node **node_group) +{ + unsigned int i; + + if (!node_group) + return; + + for (i = 0; node_group[i]; i++) + software_node_unregister(node_group[i]); +} +EXPORT_SYMBOL_GPL(software_node_unregister_node_group); + +/** + * software_node_register - Register static software node + * @node: The software node to be registered + */ +int software_node_register(const struct software_node *node) +{ + struct swnode *parent = software_node_to_swnode(node->parent); + + if (software_node_to_swnode(node)) + return -EEXIST; + + if (node->parent && !parent) + return -EINVAL; + + return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0)); +} +EXPORT_SYMBOL_GPL(software_node_register); + +/** + * software_node_unregister - Unregister static software node + * @node: The software node to be unregistered + */ +void software_node_unregister(const struct software_node *node) +{ + struct swnode *swnode; + + swnode = software_node_to_swnode(node); + if (swnode) + fwnode_remove_software_node(&swnode->fwnode); +} +EXPORT_SYMBOL_GPL(software_node_unregister); + +struct fwnode_handle * +fwnode_create_software_node(const struct property_entry *properties, + const struct fwnode_handle *parent) +{ + struct software_node *node; + struct swnode *p = NULL; + int ret; + + if (parent) { + if (IS_ERR(parent)) + return ERR_CAST(parent); + if (!is_software_node(parent)) + return ERR_PTR(-EINVAL); + p = to_swnode(parent); + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return ERR_PTR(-ENOMEM); + + ret = software_node_register_properties(node, properties); + if (ret) { + kfree(node); + return ERR_PTR(ret); + } + + node->parent = p ? p->node : NULL; + + return swnode_register(node, p, 1); +} +EXPORT_SYMBOL_GPL(fwnode_create_software_node); + +void fwnode_remove_software_node(struct fwnode_handle *fwnode) +{ + struct swnode *swnode = to_swnode(fwnode); + + if (!swnode) + return; + + kobject_put(&swnode->kobj); +} +EXPORT_SYMBOL_GPL(fwnode_remove_software_node); + +int software_node_notify(struct device *dev, unsigned long action) +{ + struct fwnode_handle *fwnode = dev_fwnode(dev); + struct swnode *swnode; + int ret; + + if (!fwnode) + return 0; + + if (!is_software_node(fwnode)) + fwnode = fwnode->secondary; + if (!is_software_node(fwnode)) + return 0; + + swnode = to_swnode(fwnode); + + switch (action) { + case KOBJ_ADD: + ret = sysfs_create_link(&dev->kobj, &swnode->kobj, + "software_node"); + if (ret) + break; + + ret = sysfs_create_link(&swnode->kobj, &dev->kobj, + dev_name(dev)); + if (ret) { + sysfs_remove_link(&dev->kobj, "software_node"); + break; + } + kobject_get(&swnode->kobj); + break; + case KOBJ_REMOVE: + sysfs_remove_link(&swnode->kobj, dev_name(dev)); + sysfs_remove_link(&dev->kobj, "software_node"); + kobject_put(&swnode->kobj); + break; + default: + break; + } + + return 0; +} + +static int __init software_node_init(void) +{ + swnode_kset = kset_create_and_add("software_nodes", NULL, kernel_kobj); + if (!swnode_kset) + return -ENOMEM; + return 0; +} +postcore_initcall(software_node_init); + +static void __exit software_node_exit(void) +{ + ida_destroy(&swnode_root_ids); + kset_unregister(swnode_kset); +} +__exitcall(software_node_exit); diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c new file mode 100644 index 000000000..13db1f78d --- /dev/null +++ b/drivers/base/syscore.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * syscore.c - Execution of system core operations. + * + * Copyright (C) 2011 Rafael J. 
Wysocki <rjw@sisk.pl>, Novell Inc. + */ + +#include <linux/syscore_ops.h> +#include <linux/mutex.h> +#include <linux/module.h> +#include <linux/suspend.h> +#include <trace/events/power.h> + +static LIST_HEAD(syscore_ops_list); +static DEFINE_MUTEX(syscore_ops_lock); + +/** + * register_syscore_ops - Register a set of system core operations. + * @ops: System core operations to register. + */ +void register_syscore_ops(struct syscore_ops *ops) +{ + mutex_lock(&syscore_ops_lock); + list_add_tail(&ops->node, &syscore_ops_list); + mutex_unlock(&syscore_ops_lock); +} +EXPORT_SYMBOL_GPL(register_syscore_ops); + +/** + * unregister_syscore_ops - Unregister a set of system core operations. + * @ops: System core operations to unregister. + */ +void unregister_syscore_ops(struct syscore_ops *ops) +{ + mutex_lock(&syscore_ops_lock); + list_del(&ops->node); + mutex_unlock(&syscore_ops_lock); +} +EXPORT_SYMBOL_GPL(unregister_syscore_ops); + +#ifdef CONFIG_PM_SLEEP +/** + * syscore_suspend - Execute all the registered system core suspend callbacks. + * + * This function is executed with one CPU on-line and disabled interrupts. + */ +int syscore_suspend(void) +{ + struct syscore_ops *ops; + int ret = 0; + + trace_suspend_resume(TPS("syscore_suspend"), 0, true); + pm_pr_dbg("Checking wakeup interrupts\n"); + + /* Return error code if there are any wakeup interrupts pending. */ + if (pm_wakeup_pending()) + return -EBUSY; + + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled before system core suspend.\n"); + + list_for_each_entry_reverse(ops, &syscore_ops_list, node) + if (ops->suspend) { + pm_pr_dbg("Calling %pS\n", ops->suspend); + ret = ops->suspend(); + if (ret) + goto err_out; + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pS\n", ops->suspend); + } + + trace_suspend_resume(TPS("syscore_suspend"), 0, false); + return 0; + + err_out: + pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend); + + list_for_each_entry_continue(ops, &syscore_ops_list, node) + if (ops->resume) + ops->resume(); + + return ret; +} +EXPORT_SYMBOL_GPL(syscore_suspend); + +/** + * syscore_resume - Execute all the registered system core resume callbacks. + * + * This function is executed with one CPU on-line and disabled interrupts. + */ +void syscore_resume(void) +{ + struct syscore_ops *ops; + + trace_suspend_resume(TPS("syscore_resume"), 0, true); + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled before system core resume.\n"); + + list_for_each_entry(ops, &syscore_ops_list, node) + if (ops->resume) { + pm_pr_dbg("Calling %pS\n", ops->resume); + ops->resume(); + WARN_ONCE(!irqs_disabled(), + "Interrupts enabled after %pS\n", ops->resume); + } + trace_suspend_resume(TPS("syscore_resume"), 0, false); +} +EXPORT_SYMBOL_GPL(syscore_resume); +#endif /* CONFIG_PM_SLEEP */ + +/** + * syscore_shutdown - Execute all the registered system core shutdown callbacks. 
+ */ +void syscore_shutdown(void) +{ + struct syscore_ops *ops; + + mutex_lock(&syscore_ops_lock); + + list_for_each_entry_reverse(ops, &syscore_ops_list, node) + if (ops->shutdown) { + if (initcall_debug) + pr_info("PM: Calling %pS\n", ops->shutdown); + ops->shutdown(); + } + + mutex_unlock(&syscore_ops_lock); +} diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig new file mode 100644 index 000000000..ba225eb1b --- /dev/null +++ b/drivers/base/test/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +config TEST_ASYNC_DRIVER_PROBE + tristate "Build kernel module to test asynchronous driver probing" + depends on m + help + Enabling this option produces a kernel module that allows + testing asynchronous driver probing by the device core. + The module name will be test_async_driver_probe.ko + + If unsure say N. +config KUNIT_DRIVER_PE_TEST + bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS + depends on KUNIT=y + default KUNIT_ALL_TESTS diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile new file mode 100644 index 000000000..2f15fae86 --- /dev/null +++ b/drivers/base/test/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o + +obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o +CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c new file mode 100644 index 000000000..abe033151 --- /dev/null +++ b/drivers/base/test/property-entry-test.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0 +// Unit tests for property entries API +// +// Copyright 2019 Google LLC. 
+ +#include <kunit/test.h> +#include <linux/property.h> +#include <linux/types.h> + +static void pe_test_uints(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8("prop-u8", 8), + PROPERTY_ENTRY_U16("prop-u16", 16), + PROPERTY_ENTRY_U32("prop-u32", 32), + PROPERTY_ENTRY_U64("prop-u64", 64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[2]; + u16 val_u16, array_u16[2]; + u32 val_u32, array_u32[2]; + u64 val_u64, array_u64[2]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_uint_arrays(struct kunit *test) +{ + static const u8 a_u8[16] = { 8, 9 }; + static const u16 a_u16[16] = { 16, 17 }; + static const u32 a_u32[16] = { 32, 33 }; + static const u64 a_u64[16] = { 64, 65 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8), + PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16), + 
PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32), + PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64), + { } + }; + + struct fwnode_handle *node; + u8 val_u8, array_u8[32]; + u16 val_u16, array_u16[32]; + u32 val_u32, array_u32[32]; + u64 val_u64, array_u64[32]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_u8(node, "prop-u8", &val_u8); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u8, 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u8[0], 8); + KUNIT_EXPECT_EQ(test, (int)array_u8[1], 9); + + error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "prop-u16", &val_u16); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u16, 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u16[0], 16); + KUNIT_EXPECT_EQ(test, (int)array_u16[1], 17); + + error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "prop-u32", &val_u32); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u32, 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u32[0], 32); + KUNIT_EXPECT_EQ(test, (int)array_u32[1], 33); + + error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "prop-u64", &val_u64); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)val_u64, 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_EQ(test, (int)array_u64[0], 64); + KUNIT_EXPECT_EQ(test, (int)array_u64[1], 65); + + error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64); + 
KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); +} + +static void pe_test_strings(struct kunit *test) +{ + static const char *strings[] = { + "string-a", + "string-b", + }; + + static const struct property_entry entries[] = { + PROPERTY_ENTRY_STRING("str", "single"), + PROPERTY_ENTRY_STRING("empty", ""), + PROPERTY_ENTRY_STRING_ARRAY("strs", strings), + { } + }; + + struct fwnode_handle *node; + const char *str; + const char *strs[10]; + int error; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_read_string(node, "str", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "single"); + + error = fwnode_property_read_string_array(node, "str", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + /* asking for more data returns what we have */ + error = fwnode_property_read_string_array(node, "str", strs, 2); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "single"); + + error = fwnode_property_read_string(node, "no-str", &str); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_read_string_array(node, "no-str", strs, 1); + KUNIT_EXPECT_LT(test, error, 0); + + error = fwnode_property_read_string(node, "empty", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, ""); + + error = fwnode_property_read_string_array(node, "strs", strs, 3); + KUNIT_EXPECT_EQ(test, error, 2); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + KUNIT_EXPECT_STREQ(test, strs[1], "string-b"); + + error = fwnode_property_read_string_array(node, "strs", strs, 1); + KUNIT_EXPECT_EQ(test, error, 1); + KUNIT_EXPECT_STREQ(test, strs[0], "string-a"); + + /* NULL argument -> returns size */ + error = fwnode_property_read_string_array(node, "strs", NULL, 0); + KUNIT_EXPECT_EQ(test, error, 2); + + /* accessing array as single value */ + error = fwnode_property_read_string(node, "strs", &str); + KUNIT_EXPECT_EQ(test, error, 0); + KUNIT_EXPECT_STREQ(test, str, "string-a"); + + fwnode_remove_software_node(node); +} + +static void pe_test_bool(struct kunit *test) +{ + static const struct property_entry entries[] = { + PROPERTY_ENTRY_BOOL("prop"), + { } + }; + + struct fwnode_handle *node; + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop")); + KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop")); + + fwnode_remove_software_node(node); +} + +/* Verifies that small U8 array is stored inline when property is copied */ +static void pe_test_move_inline_u8(struct kunit *test) +{ + static const u8 u8_array_small[8] = { 1, 2, 3, 4 }; + static const u8 u8_array_big[128] = { 5, 6, 7, 8 }; + static const struct property_entry entries[] = { + PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small), + PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big), + { } + }; + + struct property_entry *copy; + const u8 *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + data_ptr = (u8 *)©[0].value; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 1); + KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 2); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_EQ(test, (int)data_ptr[0], 5); + 
KUNIT_EXPECT_EQ(test, (int)data_ptr[1], 6); + + property_entries_free(copy); +} + +/* Verifies that single string array is stored inline when property is copied */ +static void pe_test_move_inline_str(struct kunit *test) +{ + static char *str_array_small[] = { "a" }; + static char *str_array_big[] = { "b", "c", "d", "e" }; + static char *str_array_small_empty[] = { "" }; + static struct property_entry entries[] = { + PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small), + PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big), + PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty), + { } + }; + + struct property_entry *copy; + const char * const *data_ptr; + + copy = property_entries_dup(entries); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy); + + KUNIT_EXPECT_TRUE(test, copy[0].is_inline); + KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a"); + + KUNIT_EXPECT_FALSE(test, copy[1].is_inline); + data_ptr = copy[1].pointer; + KUNIT_EXPECT_STREQ(test, data_ptr[0], "b"); + KUNIT_EXPECT_STREQ(test, data_ptr[1], "c"); + + KUNIT_EXPECT_TRUE(test, copy[2].is_inline); + KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], ""); + + property_entries_free(copy); +} + +/* Handling of reference properties */ +static void pe_test_reference(struct kunit *test) +{ + static const struct software_node nodes[] = { + { .name = "1", }, + { .name = "2", }, + { } + }; + + static const struct software_node_ref_args refs[] = { + { + .node = &nodes[0], + .nargs = 0, + }, + { + .node = &nodes[1], + .nargs = 2, + .args = { 3, 4 }, + }, + }; + + const struct property_entry entries[] = { + PROPERTY_ENTRY_REF("ref-1", &nodes[0]), + PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2), + PROPERTY_ENTRY_REF_ARRAY("ref-3", refs), + { } + }; + + struct fwnode_handle *node; + struct fwnode_reference_args ref; + int error; + + error = software_node_register_nodes(nodes); + KUNIT_ASSERT_EQ(test, error, 0); + + node = fwnode_create_software_node(entries, NULL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node); + + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 1, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 1U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + + /* asking for more args, padded with zero data */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 3, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 3U); + KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU); + KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-2", NULL, + 2, 1, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + /* array of references */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 0, 0, &ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]); + KUNIT_EXPECT_EQ(test, ref.nargs, 0U); + + /* second reference in the array */ + error = fwnode_property_get_reference_args(node, "ref-3", NULL, + 2, 1, 
&ref); + KUNIT_ASSERT_EQ(test, error, 0); + KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]); + KUNIT_EXPECT_EQ(test, ref.nargs, 2U); + KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU); + KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU); + + /* wrong index */ + error = fwnode_property_get_reference_args(node, "ref-1", NULL, + 0, 2, &ref); + KUNIT_EXPECT_NE(test, error, 0); + + fwnode_remove_software_node(node); + software_node_unregister_nodes(nodes); +} + +static struct kunit_case property_entry_test_cases[] = { + KUNIT_CASE(pe_test_uints), + KUNIT_CASE(pe_test_uint_arrays), + KUNIT_CASE(pe_test_strings), + KUNIT_CASE(pe_test_bool), + KUNIT_CASE(pe_test_move_inline_u8), + KUNIT_CASE(pe_test_move_inline_str), + KUNIT_CASE(pe_test_reference), + { } +}; + +static struct kunit_suite property_entry_test_suite = { + .name = "property-entry", + .test_cases = property_entry_test_cases, +}; + +kunit_test_suite(property_entry_test_suite); diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c new file mode 100644 index 000000000..88336f093 --- /dev/null +++ b/drivers/base/test/test_async_driver_probe.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014 Google, Inc. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/hrtimer.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/time.h> +#include <linux/numa.h> +#include <linux/nodemask.h> +#include <linux/topology.h> + +#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */ +#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2) + +static atomic_t warnings, errors, timeout, async_completed; + +static int test_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + /* + * Determine if we have hit the "timeout" limit for the test. If we + * have, report it as an error; otherwise we will sleep for the + * required amount of time and then report completion. + */ + if (atomic_read(&timeout)) { + dev_err(dev, "async probe took too long\n"); + atomic_inc(&errors); + } else { + dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n", + TEST_PROBE_DELAY); + msleep(TEST_PROBE_DELAY); + dev_dbg(&pdev->dev, "done sleeping\n"); + } + + /* + * Report NUMA mismatch if device node is set and we are not + * performing an async init on that node.
+ */ + if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) { + if (IS_ENABLED(CONFIG_NUMA) && + dev_to_node(dev) != numa_node_id()) { + dev_warn(dev, "NUMA node mismatch %d != %d\n", + dev_to_node(dev), numa_node_id()); + atomic_inc(&warnings); + } + + atomic_inc(&async_completed); + } + + return 0; +} + +static struct platform_driver async_driver = { + .driver = { + .name = "test_async_driver", + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .probe = test_probe, +}; + +static struct platform_driver sync_driver = { + .driver = { + .name = "test_sync_driver", + .probe_type = PROBE_FORCE_SYNCHRONOUS, + }, + .probe = test_probe, +}; + +static struct platform_device *async_dev[NR_CPUS * 2]; +static struct platform_device *sync_dev[2]; + +static struct platform_device * +test_platform_device_register_node(char *name, int id, int nid) +{ + struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc(name, id); + if (!pdev) + return ERR_PTR(-ENOMEM); + + if (nid != NUMA_NO_NODE) + set_dev_node(&pdev->dev, nid); + + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + return ERR_PTR(ret); + } + + return pdev; + +} + +static int __init test_async_probe_init(void) +{ + struct platform_device **pdev = NULL; + int async_id = 0, sync_id = 0; + unsigned long long duration; + ktime_t calltime, delta; + int err, nid, cpu; + + pr_info("registering first set of asynchronous devices...\n"); + + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_devs; + } + + async_id++; + } + + pr_info("registering asynchronous driver...\n"); + calltime = ktime_get(); + err = platform_driver_register(&async_driver); + if (err) { + pr_err("Failed to register async_driver: %d\n", err); + goto err_unregister_async_devs; + } + + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + pr_info("registration took %lld msecs\n", duration); + if (duration > TEST_PROBE_THRESHOLD) { + pr_err("test failed: probe took too long\n"); + err = -ETIMEDOUT; + goto err_unregister_async_driver; + } + + pr_info("registering second set of asynchronous devices...\n"); + calltime = ktime_get(); + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_driver; + } + + async_id++; + } + + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); + if (duration > TEST_PROBE_THRESHOLD) { + dev_err(&(*pdev)->dev, + "test failed: probe took too long\n"); + err = -ETIMEDOUT; + goto err_unregister_async_driver; + } + + + pr_info("registering first synchronous device...\n"); + nid = cpu_to_node(cpu); + pdev = &sync_dev[sync_id]; + + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_async_driver; + } + + sync_id++; + + pr_info("registering synchronous driver...\n"); + calltime = 
ktime_get(); + err = platform_driver_register(&sync_driver); + if (err) { + pr_err("Failed to register async_driver: %d\n", err); + goto err_unregister_sync_devs; + } + + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + pr_info("registration took %lld msecs\n", duration); + if (duration < TEST_PROBE_THRESHOLD) { + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; + } + + pr_info("registering second synchronous device...\n"); + pdev = &sync_dev[sync_id]; + calltime = ktime_get(); + + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_sync_driver; + } + + sync_id++; + + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); + if (duration < TEST_PROBE_THRESHOLD) { + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; + } + + /* + * The async events should have completed while we were taking care + * of the synchronous events. We will now terminate any outstanding + * asynchronous probe calls remaining by forcing timeout and remove + * the driver before we return which should force the flush of the + * pending asynchronous probe calls. + * + * Otherwise if they completed without errors or warnings then + * report successful completion. + */ + if (atomic_read(&async_completed) != async_id) { + pr_err("async events still pending, forcing timeout\n"); + atomic_inc(&timeout); + err = -ETIMEDOUT; + } else if (!atomic_read(&errors) && !atomic_read(&warnings)) { + pr_info("completed successfully\n"); + return 0; + } + +err_unregister_sync_driver: + platform_driver_unregister(&sync_driver); +err_unregister_sync_devs: + while (sync_id--) + platform_device_unregister(sync_dev[sync_id]); +err_unregister_async_driver: + platform_driver_unregister(&async_driver); +err_unregister_async_devs: + while (async_id--) + platform_device_unregister(async_dev[async_id]); + + /* + * If err is already set then count that as an additional error for + * the test. Otherwise we will report an invalid argument error and + * not count that as we should have reached here as a result of + * errors or warnings being reported by the probe routine. 
+ */ + if (err) + atomic_inc(&errors); + else + err = -EINVAL; + + pr_err("Test failed with %d errors and %d warnings\n", + atomic_read(&errors), atomic_read(&warnings)); + + return err; +} +module_init(test_async_probe_init); + +static void __exit test_async_probe_exit(void) +{ + int id = 2; + + platform_driver_unregister(&async_driver); + platform_driver_unregister(&sync_driver); + + while (id--) + platform_device_unregister(sync_dev[id]); + + id = NR_CPUS * 2; + while (id--) + platform_device_unregister(async_dev[id]); +} +module_exit(test_async_probe_exit); + +MODULE_DESCRIPTION("Test module for asynchronous driver probing"); +MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/base/topology.c b/drivers/base/topology.c new file mode 100644 index 000000000..4d254fcc9 --- /dev/null +++ b/drivers/base/topology.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * driver/base/topology.c - Populate sysfs with cpu topology information + * + * Written by: Zhang Yanmin, Intel Corporation + * + * Copyright (C) 2006, Intel Corp. + * + * All rights reserved. + */ +#include <linux/mm.h> +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/hardirq.h> +#include <linux/topology.h> + +#define define_id_show_func(name) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return sysfs_emit(buf, "%d\n", topology_##name(dev->id)); \ +} + +#define define_siblings_show_map(name, mask) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + return cpumap_print_to_pagebuf(false, buf, topology_##mask(dev->id));\ +} + +#define define_siblings_show_list(name, mask) \ +static ssize_t name##_list_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return cpumap_print_to_pagebuf(true, buf, topology_##mask(dev->id));\ +} + +#define define_siblings_show_func(name, mask) \ + define_siblings_show_map(name, mask); \ + define_siblings_show_list(name, mask) + +define_id_show_func(physical_package_id); +static DEVICE_ATTR_RO(physical_package_id); + +define_id_show_func(die_id); +static DEVICE_ATTR_RO(die_id); + +define_id_show_func(core_id); +static DEVICE_ATTR_RO(core_id); + +define_siblings_show_func(thread_siblings, sibling_cpumask); +static DEVICE_ATTR_RO(thread_siblings); +static DEVICE_ATTR_RO(thread_siblings_list); + +define_siblings_show_func(core_cpus, sibling_cpumask); +static DEVICE_ATTR_RO(core_cpus); +static DEVICE_ATTR_RO(core_cpus_list); + +define_siblings_show_func(core_siblings, core_cpumask); +static DEVICE_ATTR_RO(core_siblings); +static DEVICE_ATTR_RO(core_siblings_list); + +define_siblings_show_func(die_cpus, die_cpumask); +static DEVICE_ATTR_RO(die_cpus); +static DEVICE_ATTR_RO(die_cpus_list); + +define_siblings_show_func(package_cpus, core_cpumask); +static DEVICE_ATTR_RO(package_cpus); +static DEVICE_ATTR_RO(package_cpus_list); + +#ifdef CONFIG_SCHED_BOOK +define_id_show_func(book_id); +static DEVICE_ATTR_RO(book_id); +define_siblings_show_func(book_siblings, book_cpumask); +static DEVICE_ATTR_RO(book_siblings); +static DEVICE_ATTR_RO(book_siblings_list); +#endif + +#ifdef CONFIG_SCHED_DRAWER +define_id_show_func(drawer_id); +static DEVICE_ATTR_RO(drawer_id); +define_siblings_show_func(drawer_siblings, drawer_cpumask); +static DEVICE_ATTR_RO(drawer_siblings); +static DEVICE_ATTR_RO(drawer_siblings_list); +#endif + +static struct attribute *default_attrs[] = { + &dev_attr_physical_package_id.attr, 
+ &dev_attr_die_id.attr, + &dev_attr_core_id.attr, + &dev_attr_thread_siblings.attr, + &dev_attr_thread_siblings_list.attr, + &dev_attr_core_cpus.attr, + &dev_attr_core_cpus_list.attr, + &dev_attr_core_siblings.attr, + &dev_attr_core_siblings_list.attr, + &dev_attr_die_cpus.attr, + &dev_attr_die_cpus_list.attr, + &dev_attr_package_cpus.attr, + &dev_attr_package_cpus_list.attr, +#ifdef CONFIG_SCHED_BOOK + &dev_attr_book_id.attr, + &dev_attr_book_siblings.attr, + &dev_attr_book_siblings_list.attr, +#endif +#ifdef CONFIG_SCHED_DRAWER + &dev_attr_drawer_id.attr, + &dev_attr_drawer_siblings.attr, + &dev_attr_drawer_siblings_list.attr, +#endif + NULL +}; + +static const struct attribute_group topology_attr_group = { + .attrs = default_attrs, + .name = "topology" +}; + +/* Add/Remove cpu_topology interface for CPU device */ +static int topology_add_dev(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + return sysfs_create_group(&dev->kobj, &topology_attr_group); +} + +static int topology_remove_dev(unsigned int cpu) +{ + struct device *dev = get_cpu_device(cpu); + + sysfs_remove_group(&dev->kobj, &topology_attr_group); + return 0; +} + +static int __init topology_sysfs_init(void) +{ + return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE, + "base/topology:prepare", topology_add_dev, + topology_remove_dev); +} + +device_initcall(topology_sysfs_init); diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c new file mode 100644 index 000000000..ccc86206e --- /dev/null +++ b/drivers/base/transport_class.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * transport_class.c - implementation of generic transport classes + * using attribute_containers + * + * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com> + * + * The basic idea here is to allow any "device controller" (which + * would most often be a Host Bus Adapter) to use the services of one + * or more transport classes for performing transport specific + * services. Transport specific services are things that the generic + * command layer doesn't want to know about (speed settings, line + * conditioning, etc), but which the user might be interested in. + * Thus, the HBAs use the routines exported by the transport classes + * to perform these functions. The transport classes export certain + * values to the user via sysfs using attribute containers. + * + * Note: because not every HBA will care about every transport + * attribute, there's a many-to-one relationship that goes like this: + * + * transport class<-----attribute container<----class device + * + * Usually the attribute container is per-HBA, but the design doesn't + * mandate that. Although most of the services will be specific to + * the actual external storage connection used by the HBA, the generic + * transport class is framed entirely in terms of generic devices to + * allow it to be used by any physical HBA in the system. + */ +#include <linux/export.h> +#include <linux/attribute_container.h> +#include <linux/transport_class.h> + +static int transport_remove_classdev(struct attribute_container *cont, + struct device *dev, + struct device *classdev); + +/** + * transport_class_register - register an initial transport class + * + * @tclass: a pointer to the transport class structure to be initialised + * + * The transport class contains an embedded class which is used to + * identify it.
The caller should initialise this structure with + * zeros and then generic class must have been initialised with the + * actual transport class unique name. There's a macro + * DECLARE_TRANSPORT_CLASS() to do this (declared classes still must + * be registered). + * + * Returns 0 on success or error on failure. + */ +int transport_class_register(struct transport_class *tclass) +{ + return class_register(&tclass->class); +} +EXPORT_SYMBOL_GPL(transport_class_register); + +/** + * transport_class_unregister - unregister a previously registered class + * + * @tclass: The transport class to unregister + * + * Must be called prior to deallocating the memory for the transport + * class. + */ +void transport_class_unregister(struct transport_class *tclass) +{ + class_unregister(&tclass->class); +} +EXPORT_SYMBOL_GPL(transport_class_unregister); + +static int anon_transport_dummy_function(struct transport_container *tc, + struct device *dev, + struct device *cdev) +{ + /* do nothing */ + return 0; +} + +/** + * anon_transport_class_register - register an anonymous class + * + * @atc: The anon transport class to register + * + * The anonymous transport class contains both a transport class and a + * container. The idea of an anonymous class is that it never + * actually has any device attributes associated with it (and thus + * saves on container storage). So it can only be used for triggering + * events. Use prezero and then use DECLARE_ANON_TRANSPORT_CLASS() to + * initialise the anon transport class storage. + */ +int anon_transport_class_register(struct anon_transport_class *atc) +{ + int error; + atc->container.class = &atc->tclass.class; + attribute_container_set_no_classdevs(&atc->container); + error = attribute_container_register(&atc->container); + if (error) + return error; + atc->tclass.setup = anon_transport_dummy_function; + atc->tclass.remove = anon_transport_dummy_function; + return 0; +} +EXPORT_SYMBOL_GPL(anon_transport_class_register); + +/** + * anon_transport_class_unregister - unregister an anon class + * + * @atc: Pointer to the anon transport class to unregister + * + * Must be called prior to deallocating the memory for the anon + * transport class. + */ +void anon_transport_class_unregister(struct anon_transport_class *atc) +{ + if (unlikely(attribute_container_unregister(&atc->container))) + BUG(); +} +EXPORT_SYMBOL_GPL(anon_transport_class_unregister); + +static int transport_setup_classdev(struct attribute_container *cont, + struct device *dev, + struct device *classdev) +{ + struct transport_class *tclass = class_to_transport_class(cont->class); + struct transport_container *tcont = attribute_container_to_transport_container(cont); + + if (tclass->setup) + tclass->setup(tcont, dev, classdev); + + return 0; +} + +/** + * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. + * @dev: the generic device representing the entity being added + * + * Usually, dev represents some component in the HBA system (either + * the HBA itself or a device remote across the HBA bus). This + * routine is simply a trigger point to see if any set of transport + * classes wishes to associate with the added device. This allocates + * storage for the class device and initialises it, but does not yet + * add it to the system or add attributes to it (you do this with + * transport_add_device). If you have no need for a separate setup + * and add operations, use transport_register_device (see + * transport_class.h). 
+ */ + +void transport_setup_device(struct device *dev) +{ + attribute_container_add_device(dev, transport_setup_classdev); +} +EXPORT_SYMBOL_GPL(transport_setup_device); + +static int transport_add_class_device(struct attribute_container *cont, + struct device *dev, + struct device *classdev) +{ + int error = attribute_container_add_class_device(classdev); + struct transport_container *tcont = + attribute_container_to_transport_container(cont); + + if (!error && tcont->statistics) + error = sysfs_create_group(&classdev->kobj, tcont->statistics); + + return error; +} + + +/** + * transport_add_device - declare a new dev for transport class association + * + * @dev: the generic device representing the entity being added + * + * Usually, dev represents some component in the HBA system (either + * the HBA itself or a device remote across the HBA bus). This + * routine is simply a trigger point used to add the device to the + * system and register attributes for it. + */ +int transport_add_device(struct device *dev) +{ + return attribute_container_device_trigger_safe(dev, + transport_add_class_device, + transport_remove_classdev); +} +EXPORT_SYMBOL_GPL(transport_add_device); + +static int transport_configure(struct attribute_container *cont, + struct device *dev, + struct device *cdev) +{ + struct transport_class *tclass = class_to_transport_class(cont->class); + struct transport_container *tcont = attribute_container_to_transport_container(cont); + + if (tclass->configure) + tclass->configure(tcont, dev, cdev); + + return 0; +} + +/** + * transport_configure_device - configure an already set up device + * + * @dev: generic device representing device to be configured + * + * The idea of configure is simply to provide a point within the setup + * process to allow the transport class to extract information from a + * device after it has been setup. This is used in SCSI because we + * have to have a setup device to begin using the HBA, but after we + * send the initial inquiry, we use configure to extract the device + * parameters. The device need not have been added to be configured. + */ +void transport_configure_device(struct device *dev) +{ + attribute_container_device_trigger(dev, transport_configure); +} +EXPORT_SYMBOL_GPL(transport_configure_device); + +static int transport_remove_classdev(struct attribute_container *cont, + struct device *dev, + struct device *classdev) +{ + struct transport_container *tcont = + attribute_container_to_transport_container(cont); + struct transport_class *tclass = class_to_transport_class(cont->class); + + if (tclass->remove) + tclass->remove(tcont, dev, classdev); + + if (tclass->remove != anon_transport_dummy_function) { + if (tcont->statistics) + sysfs_remove_group(&classdev->kobj, tcont->statistics); + attribute_container_class_device_del(classdev); + } + + return 0; +} + + +/** + * transport_remove_device - remove the visibility of a device + * + * @dev: generic device to remove + * + * This call removes the visibility of the device (to the user from + * sysfs), but does not destroy it. To eliminate a device entirely + * you must also call transport_destroy_device. If you don't need to + * do remove and destroy as separate operations, use + * transport_unregister_device() (see transport_class.h) which will + * perform both calls for you. 
+ */ +void transport_remove_device(struct device *dev) +{ + attribute_container_device_trigger(dev, transport_remove_classdev); +} +EXPORT_SYMBOL_GPL(transport_remove_device); + +static void transport_destroy_classdev(struct attribute_container *cont, + struct device *dev, + struct device *classdev) +{ + struct transport_class *tclass = class_to_transport_class(cont->class); + + if (tclass->remove != anon_transport_dummy_function) + put_device(classdev); +} + + +/** + * transport_destroy_device - destroy a removed device + * + * @dev: device to eliminate from the transport class. + * + * This call triggers the elimination of storage associated with the + * transport classdev. Note: all it really does is relinquish a + * reference to the classdev. The memory will not be freed until the + * last reference goes to zero. Note also that the classdev retains a + * reference count on dev, so dev too will remain for as long as the + * transport class device remains around. + */ +void transport_destroy_device(struct device *dev) +{ + attribute_container_remove_device(dev, transport_destroy_classdev); +} +EXPORT_SYMBOL_GPL(transport_destroy_device);
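For reference, a minimal sketch of how an HBA-style driver might consume the transport class API above. It is not part of the patch: the foo_* names are hypothetical and the match function is a placeholder that claims every device; only DECLARE_TRANSPORT_CLASS(), transport_class_register(), transport_container_register(), transport_register_device() and transport_unregister_device() are the real helpers from include/linux/transport_class.h.

/*
 * Illustrative sketch only, not part of this diff. "foo" stands in for a
 * hypothetical HBA driver; a real driver would match only its own devices
 * and keep per-device transport state in the callbacks.
 */
#include <linux/device.h>
#include <linux/attribute_container.h>
#include <linux/transport_class.h>

static int foo_transport_setup(struct transport_container *tc,
			       struct device *dev, struct device *cdev)
{
	/* Allocate per-device transport state before attributes appear. */
	return 0;
}

static int foo_transport_configure(struct transport_container *tc,
				   struct device *dev, struct device *cdev)
{
	/* Read link parameters back from the hardware after setup. */
	return 0;
}

static int foo_transport_remove(struct transport_container *tc,
				struct device *dev, struct device *cdev)
{
	/* Release per-device transport state. */
	return 0;
}

static DECLARE_TRANSPORT_CLASS(foo_transport_class, "foo_transport",
			       foo_transport_setup, foo_transport_remove,
			       foo_transport_configure);

static int foo_transport_match(struct attribute_container *cont,
			       struct device *dev)
{
	/* Placeholder: a real driver checks that @dev is one of its HBAs. */
	return 1;
}

static struct transport_container foo_transport_container = {
	.ac = {
		.class	= &foo_transport_class.class,
		.match	= foo_transport_match,
	},
};

static int __init foo_transport_init(void)
{
	int error;

	/* Register the class first, then the container that matches devices. */
	error = transport_class_register(&foo_transport_class);
	if (error)
		return error;

	return transport_container_register(&foo_transport_container);
}

/* Called when the driver discovers or loses an HBA device: */
static int foo_hba_added(struct device *hba_dev)
{
	/* setup + add: makes the transport attributes visible in sysfs. */
	return transport_register_device(hba_dev);
}

static void foo_hba_removed(struct device *hba_dev)
{
	/* remove + destroy: hides the attributes and drops the classdev. */
	transport_unregister_device(hba_dev);
}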