author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/base
parent    Initial commit. (diff)
download  linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig | 233
-rw-r--r--  drivers/base/Makefile | 36
-rw-r--r--  drivers/base/arch_numa.c | 477
-rw-r--r--  drivers/base/arch_topology.c | 863
-rw-r--r--  drivers/base/attribute_container.c | 548
-rw-r--r--  drivers/base/auxiliary.c | 419
-rw-r--r--  drivers/base/base.h | 207
-rw-r--r--  drivers/base/bus.c | 1180
-rw-r--r--  drivers/base/cacheinfo.c | 742
-rw-r--r--  drivers/base/class.c | 592
-rw-r--r--  drivers/base/component.c | 824
-rw-r--r--  drivers/base/container.c | 41
-rw-r--r--  drivers/base/core.c | 5152
-rw-r--r--  drivers/base/cpu.c | 646
-rw-r--r--  drivers/base/dd.c | 1352
-rw-r--r--  drivers/base/devcoredump.c | 429
-rw-r--r--  drivers/base/devres.c | 1224
-rw-r--r--  drivers/base/devtmpfs.c | 491
-rw-r--r--  drivers/base/driver.c | 303
-rw-r--r--  drivers/base/firmware.c | 26
-rw-r--r--  drivers/base/firmware_loader/Kconfig | 221
-rw-r--r--  drivers/base/firmware_loader/Makefile | 12
-rw-r--r--  drivers/base/firmware_loader/builtin/.gitignore | 2
-rw-r--r--  drivers/base/firmware_loader/builtin/Makefile | 42
-rw-r--r--  drivers/base/firmware_loader/builtin/main.c | 106
-rw-r--r--  drivers/base/firmware_loader/fallback.c | 235
-rw-r--r--  drivers/base/firmware_loader/fallback.h | 45
-rw-r--r--  drivers/base/firmware_loader/fallback_platform.c | 45
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c | 69
-rw-r--r--  drivers/base/firmware_loader/firmware.h | 199
-rw-r--r--  drivers/base/firmware_loader/main.c | 1599
-rw-r--r--  drivers/base/firmware_loader/sysfs.c | 419
-rw-r--r--  drivers/base/firmware_loader/sysfs.h | 122
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.c | 407
-rw-r--r--  drivers/base/firmware_loader/sysfs_upload.h | 41
-rw-r--r--  drivers/base/hypervisor.c | 24
-rw-r--r--  drivers/base/init.c | 42
-rw-r--r--  drivers/base/isa.c | 182
-rw-r--r--  drivers/base/map.c | 154
-rw-r--r--  drivers/base/memory.c | 1178
-rw-r--r--  drivers/base/module.c | 93
-rw-r--r--  drivers/base/node.c | 969
-rw-r--r--  drivers/base/physical_location.c | 146
-rw-r--r--  drivers/base/physical_location.h | 16
-rw-r--r--  drivers/base/pinctrl.c | 105
-rw-r--r--  drivers/base/platform-msi.c | 351
-rw-r--r--  drivers/base/platform.c | 1529
-rw-r--r--  drivers/base/power/Makefile | 9
-rw-r--r--  drivers/base/power/clock_ops.c | 805
-rw-r--r--  drivers/base/power/common.c | 230
-rw-r--r--  drivers/base/power/domain.c | 3407
-rw-r--r--  drivers/base/power/domain_governor.c | 411
-rw-r--r--  drivers/base/power/generic_ops.c | 298
-rw-r--r--  drivers/base/power/main.c | 2027
-rw-r--r--  drivers/base/power/power.h | 170
-rw-r--r--  drivers/base/power/qos-test.c | 117
-rw-r--r--  drivers/base/power/qos.c | 982
-rw-r--r--  drivers/base/power/runtime.c | 1949
-rw-r--r--  drivers/base/power/sysfs.c | 838
-rw-r--r--  drivers/base/power/trace.c | 307
-rw-r--r--  drivers/base/power/wakeirq.c | 409
-rw-r--r--  drivers/base/power/wakeup.c | 1188
-rw-r--r--  drivers/base/power/wakeup_stats.c | 217
-rw-r--r--  drivers/base/property.c | 1421
-rw-r--r--  drivers/base/regmap/Kconfig | 67
-rw-r--r--  drivers/base/regmap/Makefile | 22
-rw-r--r--  drivers/base/regmap/internal.h | 310
-rw-r--r--  drivers/base/regmap/regcache-flat.c | 83
-rw-r--r--  drivers/base/regmap/regcache-lzo.c | 368
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 554
-rw-r--r--  drivers/base/regmap/regcache.c | 827
-rw-r--r--  drivers/base/regmap/regmap-ac97.c | 89
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 692
-rw-r--r--  drivers/base/regmap/regmap-i2c.c | 399
-rw-r--r--  drivers/base/regmap/regmap-i3c.c | 60
-rw-r--r--  drivers/base/regmap/regmap-irq.c | 1313
-rw-r--r--  drivers/base/regmap/regmap-mdio.c | 116
-rw-r--r--  drivers/base/regmap/regmap-mmio.c | 636
-rw-r--r--  drivers/base/regmap/regmap-sccb.c | 128
-rw-r--r--  drivers/base/regmap/regmap-sdw-mbq.c | 101
-rw-r--r--  drivers/base/regmap/regmap-sdw.c | 89
-rw-r--r--  drivers/base/regmap/regmap-slimbus.c | 71
-rw-r--r--  drivers/base/regmap/regmap-spi-avmm.c | 713
-rw-r--r--  drivers/base/regmap/regmap-spi.c | 168
-rw-r--r--  drivers/base/regmap/regmap-spmi.c | 225
-rw-r--r--  drivers/base/regmap/regmap-w1.c | 237
-rw-r--r--  drivers/base/regmap/regmap.c | 3515
-rw-r--r--  drivers/base/regmap/trace.h | 284
-rw-r--r--  drivers/base/soc.c | 265
-rw-r--r--  drivers/base/swnode.c | 1177
-rw-r--r--  drivers/base/syscore.c | 128
-rw-r--r--  drivers/base/test/Kconfig | 14
-rw-r--r--  drivers/base/test/Makefile | 5
-rw-r--r--  drivers/base/test/property-entry-test.c | 510
-rw-r--r--  drivers/base/test/test_async_driver_probe.c | 299
-rw-r--r--  drivers/base/topology.c | 194
-rw-r--r--  drivers/base/trace.c | 10
-rw-r--r--  drivers/base/trace.h | 56
-rw-r--r--  drivers/base/transport_class.c | 299
99 files changed, 52947 insertions, 0 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
new file mode 100644
index 000000000..6f04b831a
--- /dev/null
+++ b/drivers/base/Kconfig
@@ -0,0 +1,233 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Generic Driver Options"
+
+config AUXILIARY_BUS
+ bool
+
+config UEVENT_HELPER
+ bool "Support for uevent helper"
+ help
+ The uevent helper program is forked by the kernel for
+ every uevent.
+ Before the switch to the netlink-based uevent source, this was
+ used to hook hotplug scripts into kernel device events. It
+ usually pointed to a shell script at /sbin/hotplug.
+ This should not be used today, because typical systems create
+ many events at bootup or during device discovery in a very short
+ time frame. Forking one process per event can create so many
+ processes that it causes a high system load, or on smaller systems
+ it is known to cause out-of-memory situations during bootup.
+
+config UEVENT_HELPER_PATH
+ string "path to uevent helper"
+ depends on UEVENT_HELPER
+ default ""
+ help
+ To disable user space helper program execution by default,
+ specify an empty string here. This setting can still be altered
+ via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper
+ later at runtime.
+
+config DEVTMPFS
+ bool "Maintain a devtmpfs filesystem to mount at /dev"
+ help
+ This creates a tmpfs/ramfs filesystem instance early at bootup.
+ In this filesystem, the kernel driver core maintains device
+ nodes with their default names and permissions for all
+ registered devices with an assigned major/minor number.
+ Userspace can modify the filesystem content as needed, add
+ symlinks, and apply needed permissions.
+ It provides a fully functional /dev directory, where usually
+ udev runs on top, managing permissions and adding meaningful
+ symlinks.
+ In very limited environments, it may provide a sufficient
+ functional /dev without any further help. It also allows simple
+ rescue systems, and reliably handles dynamic major/minor numbers.
+
+ Notice: if CONFIG_TMPFS isn't enabled, the simpler ramfs
+ file system will be used instead.
+
+config DEVTMPFS_MOUNT
+ bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs"
+ depends on DEVTMPFS
+ help
+ This will instruct the kernel to automatically mount the
+ devtmpfs filesystem at /dev, directly after the kernel has
+ mounted the root filesystem. The behavior can be overridden
+ with the commandline parameter: devtmpfs.mount=0|1.
+ This option does not affect initramfs-based booting; there,
+ the devtmpfs filesystem always needs to be mounted manually
+ after the rootfs is mounted.
+ With this option enabled, a system can be brought up in
+ rescue mode with init=/bin/sh, even when the /dev directory
+ on the rootfs is completely empty.
+
+config DEVTMPFS_SAFE
+ bool "Use nosuid,noexec mount options on devtmpfs"
+ depends on DEVTMPFS
+ help
+ This instructs the kernel to include the MS_NOEXEC and MS_NOSUID mount
+ flags when mounting devtmpfs.
+
+ Notice: If enabled, things like /dev/mem cannot be mmapped
+ with the PROT_EXEC flag. This can break, for example, non-KMS
+ video drivers.
+
+config STANDALONE
+ bool "Select only drivers that don't need compile-time external firmware"
+ default y
+ help
+ Select this option if you don't have magic firmware for drivers that
+ need it.
+
+ If unsure, say Y.
+
+config PREVENT_FIRMWARE_BUILD
+ bool "Disable drivers features which enable custom firmware building"
+ default y
+ help
+ Say yes to disable driver features which enable building custom
+ driver firmware at kernel build time. These drivers do not use the
+ kernel firmware API to load firmware (CONFIG_FW_LOADER); instead they
+ use their own custom loading mechanism. The required firmware is
+ usually shipped with the driver; building the driver firmware
+ should only be needed if you have an updated firmware source.
+
+ Firmware should not be built as part of the kernel; these days
+ you should always prevent this and say Y here. There are only two
+ old drivers which enable building of their firmware at kernel build
+ time:
+
+ o CONFIG_WANXL through CONFIG_WANXL_BUILD_FIRMWARE
+ o CONFIG_SCSI_AIC79XX through CONFIG_AIC79XX_BUILD_FIRMWARE
+
+source "drivers/base/firmware_loader/Kconfig"
+
+config WANT_DEV_COREDUMP
+ bool
+ help
+ Drivers should "select" this option if they desire to use the
+ device coredump mechanism.
+
+config ALLOW_DEV_COREDUMP
+ bool "Allow device coredump" if EXPERT
+ default y
+ help
+ This option controls whether the device coredump mechanism is
+ available; if disabled, the mechanism will be omitted even if drivers
+ that can use it are enabled.
+ Say 'N' on more sensitive systems, or on systems that never want to
+ access this information, so that neither the code nor any dump data
+ is kept.
+
+ If unsure, say Y.
+
+config DEV_COREDUMP
+ bool
+ default y if WANT_DEV_COREDUMP
+ depends on ALLOW_DEV_COREDUMP
+
+config DEBUG_DRIVER
+ bool "Driver Core verbose debug messages"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you want the Driver core to produce a bunch of
+ debug messages to the system log. Select this if you are having a
+ problem with the driver core and want to see more of what is
+ going on.
+
+ If you are unsure about this, say N here.
+
+config DEBUG_DEVRES
+ bool "Managed device resources verbose debug messages"
+ depends on DEBUG_KERNEL
+ help
+ This option enables the kernel parameter devres.log. If set to
+ non-zero, devres debug messages are printed. Select this if
+ you are having a problem with devres or want to debug
+ resource management for a managed device. devres.log can be
+ switched on and off at runtime via its sysfs node.
+
+ If you are unsure about this, say N here.
+
+config DEBUG_TEST_DRIVER_REMOVE
+ bool "Test driver remove calls during probe (UNSTABLE)"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here if you want the Driver core to test driver remove functions
+ by calling probe, remove, probe. This tests the remove path without
+ having to unbind the driver or unload the driver module.
+
+ This option is expected to find errors and may render your system
+ unusable. You should say N here unless you are explicitly looking to
+ test this functionality.
+
+config PM_QOS_KUNIT_TEST
+ bool "KUnit Test for PM QoS features" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
+
+config HMEM_REPORTING
+ bool
+ default n
+ depends on NUMA
+ help
+ Enable reporting for heterogeneous memory access attributes under
+ their non-uniform memory nodes.
+
+source "drivers/base/test/Kconfig"
+
+config SYS_HYPERVISOR
+ bool
+ default n
+
+config GENERIC_CPU_DEVICES
+ bool
+ default n
+
+config GENERIC_CPU_AUTOPROBE
+ bool
+
+config GENERIC_CPU_VULNERABILITIES
+ bool
+
+config SOC_BUS
+ bool
+ select GLOB
+
+source "drivers/base/regmap/Kconfig"
+
+config DMA_SHARED_BUFFER
+ bool
+ default n
+ select IRQ_WORK
+ help
+ This option enables the framework for buffer-sharing between
+ multiple drivers. A buffer is associated with a file using driver
+ API extensions; the file's descriptor can then be passed on to other
+ drivers.
+
+config DMA_FENCE_TRACE
+ bool "Enable verbose DMA_FENCE_TRACE messages"
+ depends on DMA_SHARED_BUFFER
+ help
+ Enable the DMA_FENCE_TRACE printks. This will add extra
+ spam to the console log, but will make it easier to diagnose
+ lockup related problems for dma-buffers shared across multiple
+ devices.
+
+config GENERIC_ARCH_TOPOLOGY
+ bool
+ help
+ Enable support for the common architecture topology code: e.g., parsing
+ CPU capacity information from DT, usage of such information for
+ appropriate scaling, and a sysfs interface for reading capacity values
+ at runtime.
+
+config GENERIC_ARCH_NUMA
+ bool
+ help
+ Enable support for generic NUMA implementation. Currently, RISC-V
+ and ARM64 use it.
+
+endmenu
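The WANT_DEV_COREDUMP and DEV_COREDUMP options above gate the devcoredump facility (drivers/base/devcoredump.c in the diffstat). As a minimal, hypothetical sketch of how a driver that selects CONFIG_WANT_DEV_COREDUMP might hand a crash snapshot to that facility: the my_driver_report_crash() helper and its arguments are invented for illustration, only dev_coredumpv() is the real API.

#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Illustrative only: copy a firmware crash snapshot into vmalloc'ed memory
 * and hand it to devcoredump, which takes ownership of the buffer and
 * vfree()s it once the dump has been read or has timed out. */
static void my_driver_report_crash(struct device *dev,
				   const void *snapshot, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;

	memcpy(buf, snapshot, len);
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}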
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
new file mode 100644
index 000000000..83217d243
--- /dev/null
+++ b/drivers/base/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Linux device tree
+
+obj-y := component.o core.o bus.o dd.o syscore.o \
+ driver.o class.o platform.o \
+ cpu.o firmware.o init.o map.o devres.o \
+ attribute_container.o transport_class.o \
+ topology.o container.o property.o cacheinfo.o \
+ swnode.o
+obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
+obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
+obj-y += power/
+obj-$(CONFIG_ISA_BUS_API) += isa.o
+obj-y += firmware_loader/
+obj-$(CONFIG_NUMA) += node.o
+obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o
+ifeq ($(CONFIG_SYSFS),y)
+obj-$(CONFIG_MODULES) += module.o
+endif
+obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_REGMAP) += regmap/
+obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
+obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += arch_topology.o
+obj-$(CONFIG_GENERIC_ARCH_NUMA) += arch_numa.o
+obj-$(CONFIG_ACPI) += physical_location.o
+
+obj-y += test/
+
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o := -I$(src)
+obj-$(CONFIG_TRACING) += trace.o
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
new file mode 100644
index 000000000..eaa31e567
--- /dev/null
+++ b/drivers/base/arch_numa.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * NUMA support, based on the x86 implementation.
+ *
+ * Copyright (C) 2015 Cavium Inc.
+ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
+ */
+
+#define pr_fmt(fmt) "NUMA: " fmt
+
+#include <linux/acpi.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <asm/sections.h>
+
+struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+nodemask_t numa_nodes_parsed __initdata;
+static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
+
+static int numa_distance_cnt;
+static u8 *numa_distance;
+bool numa_off;
+
+static __init int numa_parse_early_param(char *opt)
+{
+ if (!opt)
+ return -EINVAL;
+ if (str_has_prefix(opt, "off"))
+ numa_off = true;
+
+ return 0;
+}
+early_param("numa", numa_parse_early_param);
+
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+EXPORT_SYMBOL(node_to_cpumask_map);
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const struct cpumask *cpumask_of_node(int node)
+{
+
+ if (node == NUMA_NO_NODE)
+ return cpu_all_mask;
+
+ if (WARN_ON(node < 0 || node >= nr_node_ids))
+ return cpu_none_mask;
+
+ if (WARN_ON(node_to_cpumask_map[node] == NULL))
+ return cpu_online_mask;
+
+ return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+#endif
+
+static void numa_update_cpu(unsigned int cpu, bool remove)
+{
+ int nid = cpu_to_node(cpu);
+
+ if (nid == NUMA_NO_NODE)
+ return;
+
+ if (remove)
+ cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]);
+ else
+ cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+}
+
+void numa_add_cpu(unsigned int cpu)
+{
+ numa_update_cpu(cpu, false);
+}
+
+void numa_remove_cpu(unsigned int cpu)
+{
+ numa_update_cpu(cpu, true);
+}
+
+void numa_clear_node(unsigned int cpu)
+{
+ numa_remove_cpu(cpu);
+ set_cpu_numa_node(cpu, NUMA_NO_NODE);
+}
+
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: cpumask_of_node() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+ int node;
+
+ /* setup nr_node_ids if not done yet */
+ if (nr_node_ids == MAX_NUMNODES)
+ setup_nr_node_ids();
+
+ /* allocate and clear the mapping */
+ for (node = 0; node < nr_node_ids; node++) {
+ alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
+ cpumask_clear(node_to_cpumask_map[node]);
+ }
+
+ /* cpumask_of_node() will now work */
+ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
+}
+
+/*
+ * Set the cpu to node and mem mapping
+ */
+void numa_store_cpu_info(unsigned int cpu)
+{
+ set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
+}
+
+void __init early_map_cpu_to_node(unsigned int cpu, int nid)
+{
+ /* fallback to node 0 */
+ if (nid < 0 || nid >= MAX_NUMNODES || numa_off)
+ nid = 0;
+
+ cpu_to_node_map[cpu] = nid;
+
+ /*
+ * We should set the numa node of cpu0 as soon as possible, because it
+ * has already been set up online before. cpu_to_node(0) will soon be
+ * called.
+ */
+ if (!cpu)
+ set_cpu_numa_node(cpu, nid);
+}
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init early_cpu_to_node(int cpu)
+{
+ return cpu_to_node_map[cpu];
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc = -EINVAL;
+
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
+ pcpu_cpu_distance,
+ early_cpu_to_node);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+ if (rc < 0)
+ pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
+#endif
+ }
+
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+ if (rc < 0)
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node);
+#endif
+ if (rc < 0)
+ panic("Failed to initialize percpu areas (err=%d).", rc);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+/**
+ * numa_add_memblk() - Set node id to memblk
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ int ret;
+
+ ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
+ if (ret < 0) {
+ pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
+ start, (end - 1), nid);
+ return ret;
+ }
+
+ node_set(nid, numa_nodes_parsed);
+ return ret;
+}
+
+/*
+ * Initialize NODE_DATA for a node on the local memory
+ */
+static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
+{
+ const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
+ u64 nd_pa;
+ void *nd;
+ int tnid;
+
+ if (start_pfn >= end_pfn)
+ pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
+
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, nid);
+
+ nd = __va(nd_pa);
+
+ /* report and initialize */
+ pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n",
+ nd_pa, nd_pa + nd_size - 1);
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+ if (tnid != nid)
+ pr_info("NODE_DATA(%d) on node %d\n", nid, tnid);
+
+ node_data[nid] = nd;
+ memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
+ NODE_DATA(nid)->node_id = nid;
+ NODE_DATA(nid)->node_start_pfn = start_pfn;
+ NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+}
+
+/*
+ * numa_free_distance
+ *
+ * The current table is freed.
+ */
+void __init numa_free_distance(void)
+{
+ size_t size;
+
+ if (!numa_distance)
+ return;
+
+ size = numa_distance_cnt * numa_distance_cnt *
+ sizeof(numa_distance[0]);
+
+ memblock_free(numa_distance, size);
+ numa_distance_cnt = 0;
+ numa_distance = NULL;
+}
+
+/*
+ * Create a new NUMA distance table.
+ */
+static int __init numa_alloc_distance(void)
+{
+ size_t size;
+ int i, j;
+
+ size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
+ numa_distance = memblock_alloc(size, PAGE_SIZE);
+ if (WARN_ON(!numa_distance))
+ return -ENOMEM;
+
+ numa_distance_cnt = nr_node_ids;
+
+ /* fill with the default distances */
+ for (i = 0; i < numa_distance_cnt; i++)
+ for (j = 0; j < numa_distance_cnt; j++)
+ numa_distance[i * numa_distance_cnt + j] = i == j ?
+ LOCAL_DISTANCE : REMOTE_DISTANCE;
+
+ pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
+
+ return 0;
+}
+
+/**
+ * numa_set_distance() - Set inter node NUMA distance from node to node.
+ * @from: the 'from' node to set distance
+ * @to: the 'to' node to set distance
+ * @distance: NUMA distance
+ *
+ * Set the distance from node @from to @to to @distance.
+ * If distance table doesn't exist, a warning is printed.
+ *
+ * If @from or @to is higher than the highest known node or lower than zero
+ * or @distance doesn't make sense, the call is ignored.
+ */
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if (!numa_distance) {
+ pr_warn_once("Warning: distance table not allocated yet\n");
+ return;
+ }
+
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
+ from < 0 || to < 0) {
+ pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ if ((u8)distance != distance ||
+ (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ numa_distance[from * numa_distance_cnt + to] = distance;
+}
+
+/*
+ * Return NUMA distance @from to @to
+ */
+int __node_distance(int from, int to)
+{
+ if (from >= numa_distance_cnt || to >= numa_distance_cnt)
+ return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+ return numa_distance[from * numa_distance_cnt + to];
+}
+EXPORT_SYMBOL(__node_distance);
+
+static int __init numa_register_nodes(void)
+{
+ int nid;
+ struct memblock_region *mblk;
+
+ /* Check that valid nid is set to memblks */
+ for_each_mem_region(mblk) {
+ int mblk_nid = memblock_get_region_node(mblk);
+ phys_addr_t start = mblk->base;
+ phys_addr_t end = mblk->base + mblk->size - 1;
+
+ if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
+ pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
+ mblk_nid, &start, &end);
+ return -EINVAL;
+ }
+ }
+
+ /* Finally register nodes. */
+ for_each_node_mask(nid, numa_nodes_parsed) {
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+ setup_node_data(nid, start_pfn, end_pfn);
+ node_set_online(nid);
+ }
+
+ /* Setup online nodes to actual nodes */
+ node_possible_map = numa_nodes_parsed;
+
+ return 0;
+}
+
+static int __init numa_init(int (*init_func)(void))
+{
+ int ret;
+
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+
+ ret = numa_alloc_distance();
+ if (ret < 0)
+ return ret;
+
+ ret = init_func();
+ if (ret < 0)
+ goto out_free_distance;
+
+ if (nodes_empty(numa_nodes_parsed)) {
+ pr_info("No NUMA configuration found\n");
+ ret = -EINVAL;
+ goto out_free_distance;
+ }
+
+ ret = numa_register_nodes();
+ if (ret < 0)
+ goto out_free_distance;
+
+ setup_node_to_cpumask_map();
+
+ return 0;
+out_free_distance:
+ numa_free_distance();
+ return ret;
+}
+
+/**
+ * dummy_numa_init() - Fallback dummy NUMA init
+ *
+ * Used if there's no underlying NUMA architecture, NUMA initialization
+ * fails, or NUMA is disabled on the command line.
+ *
+ * Must online at least one node (node 0) and add memory blocks that cover all
+ * allowed memory. It is unlikely that this function fails.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int __init dummy_numa_init(void)
+{
+ phys_addr_t start = memblock_start_of_DRAM();
+ phys_addr_t end = memblock_end_of_DRAM() - 1;
+ int ret;
+
+ if (numa_off)
+ pr_info("NUMA disabled\n"); /* Forced off on command line. */
+ pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
+
+ ret = numa_add_memblk(0, start, end + 1);
+ if (ret) {
+ pr_err("NUMA init failed\n");
+ return ret;
+ }
+
+ numa_off = true;
+ return 0;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+static int __init arch_acpi_numa_init(void)
+{
+ int ret;
+
+ ret = acpi_numa_init();
+ if (ret) {
+ pr_info("Failed to initialise from firmware\n");
+ return ret;
+ }
+
+ return srat_disabled() ? -EINVAL : 0;
+}
+#else
+static int __init arch_acpi_numa_init(void)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/**
+ * arch_numa_init() - Initialize NUMA
+ *
+ * Try each configured NUMA initialization method until one succeeds. The
+ * last fallback is dummy single node config encompassing whole memory.
+ */
+void __init arch_numa_init(void)
+{
+ if (!numa_off) {
+ if (!acpi_disabled && !numa_init(arch_acpi_numa_init))
+ return;
+ if (acpi_disabled && !numa_init(of_numa_init))
+ return;
+ }
+
+ numa_init(dummy_numa_init);
+}
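A note on the data layout used above: numa_distance is a flat N x N byte array indexed as numa_distance[from * numa_distance_cnt + to], pre-filled with LOCAL_DISTANCE on the diagonal and REMOTE_DISTANCE elsewhere before firmware (ACPI SLIT or DT) overrides individual entries via numa_set_distance(). A small user-space sketch of that indexing follows; the node count and the overridden distance of 32 are chosen purely for illustration.

#include <stdio.h>

/* Mirror the kernel's default distances (include/linux/topology.h). */
#define LOCAL_DISTANCE  10
#define REMOTE_DISTANCE 20
#define NR_NODES        3	/* illustrative node count */

static unsigned char distance[NR_NODES * NR_NODES];

int main(void)
{
	int from, to;

	/* Defaults, as numa_alloc_distance() sets them up. */
	for (from = 0; from < NR_NODES; from++)
		for (to = 0; to < NR_NODES; to++)
			distance[from * NR_NODES + to] =
				from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;

	/* A firmware table would then override entries; here we pretend
	 * nodes 0 and 2 are further apart than the default. */
	distance[0 * NR_NODES + 2] = 32;
	distance[2 * NR_NODES + 0] = 32;

	for (from = 0; from < NR_NODES; from++) {
		for (to = 0; to < NR_NODES; to++)
			printf("%3d ", distance[from * NR_NODES + to]);
		printf("\n");
	}
	return 0;
}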
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
new file mode 100644
index 000000000..e7d6e6657
--- /dev/null
+++ b/drivers/base/arch_topology.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Arch specific cpu topology information
+ *
+ * Copyright (C) 2016, ARM Ltd.
+ * Written by: Juri Lelli, ARM Ltd.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/sched/topology.h>
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+#include <linux/sched.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/thermal_pressure.h>
+
+static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
+static struct cpumask scale_freq_counters_mask;
+static bool scale_freq_invariant;
+static DEFINE_PER_CPU(u32, freq_factor) = 1;
+
+static bool supports_scale_freq_counters(const struct cpumask *cpus)
+{
+ return cpumask_subset(cpus, &scale_freq_counters_mask);
+}
+
+bool topology_scale_freq_invariant(void)
+{
+ return cpufreq_supports_freq_invariance() ||
+ supports_scale_freq_counters(cpu_online_mask);
+}
+
+static void update_scale_freq_invariant(bool status)
+{
+ if (scale_freq_invariant == status)
+ return;
+
+ /*
+ * Task scheduler behavior depends on frequency invariance support,
+ * either cpufreq or counter driven. If the support status changes as
+ * a result of counter initialisation and use, retrigger the build of
+ * scheduling domains to ensure the information is propagated properly.
+ */
+ if (topology_scale_freq_invariant() == status) {
+ scale_freq_invariant = status;
+ rebuild_sched_domains_energy();
+ }
+}
+
+void topology_set_scale_freq_source(struct scale_freq_data *data,
+ const struct cpumask *cpus)
+{
+ struct scale_freq_data *sfd;
+ int cpu;
+
+ /*
+ * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
+ * supported by cpufreq.
+ */
+ if (cpumask_empty(&scale_freq_counters_mask))
+ scale_freq_invariant = topology_scale_freq_invariant();
+
+ rcu_read_lock();
+
+ for_each_cpu(cpu, cpus) {
+ sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
+
+ /* Use ARCH provided counters whenever possible */
+ if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
+ rcu_assign_pointer(per_cpu(sft_data, cpu), data);
+ cpumask_set_cpu(cpu, &scale_freq_counters_mask);
+ }
+ }
+
+ rcu_read_unlock();
+
+ update_scale_freq_invariant(true);
+}
+EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
+
+void topology_clear_scale_freq_source(enum scale_freq_source source,
+ const struct cpumask *cpus)
+{
+ struct scale_freq_data *sfd;
+ int cpu;
+
+ rcu_read_lock();
+
+ for_each_cpu(cpu, cpus) {
+ sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));
+
+ if (sfd && sfd->source == source) {
+ rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
+ cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
+ }
+ }
+
+ rcu_read_unlock();
+
+ /*
+ * Make sure all references to previous sft_data are dropped to avoid
+ * use-after-free races.
+ */
+ synchronize_rcu();
+
+ update_scale_freq_invariant(false);
+}
+EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
+
+void topology_scale_freq_tick(void)
+{
+ struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));
+
+ if (sfd)
+ sfd->set_freq_scale();
+}
+
+DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
+
+void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
+{
+ unsigned long scale;
+ int i;
+
+ if (WARN_ON_ONCE(!cur_freq || !max_freq))
+ return;
+
+ /*
+ * If the use of counters for FIE is enabled, just return as we don't
+ * want to update the scale factor with information from CPUFREQ.
+ * Instead the scale factor will be updated from arch_scale_freq_tick.
+ */
+ if (supports_scale_freq_counters(cpus))
+ return;
+
+ scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+ for_each_cpu(i, cpus)
+ per_cpu(arch_freq_scale, i) = scale;
+}
+
+DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
+
+void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
+{
+ per_cpu(cpu_scale, cpu) = capacity;
+}
+
+DEFINE_PER_CPU(unsigned long, thermal_pressure);
+
+/**
+ * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * @cpus : The related CPUs for which capacity has been reduced
+ * @capped_freq : The maximum allowed frequency that CPUs can run at
+ *
+ * Update the value of thermal pressure for all @cpus in the mask. The
+ * cpumask should include all (online+offline) affected CPUs, to avoid
+ * operating on stale data when hot-plug is used for some CPUs. The
+ * @capped_freq reflects the currently allowed max CPUs frequency due to
+ * thermal capping. It might be also a boost frequency value, which is bigger
+ * than the internal 'freq_factor' max frequency. In such case the pressure
+ * value should simply be removed, since this is an indication that there is
+ * no thermal throttling. The @capped_freq must be provided in kHz.
+ */
+void topology_update_thermal_pressure(const struct cpumask *cpus,
+ unsigned long capped_freq)
+{
+ unsigned long max_capacity, capacity, th_pressure;
+ u32 max_freq;
+ int cpu;
+
+ cpu = cpumask_first(cpus);
+ max_capacity = arch_scale_cpu_capacity(cpu);
+ max_freq = per_cpu(freq_factor, cpu);
+
+ /* Convert to MHz scale which is used in 'freq_factor' */
+ capped_freq /= 1000;
+
+ /*
+ * Handle properly the boost frequencies, which should simply clean
+ * the thermal pressure value.
+ */
+ if (max_freq <= capped_freq)
+ capacity = max_capacity;
+ else
+ capacity = mult_frac(max_capacity, capped_freq, max_freq);
+
+ th_pressure = max_capacity - capacity;
+
+ trace_thermal_pressure_update(cpu, th_pressure);
+
+ for_each_cpu(cpu, cpus)
+ WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+}
+EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
+
+static ssize_t cpu_capacity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+
+ return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
+}
+
+static void update_topology_flags_workfn(struct work_struct *work);
+static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
+
+static DEVICE_ATTR_RO(cpu_capacity);
+
+static int register_cpu_capacity_sysctl(void)
+{
+ int i;
+ struct device *cpu;
+
+ for_each_possible_cpu(i) {
+ cpu = get_cpu_device(i);
+ if (!cpu) {
+ pr_err("%s: too early to get CPU%d device!\n",
+ __func__, i);
+ continue;
+ }
+ device_create_file(cpu, &dev_attr_cpu_capacity);
+ }
+
+ return 0;
+}
+subsys_initcall(register_cpu_capacity_sysctl);
+
+static int update_topology;
+
+int topology_update_cpu_topology(void)
+{
+ return update_topology;
+}
+
+/*
+ * Updating the sched_domains can't be done directly from cpufreq callbacks
+ * due to locking, so queue the work for later.
+ */
+static void update_topology_flags_workfn(struct work_struct *work)
+{
+ update_topology = 1;
+ rebuild_sched_domains();
+ pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
+ update_topology = 0;
+}
+
+static u32 *raw_capacity;
+
+static int free_raw_capacity(void)
+{
+ kfree(raw_capacity);
+ raw_capacity = NULL;
+
+ return 0;
+}
+
+void topology_normalize_cpu_scale(void)
+{
+ u64 capacity;
+ u64 capacity_scale;
+ int cpu;
+
+ if (!raw_capacity)
+ return;
+
+ capacity_scale = 1;
+ for_each_possible_cpu(cpu) {
+ capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity_scale = max(capacity, capacity_scale);
+ }
+
+ pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
+ for_each_possible_cpu(cpu) {
+ capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
+ capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
+ capacity_scale);
+ topology_set_cpu_scale(cpu, capacity);
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
+ cpu, topology_get_cpu_scale(cpu));
+ }
+}
+
+bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+{
+ struct clk *cpu_clk;
+ static bool cap_parsing_failed;
+ int ret;
+ u32 cpu_capacity;
+
+ if (cap_parsing_failed)
+ return false;
+
+ ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
+ &cpu_capacity);
+ if (!ret) {
+ if (!raw_capacity) {
+ raw_capacity = kcalloc(num_possible_cpus(),
+ sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity) {
+ cap_parsing_failed = true;
+ return false;
+ }
+ }
+ raw_capacity[cpu] = cpu_capacity;
+ pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
+ cpu_node, raw_capacity[cpu]);
+
+ /*
+ * Update freq_factor for calculating early boot cpu capacities.
+ * For non-clk CPU DVFS mechanism, there's no way to get the
+ * frequency value now, assuming they are running at the same
+ * frequency (by keeping the initial freq_factor value).
+ */
+ cpu_clk = of_clk_get(cpu_node, 0);
+ if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ per_cpu(freq_factor, cpu) =
+ clk_get_rate(cpu_clk) / 1000;
+ clk_put(cpu_clk);
+ }
+ } else {
+ if (raw_capacity) {
+ pr_err("cpu_capacity: missing %pOF raw capacity\n",
+ cpu_node);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ }
+ cap_parsing_failed = true;
+ free_raw_capacity();
+ }
+
+ return !ret;
+}
+
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+
+void topology_init_cpu_capacity_cppc(void)
+{
+ struct cppc_perf_caps perf_caps;
+ int cpu;
+
+ if (likely(!acpi_cpc_valid()))
+ return;
+
+ raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
+ GFP_KERNEL);
+ if (!raw_capacity)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ if (!cppc_get_perf_caps(cpu, &perf_caps) &&
+ (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
+ (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
+ raw_capacity[cpu] = perf_caps.highest_perf;
+ pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
+ cpu, raw_capacity[cpu]);
+ continue;
+ }
+
+ pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
+ pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
+ goto exit;
+ }
+
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ pr_debug("cpu_capacity: cpu_capacity initialization done\n");
+
+exit:
+ free_raw_capacity();
+}
+#endif
+
+#ifdef CONFIG_CPU_FREQ
+static cpumask_var_t cpus_to_visit;
+static void parsing_done_workfn(struct work_struct *work);
+static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+
+static int
+init_cpu_capacity_callback(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+ int cpu;
+
+ if (!raw_capacity)
+ return 0;
+
+ if (val != CPUFREQ_CREATE_POLICY)
+ return 0;
+
+ pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+ cpumask_pr_args(policy->related_cpus),
+ cpumask_pr_args(cpus_to_visit));
+
+ cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;
+
+ if (cpumask_empty(cpus_to_visit)) {
+ topology_normalize_cpu_scale();
+ schedule_work(&update_topology_flags_work);
+ free_raw_capacity();
+ pr_debug("cpu_capacity: parsing done\n");
+ schedule_work(&parsing_done_work);
+ }
+
+ return 0;
+}
+
+static struct notifier_block init_cpu_capacity_notifier = {
+ .notifier_call = init_cpu_capacity_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ int ret;
+
+ /*
+ * On ACPI-based systems skip registering cpufreq notifier as cpufreq
+ * information is not needed for cpu capacity initialization.
+ */
+ if (!acpi_disabled || !raw_capacity)
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(cpus_to_visit, cpu_possible_mask);
+
+ ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ if (ret)
+ free_cpumask_var(cpus_to_visit);
+
+ return ret;
+}
+core_initcall(register_cpufreq_notifier);
+
+static void parsing_done_workfn(struct work_struct *work)
+{
+ cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+ free_cpumask_var(cpus_to_visit);
+}
+
+#else
+core_initcall(free_raw_capacity);
+#endif
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+/*
+ * This function returns the logical CPU number of the given node.
+ * There are basically three kinds of return values:
+ * (1) logical CPU number, which is >= 0.
+ * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
+ * there is no possible logical CPU in the kernel to match. This happens
+ * when CONFIG_NR_CPUS is configured to be smaller than the number of
+ * CPU nodes in the DT. We need to just ignore this case.
+ * (3) -1 if the node does not exist in the device tree
+ */
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ cpu = of_cpu_node_to_id(cpu_node);
+ if (cpu >= 0)
+ topology_parse_cpu_capacity(cpu_node, cpu);
+ else
+ pr_info("CPU node for %pOF exist but the possible cpu range is :%*pbl\n",
+ cpu_node, cpumask_pr_args(cpu_possible_mask));
+
+ of_node_put(cpu_node);
+ return cpu;
+}
+
+static int __init parse_core(struct device_node *core, int package_id,
+ int cluster_id, int core_id)
+{
+ char name[20];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else if (cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for thread\n", t);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%pOF: Core has both threads and CPU\n",
+ core);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].package_id = package_id;
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf && cpu != -ENODEV) {
+ pr_err("%pOF: Can't get CPU for leaf core\n", core);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int package_id,
+ int cluster_id, int depth)
+{
+ char name[20];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, package_id, i, depth + 1);
+ if (depth > 0)
+ pr_warn("Topology for clusters of clusters not yet supported\n");
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%pOF: cpu-map children should be clusters\n",
+ c);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, package_id, cluster_id,
+ core_id++);
+ } else {
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%pOF: empty cluster\n", cluster);
+
+ return 0;
+}
+
+static int __init parse_socket(struct device_node *socket)
+{
+ char name[20];
+ struct device_node *c;
+ bool has_socket = false;
+ int package_id = 0, ret;
+
+ do {
+ snprintf(name, sizeof(name), "socket%d", package_id);
+ c = of_get_child_by_name(socket, name);
+ if (c) {
+ has_socket = true;
+ ret = parse_cluster(c, package_id, -1, 0);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ package_id++;
+ } while (c);
+
+ if (!has_socket)
+ ret = parse_cluster(socket, 0, -1, 0);
+
+ return ret;
+}
+
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_socket(map);
+ if (ret != 0)
+ goto out_map;
+
+ topology_normalize_cpu_scale();
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu)
+ if (cpu_topology[cpu].package_id < 0) {
+ ret = -EINVAL;
+ break;
+ }
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+#endif
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
+
+ /* Find the smaller of NUMA, core or LLC siblings */
+ if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
+ /* not numa in package, let's use the package siblings */
+ core_mask = &cpu_topology[cpu].core_sibling;
+ }
+
+ if (last_level_cache_is_valid(cpu)) {
+ if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
+ core_mask = &cpu_topology[cpu].llc_sibling;
+ }
+
+ /*
+ * For systems with no shared cpu-side LLC but with clusters defined,
+ * extend core_mask to cluster_siblings. The sched domain builder will
+ * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
+ */
+ if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+ cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
+ core_mask = &cpu_topology[cpu].cluster_sibling;
+
+ return core_mask;
+}
+
+const struct cpumask *cpu_clustergroup_mask(int cpu)
+{
+ /*
+ * Prevent cpu_clustergroup_mask() from spanning more CPUs than, or the
+ * same CPUs as, cpu_coregroup_mask().
+ */
+ if (cpumask_subset(cpu_coregroup_mask(cpu),
+ &cpu_topology[cpu].cluster_sibling))
+ return topology_sibling_cpumask(cpu);
+
+ return &cpu_topology[cpu].cluster_sibling;
+}
+
+void update_siblings_masks(unsigned int cpuid)
+{
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu, ret;
+
+ ret = detect_cache_attributes(cpuid);
+ if (ret && ret != -ENOENT)
+ pr_info("Early cacheinfo failed, ret = %d\n", ret);
+
+ /* update core and thread sibling masks */
+ for_each_online_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (last_level_cache_is_shared(cpu, cpuid)) {
+ cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
+ }
+
+ if (cpuid_topo->package_id != cpu_topo->package_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ continue;
+
+ if (cpuid_topo->cluster_id >= 0) {
+ cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
+ }
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+}
+
+static void clear_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpumask_clear(&cpu_topo->llc_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+ cpumask_clear(&cpu_topo->cluster_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
+void __init reset_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = -1;
+ cpu_topo->cluster_id = -1;
+ cpu_topo->package_id = -1;
+
+ clear_cpu_topology(cpu);
+ }
+}
+
+void remove_cpu_topology(unsigned int cpu)
+{
+ int sibling;
+
+ for_each_cpu(sibling, topology_core_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_cluster_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
+ for_each_cpu(sibling, topology_llc_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+ clear_cpu_topology(cpu);
+}
+
+__weak int __init parse_acpi_topology(void)
+{
+ return 0;
+}
+
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+void __init init_cpu_topology(void)
+{
+ int ret;
+
+ reset_cpu_topology();
+ ret = parse_acpi_topology();
+ if (!ret)
+ ret = of_have_populated_dt() && parse_dt_topology();
+
+ if (ret) {
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ reset_cpu_topology();
+ return;
+ }
+}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+
+ if (cpuid_topo->package_id != -1)
+ goto topology_populated;
+
+ cpuid_topo->thread_id = -1;
+ cpuid_topo->core_id = cpuid;
+ cpuid_topo->package_id = cpu_to_node(cpuid);
+
+ pr_debug("CPU%u: package %d core %d thread %d\n",
+ cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
+ cpuid_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpuid);
+}
+#endif
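For readers following the arithmetic in topology_set_freq_scale() and topology_update_thermal_pressure() above, here is a small user-space sketch of the same fixed-point calculations. The frequencies and the capacity of 1024 are assumed example values, and SCHED_CAPACITY_SHIFT mirrors the kernel's value of 10; the kernel uses mult_frac() instead of a plain multiply to avoid overflow.

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10	/* mirrors the kernel's 1024-based scale */

int main(void)
{
	unsigned long cur_freq = 1200000;	/* kHz, example current frequency */
	unsigned long max_freq = 2000000;	/* kHz, example maximum frequency */
	unsigned long max_capacity = 1024;	/* example arch_scale_cpu_capacity() */
	unsigned long freq_factor = 2000;	/* max frequency in MHz (per-CPU freq_factor) */
	unsigned long capped_freq = 1500000 / 1000;	/* 1.5 GHz thermal cap, in MHz */

	/* Frequency-invariance scale: 1200/2000 of 1024 -> 614 */
	unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	/* Thermal pressure: capacity lost to the cap: 1024 - 768 -> 256 */
	unsigned long capacity = max_capacity * capped_freq / freq_factor;
	unsigned long th_pressure = max_capacity - capacity;

	printf("freq scale = %lu, thermal pressure = %lu\n", scale, th_pressure);
	return 0;
}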
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
new file mode 100644
index 000000000..01ef796c2
--- /dev/null
+++ b/drivers/base/attribute_container.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * attribute_container.c - implementation of a simple container for classes
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * The basic idea here is to enable a device to be attached to an
+ * arbitrary number of classes without having to allocate storage for them.
+ * Instead, the contained classes select the devices they need to attach
+ * to via a matching function.
+ */
+
+#include <linux/attribute_container.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+#include "base.h"
+
+/* This is a private structure used to tie the classdev and the
+ * container .. it should never be visible outside this file */
+struct internal_container {
+ struct klist_node node;
+ struct attribute_container *cont;
+ struct device classdev;
+};
+
+static void internal_container_klist_get(struct klist_node *n)
+{
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+ get_device(&ic->classdev);
+}
+
+static void internal_container_klist_put(struct klist_node *n)
+{
+ struct internal_container *ic =
+ container_of(n, struct internal_container, node);
+ put_device(&ic->classdev);
+}
+
+
+/**
+ * attribute_container_classdev_to_container - given a classdev, return the container
+ *
+ * @classdev: the class device created by attribute_container_add_device.
+ *
+ * Returns the container associated with this classdev.
+ */
+struct attribute_container *
+attribute_container_classdev_to_container(struct device *classdev)
+{
+ struct internal_container *ic =
+ container_of(classdev, struct internal_container, classdev);
+ return ic->cont;
+}
+EXPORT_SYMBOL_GPL(attribute_container_classdev_to_container);
+
+static LIST_HEAD(attribute_container_list);
+
+static DEFINE_MUTEX(attribute_container_mutex);
+
+/**
+ * attribute_container_register - register an attribute container
+ *
+ * @cont: The container to register. This must be allocated by the
+ * caller and should also be zeroed by it.
+ */
+int
+attribute_container_register(struct attribute_container *cont)
+{
+ INIT_LIST_HEAD(&cont->node);
+ klist_init(&cont->containers, internal_container_klist_get,
+ internal_container_klist_put);
+
+ mutex_lock(&attribute_container_mutex);
+ list_add_tail(&cont->node, &attribute_container_list);
+ mutex_unlock(&attribute_container_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(attribute_container_register);
+
+/**
+ * attribute_container_unregister - remove a container registration
+ *
+ * @cont: previously registered container to remove
+ */
+int
+attribute_container_unregister(struct attribute_container *cont)
+{
+ int retval = -EBUSY;
+
+ mutex_lock(&attribute_container_mutex);
+ spin_lock(&cont->containers.k_lock);
+ if (!list_empty(&cont->containers.k_list))
+ goto out;
+ retval = 0;
+ list_del(&cont->node);
+ out:
+ spin_unlock(&cont->containers.k_lock);
+ mutex_unlock(&attribute_container_mutex);
+ return retval;
+
+}
+EXPORT_SYMBOL_GPL(attribute_container_unregister);
+
+/* private function used as class release */
+static void attribute_container_release(struct device *classdev)
+{
+ struct internal_container *ic
+ = container_of(classdev, struct internal_container, classdev);
+ struct device *dev = classdev->parent;
+
+ kfree(ic);
+ put_device(dev);
+}
+
+/**
+ * attribute_container_add_device - see if any container is interested in dev
+ *
+ * @dev: device to add attributes to
+ * @fn: function to trigger addition of class device.
+ *
+ * This function allocates storage for the class device(s) to be
+ * attached to dev (one for each matching attribute_container). If no
+ * fn is provided, the code will simply register the class device via
+ * device_add. If a function is provided, it is expected to add
+ * the class device at the appropriate time. One of the things that
+ * might be necessary is to allocate and initialise the classdev and
+ * then add it at a later time. To do this, call this routine for
+ * allocation and initialisation and then use
+ * attribute_container_device_trigger() to call device_add() on
+ * it. Note: after this, the class device contains a reference to dev
+ * which is not relinquished until the release of the classdev.
+ */
+void
+attribute_container_add_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *))
+{
+ struct attribute_container *cont;
+
+ mutex_lock(&attribute_container_mutex);
+ list_for_each_entry(cont, &attribute_container_list, node) {
+ struct internal_container *ic;
+
+ if (attribute_container_no_classdevs(cont))
+ continue;
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ ic = kzalloc(sizeof(*ic), GFP_KERNEL);
+ if (!ic) {
+ dev_err(dev, "failed to allocate class container\n");
+ continue;
+ }
+
+ ic->cont = cont;
+ device_initialize(&ic->classdev);
+ ic->classdev.parent = get_device(dev);
+ ic->classdev.class = cont->class;
+ cont->class->dev_release = attribute_container_release;
+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else
+ attribute_container_add_class_device(&ic->classdev);
+ klist_add_tail(&ic->node, &cont->containers);
+ }
+ mutex_unlock(&attribute_container_mutex);
+}
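To make the flow described in the kernel-doc above concrete, a hypothetical usage sketch follows: a subsystem registers a container and then calls attribute_container_add_device() with fn == NULL, so each matching container gets its classdev registered immediately via attribute_container_add_class_device(). All my_* names and the "my_bus" bus name are invented for illustration; this is not part of the patch.

#include <linux/attribute_container.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/string.h>

static struct class my_cont_class = {
	.name = "my_cont_class",	/* hypothetical class */
};

/* Attach to every device on a hypothetical "my_bus" bus. */
static int my_cont_match(struct attribute_container *cont, struct device *dev)
{
	return dev->bus && !strcmp(dev->bus->name, "my_bus");
}

static struct attribute_container my_cont = {
	.class = &my_cont_class,
	.match = my_cont_match,
};

static int __init my_cont_init(void)
{
	int err = class_register(&my_cont_class);

	if (err)
		return err;
	return attribute_container_register(&my_cont);
}

/* Called from the (hypothetical) bus code whenever a device appears. */
void my_bus_device_added(struct device *dev)
{
	attribute_container_add_device(dev, NULL);
}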
+
+/* FIXME: can't break out of this unless klist_iter_exit is also
+ * called before doing the break
+ */
+#define klist_for_each_entry(pos, head, member, iter) \
+ for (klist_iter_init(head, iter); (pos = ({ \
+ struct klist_node *n = klist_next(iter); \
+ n ? container_of(n, typeof(*pos), member) : \
+ ({ klist_iter_exit(iter) ; NULL; }); \
+ })) != NULL;)
+
+
+/**
+ * attribute_container_remove_device - make device eligible for removal.
+ *
+ * @dev: The generic device
+ * @fn: A function to call to remove the device
+ *
+ * This routine triggers device removal. If fn is NULL, then it is
+ * simply done via device_unregister (note that if something
+ * still has a reference to the classdev, then the memory occupied
+ * will not be freed until the classdev is released). If you want a
+ * two phase release: remove from visibility and then delete the
+ * device, then you should use this routine with a fn that calls
+ * device_del() and then use attribute_container_device_trigger()
+ * to do the final put on the classdev.
+ */
+void
+attribute_container_remove_device(struct device *dev,
+ void (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *))
+{
+ struct attribute_container *cont;
+
+ mutex_lock(&attribute_container_mutex);
+ list_for_each_entry(cont, &attribute_container_list, node) {
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ if (attribute_container_no_classdevs(cont))
+ continue;
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (dev != ic->classdev.parent)
+ continue;
+ klist_del(&ic->node);
+ if (fn)
+ fn(cont, dev, &ic->classdev);
+ else {
+ attribute_container_remove_attrs(&ic->classdev);
+ device_unregister(&ic->classdev);
+ }
+ }
+ }
+ mutex_unlock(&attribute_container_mutex);
+}
+
+static int
+do_attribute_container_device_trigger_safe(struct device *dev,
+ struct attribute_container *cont,
+ int (*fn)(struct attribute_container *,
+ struct device *, struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *, struct device *))
+{
+ int ret;
+ struct internal_container *ic, *failed;
+ struct klist_iter iter;
+
+ if (attribute_container_no_classdevs(cont))
+ return fn(cont, dev, NULL);
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (dev == ic->classdev.parent) {
+ ret = fn(cont, dev, &ic->classdev);
+ if (ret) {
+ failed = ic;
+ klist_iter_exit(&iter);
+ goto fail;
+ }
+ }
+ }
+ return 0;
+
+fail:
+ if (!undo)
+ return ret;
+
+ /* Attempt to undo the work partially done. */
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (ic == failed) {
+ klist_iter_exit(&iter);
+ break;
+ }
+ if (dev == ic->classdev.parent)
+ undo(cont, dev, &ic->classdev);
+ }
+ return ret;
+}
+
+/**
+ * attribute_container_device_trigger_safe - execute a trigger for each
+ * matching classdev or fail all of them.
+ *
+ * @dev: The generic device to run the trigger for
+ * @fn: the function to execute for each classdev.
+ * @undo: A function to undo the work previously done in case of error
+ *
+ * This function is a safe version of
+ * attribute_container_device_trigger. It stops on the first error and
+ * undoes the partial work that has been done on the previous classdevs.
+ * It is guaranteed that either they all succeeded, or none of them
+ * succeeded.
+ */
+int
+attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *))
+{
+ struct attribute_container *cont, *failed = NULL;
+ int ret = 0;
+
+ mutex_lock(&attribute_container_mutex);
+
+ list_for_each_entry(cont, &attribute_container_list, node) {
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ ret = do_attribute_container_device_trigger_safe(dev, cont,
+ fn, undo);
+ if (ret) {
+ failed = cont;
+ break;
+ }
+ }
+
+ if (ret && !WARN_ON(!undo)) {
+ list_for_each_entry(cont, &attribute_container_list, node) {
+
+ if (failed == cont)
+ break;
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ do_attribute_container_device_trigger_safe(dev, cont,
+ undo, NULL);
+ }
+ }
+
+ mutex_unlock(&attribute_container_mutex);
+ return ret;
+
+}
+
+/**
+ * attribute_container_device_trigger - execute a trigger for each matching classdev
+ *
+ * @dev: The generic device to run the trigger for
+ * @fn: the function to execute for each classdev.
+ *
+ * This function is for executing a trigger when you need to know both
+ * the container and the classdev. If you only care about the
+ * container, then use attribute_container_trigger() instead.
+ */
+void
+attribute_container_device_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *))
+{
+ struct attribute_container *cont;
+
+ mutex_lock(&attribute_container_mutex);
+ list_for_each_entry(cont, &attribute_container_list, node) {
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ if (!cont->match(cont, dev))
+ continue;
+
+ if (attribute_container_no_classdevs(cont)) {
+ fn(cont, dev, NULL);
+ continue;
+ }
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (dev == ic->classdev.parent)
+ fn(cont, dev, &ic->classdev);
+ }
+ }
+ mutex_unlock(&attribute_container_mutex);
+}
+
+/**
+ * attribute_container_trigger - trigger a function for each matching container
+ *
+ * @dev: The generic device to activate the trigger for
+ * @fn: the function to trigger
+ *
+ * This routine triggers a function that only needs to know the
+ * matching containers (not the classdev) associated with a device.
+ * It is more lightweight than attribute_container_device_trigger, so
+ * should be used in preference unless the triggering function
+ * actually needs to know the classdev.
+ */
+void
+attribute_container_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *))
+{
+ struct attribute_container *cont;
+
+ mutex_lock(&attribute_container_mutex);
+ list_for_each_entry(cont, &attribute_container_list, node) {
+ if (cont->match(cont, dev))
+ fn(cont, dev);
+ }
+ mutex_unlock(&attribute_container_mutex);
+}
+
+/**
+ * attribute_container_add_attrs - add attributes
+ *
+ * @classdev: The class device
+ *
+ * This simply creates all the class device sysfs files from the
+ * attributes listed in the container
+ */
+int
+attribute_container_add_attrs(struct device *classdev)
+{
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+ struct device_attribute **attrs = cont->attrs;
+ int i, error;
+
+ BUG_ON(attrs && cont->grp);
+
+ if (!attrs && !cont->grp)
+ return 0;
+
+ if (cont->grp)
+ return sysfs_create_group(&classdev->kobj, cont->grp);
+
+ for (i = 0; attrs[i]; i++) {
+ sysfs_attr_init(&attrs[i]->attr);
+ error = device_create_file(classdev, attrs[i]);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * attribute_container_add_class_device - same function as device_add
+ *
+ * @classdev: the class device to add
+ *
+ * This performs essentially the same function as device_add except for
+ * attribute containers, namely add the classdev to the system and then
+ * create the attribute files
+ */
+int
+attribute_container_add_class_device(struct device *classdev)
+{
+ int error = device_add(classdev);
+
+ if (error)
+ return error;
+ return attribute_container_add_attrs(classdev);
+}
+
+/**
+ * attribute_container_add_class_device_adapter - simple adapter for triggers
+ *
+ * @cont: the container to register.
+ * @dev: the generic device to activate the trigger for
+ * @classdev: the class device to add
+ *
+ * This function is identical to attribute_container_add_class_device except
+ * that it is designed to be called from the triggers
+ */
+int
+attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev)
+{
+ return attribute_container_add_class_device(classdev);
+}
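+
+/*
+ * A short sketch (hypothetical foo_* names) of the split between setting up
+ * the classdevs and adding them later via this adapter. The setup callback
+ * only configures the classdev and deliberately does not add it, so the
+ * later trigger performs the add:
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_configure_classdev(struct attribute_container *cont,
+ *                                       struct device *dev,
+ *                                       struct device *classdev)
+ *     {
+ *             return 0;
+ *     }
+ *
+ *     void foo_setup_device(struct device *dev)
+ *     {
+ *             attribute_container_add_device(dev, foo_configure_classdev);
+ *     }
+ *
+ *     void foo_add_device(struct device *dev)
+ *     {
+ *             attribute_container_device_trigger(dev,
+ *                             attribute_container_add_class_device_adapter);
+ *     }
+ */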
+
+/**
+ * attribute_container_remove_attrs - remove any attribute files
+ *
+ * @classdev: The class device to remove the files from
+ *
+ */
+void
+attribute_container_remove_attrs(struct device *classdev)
+{
+ struct attribute_container *cont =
+ attribute_container_classdev_to_container(classdev);
+ struct device_attribute **attrs = cont->attrs;
+ int i;
+
+ if (!attrs && !cont->grp)
+ return;
+
+ if (cont->grp) {
+ sysfs_remove_group(&classdev->kobj, cont->grp);
+		return;
+ }
+
+ for (i = 0; attrs[i]; i++)
+ device_remove_file(classdev, attrs[i]);
+}
+
+/**
+ * attribute_container_class_device_del - equivalent of class_device_del
+ *
+ * @classdev: the class device
+ *
+ * This function simply removes all the attribute files and then calls
+ * device_del.
+ */
+void
+attribute_container_class_device_del(struct device *classdev)
+{
+ attribute_container_remove_attrs(classdev);
+ device_del(classdev);
+}
+
+/**
+ * attribute_container_find_class_device - find the corresponding class_device
+ *
+ * @cont: the container
+ * @dev: the generic device
+ *
+ * Looks up the device in the container's list of class devices and returns
+ * the corresponding class_device.
+ */
+struct device *
+attribute_container_find_class_device(struct attribute_container *cont,
+ struct device *dev)
+{
+ struct device *cdev = NULL;
+ struct internal_container *ic;
+ struct klist_iter iter;
+
+ klist_for_each_entry(ic, &cont->containers, node, &iter) {
+ if (ic->classdev.parent == dev) {
+ cdev = &ic->classdev;
+ /* FIXME: must exit iterator then break */
+ klist_iter_exit(&iter);
+ break;
+ }
+ }
+
+ return cdev;
+}
+EXPORT_SYMBOL_GPL(attribute_container_find_class_device);
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
new file mode 100644
index 000000000..8c5e65930
--- /dev/null
+++ b/drivers/base/auxiliary.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Intel Corporation
+ *
+ * Please see Documentation/driver-api/auxiliary_bus.rst for more information.
+ */
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+#include <linux/auxiliary_bus.h>
+#include "base.h"
+
+/**
+ * DOC: PURPOSE
+ *
+ * In some subsystems, the functionality of the core device (PCI/ACPI/other) is
+ * too complex for a single device to be managed by a monolithic driver (e.g.
+ * Sound Open Firmware), multiple devices might implement a common intersection
+ * of functionality (e.g. NICs + RDMA), or a driver may want to export an
+ * interface for another subsystem to drive (e.g. SIOV Physical Function export
+ * Virtual Function management). A split of the functionality into child-
+ * devices representing sub-domains of functionality makes it possible to
+ * compartmentalize, layer, and distribute domain-specific concerns via a Linux
+ * device-driver model.
+ *
+ * An example for this kind of requirement is the audio subsystem where a
+ * single IP is handling multiple entities such as HDMI, Soundwire, local
+ * devices such as mics/speakers etc. The split for the core's functionality
+ * can be arbitrary or be defined by the DSP firmware topology and include
+ * hooks for test/debug. This allows for the audio core device to be minimal
+ * and focused on hardware-specific control and communication.
+ *
+ * Each auxiliary_device represents a part of its parent functionality. The
+ * generic behavior can be extended and specialized as needed by encapsulating
+ * an auxiliary_device within other domain-specific structures and the use of
+ * .ops callbacks. Devices on the auxiliary bus do not share any structures and
+ * the use of a communication channel with the parent is domain-specific.
+ *
+ * Note that ops are intended as a way to augment instance behavior within a
+ * class of auxiliary devices, it is not the mechanism for exporting common
+ * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
+ * infrastructure from the parent module to the auxiliary module(s).
+ */
+
+/**
+ * DOC: USAGE
+ *
+ * The auxiliary bus is to be used when a driver and one or more kernel
+ * modules, which share a common header file with the driver, need a mechanism
+ * connect and provide access to a shared object allocated by the
+ * auxiliary_device's registering driver. The registering driver for the
+ * auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
+ * can be from the same subsystem, or from multiple subsystems.
+ *
+ * The emphasis here is on a common generic interface that keeps subsystem
+ * customization out of the bus infrastructure.
+ *
+ * One example is a PCI network device that is RDMA-capable and exports a child
+ * device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
+ * driver allocates and registers an auxiliary_device for each physical
+ * function on the NIC. The RDMA driver registers an auxiliary_driver that
+ * claims each of these auxiliary_devices. This conveys data/ops published by
+ * the parent PCI device/driver to the RDMA auxiliary_driver.
+ *
+ * Another use case is for the PCI device to be split out into multiple sub
+ * functions. For each sub function an auxiliary_device is created. A PCI sub
+ * function driver binds to such devices and creates one or more class devices
+ * of its own. A PCI sub function auxiliary device is likely to be contained in a
+ * struct with additional attributes such as user defined sub function number
+ * and optional attributes such as resources and a link to the parent device.
+ * These attributes could be used by systemd/udev; and hence should be
+ * initialized before a driver binds to an auxiliary_device.
+ *
+ * A key requirement for utilizing the auxiliary bus is that there is no
+ * dependency on a physical bus, device, register accesses or regmap support.
+ * These individual devices split from the core cannot live on the platform bus
+ * as they are not physical devices that are controlled by DT/ACPI. The same
+ * argument applies for not using MFD in this scenario as MFD relies on
+ * individual function devices being physical devices.
+ */
+
+/**
+ * DOC: EXAMPLE
+ *
+ * Auxiliary devices are created and registered by a subsystem-level core
+ * device that needs to break up its functionality into smaller fragments. One
+ * way to extend the scope of an auxiliary_device is to encapsulate it within a
+ * domain-specific structure defined by the parent device. This structure
+ * contains the auxiliary_device and any associated shared data/callbacks
+ * needed to establish the connection with the parent.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * struct auxiliary_device auxdev;
+ * void (*connect)(struct auxiliary_device *auxdev);
+ * void (*disconnect)(struct auxiliary_device *auxdev);
+ * void *data;
+ * };
+ *
+ * The parent device then registers the auxiliary_device by calling
+ * auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
+ * to the auxdev member of the above structure. The parent provides a name for
+ * the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
+ * creates a match_name that is used for matching and binding with a driver.
+ *
+ * Whenever an auxiliary_driver is registered, based on the match_name, the
+ * auxiliary_driver's probe() is invoked for the matching devices. The
+ * auxiliary_driver can also be encapsulated inside custom drivers that make
+ * the core device's functionality extensible by adding additional
+ * domain-specific ops as follows:
+ *
+ * .. code-block:: c
+ *
+ * struct my_ops {
+ * void (*send)(struct auxiliary_device *auxdev);
+ * void (*receive)(struct auxiliary_device *auxdev);
+ * };
+ *
+ *
+ * struct my_driver {
+ * struct auxiliary_driver auxiliary_drv;
+ * const struct my_ops ops;
+ * };
+ *
+ * An example of this type of usage is:
+ *
+ * .. code-block:: c
+ *
+ * const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * { },
+ * };
+ *
+ * const struct my_ops my_custom_ops = {
+ * .send = my_tx,
+ * .receive = my_rx,
+ * };
+ *
+ * const struct my_driver my_drv = {
+ * .auxiliary_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * .shutdown = my_shutdown,
+ * },
+ * .ops = my_custom_ops,
+ * };
+ */
+
+static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
+ const struct auxiliary_device *auxdev)
+{
+ for (; id->name[0]; id++) {
+ const char *p = strrchr(dev_name(&auxdev->dev), '.');
+ int match_size;
+
+ if (!p)
+ continue;
+ match_size = p - dev_name(&auxdev->dev);
+
+ /* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
+ if (strlen(id->name) == match_size &&
+ !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ return id;
+ }
+ return NULL;
+}
+
+static int auxiliary_match(struct device *dev, struct device_driver *drv)
+{
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ struct auxiliary_driver *auxdrv = to_auxiliary_drv(drv);
+
+ return !!auxiliary_match_id(auxdrv->id_table, auxdev);
+}
+
+static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const char *name, *p;
+
+ name = dev_name(dev);
+ p = strrchr(name, '.');
+
+ return add_uevent_var(env, "MODALIAS=%s%.*s", AUXILIARY_MODULE_PREFIX,
+ (int)(p - name), name);
+}
+
+static const struct dev_pm_ops auxiliary_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)
+};
+
+static int auxiliary_bus_probe(struct device *dev)
+{
+ struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ int ret;
+
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret) {
+ dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
+ return ret;
+ }
+
+ ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
+ if (ret)
+ dev_pm_domain_detach(dev, true);
+
+ return ret;
+}
+
+static void auxiliary_bus_remove(struct device *dev)
+{
+ struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
+ struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+
+ if (auxdrv->remove)
+ auxdrv->remove(auxdev);
+ dev_pm_domain_detach(dev, true);
+}
+
+static void auxiliary_bus_shutdown(struct device *dev)
+{
+ struct auxiliary_driver *auxdrv = NULL;
+ struct auxiliary_device *auxdev;
+
+ if (dev->driver) {
+ auxdrv = to_auxiliary_drv(dev->driver);
+ auxdev = to_auxiliary_dev(dev);
+ }
+
+ if (auxdrv && auxdrv->shutdown)
+ auxdrv->shutdown(auxdev);
+}
+
+static struct bus_type auxiliary_bus_type = {
+ .name = "auxiliary",
+ .probe = auxiliary_bus_probe,
+ .remove = auxiliary_bus_remove,
+ .shutdown = auxiliary_bus_shutdown,
+ .match = auxiliary_match,
+ .uevent = auxiliary_uevent,
+ .pm = &auxiliary_dev_pm_ops,
+};
+
+/**
+ * auxiliary_device_init - check auxiliary_device and initialize
+ * @auxdev: auxiliary device struct
+ *
+ * This is the second step in the three-step process to register an
+ * auxiliary_device.
+ *
+ * When this function returns an error code, then the device_initialize will
+ * *not* have been performed, and the caller will be responsible for freeing any
+ * memory allocated for the auxiliary_device in the error path directly.
+ *
+ * It returns 0 on success. On success, the device_initialize has been
+ * performed. After this point any error unwinding will need to include a call
+ * to auxiliary_device_uninit(). In this post-initialize error scenario, a call
+ * to the device's .release callback will be triggered, and all memory clean-up
+ * is expected to be handled there.
+ */
+int auxiliary_device_init(struct auxiliary_device *auxdev)
+{
+ struct device *dev = &auxdev->dev;
+
+ if (!dev->parent) {
+ pr_err("auxiliary_device has a NULL dev->parent\n");
+ return -EINVAL;
+ }
+
+ if (!auxdev->name) {
+ pr_err("auxiliary_device has a NULL name\n");
+ return -EINVAL;
+ }
+
+ dev->bus = &auxiliary_bus_type;
+ device_initialize(&auxdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auxiliary_device_init);
+
+/**
+ * __auxiliary_device_add - add an auxiliary bus device
+ * @auxdev: auxiliary bus device to add to the bus
+ * @modname: name of the parent device's driver module
+ *
+ * This is the third step in the three-step process to register an
+ * auxiliary_device.
+ *
+ * This function must be called after a successful call to
+ * auxiliary_device_init(), which will perform the device_initialize. This
+ * means that if this returns an error code, then a call to
+ * auxiliary_device_uninit() must be performed so that the .release callback
+ * will be triggered to free the memory associated with the auxiliary_device.
+ *
+ * The expectation is that users will call the "auxiliary_device_add" macro so
+ * that the caller's KBUILD_MODNAME is automatically inserted for the modname
+ * parameter. Only if a user requires a custom name would this version be
+ * called directly.
+ */
+int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname)
+{
+ struct device *dev = &auxdev->dev;
+ int ret;
+
+ if (!modname) {
+ dev_err(dev, "auxiliary device modname is NULL\n");
+ return -EINVAL;
+ }
+
+ ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
+ if (ret) {
+ dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_add(dev);
+ if (ret)
+ dev_err(dev, "adding auxiliary device failed!: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__auxiliary_device_add);
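+
+/*
+ * A minimal registration sketch for a parent driver, reusing the struct foo
+ * from the example above; the foo_* function names are hypothetical. After a
+ * successful auxiliary_device_init(), failure cleanup must go through
+ * auxiliary_device_uninit() so that the release callback frees the memory:
+ *
+ * .. code-block:: c
+ *
+ *     static void foo_adev_release(struct device *dev)
+ *     {
+ *             struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
+ *
+ *             kfree(container_of(auxdev, struct foo, auxdev));
+ *     }
+ *
+ *     static int foo_register_child(struct device *parent)
+ *     {
+ *             struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
+ *             int ret;
+ *
+ *             if (!f)
+ *                     return -ENOMEM;
+ *
+ *             f->auxdev.name = "foo_dev";
+ *             f->auxdev.id = 0;
+ *             f->auxdev.dev.parent = parent;
+ *             f->auxdev.dev.release = foo_adev_release;
+ *
+ *             ret = auxiliary_device_init(&f->auxdev);
+ *             if (ret) {
+ *                     kfree(f);
+ *                     return ret;
+ *             }
+ *
+ *             ret = auxiliary_device_add(&f->auxdev);
+ *             if (ret)
+ *                     auxiliary_device_uninit(&f->auxdev);
+ *
+ *             return ret;
+ *     }
+ */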
+
+/**
+ * auxiliary_find_device - auxiliary device iterator for locating a particular device.
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This function returns a reference to a device that is 'found'
+ * for later use, as determined by the @match callback.
+ *
+ * The reference returned should be released with put_device().
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+struct auxiliary_device *auxiliary_find_device(struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct device *dev;
+
+ dev = bus_find_device(&auxiliary_bus_type, start, data, match);
+ if (!dev)
+ return NULL;
+
+ return to_auxiliary_dev(dev);
+}
+EXPORT_SYMBOL_GPL(auxiliary_find_device);
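+
+/*
+ * A usage sketch (hypothetical foo_* names): locate an auxiliary device whose
+ * parent is a known device. The reference returned must later be dropped with
+ * put_device(&auxdev->dev):
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_match_parent(struct device *dev, const void *data)
+ *     {
+ *             return dev->parent == data;
+ *     }
+ *
+ *     static struct auxiliary_device *foo_get_child(struct device *parent)
+ *     {
+ *             return auxiliary_find_device(NULL, parent, foo_match_parent);
+ *     }
+ */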
+
+/**
+ * __auxiliary_driver_register - register a driver for auxiliary bus devices
+ * @auxdrv: auxiliary_driver structure
+ * @owner: owning module/driver
+ * @modname: KBUILD_MODNAME for parent driver
+ *
+ * The expectation is that users will call the "auxiliary_driver_register"
+ * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
+ * modname parameter. Only if a user requires a custom name would this version
+ * be called directly.
+ */
+int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
+ struct module *owner, const char *modname)
+{
+ int ret;
+
+ if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
+ return -EINVAL;
+
+ if (auxdrv->name)
+ auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", modname,
+ auxdrv->name);
+ else
+ auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", modname);
+ if (!auxdrv->driver.name)
+ return -ENOMEM;
+
+ auxdrv->driver.owner = owner;
+ auxdrv->driver.bus = &auxiliary_bus_type;
+ auxdrv->driver.mod_name = modname;
+
+ ret = driver_register(&auxdrv->driver);
+ if (ret)
+ kfree(auxdrv->driver.name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__auxiliary_driver_register);
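+
+/*
+ * A minimal sketch of how a consumer module might register a driver, reusing
+ * my_probe and my_auxiliary_id_table from the example above;
+ * module_auxiliary_driver() expands to the usual register/unregister
+ * boilerplate in module init/exit:
+ *
+ * .. code-block:: c
+ *
+ *     static struct auxiliary_driver my_aux_drv = {
+ *             .name = "myauxiliarydrv",
+ *             .probe = my_probe,
+ *             .id_table = my_auxiliary_id_table,
+ *     };
+ *
+ *     module_auxiliary_driver(my_aux_drv);
+ */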
+
+/**
+ * auxiliary_driver_unregister - unregister a driver
+ * @auxdrv: auxiliary_driver structure
+ */
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
+{
+ driver_unregister(&auxdrv->driver);
+ kfree(auxdrv->driver.name);
+}
+EXPORT_SYMBOL_GPL(auxiliary_driver_unregister);
+
+void __init auxiliary_bus_init(void)
+{
+ WARN_ON(bus_register(&auxiliary_bus_type));
+}
diff --git a/drivers/base/base.h b/drivers/base/base.h
new file mode 100644
index 000000000..b902d1ecc
--- /dev/null
+++ b/drivers/base/base.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2012 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * Core driver model functions and structures that should not be
+ * shared outside of the drivers/base/ directory.
+ *
+ */
+#include <linux/notifier.h>
+
+/**
+ * struct subsys_private - structure to hold the private-to-the-driver-core portions of the bus_type/class structure.
+ *
+ * @subsys - the struct kset that defines this subsystem
+ * @devices_kset - the subsystem's 'devices' directory
+ * @interfaces - list of subsystem interfaces associated
+ * @mutex - protect the devices, and interfaces lists.
+ *
+ * @drivers_kset - the list of drivers associated
+ * @klist_devices - the klist to iterate over the @devices_kset
+ * @klist_drivers - the klist to iterate over the @drivers_kset
+ * @bus_notifier - the bus notifier list for anything that cares about things
+ * on this bus.
+ * @bus - pointer back to the struct bus_type that this structure is associated
+ * with.
+ *
+ * @glue_dirs - "glue" directory to put in-between the parent device to
+ * avoid namespace conflicts
+ * @class - pointer back to the struct class that this structure is associated
+ * with.
+ *
+ * This structure is the one that is the actual kobject allowing struct
+ * bus_type/class to be statically allocated safely. Nothing outside of the
+ * driver core should ever touch these fields.
+ */
+struct subsys_private {
+ struct kset subsys;
+ struct kset *devices_kset;
+ struct list_head interfaces;
+ struct mutex mutex;
+
+ struct kset *drivers_kset;
+ struct klist klist_devices;
+ struct klist klist_drivers;
+ struct blocking_notifier_head bus_notifier;
+ unsigned int drivers_autoprobe:1;
+ struct bus_type *bus;
+
+ struct kset glue_dirs;
+ struct class *class;
+};
+#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
+
+struct driver_private {
+ struct kobject kobj;
+ struct klist klist_devices;
+ struct klist_node knode_bus;
+ struct module_kobject *mkobj;
+ struct device_driver *driver;
+};
+#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+
+/**
+ * struct device_private - structure to hold the private-to-the-driver-core portions of the device structure.
+ *
+ * @klist_children - klist containing all children of this device
+ * @knode_parent - node in sibling list
+ * @knode_driver - node in driver list
+ * @knode_bus - node in bus list
+ * @knode_class - node in class list
+ * @deferred_probe - entry in deferred_probe_list which is used to retry the
+ * binding of drivers which were unable to get all the resources needed by
+ * the device; typically because it depends on another driver getting
+ * probed first.
+ * @async_driver - pointer to device driver awaiting probe via async_probe
+ * @device - pointer back to the struct device that this structure is
+ * associated with.
+ * @dead - This device is currently either in the process of or has been
+ * removed from the system. Any asynchronous events scheduled for this
+ * device should exit without taking any action.
+ *
+ * Nothing outside of the driver core should ever touch these fields.
+ */
+struct device_private {
+ struct klist klist_children;
+ struct klist_node knode_parent;
+ struct klist_node knode_driver;
+ struct klist_node knode_bus;
+ struct klist_node knode_class;
+ struct list_head deferred_probe;
+ struct device_driver *async_driver;
+ char *deferred_probe_reason;
+ struct device *device;
+ u8 dead:1;
+};
+#define to_device_private_parent(obj) \
+ container_of(obj, struct device_private, knode_parent)
+#define to_device_private_driver(obj) \
+ container_of(obj, struct device_private, knode_driver)
+#define to_device_private_bus(obj) \
+ container_of(obj, struct device_private, knode_bus)
+#define to_device_private_class(obj) \
+ container_of(obj, struct device_private, knode_class)
+
+/* initialisation functions */
+extern int devices_init(void);
+extern int buses_init(void);
+extern int classes_init(void);
+extern int firmware_init(void);
+#ifdef CONFIG_SYS_HYPERVISOR
+extern int hypervisor_init(void);
+#else
+static inline int hypervisor_init(void) { return 0; }
+#endif
+extern int platform_bus_init(void);
+extern void cpu_dev_init(void);
+extern void container_dev_init(void);
+#ifdef CONFIG_AUXILIARY_BUS
+extern void auxiliary_bus_init(void);
+#else
+static inline void auxiliary_bus_init(void) { }
+#endif
+
+struct kobject *virtual_device_parent(struct device *dev);
+
+extern int bus_add_device(struct device *dev);
+extern void bus_probe_device(struct device *dev);
+extern void bus_remove_device(struct device *dev);
+
+extern int bus_add_driver(struct device_driver *drv);
+extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent);
+
+extern void driver_detach(struct device_driver *drv);
+extern void driver_deferred_probe_del(struct device *dev);
+extern void device_set_deferred_probe_reason(const struct device *dev,
+ struct va_format *vaf);
+static inline int driver_match_device(struct device_driver *drv,
+ struct device *dev)
+{
+ return drv->bus->match ? drv->bus->match(dev, drv) : 1;
+}
+extern bool driver_allows_async_probing(struct device_driver *drv);
+
+extern int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+extern void driver_remove_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+void device_driver_detach(struct device *dev);
+
+extern int devres_release_all(struct device *dev);
+extern void device_block_probing(void);
+extern void device_unblock_probing(void);
+extern void deferred_probe_extend_timeout(void);
+extern void driver_deferred_probe_trigger(void);
+
+/* /sys/devices directory */
+extern struct kset *devices_kset;
+extern void devices_kset_move_last(struct device *dev);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
+extern void module_add_driver(struct module *mod, struct device_driver *drv);
+extern void module_remove_driver(struct device_driver *drv);
+#else
+static inline void module_add_driver(struct module *mod,
+ struct device_driver *drv) { }
+static inline void module_remove_driver(struct device_driver *drv) { }
+#endif
+
+#ifdef CONFIG_DEVTMPFS
+extern int devtmpfs_init(void);
+#else
+static inline int devtmpfs_init(void) { return 0; }
+#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_read_lock_held(void);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_force_bind(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
+extern void fw_devlink_drivers_done(void);
+
+/* device pm support */
+void device_pm_move_to_tail(struct device *dev);
+
+#ifdef CONFIG_DEVTMPFS
+int devtmpfs_create_node(struct device *dev);
+int devtmpfs_delete_node(struct device *dev);
+#else
+static inline int devtmpfs_create_node(struct device *dev) { return 0; }
+static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
+#endif
+
+void software_node_notify(struct device *dev);
+void software_node_notify_remove(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
new file mode 100644
index 000000000..7ca47e5b3
--- /dev/null
+++ b/drivers/base/bus.c
@@ -0,0 +1,1180 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus.c - bus driver management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007 Novell Inc.
+ */
+
+#include <linux/async.h>
+#include <linux/device/bus.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+#include "base.h"
+#include "power/power.h"
+
+/* /sys/devices/system */
+static struct kset *system_kset;
+
+#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
+
+/*
+ * sysfs bindings for drivers
+ */
+
+#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
+
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
+
+static int __must_check bus_rescan_devices_helper(struct device *dev,
+ void *data);
+
+static struct bus_type *bus_get(struct bus_type *bus)
+{
+ if (bus) {
+ kset_get(&bus->p->subsys);
+ return bus;
+ }
+ return NULL;
+}
+
+static void bus_put(struct bus_type *bus)
+{
+ if (bus)
+ kset_put(&bus->p->subsys);
+}
+
+static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct driver_attribute *drv_attr = to_drv_attr(attr);
+ struct driver_private *drv_priv = to_driver(kobj);
+ ssize_t ret = -EIO;
+
+ if (drv_attr->show)
+ ret = drv_attr->show(drv_priv->driver, buf);
+ return ret;
+}
+
+static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct driver_attribute *drv_attr = to_drv_attr(attr);
+ struct driver_private *drv_priv = to_driver(kobj);
+ ssize_t ret = -EIO;
+
+ if (drv_attr->store)
+ ret = drv_attr->store(drv_priv->driver, buf, count);
+ return ret;
+}
+
+static const struct sysfs_ops driver_sysfs_ops = {
+ .show = drv_attr_show,
+ .store = drv_attr_store,
+};
+
+static void driver_release(struct kobject *kobj)
+{
+ struct driver_private *drv_priv = to_driver(kobj);
+
+ pr_debug("driver: '%s': %s\n", kobject_name(kobj), __func__);
+ kfree(drv_priv);
+}
+
+static struct kobj_type driver_ktype = {
+ .sysfs_ops = &driver_sysfs_ops,
+ .release = driver_release,
+};
+
+/*
+ * sysfs bindings for buses
+ */
+static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct bus_attribute *bus_attr = to_bus_attr(attr);
+ struct subsys_private *subsys_priv = to_subsys_private(kobj);
+ ssize_t ret = 0;
+
+ if (bus_attr->show)
+ ret = bus_attr->show(subsys_priv->bus, buf);
+ return ret;
+}
+
+static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bus_attribute *bus_attr = to_bus_attr(attr);
+ struct subsys_private *subsys_priv = to_subsys_private(kobj);
+ ssize_t ret = 0;
+
+ if (bus_attr->store)
+ ret = bus_attr->store(subsys_priv->bus, buf, count);
+ return ret;
+}
+
+static const struct sysfs_ops bus_sysfs_ops = {
+ .show = bus_attr_show,
+ .store = bus_attr_store,
+};
+
+int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
+{
+ int error;
+ if (bus_get(bus)) {
+ error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
+ bus_put(bus);
+ } else
+ error = -EINVAL;
+ return error;
+}
+EXPORT_SYMBOL_GPL(bus_create_file);
+
+void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
+{
+ if (bus_get(bus)) {
+ sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
+ bus_put(bus);
+ }
+}
+EXPORT_SYMBOL_GPL(bus_remove_file);
+
+static void bus_release(struct kobject *kobj)
+{
+ struct subsys_private *priv = to_subsys_private(kobj);
+ struct bus_type *bus = priv->bus;
+
+ kfree(priv);
+ bus->p = NULL;
+}
+
+static struct kobj_type bus_ktype = {
+ .sysfs_ops = &bus_sysfs_ops,
+ .release = bus_release,
+};
+
+static int bus_uevent_filter(struct kobject *kobj)
+{
+ const struct kobj_type *ktype = get_ktype(kobj);
+
+ if (ktype == &bus_ktype)
+ return 1;
+ return 0;
+}
+
+static const struct kset_uevent_ops bus_uevent_ops = {
+ .filter = bus_uevent_filter,
+};
+
+static struct kset *bus_kset;
+
+/* Manually detach a device from its associated driver. */
+static ssize_t unbind_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ struct bus_type *bus = bus_get(drv->bus);
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && dev->driver == drv) {
+ device_driver_detach(dev);
+ err = count;
+ }
+ put_device(dev);
+ bus_put(bus);
+ return err;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, 0200, NULL, unbind_store);
+
+/*
+ * Manually attach a device to a driver.
+ * Note: the driver must want to bind to the device,
+ * it is not possible to override the driver's id table.
+ */
+static ssize_t bind_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ struct bus_type *bus = bus_get(drv->bus);
+ struct device *dev;
+ int err = -ENODEV;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (dev && driver_match_device(drv, dev)) {
+ err = device_driver_attach(drv, dev);
+ if (!err) {
+ /* success */
+ err = count;
+ }
+ }
+ put_device(dev);
+ bus_put(bus);
+ return err;
+}
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
+
+static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe);
+}
+
+static ssize_t drivers_autoprobe_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ if (buf[0] == '0')
+ bus->p->drivers_autoprobe = 0;
+ else
+ bus->p->drivers_autoprobe = 1;
+ return count;
+}
+
+static ssize_t drivers_probe_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct device *dev;
+ int err = -EINVAL;
+
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (!dev)
+ return -ENODEV;
+ if (bus_rescan_devices_helper(dev, NULL) == 0)
+ err = count;
+ put_device(dev);
+ return err;
+}
+
+static struct device *next_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_next(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
+/**
+ * bus_for_each_dev - device iterator.
+ * @bus: bus type.
+ * @start: device to start iterating from.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over @bus's list of devices, and call @fn for each,
+ * passing it @data. If @start is not NULL, we use that device to
+ * begin iterating from.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ *
+ * NOTE: The device that returns a non-zero value is not retained
+ * in any way, nor is its refcount incremented. If the caller needs
+ * to retain this data, it should do so, and increment the reference
+ * count in the supplied callback.
+ */
+int bus_for_each_dev(struct bus_type *bus, struct device *start,
+ void *data, int (*fn)(struct device *, void *))
+{
+ struct klist_iter i;
+ struct device *dev;
+ int error = 0;
+
+ if (!bus || !bus->p)
+ return -EINVAL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while (!error && (dev = next_device(&i)))
+ error = fn(dev, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(bus_for_each_dev);
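+
+/*
+ * A short usage sketch (hypothetical foo_* names): count the devices
+ * currently registered on a bus. Returning 0 from the callback keeps the
+ * iteration going:
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_count_one(struct device *dev, void *data)
+ *     {
+ *             (*(unsigned int *)data)++;
+ *             return 0;
+ *     }
+ *
+ *     unsigned int foo_count_devices(struct bus_type *bus)
+ *     {
+ *             unsigned int count = 0;
+ *
+ *             bus_for_each_dev(bus, NULL, &count, foo_count_one);
+ *             return count;
+ *     }
+ */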
+
+/**
+ * bus_find_device - device iterator for locating a particular device.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the bus_for_each_dev() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+struct device *bus_find_device(struct bus_type *bus,
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!bus || !bus->p)
+ return NULL;
+
+ klist_iter_init_node(&bus->p->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = next_device(&i)))
+ if (match(dev, data) && get_device(dev))
+ break;
+ klist_iter_exit(&i);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device);
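+
+/*
+ * A usage sketch (hypothetical foo_* names): find a device by matching its
+ * driver_data pointer. The reference taken by bus_find_device() must be
+ * dropped with put_device() when the caller is done with it:
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_match_drvdata(struct device *dev, const void *data)
+ *     {
+ *             return dev_get_drvdata(dev) == data;
+ *     }
+ *
+ *     struct device *foo_find(struct bus_type *bus, const void *drvdata)
+ *     {
+ *             return bus_find_device(bus, NULL, drvdata, foo_match_drvdata);
+ *     }
+ */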
+
+/**
+ * subsys_find_device_by_id - find a device with a specific enumeration number
+ * @subsys: subsystem
+ * @id: index 'id' in struct device
+ * @hint: device to check first
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to a full list search. Either way a reference for
+ * the returned object is taken.
+ */
+struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
+ struct device *hint)
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!subsys)
+ return NULL;
+
+ if (hint) {
+ klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
+ dev = next_device(&i);
+ if (dev && dev->id == id && get_device(dev)) {
+ klist_iter_exit(&i);
+ return dev;
+ }
+ klist_iter_exit(&i);
+ }
+
+ klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
+ while ((dev = next_device(&i))) {
+ if (dev->id == id && get_device(dev)) {
+ klist_iter_exit(&i);
+ return dev;
+ }
+ }
+ klist_iter_exit(&i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
+
+static struct device_driver *next_driver(struct klist_iter *i)
+{
+ struct klist_node *n = klist_next(i);
+ struct driver_private *drv_priv;
+
+ if (n) {
+ drv_priv = container_of(n, struct driver_private, knode_bus);
+ return drv_priv->driver;
+ }
+ return NULL;
+}
+
+/**
+ * bus_for_each_drv - driver iterator
+ * @bus: bus we're dealing with.
+ * @start: driver to start iterating on.
+ * @data: data to pass to the callback.
+ * @fn: function to call for each driver.
+ *
+ * This is nearly identical to the device iterator above.
+ * We iterate over each driver that belongs to @bus, and call
+ * @fn for each. If @fn returns anything but 0, we break out
+ * and return it. If @start is not NULL, we use it as the head
+ * of the list.
+ *
+ * NOTE: we don't return the driver that returns a non-zero
+ * value, nor do we leave the reference count incremented for that
+ * driver. If the caller needs to know that info, it must set it
+ * in the callback. It must also be sure to increment the refcount
+ * so it doesn't disappear before returning to the caller.
+ */
+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+ void *data, int (*fn)(struct device_driver *, void *))
+{
+ struct klist_iter i;
+ struct device_driver *drv;
+ int error = 0;
+
+ if (!bus)
+ return -EINVAL;
+
+ klist_iter_init_node(&bus->p->klist_drivers, &i,
+ start ? &start->p->knode_bus : NULL);
+ while ((drv = next_driver(&i)) && !error)
+ error = fn(drv, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(bus_for_each_drv);
+
+/**
+ * bus_add_device - add device to bus
+ * @dev: device being added
+ *
+ * - Add device's bus attributes.
+ * - Create links to device's bus.
+ * - Add the device to its bus's list of devices.
+ */
+int bus_add_device(struct device *dev)
+{
+ struct bus_type *bus = bus_get(dev->bus);
+ int error = 0;
+
+ if (bus) {
+ pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+ goto out_put;
+ error = sysfs_create_link(&bus->p->devices_kset->kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_groups;
+ error = sysfs_create_link(&dev->kobj,
+ &dev->bus->p->subsys.kobj, "subsystem");
+ if (error)
+ goto out_subsys;
+ klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
+ }
+ return 0;
+
+out_subsys:
+ sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
+out_groups:
+ device_remove_groups(dev, bus->dev_groups);
+out_put:
+ bus_put(dev->bus);
+ return error;
+}
+
+/**
+ * bus_probe_device - probe drivers for a new device
+ * @dev: device to probe
+ *
+ * - Automatically probe for a driver if the bus allows it.
+ */
+void bus_probe_device(struct device *dev)
+{
+ struct bus_type *bus = dev->bus;
+ struct subsys_interface *sif;
+
+ if (!bus)
+ return;
+
+ if (bus->p->drivers_autoprobe)
+ device_initial_probe(dev);
+
+ mutex_lock(&bus->p->mutex);
+ list_for_each_entry(sif, &bus->p->interfaces, node)
+ if (sif->add_dev)
+ sif->add_dev(dev, sif);
+ mutex_unlock(&bus->p->mutex);
+}
+
+/**
+ * bus_remove_device - remove device from bus
+ * @dev: device to be removed
+ *
+ * - Remove device from all interfaces.
+ * - Remove symlink from bus' directory.
+ * - Delete device from bus's list.
+ * - Detach from its driver.
+ * - Drop reference taken in bus_add_device().
+ */
+void bus_remove_device(struct device *dev)
+{
+ struct bus_type *bus = dev->bus;
+ struct subsys_interface *sif;
+
+ if (!bus)
+ return;
+
+ mutex_lock(&bus->p->mutex);
+ list_for_each_entry(sif, &bus->p->interfaces, node)
+ if (sif->remove_dev)
+ sif->remove_dev(dev, sif);
+ mutex_unlock(&bus->p->mutex);
+
+ sysfs_remove_link(&dev->kobj, "subsystem");
+ sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
+ dev_name(dev));
+ device_remove_groups(dev, dev->bus->dev_groups);
+ if (klist_node_attached(&dev->p->knode_bus))
+ klist_del(&dev->p->knode_bus);
+
+ pr_debug("bus: '%s': remove device %s\n",
+ dev->bus->name, dev_name(dev));
+ device_release_driver(dev);
+ bus_put(dev->bus);
+}
+
+static int __must_check add_bind_files(struct device_driver *drv)
+{
+ int ret;
+
+ ret = driver_create_file(drv, &driver_attr_unbind);
+ if (ret == 0) {
+ ret = driver_create_file(drv, &driver_attr_bind);
+ if (ret)
+ driver_remove_file(drv, &driver_attr_unbind);
+ }
+ return ret;
+}
+
+static void remove_bind_files(struct device_driver *drv)
+{
+ driver_remove_file(drv, &driver_attr_bind);
+ driver_remove_file(drv, &driver_attr_unbind);
+}
+
+static BUS_ATTR_WO(drivers_probe);
+static BUS_ATTR_RW(drivers_autoprobe);
+
+static int add_probe_files(struct bus_type *bus)
+{
+ int retval;
+
+ retval = bus_create_file(bus, &bus_attr_drivers_probe);
+ if (retval)
+ goto out;
+
+ retval = bus_create_file(bus, &bus_attr_drivers_autoprobe);
+ if (retval)
+ bus_remove_file(bus, &bus_attr_drivers_probe);
+out:
+ return retval;
+}
+
+static void remove_probe_files(struct bus_type *bus)
+{
+ bus_remove_file(bus, &bus_attr_drivers_autoprobe);
+ bus_remove_file(bus, &bus_attr_drivers_probe);
+}
+
+static ssize_t uevent_store(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int rc;
+
+ rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
+ return rc ? rc : count;
+}
+static DRIVER_ATTR_WO(uevent);
+
+/**
+ * bus_add_driver - Add a driver to the bus.
+ * @drv: driver.
+ */
+int bus_add_driver(struct device_driver *drv)
+{
+ struct bus_type *bus;
+ struct driver_private *priv;
+ int error = 0;
+
+ bus = bus_get(drv->bus);
+ if (!bus)
+ return -EINVAL;
+
+ pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ error = -ENOMEM;
+ goto out_put_bus;
+ }
+ klist_init(&priv->klist_devices, NULL, NULL);
+ priv->driver = drv;
+ drv->p = priv;
+ priv->kobj.kset = bus->p->drivers_kset;
+ error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
+ "%s", drv->name);
+ if (error)
+ goto out_unregister;
+
+ klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
+ if (drv->bus->p->drivers_autoprobe) {
+ error = driver_attach(drv);
+ if (error)
+ goto out_del_list;
+ }
+ module_add_driver(drv->owner, drv);
+
+ error = driver_create_file(drv, &driver_attr_uevent);
+ if (error) {
+ printk(KERN_ERR "%s: uevent attr (%s) failed\n",
+ __func__, drv->name);
+ }
+ error = driver_add_groups(drv, bus->drv_groups);
+ if (error) {
+ /* How the hell do we get out of this pickle? Give up */
+ printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
+ __func__, drv->name);
+ }
+
+ if (!drv->suppress_bind_attrs) {
+ error = add_bind_files(drv);
+ if (error) {
+ /* Ditto */
+ printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
+ __func__, drv->name);
+ }
+ }
+
+ return 0;
+
+out_del_list:
+ klist_del(&priv->knode_bus);
+out_unregister:
+ kobject_put(&priv->kobj);
+ /* drv->p is freed in driver_release() */
+ drv->p = NULL;
+out_put_bus:
+ bus_put(bus);
+ return error;
+}
+
+/**
+ * bus_remove_driver - delete driver from bus's knowledge.
+ * @drv: driver.
+ *
+ * Detach the driver from the devices it controls, and remove
+ * it from its bus's list of drivers. Finally, we drop the reference
+ * to the bus we took in bus_add_driver().
+ */
+void bus_remove_driver(struct device_driver *drv)
+{
+ if (!drv->bus)
+ return;
+
+ if (!drv->suppress_bind_attrs)
+ remove_bind_files(drv);
+ driver_remove_groups(drv, drv->bus->drv_groups);
+ driver_remove_file(drv, &driver_attr_uevent);
+ klist_remove(&drv->p->knode_bus);
+ pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
+ driver_detach(drv);
+ module_remove_driver(drv);
+ kobject_put(&drv->p->kobj);
+ bus_put(drv->bus);
+}
+
+/* Helper for bus_rescan_devices's iter */
+static int __must_check bus_rescan_devices_helper(struct device *dev,
+ void *data)
+{
+ int ret = 0;
+
+ if (!dev->driver) {
+ if (dev->parent && dev->bus->need_parent_lock)
+ device_lock(dev->parent);
+ ret = device_attach(dev);
+ if (dev->parent && dev->bus->need_parent_lock)
+ device_unlock(dev->parent);
+ }
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * bus_rescan_devices - rescan devices on the bus for possible drivers
+ * @bus: the bus to scan.
+ *
+ * This function will look for devices on the bus with no driver
+ * attached and rescan them against the existing drivers, calling
+ * device_attach() for each unbound device to see if it matches any.
+ */
+int bus_rescan_devices(struct bus_type *bus)
+{
+ return bus_for_each_dev(bus, NULL, NULL, bus_rescan_devices_helper);
+}
+EXPORT_SYMBOL_GPL(bus_rescan_devices);
+
+/**
+ * device_reprobe - remove driver for a device and probe for a new driver
+ * @dev: the device to reprobe
+ *
+ * This function detaches the attached driver (if any) for the given
+ * device and restarts the driver probing process. It is intended
+ * to be used if the probing criteria changed during a device's lifetime
+ * and the driver attachment should change accordingly.
+ */
+int device_reprobe(struct device *dev)
+{
+ if (dev->driver)
+ device_driver_detach(dev);
+ return bus_rescan_devices_helper(dev, NULL);
+}
+EXPORT_SYMBOL_GPL(device_reprobe);
+
+static int bus_add_groups(struct bus_type *bus,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&bus->p->subsys.kobj, groups);
+}
+
+static void bus_remove_groups(struct bus_type *bus,
+ const struct attribute_group **groups)
+{
+ sysfs_remove_groups(&bus->p->subsys.kobj, groups);
+}
+
+static void klist_devices_get(struct klist_node *n)
+{
+ struct device_private *dev_prv = to_device_private_bus(n);
+ struct device *dev = dev_prv->device;
+
+ get_device(dev);
+}
+
+static void klist_devices_put(struct klist_node *n)
+{
+ struct device_private *dev_prv = to_device_private_bus(n);
+ struct device *dev = dev_prv->device;
+
+ put_device(dev);
+}
+
+static ssize_t bus_uevent_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
+ return rc ? rc : count;
+}
+/*
+ * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
+ * here, but cannot use it because earlier in the file we have
+ * DRIVER_ATTR_WO(uevent), which would cause a clash with the store
+ * function name.
+ */
+static struct bus_attribute bus_attr_uevent = __ATTR(uevent, 0200, NULL,
+ bus_uevent_store);
+
+/**
+ * bus_register - register a driver-core subsystem
+ * @bus: bus to register
+ *
+ * Once we have that, we register the bus with the kobject
+ * infrastructure, then register the child subsystems it has:
+ * the devices and drivers that belong to the subsystem.
+ */
+int bus_register(struct bus_type *bus)
+{
+ int retval;
+ struct subsys_private *priv;
+ struct lock_class_key *key = &bus->lock_key;
+
+ priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = bus;
+ bus->p = priv;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
+
+ retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
+ if (retval)
+ goto out;
+
+ priv->subsys.kobj.kset = bus_kset;
+ priv->subsys.kobj.ktype = &bus_ktype;
+ priv->drivers_autoprobe = 1;
+
+ retval = kset_register(&priv->subsys);
+ if (retval)
+ goto out;
+
+ retval = bus_create_file(bus, &bus_attr_uevent);
+ if (retval)
+ goto bus_uevent_fail;
+
+ priv->devices_kset = kset_create_and_add("devices", NULL,
+ &priv->subsys.kobj);
+ if (!priv->devices_kset) {
+ retval = -ENOMEM;
+ goto bus_devices_fail;
+ }
+
+ priv->drivers_kset = kset_create_and_add("drivers", NULL,
+ &priv->subsys.kobj);
+ if (!priv->drivers_kset) {
+ retval = -ENOMEM;
+ goto bus_drivers_fail;
+ }
+
+ INIT_LIST_HEAD(&priv->interfaces);
+ __mutex_init(&priv->mutex, "subsys mutex", key);
+ klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
+ klist_init(&priv->klist_drivers, NULL, NULL);
+
+ retval = add_probe_files(bus);
+ if (retval)
+ goto bus_probe_files_fail;
+
+ retval = bus_add_groups(bus, bus->bus_groups);
+ if (retval)
+ goto bus_groups_fail;
+
+ pr_debug("bus: '%s': registered\n", bus->name);
+ return 0;
+
+bus_groups_fail:
+ remove_probe_files(bus);
+bus_probe_files_fail:
+ kset_unregister(bus->p->drivers_kset);
+bus_drivers_fail:
+ kset_unregister(bus->p->devices_kset);
+bus_devices_fail:
+ bus_remove_file(bus, &bus_attr_uevent);
+bus_uevent_fail:
+ kset_unregister(&bus->p->subsys);
+out:
+ kfree(bus->p);
+ bus->p = NULL;
+ return retval;
+}
+EXPORT_SYMBOL_GPL(bus_register);
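+
+/*
+ * A minimal sketch of a bus definition and registration (hypothetical
+ * foo_bus_* names); the match callback decides which drivers may bind to
+ * devices on this bus:
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_bus_match(struct device *dev, struct device_driver *drv)
+ *     {
+ *             return strcmp(dev_name(dev), drv->name) == 0;
+ *     }
+ *
+ *     static struct bus_type foo_bus_type = {
+ *             .name   = "foo",
+ *             .match  = foo_bus_match,
+ *     };
+ *
+ *     static int __init foo_bus_init(void)
+ *     {
+ *             return bus_register(&foo_bus_type);
+ *     }
+ */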
+
+/**
+ * bus_unregister - remove a bus from the system
+ * @bus: bus.
+ *
+ * Unregister the child subsystems and the bus itself.
+ * Finally, we call bus_put() to release the refcount
+ */
+void bus_unregister(struct bus_type *bus)
+{
+ pr_debug("bus: '%s': unregistering\n", bus->name);
+ if (bus->dev_root)
+ device_unregister(bus->dev_root);
+ bus_remove_groups(bus, bus->bus_groups);
+ remove_probe_files(bus);
+ kset_unregister(bus->p->drivers_kset);
+ kset_unregister(bus->p->devices_kset);
+ bus_remove_file(bus, &bus_attr_uevent);
+ kset_unregister(&bus->p->subsys);
+}
+EXPORT_SYMBOL_GPL(bus_unregister);
+
+int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bus_register_notifier);
+
+int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bus_unregister_notifier);
+
+struct kset *bus_get_kset(struct bus_type *bus)
+{
+ return &bus->p->subsys;
+}
+EXPORT_SYMBOL_GPL(bus_get_kset);
+
+struct klist *bus_get_device_klist(struct bus_type *bus)
+{
+ return &bus->p->klist_devices;
+}
+EXPORT_SYMBOL_GPL(bus_get_device_klist);
+
+/*
+ * Yes, this forcibly breaks the klist abstraction temporarily. It
+ * just wants to sort the klist, not change reference counts and
+ * take/drop locks rapidly in the process. It does all this while
+ * holding the lock for the list, so objects can't otherwise be
+ * added/removed while we're swizzling.
+ */
+static void device_insertion_sort_klist(struct device *a, struct list_head *list,
+ int (*compare)(const struct device *a,
+ const struct device *b))
+{
+ struct klist_node *n;
+ struct device_private *dev_prv;
+ struct device *b;
+
+ list_for_each_entry(n, list, n_node) {
+ dev_prv = to_device_private_bus(n);
+ b = dev_prv->device;
+ if (compare(a, b) <= 0) {
+ list_move_tail(&a->p->knode_bus.n_node,
+ &b->p->knode_bus.n_node);
+ return;
+ }
+ }
+ list_move_tail(&a->p->knode_bus.n_node, list);
+}
+
+void bus_sort_breadthfirst(struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b))
+{
+ LIST_HEAD(sorted_devices);
+ struct klist_node *n, *tmp;
+ struct device_private *dev_prv;
+ struct device *dev;
+ struct klist *device_klist;
+
+ device_klist = bus_get_device_klist(bus);
+
+ spin_lock(&device_klist->k_lock);
+ list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ device_insertion_sort_klist(dev, &sorted_devices, compare);
+ }
+ list_splice(&sorted_devices, &device_klist->k_list);
+ spin_unlock(&device_klist->k_lock);
+}
+EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
+
+/**
+ * subsys_dev_iter_init - initialize subsys device iterator
+ * @iter: subsys iterator to initialize
+ * @subsys: the subsys we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize subsys iterator @iter such that it iterates over devices
+ * of @subsys. If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
+ struct device *start, const struct device_type *type)
+{
+ struct klist_node *start_knode = NULL;
+
+ if (start)
+ start_knode = &start->p->knode_bus;
+ klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+ iter->type = type;
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
+
+/**
+ * subsys_dev_iter_next - iterate to the next device
+ * @iter: subsys iterator to proceed
+ *
+ * Proceed @iter to the next device and return it. Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator proceeds to the next device or is exited. The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into subsys code.
+ */
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+{
+ struct klist_node *knode;
+ struct device *dev;
+
+ for (;;) {
+ knode = klist_next(&iter->ki);
+ if (!knode)
+ return NULL;
+ dev = to_device_private_bus(knode)->device;
+ if (!iter->type || iter->type == dev->type)
+ return dev;
+ }
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
+
+/**
+ * subsys_dev_iter_exit - finish iteration
+ * @iter: subsys iterator to finish
+ *
+ * Finish an iteration. Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+{
+ klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
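+
+/*
+ * A sketch of the iterator in use (hypothetical foo_* names), walking every
+ * device of a subsystem and logging it:
+ *
+ * .. code-block:: c
+ *
+ *     void foo_walk(struct bus_type *subsys)
+ *     {
+ *             struct subsys_dev_iter iter;
+ *             struct device *dev;
+ *
+ *             subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ *             while ((dev = subsys_dev_iter_next(&iter)))
+ *                     dev_info(dev, "on subsystem %s\n", subsys->name);
+ *             subsys_dev_iter_exit(&iter);
+ *     }
+ */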
+
+int subsys_interface_register(struct subsys_interface *sif)
+{
+ struct bus_type *subsys;
+ struct subsys_dev_iter iter;
+ struct device *dev;
+
+ if (!sif || !sif->subsys)
+ return -ENODEV;
+
+ subsys = bus_get(sif->subsys);
+ if (!subsys)
+ return -EINVAL;
+
+ mutex_lock(&subsys->p->mutex);
+ list_add_tail(&sif->node, &subsys->p->interfaces);
+ if (sif->add_dev) {
+ subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ while ((dev = subsys_dev_iter_next(&iter)))
+ sif->add_dev(dev, sif);
+ subsys_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&subsys->p->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(subsys_interface_register);
+
+void subsys_interface_unregister(struct subsys_interface *sif)
+{
+ struct bus_type *subsys;
+ struct subsys_dev_iter iter;
+ struct device *dev;
+
+ if (!sif || !sif->subsys)
+ return;
+
+ subsys = sif->subsys;
+
+ mutex_lock(&subsys->p->mutex);
+ list_del_init(&sif->node);
+ if (sif->remove_dev) {
+ subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ while ((dev = subsys_dev_iter_next(&iter)))
+ sif->remove_dev(dev, sif);
+ subsys_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&subsys->p->mutex);
+
+ bus_put(subsys);
+}
+EXPORT_SYMBOL_GPL(subsys_interface_unregister);
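+
+/*
+ * A sketch of a subsystem interface (hypothetical foo_* names) that is
+ * notified whenever a device joins or leaves the cpu subsystem. The interface
+ * is registered with subsys_interface_register(&foo_interface) and removed
+ * again with subsys_interface_unregister():
+ *
+ * .. code-block:: c
+ *
+ *     static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
+ *     {
+ *             dev_info(dev, "device added\n");
+ *             return 0;
+ *     }
+ *
+ *     static void foo_remove_dev(struct device *dev,
+ *                                struct subsys_interface *sif)
+ *     {
+ *             dev_info(dev, "device removed\n");
+ *     }
+ *
+ *     static struct subsys_interface foo_interface = {
+ *             .name           = "foo",
+ *             .subsys         = &cpu_subsys,
+ *             .add_dev        = foo_add_dev,
+ *             .remove_dev     = foo_remove_dev,
+ *     };
+ */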
+
+static void system_root_device_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int subsys_register(struct bus_type *subsys,
+ const struct attribute_group **groups,
+ struct kobject *parent_of_root)
+{
+ struct device *dev;
+ int err;
+
+ err = bus_register(subsys);
+ if (err < 0)
+ return err;
+
+ dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_dev;
+ }
+
+ err = dev_set_name(dev, "%s", subsys->name);
+ if (err < 0)
+ goto err_name;
+
+ dev->kobj.parent = parent_of_root;
+ dev->groups = groups;
+ dev->release = system_root_device_release;
+
+ err = device_register(dev);
+ if (err < 0)
+ goto err_dev_reg;
+
+ subsys->dev_root = dev;
+ return 0;
+
+err_dev_reg:
+ put_device(dev);
+ dev = NULL;
+err_name:
+ kfree(dev);
+err_dev:
+ bus_unregister(subsys);
+ return err;
+}
+
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new, it exists for compatibility
+ * with bad ideas only. New subsystems should use plain subsystems, and
+ * subsystem-wide attributes should be added to the subsystem directory
+ * itself instead of to a fake root device placed in
+ * /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ return subsys_register(subsys, groups, &system_kset->kobj);
+}
+EXPORT_SYMBOL_GPL(subsys_system_register);
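+
+/*
+ * Illustrative sketch (assumed names, mirroring how e.g. the cpu subsystem
+ * registers itself): a legacy-style "foo" subsystem rooted at
+ * /sys/devices/system/foo.
+ *
+ *	static struct bus_type foo_subsys = {
+ *		.name = "foo",
+ *		.dev_name = "foo",
+ *	};
+ *
+ *	err = subsys_system_register(&foo_subsys, NULL);
+ */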
+
+/**
+ * subsys_virtual_register - register a subsystem at /sys/devices/virtual/
+ * @subsys: virtual subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'virtual' subsystems have a /sys/devices/virtual/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-wide
+ * attributes. All registered devices are below this single root device.
+ * There's no restriction on device naming. This is for kernel software
+ * constructs which need sysfs interface.
+ */
+int subsys_virtual_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ struct kobject *virtual_dir;
+
+ virtual_dir = virtual_device_parent(NULL);
+ if (!virtual_dir)
+ return -ENOMEM;
+
+ return subsys_register(subsys, groups, virtual_dir);
+}
+EXPORT_SYMBOL_GPL(subsys_virtual_register);
+
+int __init buses_init(void)
+{
+ bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
+ if (!bus_kset)
+ return -ENOMEM;
+
+ system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
+ if (!system_kset)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
new file mode 100644
index 000000000..26e13887a
--- /dev/null
+++ b/drivers/base/cacheinfo.c
@@ -0,0 +1,742 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * cacheinfo support - processor cache information via sysfs
+ *
+ * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
+ * Author: Sudeep Holla <sudeep.holla@arm.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+
+/* pointer to per cpu cacheinfo */
+static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
+#define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu))
+#define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves)
+#define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list)
+#define per_cpu_cacheinfo_idx(cpu, idx) \
+ (per_cpu_cacheinfo(cpu) + (idx))
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
+{
+ return ci_cacheinfo(cpu);
+}
+
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ /*
+ * For non-DT/ACPI systems, assume unique level 1 caches and
+ * system-wide shared caches for all other levels.
+ */
+ if (!(IS_ENABLED(CONFIG_OF) || IS_ENABLED(CONFIG_ACPI)))
+ return (this_leaf->level != 1) && (sib_leaf->level != 1);
+
+ if ((sib_leaf->attributes & CACHE_ID) &&
+ (this_leaf->attributes & CACHE_ID))
+ return sib_leaf->id == this_leaf->id;
+
+ return sib_leaf->fw_token == this_leaf->fw_token;
+}
+
+bool last_level_cache_is_valid(unsigned int cpu)
+{
+ struct cacheinfo *llc;
+
+ if (!cache_leaves(cpu))
+ return false;
+
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ return (llc->attributes & CACHE_ID) || !!llc->fw_token;
+
+}
+
+bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y)
+{
+ struct cacheinfo *llc_x, *llc_y;
+
+ if (!last_level_cache_is_valid(cpu_x) ||
+ !last_level_cache_is_valid(cpu_y))
+ return false;
+
+ llc_x = per_cpu_cacheinfo_idx(cpu_x, cache_leaves(cpu_x) - 1);
+ llc_y = per_cpu_cacheinfo_idx(cpu_y, cache_leaves(cpu_y) - 1);
+
+ return cache_leaves_are_shared(llc_x, llc_y);
+}
+
+#ifdef CONFIG_OF
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+ const char *size_prop;
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ }, {
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ }, {
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+ if (type == CACHE_TYPE_UNIFIED)
+ return 0;
+ return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
+{
+ const char *propname;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+ of_property_read_u32(np, propname, &this_leaf->size);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf,
+ struct device_node *np)
+{
+ int i, lim, ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ int ret;
+ u32 line_size;
+ const char *propname;
+
+ propname = cache_type_info[ct_idx].line_size_props[i];
+ ret = of_property_read_u32(np, propname, &line_size);
+ if (!ret) {
+ this_leaf->coherency_line_size = line_size;
+ break;
+ }
+ }
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
+{
+ const char *propname;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+ of_property_read_u32(np, propname, &this_leaf->number_of_sets);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+ unsigned int line_size = this_leaf->coherency_line_size;
+ unsigned int nr_sets = this_leaf->number_of_sets;
+ unsigned int size = this_leaf->size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
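+
+/*
+ * Worked example for cache_associativity() above: a 32 KiB cache with
+ * 64-byte lines and 128 sets yields (32768 / 128) / 64 = 4, i.e. the
+ * cache is 4-way set-associative.
+ */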
+
+static bool cache_node_is_unified(struct cacheinfo *this_leaf,
+ struct device_node *np)
+{
+ return of_property_read_bool(np, "cache-unified");
+}
+
+static void cache_of_set_props(struct cacheinfo *this_leaf,
+ struct device_node *np)
+{
+ /*
+ * init_cache_level must setup the cache level correctly
+ * overriding the architecturally specified levels, so
+ * if type is NONE at this stage, it should be unified
+ */
+ if (this_leaf->type == CACHE_TYPE_NOCACHE &&
+ cache_node_is_unified(this_leaf, np))
+ this_leaf->type = CACHE_TYPE_UNIFIED;
+ cache_size(this_leaf, np);
+ cache_get_line_size(this_leaf, np);
+ cache_nr_sets(this_leaf, np);
+ cache_associativity(this_leaf);
+}
+
+static int cache_setup_of_node(unsigned int cpu)
+{
+ struct device_node *np;
+ struct cacheinfo *this_leaf;
+ unsigned int index = 0;
+
+ np = of_cpu_device_node_get(cpu);
+ if (!np) {
+ pr_err("Failed to find cpu%d device node\n", cpu);
+ return -ENOENT;
+ }
+
+ while (index < cache_leaves(cpu)) {
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
+ if (this_leaf->level != 1)
+ np = of_find_next_cache_node(np);
+ else
+ np = of_node_get(np);/* cpu node itself */
+ if (!np)
+ break;
+ cache_of_set_props(this_leaf, np);
+ this_leaf->fw_token = np;
+ index++;
+ }
+
+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+ return -ENOENT;
+
+ return 0;
+}
+#else
+static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+#endif
+
+int __weak cache_setup_acpi(unsigned int cpu)
+{
+ return -ENOTSUPP;
+}
+
+unsigned int coherency_max_size;
+
+static int cache_setup_properties(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ ret = cache_setup_acpi(cpu);
+
+ return ret;
+}
+
+static int cache_shared_cpu_map_setup(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int index, sib_index;
+ int ret = 0;
+
+ if (this_cpu_ci->cpu_map_populated)
+ return 0;
+
+ /*
+ * Skip setting up cache properties if the LLC is valid; we only need
+ * to update the shared cpu_map if the cache attributes were
+ * populated early, before all the CPUs were brought online.
+ */
+ if (!last_level_cache_is_valid(cpu)) {
+ ret = cache_setup_properties(cpu);
+ if (ret)
+ return ret;
+ }
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ unsigned int i;
+
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
+
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ for_each_online_cpu(i) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+ if (i == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
+ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+
+ /*
+ * Comparing cache IDs only makes sense if the leaves
+ * belong to the same cache level of same type. Skip
+ * the check if level and type do not match.
+ */
+ if (sib_leaf->level != this_leaf->level ||
+ sib_leaf->type != this_leaf->type)
+ continue;
+
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ break;
+ }
+ }
+ }
+ /* record the maximum cache line size */
+ if (this_leaf->coherency_line_size > coherency_max_size)
+ coherency_max_size = this_leaf->coherency_line_size;
+ }
+
+ return 0;
+}
+
+static void cache_shared_cpu_map_remove(unsigned int cpu)
+{
+ struct cacheinfo *this_leaf, *sib_leaf;
+ unsigned int sibling, index, sib_index;
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = per_cpu_cacheinfo_idx(cpu, index);
+ for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
+ struct cpu_cacheinfo *sib_cpu_ci =
+ get_cpu_cacheinfo(sibling);
+
+ if (sibling == cpu || !sib_cpu_ci->info_list)
+ continue;/* skip if itself or no cacheinfo */
+
+ for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+
+ /*
+ * Comparing cache IDs only makes sense if the leaves
+ * belong to the same cache level of same type. Skip
+ * the check if level and type do not match.
+ */
+ if (sib_leaf->level != this_leaf->level ||
+ sib_leaf->type != this_leaf->type)
+ continue;
+
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ break;
+ }
+ }
+ }
+ if (of_have_populated_dt())
+ of_node_put(this_leaf->fw_token);
+ }
+}
+
+static void free_cache_attributes(unsigned int cpu)
+{
+ if (!per_cpu_cacheinfo(cpu))
+ return;
+
+ cache_shared_cpu_map_remove(cpu);
+
+ kfree(per_cpu_cacheinfo(cpu));
+ per_cpu_cacheinfo(cpu) = NULL;
+ cache_leaves(cpu) = 0;
+}
+
+int __weak init_cache_level(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
+int __weak populate_cache_leaves(unsigned int cpu)
+{
+ return -ENOENT;
+}
+
+int detect_cache_attributes(unsigned int cpu)
+{
+ int ret;
+
+ /* Since early detection of the cacheinfo is allowed via this
+ * function and this also gets called as a CPU hotplug callback via
+ * cacheinfo_cpu_online, the initialisation can be skipped and only
+ * the CPU maps need to be updated, as the CPU online status is
+ * updated when called via the cacheinfo_cpu_online path.
+ */
+ if (per_cpu_cacheinfo(cpu))
+ goto update_cpu_map;
+
+ if (init_cache_level(cpu) || !cache_leaves(cpu))
+ return -ENOENT;
+
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct cacheinfo), GFP_ATOMIC);
+ if (per_cpu_cacheinfo(cpu) == NULL) {
+ cache_leaves(cpu) = 0;
+ return -ENOMEM;
+ }
+
+ /*
+ * populate_cache_leaves() may completely set up the cache leaves and
+ * shared_cpu_map, or it may leave them partially set up.
+ */
+ ret = populate_cache_leaves(cpu);
+ if (ret)
+ goto free_ci;
+
+update_cpu_map:
+ /*
+ * For systems using DT for cache hierarchy, fw_token
+ * and shared_cpu_map will be set up here only if they are
+ * not populated already
+ */
+ ret = cache_shared_cpu_map_setup(cpu);
+ if (ret) {
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
+ goto free_ci;
+ }
+
+ return 0;
+
+free_ci:
+ free_cache_attributes(cpu);
+ return ret;
+}
+
+/* pointer to cpuX/cache device */
+static DEFINE_PER_CPU(struct device *, ci_cache_dev);
+#define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu))
+
+static cpumask_t cache_dev_map;
+
+/* pointer to array of devices for cpuX/cache/indexY */
+static DEFINE_PER_CPU(struct device **, ci_index_dev);
+#define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
+#define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx])
+
+#define show_one(file_name, object) \
+static ssize_t file_name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \
+ return sysfs_emit(buf, "%u\n", this_leaf->object); \
+}
+
+show_one(id, id);
+show_one(level, level);
+show_one(coherency_line_size, coherency_line_size);
+show_one(number_of_sets, number_of_sets);
+show_one(physical_line_partition, physical_line_partition);
+show_one(ways_of_associativity, ways_of_associativity);
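+
+/*
+ * For illustration, the show_one(level, level) invocation above expands to:
+ *
+ *	static ssize_t level_show(struct device *dev,
+ *				  struct device_attribute *attr, char *buf)
+ *	{
+ *		struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ *		return sysfs_emit(buf, "%u\n", this_leaf->level);
+ *	}
+ */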
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
+}
+
+static ssize_t shared_cpu_map_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+ return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
+}
+
+static ssize_t shared_cpu_list_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+
+ return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
+}
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const char *output;
+
+ switch (this_leaf->type) {
+ case CACHE_TYPE_DATA:
+ output = "Data";
+ break;
+ case CACHE_TYPE_INST:
+ output = "Instruction";
+ break;
+ case CACHE_TYPE_UNIFIED:
+ output = "Unified";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t allocation_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ unsigned int ci_attr = this_leaf->attributes;
+ const char *output;
+
+ if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
+ output = "ReadWriteAllocate";
+ else if (ci_attr & CACHE_READ_ALLOCATE)
+ output = "ReadAllocate";
+ else if (ci_attr & CACHE_WRITE_ALLOCATE)
+ output = "WriteAllocate";
+ else
+ return 0;
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static ssize_t write_policy_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ unsigned int ci_attr = this_leaf->attributes;
+ int n = 0;
+
+ if (ci_attr & CACHE_WRITE_THROUGH)
+ n = sysfs_emit(buf, "WriteThrough\n");
+ else if (ci_attr & CACHE_WRITE_BACK)
+ n = sysfs_emit(buf, "WriteBack\n");
+ return n;
+}
+
+static DEVICE_ATTR_RO(id);
+static DEVICE_ATTR_RO(level);
+static DEVICE_ATTR_RO(type);
+static DEVICE_ATTR_RO(coherency_line_size);
+static DEVICE_ATTR_RO(ways_of_associativity);
+static DEVICE_ATTR_RO(number_of_sets);
+static DEVICE_ATTR_RO(size);
+static DEVICE_ATTR_RO(allocation_policy);
+static DEVICE_ATTR_RO(write_policy);
+static DEVICE_ATTR_RO(shared_cpu_map);
+static DEVICE_ATTR_RO(shared_cpu_list);
+static DEVICE_ATTR_RO(physical_line_partition);
+
+static struct attribute *cache_default_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_type.attr,
+ &dev_attr_level.attr,
+ &dev_attr_shared_cpu_map.attr,
+ &dev_attr_shared_cpu_list.attr,
+ &dev_attr_coherency_line_size.attr,
+ &dev_attr_ways_of_associativity.attr,
+ &dev_attr_number_of_sets.attr,
+ &dev_attr_size.attr,
+ &dev_attr_allocation_policy.attr,
+ &dev_attr_write_policy.attr,
+ &dev_attr_physical_line_partition.attr,
+ NULL
+};
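+
+/*
+ * The attributes above surface under the per-CPU cache directories created
+ * further below, e.g. (illustrative listing; which leaves and files appear
+ * depends on what the platform reports):
+ *
+ *	/sys/devices/system/cpu/cpu0/cache/index0/level
+ *	/sys/devices/system/cpu/cpu0/cache/index0/type
+ *	/sys/devices/system/cpu/cpu0/cache/index0/size
+ *	/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
+ */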
+
+static umode_t
+cache_default_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cacheinfo *this_leaf = dev_get_drvdata(dev);
+ const struct cpumask *mask = &this_leaf->shared_cpu_map;
+ umode_t mode = attr->mode;
+
+ if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+ return mode;
+ if ((attr == &dev_attr_type.attr) && this_leaf->type)
+ return mode;
+ if ((attr == &dev_attr_level.attr) && this_leaf->level)
+ return mode;
+ if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
+ return mode;
+ if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
+ return mode;
+ if ((attr == &dev_attr_coherency_line_size.attr) &&
+ this_leaf->coherency_line_size)
+ return mode;
+ if ((attr == &dev_attr_ways_of_associativity.attr) &&
+ this_leaf->size) /* allow 0 = full associativity */
+ return mode;
+ if ((attr == &dev_attr_number_of_sets.attr) &&
+ this_leaf->number_of_sets)
+ return mode;
+ if ((attr == &dev_attr_size.attr) && this_leaf->size)
+ return mode;
+ if ((attr == &dev_attr_write_policy.attr) &&
+ (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
+ return mode;
+ if ((attr == &dev_attr_allocation_policy.attr) &&
+ (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
+ return mode;
+ if ((attr == &dev_attr_physical_line_partition.attr) &&
+ this_leaf->physical_line_partition)
+ return mode;
+
+ return 0;
+}
+
+static const struct attribute_group cache_default_group = {
+ .attrs = cache_default_attrs,
+ .is_visible = cache_default_attrs_is_visible,
+};
+
+static const struct attribute_group *cache_default_groups[] = {
+ &cache_default_group,
+ NULL,
+};
+
+static const struct attribute_group *cache_private_groups[] = {
+ &cache_default_group,
+ NULL, /* Place holder for private group */
+ NULL,
+};
+
+const struct attribute_group *
+__weak cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ return NULL;
+}
+
+static const struct attribute_group **
+cache_get_attribute_groups(struct cacheinfo *this_leaf)
+{
+ const struct attribute_group *priv_group =
+ cache_get_priv_group(this_leaf);
+
+ if (!priv_group)
+ return cache_default_groups;
+
+ if (!cache_private_groups[1])
+ cache_private_groups[1] = priv_group;
+
+ return cache_private_groups;
+}
+
+/* Add/Remove cache interface for CPU device */
+static void cpu_cache_sysfs_exit(unsigned int cpu)
+{
+ int i;
+ struct device *ci_dev;
+
+ if (per_cpu_index_dev(cpu)) {
+ for (i = 0; i < cache_leaves(cpu); i++) {
+ ci_dev = per_cache_index_dev(cpu, i);
+ if (!ci_dev)
+ continue;
+ device_unregister(ci_dev);
+ }
+ kfree(per_cpu_index_dev(cpu));
+ per_cpu_index_dev(cpu) = NULL;
+ }
+ device_unregister(per_cpu_cache_dev(cpu));
+ per_cpu_cache_dev(cpu) = NULL;
+}
+
+static int cpu_cache_sysfs_init(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ if (per_cpu_cacheinfo(cpu) == NULL)
+ return -ENOENT;
+
+ per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
+ if (IS_ERR(per_cpu_cache_dev(cpu)))
+ return PTR_ERR(per_cpu_cache_dev(cpu));
+
+ /* Allocate all required memory */
+ per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct device *), GFP_KERNEL);
+ if (unlikely(per_cpu_index_dev(cpu) == NULL))
+ goto err_out;
+
+ return 0;
+
+err_out:
+ cpu_cache_sysfs_exit(cpu);
+ return -ENOMEM;
+}
+
+static int cache_add_dev(unsigned int cpu)
+{
+ unsigned int i;
+ int rc;
+ struct device *ci_dev, *parent;
+ struct cacheinfo *this_leaf;
+ const struct attribute_group **cache_groups;
+
+ rc = cpu_cache_sysfs_init(cpu);
+ if (unlikely(rc < 0))
+ return rc;
+
+ parent = per_cpu_cache_dev(cpu);
+ for (i = 0; i < cache_leaves(cpu); i++) {
+ this_leaf = per_cpu_cacheinfo_idx(cpu, i);
+ if (this_leaf->disable_sysfs)
+ continue;
+ if (this_leaf->type == CACHE_TYPE_NOCACHE)
+ break;
+ cache_groups = cache_get_attribute_groups(this_leaf);
+ ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
+ "index%1u", i);
+ if (IS_ERR(ci_dev)) {
+ rc = PTR_ERR(ci_dev);
+ goto err;
+ }
+ per_cache_index_dev(cpu, i) = ci_dev;
+ }
+ cpumask_set_cpu(cpu, &cache_dev_map);
+
+ return 0;
+err:
+ cpu_cache_sysfs_exit(cpu);
+ return rc;
+}
+
+static int cacheinfo_cpu_online(unsigned int cpu)
+{
+ int rc = detect_cache_attributes(cpu);
+
+ if (rc)
+ return rc;
+ rc = cache_add_dev(cpu);
+ if (rc)
+ free_cache_attributes(cpu);
+ return rc;
+}
+
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
+{
+ if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+ cpu_cache_sysfs_exit(cpu);
+
+ free_cache_attributes(cpu);
+ return 0;
+}
+
+static int __init cacheinfo_sysfs_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
+ "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
+}
+device_initcall(cacheinfo_sysfs_init);
diff --git a/drivers/base/class.c b/drivers/base/class.c
new file mode 100644
index 000000000..8ceafb7d0
--- /dev/null
+++ b/drivers/base/class.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * class.c - basic device class management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman
+ * Copyright (c) 2003-2004 IBM Corp.
+ */
+
+#include <linux/device/class.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include "base.h"
+
+#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
+
+static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct class_attribute *class_attr = to_class_attr(attr);
+ struct subsys_private *cp = to_subsys_private(kobj);
+ ssize_t ret = -EIO;
+
+ if (class_attr->show)
+ ret = class_attr->show(cp->class, class_attr, buf);
+ return ret;
+}
+
+static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct class_attribute *class_attr = to_class_attr(attr);
+ struct subsys_private *cp = to_subsys_private(kobj);
+ ssize_t ret = -EIO;
+
+ if (class_attr->store)
+ ret = class_attr->store(cp->class, class_attr, buf, count);
+ return ret;
+}
+
+static void class_release(struct kobject *kobj)
+{
+ struct subsys_private *cp = to_subsys_private(kobj);
+ struct class *class = cp->class;
+
+ pr_debug("class '%s': release.\n", class->name);
+
+ if (class->class_release)
+ class->class_release(class);
+ else
+ pr_debug("class '%s' does not have a release() function, "
+ "be careful\n", class->name);
+
+ kfree(cp);
+}
+
+static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj)
+{
+ struct subsys_private *cp = to_subsys_private(kobj);
+ struct class *class = cp->class;
+
+ return class->ns_type;
+}
+
+static const struct sysfs_ops class_sysfs_ops = {
+ .show = class_attr_show,
+ .store = class_attr_store,
+};
+
+static struct kobj_type class_ktype = {
+ .sysfs_ops = &class_sysfs_ops,
+ .release = class_release,
+ .child_ns_type = class_child_ns_type,
+};
+
+/* Hotplug events for classes go to the class subsys */
+static struct kset *class_kset;
+
+
+int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
+{
+ int error;
+
+ if (cls)
+ error = sysfs_create_file_ns(&cls->p->subsys.kobj,
+ &attr->attr, ns);
+ else
+ error = -EINVAL;
+ return error;
+}
+
+void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
+ const void *ns)
+{
+ if (cls)
+ sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
+}
+
+static struct class *class_get(struct class *cls)
+{
+ if (cls)
+ kset_get(&cls->p->subsys);
+ return cls;
+}
+
+static void class_put(struct class *cls)
+{
+ if (cls)
+ kset_put(&cls->p->subsys);
+}
+
+static struct device *klist_class_to_dev(struct klist_node *n)
+{
+ struct device_private *p = to_device_private_class(n);
+ return p->device;
+}
+
+static void klist_class_dev_get(struct klist_node *n)
+{
+ struct device *dev = klist_class_to_dev(n);
+
+ get_device(dev);
+}
+
+static void klist_class_dev_put(struct klist_node *n)
+{
+ struct device *dev = klist_class_to_dev(n);
+
+ put_device(dev);
+}
+
+static int class_add_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
+int __class_register(struct class *cls, struct lock_class_key *key)
+{
+ struct subsys_private *cp;
+ int error;
+
+ pr_debug("device class '%s': registering\n", cls->name);
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+ klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
+ INIT_LIST_HEAD(&cp->interfaces);
+ kset_init(&cp->glue_dirs);
+ __mutex_init(&cp->mutex, "subsys mutex", key);
+ error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
+ if (error) {
+ kfree(cp);
+ return error;
+ }
+
+ /* set the default /sys/dev directory for devices of this class */
+ if (!cls->dev_kobj)
+ cls->dev_kobj = sysfs_dev_char_kobj;
+
+#if defined(CONFIG_BLOCK)
+ /* let the block class directory show up in the root of sysfs */
+ if (!sysfs_deprecated || cls != &block_class)
+ cp->subsys.kobj.kset = class_kset;
+#else
+ cp->subsys.kobj.kset = class_kset;
+#endif
+ cp->subsys.kobj.ktype = &class_ktype;
+ cp->class = cls;
+ cls->p = cp;
+
+ error = kset_register(&cp->subsys);
+ if (error) {
+ kfree(cp);
+ return error;
+ }
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
+ if (error) {
+ kobject_del(&cp->subsys.kobj);
+ kfree_const(cp->subsys.kobj.name);
+ kfree(cp);
+ }
+ return error;
+}
+EXPORT_SYMBOL_GPL(__class_register);
+
+void class_unregister(struct class *cls)
+{
+ pr_debug("device class '%s': unregistering\n", cls->name);
+ class_remove_groups(cls, cls->class_groups);
+ kset_unregister(&cls->p->subsys);
+}
+
+static void class_create_release(struct class *cls)
+{
+ pr_debug("%s called for %s\n", __func__, cls->name);
+ kfree(cls);
+}
+
+/**
+ * __class_create - create a struct class structure
+ * @owner: pointer to the module that is to "own" this struct class
+ * @name: pointer to a string for the name of this class.
+ * @key: the lock_class_key for this class; used by mutex lock debugging
+ *
+ * This is used to create a struct class pointer that can then be used
+ * in calls to device_create().
+ *
+ * Returns &struct class pointer on success, or ERR_PTR() on error.
+ *
+ * Note, the pointer created here is to be destroyed when finished by
+ * making a call to class_destroy().
+ */
+struct class *__class_create(struct module *owner, const char *name,
+ struct lock_class_key *key)
+{
+ struct class *cls;
+ int retval;
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ cls->name = name;
+ cls->owner = owner;
+ cls->class_release = class_create_release;
+
+ retval = __class_register(cls, key);
+ if (retval)
+ goto error;
+
+ return cls;
+
+error:
+ kfree(cls);
+ return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(__class_create);
+
+/**
+ * class_destroy - destroys a struct class structure
+ * @cls: pointer to the struct class that is to be destroyed
+ *
+ * Note, the pointer to be destroyed must have been created with a call
+ * to class_create().
+ */
+void class_destroy(struct class *cls)
+{
+ if (IS_ERR_OR_NULL(cls))
+ return;
+
+ class_unregister(cls);
+}
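+
+/*
+ * Typical usage sketch with a hypothetical "foo" class (in this kernel
+ * version class_create() is the macro wrapper around __class_create()):
+ *
+ *	foo_class = class_create(THIS_MODULE, "foo");
+ *	if (IS_ERR(foo_class))
+ *		return PTR_ERR(foo_class);
+ *	...
+ *	class_destroy(foo_class);
+ */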
+
+/**
+ * class_dev_iter_init - initialize class device iterator
+ * @iter: class iterator to initialize
+ * @class: the class we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize class iterator @iter such that it iterates over devices
+ * of @class. If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
+ struct device *start, const struct device_type *type)
+{
+ struct klist_node *start_knode = NULL;
+
+ if (start)
+ start_knode = &start->p->knode_class;
+ klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode);
+ iter->type = type;
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_init);
+
+/**
+ * class_dev_iter_next - iterate to the next device
+ * @iter: class iterator to proceed
+ *
+ * Proceed @iter to the next device and return it. Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator is advanced to the next device or exited. The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into class code.
+ */
+struct device *class_dev_iter_next(struct class_dev_iter *iter)
+{
+ struct klist_node *knode;
+ struct device *dev;
+
+ while (1) {
+ knode = klist_next(&iter->ki);
+ if (!knode)
+ return NULL;
+ dev = klist_class_to_dev(knode);
+ if (!iter->type || iter->type == dev->type)
+ return dev;
+ }
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_next);
+
+/**
+ * class_dev_iter_exit - finish iteration
+ * @iter: class iterator to finish
+ *
+ * Finish an iteration. Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void class_dev_iter_exit(struct class_dev_iter *iter)
+{
+ klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_exit);
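+
+/*
+ * Illustrative sketch: iterating all devices of a hypothetical foo_class
+ * with the iterator trio above (foo_handle_dev() is a placeholder).
+ *
+ *	struct class_dev_iter iter;
+ *	struct device *dev;
+ *
+ *	class_dev_iter_init(&iter, foo_class, NULL, NULL);
+ *	while ((dev = class_dev_iter_next(&iter)))
+ *		foo_handle_dev(dev);
+ *	class_dev_iter_exit(&iter);
+ */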
+
+/**
+ * class_for_each_device - device iterator
+ * @class: the class we're iterating
+ * @start: the device to start with in the list, if any.
+ * @data: data for the callback
+ * @fn: function to be called for each device
+ *
+ * Iterate over @class's list of devices, and call @fn for each,
+ * passing it @data. If @start is set, the list iteration will start
+ * there, otherwise if it is NULL, the iteration starts at the
+ * beginning of the list.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ *
+ * @fn is allowed to do anything including calling back into class
+ * code. There's no locking restriction.
+ */
+int class_for_each_device(struct class *class, struct device *start,
+ void *data, int (*fn)(struct device *, void *))
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+ int error = 0;
+
+ if (!class)
+ return -EINVAL;
+ if (!class->p) {
+ WARN(1, "%s called for class '%s' before it was initialized",
+ __func__, class->name);
+ return -EINVAL;
+ }
+
+ class_dev_iter_init(&iter, class, start, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ error = fn(dev, data);
+ if (error)
+ break;
+ }
+ class_dev_iter_exit(&iter);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(class_for_each_device);
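+
+/*
+ * Minimal usage sketch (hypothetical foo_class and callback):
+ *
+ *	static int foo_log_dev(struct device *dev, void *data)
+ *	{
+ *		dev_info(dev, "%s\n", (const char *)data);
+ *		return 0;
+ *	}
+ *
+ *	class_for_each_device(foo_class, NULL, "present", foo_log_dev);
+ *
+ * Returning 0 from the callback keeps the iteration going; any other value
+ * stops it and is returned to the caller.
+ */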
+
+/**
+ * class_find_device - device iterator for locating a particular device
+ * @class: the class we're iterating
+ * @start: Device to begin with
+ * @data: data for the match function
+ * @match: function to check device
+ *
+ * This is similar to the class_for_each_device() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ *
+ * Note, you will need to drop the reference with put_device() after use.
+ *
+ * @match is allowed to do anything including calling back into class
+ * code. There's no locking restriction.
+ */
+struct device *class_find_device(struct class *class, struct device *start,
+ const void *data,
+ int (*match)(struct device *, const void *))
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ if (!class)
+ return NULL;
+ if (!class->p) {
+ WARN(1, "%s called for class '%s' before it was initialized",
+ __func__, class->name);
+ return NULL;
+ }
+
+ class_dev_iter_init(&iter, class, start, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ class_dev_iter_exit(&iter);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(class_find_device);
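+
+/*
+ * Minimal usage sketch (hypothetical foo_class; remember to drop the
+ * reference that a successful lookup takes):
+ *
+ *	static int foo_match_name(struct device *dev, const void *data)
+ *	{
+ *		return sysfs_streq(dev_name(dev), data);
+ *	}
+ *
+ *	dev = class_find_device(foo_class, NULL, "foo0", foo_match_name);
+ *	if (dev) {
+ *		...
+ *		put_device(dev);
+ *	}
+ */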
+
+int class_interface_register(struct class_interface *class_intf)
+{
+ struct class *parent;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ if (!class_intf || !class_intf->class)
+ return -ENODEV;
+
+ parent = class_get(class_intf->class);
+ if (!parent)
+ return -EINVAL;
+
+ mutex_lock(&parent->p->mutex);
+ list_add_tail(&class_intf->node, &parent->p->interfaces);
+ if (class_intf->add_dev) {
+ class_dev_iter_init(&iter, parent, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter)))
+ class_intf->add_dev(dev, class_intf);
+ class_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&parent->p->mutex);
+
+ return 0;
+}
+
+void class_interface_unregister(struct class_interface *class_intf)
+{
+ struct class *parent = class_intf->class;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ if (!parent)
+ return;
+
+ mutex_lock(&parent->p->mutex);
+ list_del_init(&class_intf->node);
+ if (class_intf->remove_dev) {
+ class_dev_iter_init(&iter, parent, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter)))
+ class_intf->remove_dev(dev, class_intf);
+ class_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&parent->p->mutex);
+
+ class_put(parent);
+}
+
+ssize_t show_class_attr_string(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ struct class_attribute_string *cs;
+
+ cs = container_of(attr, struct class_attribute_string, attr);
+ return sysfs_emit(buf, "%s\n", cs->str);
+}
+
+EXPORT_SYMBOL_GPL(show_class_attr_string);
+
+struct class_compat {
+ struct kobject *kobj;
+};
+
+/**
+ * class_compat_register - register a compatibility class
+ * @name: the name of the class
+ *
+ * Compatibility classes are meant as a temporary user-space compatibility
+ * workaround when converting a family of class devices to bus devices.
+ */
+struct class_compat *class_compat_register(const char *name)
+{
+ struct class_compat *cls;
+
+ cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
+ if (!cls)
+ return NULL;
+ cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
+ if (!cls->kobj) {
+ kfree(cls);
+ return NULL;
+ }
+ return cls;
+}
+EXPORT_SYMBOL_GPL(class_compat_register);
+
+/**
+ * class_compat_unregister - unregister a compatibility class
+ * @cls: the class to unregister
+ */
+void class_compat_unregister(struct class_compat *cls)
+{
+ kobject_put(cls->kobj);
+ kfree(cls);
+}
+EXPORT_SYMBOL_GPL(class_compat_unregister);
+
+/**
+ * class_compat_create_link - create a compatibility class device link to
+ * a bus device
+ * @cls: the compatibility class
+ * @dev: the target bus device
+ * @device_link: an optional device to which a "device" link should be created
+ */
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link)
+{
+ int error;
+
+ error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
+ if (error)
+ return error;
+
+ /*
+ * Optionally add a "device" link (typically to the parent), as a
+ * class device would have one and we want to provide as much
+ * backwards compatibility as possible.
+ */
+ if (device_link) {
+ error = sysfs_create_link(&dev->kobj, &device_link->kobj,
+ "device");
+ if (error)
+ sysfs_remove_link(cls->kobj, dev_name(dev));
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(class_compat_create_link);
+
+/**
+ * class_compat_remove_link - remove a compatibility class device link to
+ * a bus device
+ * @cls: the compatibility class
+ * @dev: the target bus device
+ * @device_link: an optional device to which a "device" link was previously
+ * created
+ */
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link)
+{
+ if (device_link)
+ sysfs_remove_link(&dev->kobj, "device");
+ sysfs_remove_link(cls->kobj, dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(class_compat_remove_link);
+
+int __init classes_init(void)
+{
+ class_kset = kset_create_and_add("class", NULL, NULL);
+ if (!class_kset)
+ return -ENOMEM;
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(class_create_file_ns);
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
+EXPORT_SYMBOL_GPL(class_unregister);
+EXPORT_SYMBOL_GPL(class_destroy);
+
+EXPORT_SYMBOL_GPL(class_interface_register);
+EXPORT_SYMBOL_GPL(class_interface_unregister);
diff --git a/drivers/base/component.c b/drivers/base/component.c
new file mode 100644
index 000000000..7dbf14a1d
--- /dev/null
+++ b/drivers/base/component.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Componentized device handling.
+ */
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+/**
+ * DOC: overview
+ *
+ * The component helper allows drivers to collect a pile of sub-devices,
+ * including their bound drivers, into an aggregate driver. Various subsystems
+ * already provide functions to get hold of such components, e.g.
+ * of_clk_get_by_name(). The component helper can be used when such a
+ * subsystem-specific way to find a device is not available: The component
+ * helper fills the niche of aggregate drivers for specific hardware, where
+ * further standardization into a subsystem would not be practical. The common
+ * example is when a logical device (e.g. a DRM display driver) is spread around
+ * the SoC on various components (scanout engines, blending blocks, transcoders
+ * for various outputs and so on).
+ *
+ * The component helper also doesn't solve runtime dependencies, e.g. for system
+ * suspend and resume operations. See also :ref:`device links<device_link>`.
+ *
+ * Components are registered using component_add() and unregistered with
+ * component_del(), usually from the driver's probe and disconnect functions.
+ *
+ * Aggregate drivers first assemble a component match list of what they need
+ * using component_match_add(). This is then registered as an aggregate driver
+ * using component_master_add_with_match(), and unregistered using
+ * component_master_del().
+ */
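+
+/*
+ * Condensed usage sketch of the two halves described above (all names are
+ * placeholders): a component registers itself from its probe function, and
+ * the aggregate driver builds a match list and registers against it.
+ *
+ *	// component driver probe
+ *	return component_add(&pdev->dev, &foo_component_ops);
+ *
+ *	// aggregate driver probe
+ *	struct component_match *match = NULL;
+ *
+ *	component_match_add(&pdev->dev, &match, component_compare_of,
+ *			    remote_np);
+ *	return component_master_add_with_match(&pdev->dev, &foo_master_ops,
+ *					       match);
+ */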
+
+struct component;
+
+struct component_match_array {
+ void *data;
+ int (*compare)(struct device *, void *);
+ int (*compare_typed)(struct device *, int, void *);
+ void (*release)(struct device *, void *);
+ struct component *component;
+ bool duplicate;
+};
+
+struct component_match {
+ size_t alloc;
+ size_t num;
+ struct component_match_array *compare;
+};
+
+struct aggregate_device {
+ struct list_head node;
+ bool bound;
+
+ const struct component_master_ops *ops;
+ struct device *parent;
+ struct component_match *match;
+};
+
+struct component {
+ struct list_head node;
+ struct aggregate_device *adev;
+ bool bound;
+
+ const struct component_ops *ops;
+ int subcomponent;
+ struct device *dev;
+};
+
+static DEFINE_MUTEX(component_mutex);
+static LIST_HEAD(component_list);
+static LIST_HEAD(aggregate_devices);
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct dentry *component_debugfs_dir;
+
+static int component_devices_show(struct seq_file *s, void *data)
+{
+ struct aggregate_device *m = s->private;
+ struct component_match *match = m->match;
+ size_t i;
+
+ mutex_lock(&component_mutex);
+ seq_printf(s, "%-40s %20s\n", "aggregate_device name", "status");
+ seq_puts(s, "-------------------------------------------------------------\n");
+ seq_printf(s, "%-40s %20s\n\n",
+ dev_name(m->parent), m->bound ? "bound" : "not bound");
+
+ seq_printf(s, "%-40s %20s\n", "device name", "status");
+ seq_puts(s, "-------------------------------------------------------------\n");
+ for (i = 0; i < match->num; i++) {
+ struct component *component = match->compare[i].component;
+
+ seq_printf(s, "%-40s %20s\n",
+ component ? dev_name(component->dev) : "(unknown)",
+ component ? (component->bound ? "bound" : "not bound") : "not registered");
+ }
+ mutex_unlock(&component_mutex);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(component_devices);
+
+static int __init component_debug_init(void)
+{
+ component_debugfs_dir = debugfs_create_dir("device_component", NULL);
+
+ return 0;
+}
+
+core_initcall(component_debug_init);
+
+static void component_debugfs_add(struct aggregate_device *m)
+{
+ debugfs_create_file(dev_name(m->parent), 0444, component_debugfs_dir, m,
+ &component_devices_fops);
+}
+
+static void component_debugfs_del(struct aggregate_device *m)
+{
+ debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
+}
+
+#else
+
+static void component_debugfs_add(struct aggregate_device *m)
+{ }
+
+static void component_debugfs_del(struct aggregate_device *m)
+{ }
+
+#endif
+
+static struct aggregate_device *__aggregate_find(struct device *parent,
+ const struct component_master_ops *ops)
+{
+ struct aggregate_device *m;
+
+ list_for_each_entry(m, &aggregate_devices, node)
+ if (m->parent == parent && (!ops || m->ops == ops))
+ return m;
+
+ return NULL;
+}
+
+static struct component *find_component(struct aggregate_device *adev,
+ struct component_match_array *mc)
+{
+ struct component *c;
+
+ list_for_each_entry(c, &component_list, node) {
+ if (c->adev && c->adev != adev)
+ continue;
+
+ if (mc->compare && mc->compare(c->dev, mc->data))
+ return c;
+
+ if (mc->compare_typed &&
+ mc->compare_typed(c->dev, c->subcomponent, mc->data))
+ return c;
+ }
+
+ return NULL;
+}
+
+static int find_components(struct aggregate_device *adev)
+{
+ struct component_match *match = adev->match;
+ size_t i;
+ int ret = 0;
+
+ /*
+ * Scan the array of match functions and attach
+ * any components which are found to this adev.
+ */
+ for (i = 0; i < match->num; i++) {
+ struct component_match_array *mc = &match->compare[i];
+ struct component *c;
+
+ dev_dbg(adev->parent, "Looking for component %zu\n", i);
+
+ if (match->compare[i].component)
+ continue;
+
+ c = find_component(adev, mc);
+ if (!c) {
+ ret = -ENXIO;
+ break;
+ }
+
+ dev_dbg(adev->parent, "found component %s, duplicate %u\n",
+ dev_name(c->dev), !!c->adev);
+
+ /* Attach this component to the adev */
+ match->compare[i].duplicate = !!c->adev;
+ match->compare[i].component = c;
+ c->adev = adev;
+ }
+ return ret;
+}
+
+/* Detach component from associated aggregate_device */
+static void remove_component(struct aggregate_device *adev, struct component *c)
+{
+ size_t i;
+
+ /* Detach the component from this adev. */
+ for (i = 0; i < adev->match->num; i++)
+ if (adev->match->compare[i].component == c)
+ adev->match->compare[i].component = NULL;
+}
+
+/*
+ * Try to bring up an aggregate device. If component is NULL, we're interested
+ * in this aggregate device, otherwise it's a component which must be present
+ * to try and bring up the aggregate device.
+ *
+ * Returns 1 for successful bringup, 0 if not ready, or -ve errno.
+ */
+static int try_to_bring_up_aggregate_device(struct aggregate_device *adev,
+ struct component *component)
+{
+ int ret;
+
+ dev_dbg(adev->parent, "trying to bring up adev\n");
+
+ if (find_components(adev)) {
+ dev_dbg(adev->parent, "master has incomplete components\n");
+ return 0;
+ }
+
+ if (component && component->adev != adev) {
+ dev_dbg(adev->parent, "master is not for this component (%s)\n",
+ dev_name(component->dev));
+ return 0;
+ }
+
+ if (!devres_open_group(adev->parent, adev, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* Found all components */
+ ret = adev->ops->bind(adev->parent);
+ if (ret < 0) {
+ devres_release_group(adev->parent, NULL);
+ if (ret != -EPROBE_DEFER)
+ dev_info(adev->parent, "adev bind failed: %d\n", ret);
+ return ret;
+ }
+
+ devres_close_group(adev->parent, NULL);
+ adev->bound = true;
+ return 1;
+}
+
+static int try_to_bring_up_masters(struct component *component)
+{
+ struct aggregate_device *adev;
+ int ret = 0;
+
+ list_for_each_entry(adev, &aggregate_devices, node) {
+ if (!adev->bound) {
+ ret = try_to_bring_up_aggregate_device(adev, component);
+ if (ret != 0)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void take_down_aggregate_device(struct aggregate_device *adev)
+{
+ if (adev->bound) {
+ adev->ops->unbind(adev->parent);
+ devres_release_group(adev->parent, adev);
+ adev->bound = false;
+ }
+}
+
+/**
+ * component_compare_of - A common component compare function for of_node
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * A common compare function when compare_data is a device of_node, e.g.
+ * component_match_add_release(masterdev, &match, component_release_of,
+ * component_compare_of, component_dev_of_node)
+ */
+int component_compare_of(struct device *dev, void *data)
+{
+ return device_match_of_node(dev, data);
+}
+EXPORT_SYMBOL_GPL(component_compare_of);
+
+/**
+ * component_release_of - A common component release function for of_node
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * For an example, please see component_compare_of().
+ */
+void component_release_of(struct device *dev, void *data)
+{
+ of_node_put(data);
+}
+EXPORT_SYMBOL_GPL(component_release_of);
+
+/**
+ * component_compare_dev - A common component compare function for dev
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * A common compare function when compare_data is a struct device pointer, e.g.
+ * component_match_add(masterdev, &match, component_compare_dev, component_dev)
+ */
+int component_compare_dev(struct device *dev, void *data)
+{
+ return dev == data;
+}
+EXPORT_SYMBOL_GPL(component_compare_dev);
+
+/**
+ * component_compare_dev_name - A common component compare function for device name
+ * @dev: component device
+ * @data: @compare_data from component_match_add_release()
+ *
+ * A common compare function when compare_data is a device name string, e.g.
+ * component_match_add(masterdev, &match, component_compare_dev_name,
+ * "component_dev_name")
+ */
+int component_compare_dev_name(struct device *dev, void *data)
+{
+ return device_match_name(dev, data);
+}
+EXPORT_SYMBOL_GPL(component_compare_dev_name);
+
+static void devm_component_match_release(struct device *parent, void *res)
+{
+ struct component_match *match = res;
+ unsigned int i;
+
+ for (i = 0; i < match->num; i++) {
+ struct component_match_array *mc = &match->compare[i];
+
+ if (mc->release)
+ mc->release(parent, mc->data);
+ }
+
+ kfree(match->compare);
+}
+
+static int component_match_realloc(struct component_match *match, size_t num)
+{
+ struct component_match_array *new;
+
+ if (match->alloc == num)
+ return 0;
+
+ new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ if (match->compare) {
+ memcpy(new, match->compare, sizeof(*new) *
+ min(match->num, num));
+ kfree(match->compare);
+ }
+ match->compare = new;
+ match->alloc = num;
+
+ return 0;
+}
+
+static void __component_match_add(struct device *parent,
+ struct component_match **matchptr,
+ void (*release)(struct device *, void *),
+ int (*compare)(struct device *, void *),
+ int (*compare_typed)(struct device *, int, void *),
+ void *compare_data)
+{
+ struct component_match *match = *matchptr;
+
+ if (IS_ERR(match))
+ return;
+
+ if (!match) {
+ match = devres_alloc(devm_component_match_release,
+ sizeof(*match), GFP_KERNEL);
+ if (!match) {
+ *matchptr = ERR_PTR(-ENOMEM);
+ return;
+ }
+
+ devres_add(parent, match);
+
+ *matchptr = match;
+ }
+
+ if (match->num == match->alloc) {
+ size_t new_size = match->alloc + 16;
+ int ret;
+
+ ret = component_match_realloc(match, new_size);
+ if (ret) {
+ *matchptr = ERR_PTR(ret);
+ return;
+ }
+ }
+
+ match->compare[match->num].compare = compare;
+ match->compare[match->num].compare_typed = compare_typed;
+ match->compare[match->num].release = release;
+ match->compare[match->num].data = compare_data;
+ match->compare[match->num].component = NULL;
+ match->num++;
+}
+
+/**
+ * component_match_add_release - add a component match entry with release callback
+ * @parent: parent device of the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @release: release function for @compare_data
+ * @compare: compare function to match against all components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions, whereupon @release will be called to free any references held by
+ * @compare_data, e.g. when @compare_data is a &device_node that must be
+ * released with of_node_put().
+ *
+ * See also component_match_add() and component_match_add_typed().
+ */
+void component_match_add_release(struct device *parent,
+ struct component_match **matchptr,
+ void (*release)(struct device *, void *),
+ int (*compare)(struct device *, void *), void *compare_data)
+{
+ __component_match_add(parent, matchptr, release, compare, NULL,
+ compare_data);
+}
+EXPORT_SYMBOL(component_match_add_release);
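+
+/*
+ * Illustrative sketch: matching on a firmware node referenced by a
+ * hypothetical "remote" property, with the of_node_put() handled for us
+ * via component_release_of().
+ *
+ *	struct device_node *np;
+ *
+ *	np = of_parse_phandle(dev->of_node, "remote", 0);
+ *	if (np)
+ *		component_match_add_release(dev, &match, component_release_of,
+ *					    component_compare_of, np);
+ */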
+
+/**
+ * component_match_add_typed - add a component match entry for a typed component
+ * @parent: parent device of the aggregate driver
+ * @matchptr: pointer to the list of component matches
+ * @compare_typed: compare function to match against all typed components
+ * @compare_data: opaque pointer passed to the @compare function
+ *
+ * Adds a new component match to the list stored in @matchptr, which the
+ * aggregate driver needs to function. The list of component matches pointed to
+ * by @matchptr must be initialized to NULL before adding the first match. This
+ * only matches against components added with component_add_typed().
+ *
+ * The allocated match list in @matchptr is automatically released using devm
+ * actions.
+ *
+ * See also component_match_add() and component_match_add_release().
+ */
+void component_match_add_typed(struct device *parent,
+ struct component_match **matchptr,
+ int (*compare_typed)(struct device *, int, void *), void *compare_data)
+{
+ __component_match_add(parent, matchptr, NULL, NULL, compare_typed,
+ compare_data);
+}
+EXPORT_SYMBOL(component_match_add_typed);
+
+static void free_aggregate_device(struct aggregate_device *adev)
+{
+ struct component_match *match = adev->match;
+ int i;
+
+ component_debugfs_del(adev);
+ list_del(&adev->node);
+
+ if (match) {
+ for (i = 0; i < match->num; i++) {
+ struct component *c = match->compare[i].component;
+ if (c)
+ c->adev = NULL;
+ }
+ }
+
+ kfree(adev);
+}
+
+/**
+ * component_master_add_with_match - register an aggregate driver
+ * @parent: parent device of the aggregate driver
+ * @ops: callbacks for the aggregate driver
+ * @match: component match list for the aggregate driver
+ *
+ * Registers a new aggregate driver consisting of the components added to @match
+ * by calling one of the component_match_add() functions. Once all components in
+ * @match are available, it will be assembled by calling
+ * &component_master_ops.bind from @ops. Must be unregistered by calling
+ * component_master_del().
+ */
+int component_master_add_with_match(struct device *parent,
+ const struct component_master_ops *ops,
+ struct component_match *match)
+{
+ struct aggregate_device *adev;
+ int ret;
+
+ /* Reallocate the match array for its true size */
+ ret = component_match_realloc(match, match->num);
+ if (ret)
+ return ret;
+
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->parent = parent;
+ adev->ops = ops;
+ adev->match = match;
+
+ component_debugfs_add(adev);
+ /* Add to the list of available aggregate devices. */
+ mutex_lock(&component_mutex);
+ list_add(&adev->node, &aggregate_devices);
+
+ ret = try_to_bring_up_aggregate_device(adev, NULL);
+
+ if (ret < 0)
+ free_aggregate_device(adev);
+
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(component_master_add_with_match);
+
+/**
+ * component_master_del - unregister an aggregate driver
+ * @parent: parent device of the aggregate driver
+ * @ops: callbacks for the aggregate driver
+ *
+ * Unregisters an aggregate driver registered with
+ * component_master_add_with_match(). If necessary the aggregate driver is first
+ * disassembled by calling &component_master_ops.unbind from @ops.
+ */
+void component_master_del(struct device *parent,
+ const struct component_master_ops *ops)
+{
+ struct aggregate_device *adev;
+
+ mutex_lock(&component_mutex);
+ adev = __aggregate_find(parent, ops);
+ if (adev) {
+ take_down_aggregate_device(adev);
+ free_aggregate_device(adev);
+ }
+ mutex_unlock(&component_mutex);
+}
+EXPORT_SYMBOL_GPL(component_master_del);
+
+static void component_unbind(struct component *component,
+ struct aggregate_device *adev, void *data)
+{
+ WARN_ON(!component->bound);
+
+ if (component->ops && component->ops->unbind)
+ component->ops->unbind(component->dev, adev->parent, data);
+ component->bound = false;
+
+ /* Release all resources claimed in the binding of this component */
+ devres_release_group(component->dev, component);
+}
+
+/**
+ * component_unbind_all - unbind all components of an aggregate driver
+ * @parent: parent device of the aggregate driver
+ * @data: opaque pointer, passed to all components
+ *
+ * Unbinds all components of the aggregate device by passing @data to their
+ * &component_ops.unbind functions. Should be called from
+ * &component_master_ops.unbind.
+ */
+void component_unbind_all(struct device *parent, void *data)
+{
+ struct aggregate_device *adev;
+ struct component *c;
+ size_t i;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ adev = __aggregate_find(parent, NULL);
+ if (!adev)
+ return;
+
+ /* Unbind components in reverse order */
+ for (i = adev->match->num; i--; )
+ if (!adev->match->compare[i].duplicate) {
+ c = adev->match->compare[i].component;
+ component_unbind(c, adev, data);
+ }
+}
+EXPORT_SYMBOL_GPL(component_unbind_all);
+
+static int component_bind(struct component *component, struct aggregate_device *adev,
+ void *data)
+{
+ int ret;
+
+ /*
+ * Each component initialises inside its own devres group.
+ * This allows us to roll back a failed component without
+ * affecting anything else.
+ */
+ if (!devres_open_group(adev->parent, NULL, GFP_KERNEL))
+ return -ENOMEM;
+
+ /*
+ * Also open a group for the device itself: this allows us
+ * to release the resources claimed against the sub-device
+ * at the appropriate moment.
+ */
+ if (!devres_open_group(component->dev, component, GFP_KERNEL)) {
+ devres_release_group(adev->parent, NULL);
+ return -ENOMEM;
+ }
+
+ dev_dbg(adev->parent, "binding %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+
+ ret = component->ops->bind(component->dev, adev->parent, data);
+ if (!ret) {
+ component->bound = true;
+
+ /*
+ * Close the component device's group so that resources
+ * allocated in the binding are encapsulated for removal
+ * at unbind. Remove the group on the aggregate device's
+ * parent as we can clean those resources up independently.
+ */
+ devres_close_group(component->dev, NULL);
+ devres_remove_group(adev->parent, NULL);
+
+ dev_info(adev->parent, "bound %s (ops %ps)\n",
+ dev_name(component->dev), component->ops);
+ } else {
+ devres_release_group(component->dev, NULL);
+ devres_release_group(adev->parent, NULL);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(adev->parent, "failed to bind %s (ops %ps): %d\n",
+ dev_name(component->dev), component->ops, ret);
+ }
+
+ return ret;
+}
+
+/**
+ * component_bind_all - bind all components of an aggregate driver
+ * @parent: parent device of the aggregate driver
+ * @data: opaque pointer, passed to all components
+ *
+ * Binds all components of the aggregate @parent by passing @data to their
+ * &component_ops.bind functions. Should be called from
+ * &component_master_ops.bind.
+ */
+int component_bind_all(struct device *parent, void *data)
+{
+ struct aggregate_device *adev;
+ struct component *c;
+ size_t i;
+ int ret = 0;
+
+ WARN_ON(!mutex_is_locked(&component_mutex));
+
+ adev = __aggregate_find(parent, NULL);
+ if (!adev)
+ return -EINVAL;
+
+ /* Bind components in match order */
+ for (i = 0; i < adev->match->num; i++)
+ if (!adev->match->compare[i].duplicate) {
+ c = adev->match->compare[i].component;
+ ret = component_bind(c, adev, data);
+ if (ret)
+ break;
+ }
+
+ if (ret != 0) {
+ for (; i > 0; i--)
+ if (!adev->match->compare[i - 1].duplicate) {
+ c = adev->match->compare[i - 1].component;
+ component_unbind(c, adev, data);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(component_bind_all);
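+
+/*
+ * An illustrative sketch of the &component_master_ops callbacks that
+ * component_bind_all() and component_unbind_all() are meant to be called
+ * from. The foo_* names and struct foo_private are hypothetical placeholders.
+ *
+ *	struct foo_private { void *hw; };
+ *
+ *	static int foo_master_bind(struct device *dev)
+ *	{
+ *		struct foo_private *priv;
+ *
+ *		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ *		if (!priv)
+ *			return -ENOMEM;
+ *		dev_set_drvdata(dev, priv);
+ *
+ *		// each component's &component_ops.bind receives priv as @data
+ *		return component_bind_all(dev, priv);
+ *	}
+ *
+ *	static void foo_master_unbind(struct device *dev)
+ *	{
+ *		component_unbind_all(dev, dev_get_drvdata(dev));
+ *	}
+ *
+ *	static const struct component_master_ops foo_master_ops = {
+ *		.bind	= foo_master_bind,
+ *		.unbind	= foo_master_unbind,
+ *	};
+ */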
+
+static int __component_add(struct device *dev, const struct component_ops *ops,
+ int subcomponent)
+{
+ struct component *component;
+ int ret;
+
+ component = kzalloc(sizeof(*component), GFP_KERNEL);
+ if (!component)
+ return -ENOMEM;
+
+ component->ops = ops;
+ component->dev = dev;
+ component->subcomponent = subcomponent;
+
+ dev_dbg(dev, "adding component (ops %ps)\n", ops);
+
+ mutex_lock(&component_mutex);
+ list_add_tail(&component->node, &component_list);
+
+ ret = try_to_bring_up_masters(component);
+ if (ret < 0) {
+ if (component->adev)
+ remove_component(component->adev, component);
+ list_del(&component->node);
+
+ kfree(component);
+ }
+ mutex_unlock(&component_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * component_add_typed - register a component
+ * @dev: component device
+ * @ops: component callbacks
+ * @subcomponent: nonzero identifier for subcomponents
+ *
+ * Register a new component for @dev. Functions in @ops will be called when the
+ * aggregate driver is ready to bind the overall driver by calling
+ * component_bind_all(). See also &struct component_ops.
+ *
+ * @subcomponent must be nonzero and is used to differentiate between multiple
+ * components registered on the same device @dev. These components are matched
+ * using component_match_add_typed().
+ *
+ * The component needs to be unregistered at driver unload/disconnect by
+ * calling component_del().
+ *
+ * See also component_add().
+ */
+int component_add_typed(struct device *dev, const struct component_ops *ops,
+ int subcomponent)
+{
+ if (WARN_ON(subcomponent == 0))
+ return -EINVAL;
+
+ return __component_add(dev, ops, subcomponent);
+}
+EXPORT_SYMBOL_GPL(component_add_typed);
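+
+/*
+ * An illustrative sketch of registering two distinct components on the same
+ * device with component_add_typed(). The FOO_COMPONENT_* identifiers and the
+ * foo_*_ops structures are hypothetical; the aggregate driver would match
+ * them with component_match_add_typed() using the same nonzero identifiers.
+ *
+ *	enum { FOO_COMPONENT_TX = 1, FOO_COMPONENT_RX };	// must be nonzero
+ *
+ *	static int foo_register_components(struct device *dev)
+ *	{
+ *		int ret;
+ *
+ *		ret = component_add_typed(dev, &foo_tx_ops, FOO_COMPONENT_TX);
+ *		if (ret)
+ *			return ret;
+ *
+ *		ret = component_add_typed(dev, &foo_rx_ops, FOO_COMPONENT_RX);
+ *		if (ret)
+ *			component_del(dev, &foo_tx_ops);
+ *
+ *		return ret;
+ *	}
+ */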
+
+/**
+ * component_add - register a component
+ * @dev: component device
+ * @ops: component callbacks
+ *
+ * Register a new component for @dev. Functions in @ops will be called when the
+ * aggregate driver is ready to bind the overall driver by calling
+ * component_bind_all(). See also &struct component_ops.
+ *
+ * The component needs to be unregistered at driver unload/disconnect by
+ * calling component_del().
+ *
+ * See also component_add_typed() for a variant that allows multiple different
+ * components on the same device.
+ */
+int component_add(struct device *dev, const struct component_ops *ops)
+{
+ return __component_add(dev, ops, 0);
+}
+EXPORT_SYMBOL_GPL(component_add);
+
+/**
+ * component_del - unregister a component
+ * @dev: component device
+ * @ops: component callbacks
+ *
+ * Unregister a component added with component_add(). If the component is bound
+ * into an aggregate driver, this will force the entire aggregate driver, including
+ * all its components, to be unbound.
+ */
+void component_del(struct device *dev, const struct component_ops *ops)
+{
+ struct component *c, *component = NULL;
+
+ mutex_lock(&component_mutex);
+ list_for_each_entry(c, &component_list, node)
+ if (c->dev == dev && c->ops == ops) {
+ list_del(&c->node);
+ component = c;
+ break;
+ }
+
+ if (component && component->adev) {
+ take_down_aggregate_device(component->adev);
+ remove_component(component->adev, component);
+ }
+
+ mutex_unlock(&component_mutex);
+
+ WARN_ON(!component);
+ kfree(component);
+}
+EXPORT_SYMBOL_GPL(component_del);
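+
+/*
+ * An illustrative sketch of the component side: a hypothetical "foo codec"
+ * platform driver registers itself with component_add() in probe and
+ * unregisters with component_del() in remove. The foo_* names are
+ * placeholders; @data in bind/unbind is whatever the aggregate driver passed
+ * to component_bind_all().
+ *
+ *	static int foo_codec_bind(struct device *dev, struct device *master,
+ *				  void *data)
+ *	{
+ *		// claim resources and attach this codec to the aggregate state
+ *		return 0;
+ *	}
+ *
+ *	static void foo_codec_unbind(struct device *dev, struct device *master,
+ *				     void *data)
+ *	{
+ *		// undo whatever bind did
+ *	}
+ *
+ *	static const struct component_ops foo_codec_ops = {
+ *		.bind	= foo_codec_bind,
+ *		.unbind	= foo_codec_unbind,
+ *	};
+ *
+ *	static int foo_codec_probe(struct platform_device *pdev)
+ *	{
+ *		return component_add(&pdev->dev, &foo_codec_ops);
+ *	}
+ *
+ *	static int foo_codec_remove(struct platform_device *pdev)
+ *	{
+ *		component_del(&pdev->dev, &foo_codec_ops);
+ *		return 0;
+ *	}
+ */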
diff --git a/drivers/base/container.c b/drivers/base/container.c
new file mode 100644
index 000000000..1ba42d2d3
--- /dev/null
+++ b/drivers/base/container.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * System bus type for containers.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ */
+
+#include <linux/container.h>
+
+#include "base.h"
+
+#define CONTAINER_BUS_NAME "container"
+
+static int trivial_online(struct device *dev)
+{
+ return 0;
+}
+
+static int container_offline(struct device *dev)
+{
+ struct container_dev *cdev = to_container_dev(dev);
+
+ return cdev->offline ? cdev->offline(cdev) : 0;
+}
+
+struct bus_type container_subsys = {
+ .name = CONTAINER_BUS_NAME,
+ .dev_name = CONTAINER_BUS_NAME,
+ .online = trivial_online,
+ .offline = container_offline,
+};
+
+void __init container_dev_init(void)
+{
+ int ret;
+
+ ret = subsys_system_register(&container_subsys, NULL);
+ if (ret)
+ pr_err("%s() failed: %d\n", __func__, ret);
+}
diff --git a/drivers/base/core.c b/drivers/base/core.c
new file mode 100644
index 000000000..af90bfb0c
--- /dev/null
+++ b/drivers/base/core.c
@@ -0,0 +1,5152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/core.c - core driver model code (device registration, etc)
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2006 Novell, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fwnode.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/kdev_t.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/blkdev.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/netdevice.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
+#include <linux/swiotlb.h>
+#include <linux/sysfs.h>
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
+
+#include "base.h"
+#include "physical_location.h"
+#include "power/power.h"
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+#ifdef CONFIG_SYSFS_DEPRECATED_V2
+long sysfs_deprecated = 1;
+#else
+long sysfs_deprecated = 0;
+#endif
+static int __init sysfs_deprecated_setup(char *arg)
+{
+ return kstrtol(arg, 10, &sysfs_deprecated);
+}
+early_param("sysfs.deprecated", sysfs_deprecated_setup);
+#endif
+
+/* Device links support. */
+static LIST_HEAD(deferred_sync);
+static unsigned int defer_sync_state_count = 1;
+static DEFINE_MUTEX(fwnode_link_lock);
+static bool fw_devlink_is_permissive(void);
+static void __fw_devlink_link_to_consumers(struct device *dev);
+static bool fw_devlink_drv_reg_done;
+static bool fw_devlink_best_effort;
+
+/**
+ * __fwnode_link_add - Create a link between two fwnode_handles.
+ * @con: Consumer end of the link.
+ * @sup: Supplier end of the link.
+ *
+ * Create a fwnode link between fwnode handles @con and @sup. The fwnode link
+ * represents the detail that the firmware lists @sup fwnode as supplying a
+ * resource to @con.
+ *
+ * The driver core will use the fwnode link to create a device link between the
+ * two device objects corresponding to @con and @sup when they are created. The
+ * driver core will automatically delete the fwnode link between @con and @sup
+ * after doing that.
+ *
+ * Attempts to create duplicate links between the same pair of fwnode handles
+ * are ignored and there is no reference counting.
+ */
+static int __fwnode_link_add(struct fwnode_handle *con,
+ struct fwnode_handle *sup, u8 flags)
+{
+ struct fwnode_link *link;
+
+ list_for_each_entry(link, &sup->consumers, s_hook)
+ if (link->consumer == con) {
+ link->flags |= flags;
+ return 0;
+ }
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ link->supplier = sup;
+ INIT_LIST_HEAD(&link->s_hook);
+ link->consumer = con;
+ INIT_LIST_HEAD(&link->c_hook);
+ link->flags = flags;
+
+ list_add(&link->s_hook, &sup->consumers);
+ list_add(&link->c_hook, &con->suppliers);
+ pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ con, sup);
+
+ return 0;
+}
+
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+{
+ int ret;
+
+ mutex_lock(&fwnode_link_lock);
+ ret = __fwnode_link_add(con, sup, 0);
+ mutex_unlock(&fwnode_link_lock);
+ return ret;
+}
+
+/**
+ * __fwnode_link_del - Delete a link between two fwnode_handles.
+ * @link: the fwnode_link to be deleted
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_del(struct fwnode_link *link)
+{
+ pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+ link->consumer, link->supplier);
+ list_del(&link->s_hook);
+ list_del(&link->c_hook);
+ kfree(link);
+}
+
+/**
+ * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
+ * @link: the fwnode_link to be marked
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_cycle(struct fwnode_link *link)
+{
+ pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ link->consumer, link->supplier);
+ link->flags |= FWLINK_FLAG_CYCLE;
+}
+
+/**
+ * fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
+ * @fwnode: fwnode whose supplier links need to be deleted
+ *
+ * Deletes all supplier links connecting directly to @fwnode.
+ */
+static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
+{
+ struct fwnode_link *link, *tmp;
+
+ mutex_lock(&fwnode_link_lock);
+ list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
+ __fwnode_link_del(link);
+ mutex_unlock(&fwnode_link_lock);
+}
+
+/**
+ * fwnode_links_purge_consumers - Delete all consumer links of fwnode_handle.
+ * @fwnode: fwnode whose consumer links need to be deleted
+ *
+ * Deletes all consumer links connecting directly to @fwnode.
+ */
+static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
+{
+ struct fwnode_link *link, *tmp;
+
+ mutex_lock(&fwnode_link_lock);
+ list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+ __fwnode_link_del(link);
+ mutex_unlock(&fwnode_link_lock);
+}
+
+/**
+ * fwnode_links_purge - Delete all links connected to a fwnode_handle.
+ * @fwnode: fwnode whose links needs to be deleted
+ *
+ * Deletes all links connecting directly to a fwnode.
+ */
+void fwnode_links_purge(struct fwnode_handle *fwnode)
+{
+ fwnode_links_purge_suppliers(fwnode);
+ fwnode_links_purge_consumers(fwnode);
+}
+
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+
+ /* Don't purge consumer links of an added child */
+ if (fwnode->dev)
+ return;
+
+ fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ fwnode_links_purge_consumers(fwnode);
+
+ fwnode_for_each_available_child_node(fwnode, child)
+ fw_devlink_purge_absent_suppliers(child);
+}
+EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
+
+/**
+ * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle
+ * @from: move consumers away from this fwnode
+ * @to: move consumers to this fwnode
+ *
+ * Move all consumer links from @from fwnode to @to fwnode.
+ */
+static void __fwnode_links_move_consumers(struct fwnode_handle *from,
+ struct fwnode_handle *to)
+{
+ struct fwnode_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
+ __fwnode_link_add(link->consumer, to, link->flags);
+ __fwnode_link_del(link);
+ }
+}
+
+/**
+ * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
+ * @fwnode: fwnode from which to pick up dangling consumers
+ * @new_sup: fwnode of new supplier
+ *
+ * If the @fwnode has a corresponding struct device and the device supports
+ * probing (that is, added to a bus), then we want to let fw_devlink create
+ * MANAGED device links to this device, so leave @fwnode and its descendants'
+ * fwnode links alone.
+ *
+ * Otherwise, move its consumers to the new supplier @new_sup.
+ */
+static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
+ struct fwnode_handle *new_sup)
+{
+ struct fwnode_handle *child;
+
+ if (fwnode->dev && fwnode->dev->bus)
+ return;
+
+ fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ __fwnode_links_move_consumers(fwnode, new_sup);
+
+ fwnode_for_each_available_child_node(fwnode, child)
+ __fw_devlink_pickup_dangling_consumers(child, new_sup);
+}
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+ mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void) __acquires(&device_links_srcu)
+{
+ return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx) __releases(&device_links_srcu)
+{
+ srcu_read_unlock(&device_links_srcu, idx);
+}
+
+int device_links_read_lock_held(void)
+{
+ return srcu_read_lock_held(&device_links_srcu);
+}
+
+static void device_link_synchronize_removal(void)
+{
+ synchronize_srcu(&device_links_srcu);
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+ down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ down_read(&device_links_lock);
+ return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+ up_read(&device_links_lock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+int device_links_read_lock_held(void)
+{
+ return lockdep_is_held(&device_links_lock);
+}
+#endif
+
+static inline void device_link_synchronize_removal(void)
+{
+}
+
+static void device_link_remove_from_lists(struct device_link *link)
+{
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+}
+#endif /* !CONFIG_SRCU */
+
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+ while (target->parent) {
+ target = target->parent;
+ if (dev == target)
+ return true;
+ }
+ return false;
+}
+
+static inline bool device_link_flag_is_sync_state_only(u32 flags)
+{
+ return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+}
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer etc). Return 1 if that is the case or 0 otherwise.
+ */
+int device_is_dependent(struct device *dev, void *target)
+{
+ struct device_link *link;
+ int ret;
+
+ /*
+ * The "ancestors" check is needed to catch the case when the target
+ * device has not been completely initialized yet and it is still
+ * missing from the list of children of its parent device.
+ */
+ if (dev == target || device_is_ancestor(dev, target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (device_link_flag_is_sync_state_only(link->flags))
+ continue;
+
+ if (link->consumer == target)
+ return 1;
+
+ ret = device_is_dependent(link->consumer, target);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void device_link_init_status(struct device_link *link,
+ struct device *consumer,
+ struct device *supplier)
+{
+ switch (supplier->links.status) {
+ case DL_DEV_PROBING:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+ * A consumer driver can create a link to a supplier
+ * that has not completed its probing yet as long as it
+ * knows that the supplier is already functional (for
+ * example, it has just acquired some resources from the
+ * supplier).
+ */
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+ struct device_link *link;
+
+ /*
+ * Devices that have not been registered yet will be put to the ends
+ * of the lists during the registration, so skip them here.
+ */
+ if (device_is_registered(dev))
+ devices_kset_move_last(dev);
+
+ if (device_pm_initialized(dev))
+ device_pm_move_last(dev);
+
+ device_for_each_child(dev, NULL, device_reorder_to_tail);
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (device_link_flag_is_sync_state_only(link->flags))
+ continue;
+ device_reorder_to_tail(link->consumer, NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * device_pm_move_to_tail - Move set of devices to the end of device lists
+ * @dev: Device to move
+ *
+ * This is a device_reorder_to_tail() wrapper taking the requisite locks.
+ *
+ * It moves the @dev along with all of its children and all of its consumers
+ * to the ends of the device_kset and dpm_list, recursively.
+ */
+void device_pm_move_to_tail(struct device *dev)
+{
+ int idx;
+
+ idx = device_links_read_lock();
+ device_pm_lock();
+ device_reorder_to_tail(dev, NULL);
+ device_pm_unlock();
+ device_links_read_unlock(idx);
+}
+
+#define to_devlink(dev) container_of((dev), struct device_link, link_dev)
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *output;
+
+ switch (to_devlink(dev)->status) {
+ case DL_STATE_NONE:
+ output = "not tracked";
+ break;
+ case DL_STATE_DORMANT:
+ output = "dormant";
+ break;
+ case DL_STATE_AVAILABLE:
+ output = "available";
+ break;
+ case DL_STATE_CONSUMER_PROBE:
+ output = "consumer probing";
+ break;
+ case DL_STATE_ACTIVE:
+ output = "active";
+ break;
+ case DL_STATE_SUPPLIER_UNBIND:
+ output = "supplier unbinding";
+ break;
+ default:
+ output = "unknown";
+ break;
+ }
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t auto_remove_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct device_link *link = to_devlink(dev);
+ const char *output;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ output = "supplier unbind";
+ else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
+ output = "consumer unbind";
+ else
+ output = "never";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+static DEVICE_ATTR_RO(auto_remove_on);
+
+static ssize_t runtime_pm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct device_link *link = to_devlink(dev);
+
+ return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
+}
+static DEVICE_ATTR_RO(runtime_pm);
+
+static ssize_t sync_state_only_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct device_link *link = to_devlink(dev);
+
+ return sysfs_emit(buf, "%d\n",
+ !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+}
+static DEVICE_ATTR_RO(sync_state_only);
+
+static struct attribute *devlink_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_auto_remove_on.attr,
+ &dev_attr_runtime_pm.attr,
+ &dev_attr_sync_state_only.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(devlink);
+
+static void device_link_release_fn(struct work_struct *work)
+{
+ struct device_link *link = container_of(work, struct device_link, rm_work);
+
+ /* Ensure that all references to the link object have been dropped. */
+ device_link_synchronize_removal();
+
+ pm_runtime_release_supplier(link);
+ /*
+ * If supplier_preactivated is set, the link has been dropped between
+ * the pm_runtime_get_suppliers() and pm_runtime_put_suppliers() calls
+ * in __driver_probe_device(). In that case, drop the supplier's
+ * PM-runtime usage counter to remove the reference taken by
+ * pm_runtime_get_suppliers().
+ */
+ if (link->supplier_preactivated)
+ pm_runtime_put_noidle(link->supplier);
+
+ pm_request_idle(link->supplier);
+
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+}
+
+static void devlink_dev_release(struct device *dev)
+{
+ struct device_link *link = to_devlink(dev);
+
+ INIT_WORK(&link->rm_work, device_link_release_fn);
+ /*
+ * It may take a while to complete this work because of the SRCU
+ * synchronization in device_link_release_fn() and if the consumer or
+ * supplier devices get deleted when it runs, so put it into the "long"
+ * workqueue.
+ */
+ queue_work(system_long_wq, &link->rm_work);
+}
+
+static struct class devlink_class = {
+ .name = "devlink",
+ .owner = THIS_MODULE,
+ .dev_groups = devlink_groups,
+ .dev_release = devlink_dev_release,
+};
+
+static int devlink_add_symlinks(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int ret;
+ size_t len;
+ struct device_link *link = to_devlink(dev);
+ struct device *sup = link->supplier;
+ struct device *con = link->consumer;
+ char *buf;
+
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
+ len += strlen("supplier:") + 1;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = sysfs_create_link(&link->link_dev.kobj, &sup->kobj, "supplier");
+ if (ret)
+ goto out;
+
+ ret = sysfs_create_link(&link->link_dev.kobj, &con->kobj, "consumer");
+ if (ret)
+ goto err_con;
+
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
+ if (ret)
+ goto err_con_dev;
+
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
+ if (ret)
+ goto err_sup_dev;
+
+ goto out;
+
+err_sup_dev:
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ sysfs_remove_link(&sup->kobj, buf);
+err_con_dev:
+ sysfs_remove_link(&link->link_dev.kobj, "consumer");
+err_con:
+ sysfs_remove_link(&link->link_dev.kobj, "supplier");
+out:
+ kfree(buf);
+ return ret;
+}
+
+static void devlink_remove_symlinks(struct device *dev,
+ struct class_interface *class_intf)
+{
+ struct device_link *link = to_devlink(dev);
+ size_t len;
+ struct device *sup = link->supplier;
+ struct device *con = link->consumer;
+ char *buf;
+
+ sysfs_remove_link(&link->link_dev.kobj, "consumer");
+ sysfs_remove_link(&link->link_dev.kobj, "supplier");
+
+ len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+ strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+ len += strlen(":");
+ len += strlen("supplier:") + 1;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf) {
+ WARN(1, "Unable to properly free device link symlinks!\n");
+ return;
+ }
+
+ if (device_is_registered(con)) {
+ snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
+ sysfs_remove_link(&con->kobj, buf);
+ }
+ snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
+ sysfs_remove_link(&sup->kobj, buf);
+ kfree(buf);
+}
+
+static struct class_interface devlink_class_intf = {
+ .class = &devlink_class,
+ .add_dev = devlink_add_symlinks,
+ .remove_dev = devlink_remove_symlinks,
+};
+
+static int __init devlink_class_init(void)
+{
+ int ret;
+
+ ret = class_register(&devlink_class);
+ if (ret)
+ return ret;
+
+ ret = class_interface_register(&devlink_class_intf);
+ if (ret)
+ class_unregister(&devlink_class);
+
+ return ret;
+}
+postcore_initcall(devlink_class_init);
+
+#define DL_MANAGED_LINK_FLAGS (DL_FLAG_AUTOREMOVE_CONSUMER | \
+ DL_FLAG_AUTOREMOVE_SUPPLIER | \
+ DL_FLAG_AUTOPROBE_CONSUMER | \
+ DL_FLAG_SYNC_STATE_ONLY | \
+ DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE)
+
+#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active meta state and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If DL_FLAG_STATELESS is set in @flags, the caller of this function is
+ * expected to release the link returned by it directly with the help of either
+ * device_link_del() or device_link_remove().
+ *
+ * If that flag is not set, however, the caller of this function is handing the
+ * management of the link over to the driver core entirely and its return value
+ * can only be used to check whether or not the link is present. In that case,
+ * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link
+ * flags can be used to indicate to the driver core when the link can be safely
+ * deleted. Namely, setting one of them in @flags indicates to the driver core
+ * that the link is not going to be used (by the given caller of this function)
+ * after unbinding the consumer or supplier driver, respectively, from its
+ * device, so the link can be deleted at that point. If none of them is set,
+ * the link will be maintained until one of the devices pointed to by it (either
+ * the consumer or the supplier) is unregistered.
+ *
+ * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and
+ * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent
+ * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can
+ * be used to request the driver core to automatically probe for a consumer
+ * driver after successfully binding a driver to the supplier device.
+ *
+ * The combination of DL_FLAG_STATELESS and one of DL_FLAG_AUTOREMOVE_CONSUMER,
+ * DL_FLAG_AUTOREMOVE_SUPPLIER, or DL_FLAG_AUTOPROBE_CONSUMER set in @flags at
+ * the same time is invalid and will cause NULL to be returned upfront.
+ * However, if a device link between the given @consumer and @supplier pair
+ * exists already when this function is called for them, the existing link will
+ * be returned regardless of its current type and status (the link's flags may
+ * be modified then). The caller of this function is then expected to treat
+ * the link as though it has just been created, so (in particular) if
+ * DL_FLAG_STATELESS was passed in @flags, the link needs to be released
+ * explicitly when not needed any more (as stated above).
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags)
+{
+ struct device_link *link;
+
+ if (!consumer || !supplier || consumer == supplier ||
+ flags & ~DL_ADD_VALID_FLAGS ||
+ (flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
+ (flags & DL_FLAG_AUTOPROBE_CONSUMER &&
+ flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_AUTOREMOVE_SUPPLIER)))
+ return NULL;
+
+ if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ return NULL;
+ }
+ }
+
+ if (!(flags & DL_FLAG_STATELESS))
+ flags |= DL_FLAG_MANAGED;
+
+ if (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !device_link_flag_is_sync_state_only(flags))
+ return NULL;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ /*
+ * If the supplier has not been fully registered yet or there is a
+ * reverse (non-SYNC_STATE_ONLY) dependency between the consumer and
+ * the supplier already in the graph, return NULL. If the link is a
+ * SYNC_STATE_ONLY link, we don't check for reverse dependencies
+ * because it only affects sync_state() callbacks.
+ */
+ if (!device_pm_initialized(supplier)
+ || (!(flags & DL_FLAG_SYNC_STATE_ONLY) &&
+ device_is_dependent(consumer, supplier))) {
+ link = NULL;
+ goto out;
+ }
+
+ /*
+ * SYNC_STATE_ONLY links are useless once a consumer device has probed.
+ * So, only create it if the consumer hasn't probed yet.
+ */
+ if (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ consumer->links.status != DL_DEV_NO_DRIVER &&
+ consumer->links.status != DL_DEV_PROBING) {
+ link = NULL;
+ goto out;
+ }
+
+ /*
+ * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed
+ * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both
+ * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER.
+ */
+ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
+ if (link->consumer != consumer)
+ continue;
+
+ if (link->flags & DL_FLAG_INFERRED &&
+ !(flags & DL_FLAG_INFERRED))
+ link->flags &= ~DL_FLAG_INFERRED;
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
+ pm_runtime_new_link(consumer);
+ link->flags |= DL_FLAG_PM_RUNTIME;
+ }
+ if (flags & DL_FLAG_RPM_ACTIVE)
+ refcount_inc(&link->rpm_active);
+ }
+
+ if (flags & DL_FLAG_STATELESS) {
+ kref_get(&link->kref);
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(link->flags & DL_FLAG_STATELESS)) {
+ link->flags |= DL_FLAG_STATELESS;
+ goto reorder;
+ } else {
+ link->flags |= DL_FLAG_STATELESS;
+ goto out;
+ }
+ }
+
+ /*
+ * If the lifetime of the link following from the new flags is
+ * longer than indicated by the flags of the existing link,
+ * update the existing link to stay around longer.
+ */
+ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) {
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER;
+ link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
+ }
+ } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER |
+ DL_FLAG_AUTOREMOVE_SUPPLIER);
+ }
+ if (!(link->flags & DL_FLAG_MANAGED)) {
+ kref_get(&link->kref);
+ link->flags |= DL_FLAG_MANAGED;
+ device_link_init_status(link, consumer, supplier);
+ }
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ link->flags &= ~DL_FLAG_SYNC_STATE_ONLY;
+ goto reorder;
+ }
+
+ goto out;
+ }
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
+ refcount_set(&link->rpm_active, 1);
+
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+ get_device(consumer);
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
+ kref_init(&link->kref);
+
+ link->link_dev.class = &devlink_class;
+ device_set_pm_not_required(&link->link_dev);
+ dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+ dev_bus_name(supplier), dev_name(supplier),
+ dev_bus_name(consumer), dev_name(consumer));
+ if (device_register(&link->link_dev)) {
+ put_device(&link->link_dev);
+ link = NULL;
+ goto out;
+ }
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (flags & DL_FLAG_RPM_ACTIVE)
+ refcount_inc(&link->rpm_active);
+
+ pm_runtime_new_link(consumer);
+ }
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS)
+ link->status = DL_STATE_NONE;
+ else
+ device_link_init_status(link, consumer, supplier);
+
+ /*
+ * Some callers expect the link creation during consumer driver probe to
+ * resume the supplier even without DL_FLAG_RPM_ACTIVE.
+ */
+ if (link->status == DL_STATE_CONSUMER_PROBE &&
+ flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_resume(supplier);
+
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+ if (flags & DL_FLAG_SYNC_STATE_ONLY) {
+ dev_dbg(consumer,
+ "Linked as a sync state only consumer to %s\n",
+ dev_name(supplier));
+ goto out;
+ }
+
+reorder:
+ /*
+ * Move the consumer and all of the devices depending on it to the end
+ * of dpm_list and the devices_kset list.
+ *
+ * It is necessary to hold dpm_list locked throughout all that or else
+ * we may end up suspending with a wrong ordering of it.
+ */
+ device_reorder_to_tail(consumer, NULL);
+
+ dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+out:
+ device_pm_unlock();
+ device_links_write_unlock();
+
+ if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
+ pm_runtime_put(supplier);
+
+ return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
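+
+/*
+ * An illustrative sketch of a consumer driver creating a managed device link
+ * during probe. foo_find_supplier() is a hypothetical lookup standing in for
+ * however the driver already locates its supplier (phandle, parent, etc.).
+ * With these flags the driver core drops the link automatically when the
+ * consumer unbinds, and runtime PM accounts for the dependency.
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct device *supplier = foo_find_supplier(&pdev->dev);
+ *		struct device_link *link;
+ *
+ *		link = device_link_add(&pdev->dev, supplier,
+ *				       DL_FLAG_AUTOREMOVE_CONSUMER |
+ *				       DL_FLAG_PM_RUNTIME |
+ *				       DL_FLAG_RPM_ACTIVE);
+ *		if (!link)
+ *			return -EINVAL;	// or -EPROBE_DEFER, per driver policy
+ *
+ *		// ... rest of probe; no explicit device_link_del() needed
+ *		return 0;
+ *	}
+ */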
+
+static void __device_link_del(struct kref *kref)
+{
+ struct device_link *link = container_of(kref, struct device_link, kref);
+
+ dev_dbg(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ pm_runtime_drop_link(link);
+
+ device_link_remove_from_lists(link);
+ device_unregister(&link->link_dev);
+}
+
+static void device_link_put_kref(struct device_link *link)
+{
+ if (link->flags & DL_FLAG_STATELESS)
+ kref_put(&link->kref, __device_link_del);
+ else if (!device_is_registered(link->consumer))
+ __device_link_del(&link->kref);
+ else
+ WARN(1, "Unable to drop a managed device link reference\n");
+}
+
+/**
+ * device_link_del - Delete a stateless link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM. If the link was added multiple times, it needs to be deleted as often.
+ * Care is required for hotplugged devices: Their links are purged on removal
+ * and calling device_link_del() is then no longer allowed.
+ */
+void device_link_del(struct device_link *link)
+{
+ device_links_write_lock();
+ device_link_put_kref(link);
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
+
+/**
+ * device_link_remove - Delete a stateless link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_remove(void *consumer, struct device *supplier)
+{
+ struct device_link *link;
+
+ if (WARN_ON(consumer == supplier))
+ return;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node) {
+ if (link->consumer == consumer) {
+ device_link_put_kref(link);
+ break;
+ }
+ }
+
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_remove);
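+
+/*
+ * An illustrative sketch of the stateless variant described above: the caller
+ * owns the link and must drop it explicitly, either by pointer or by endpoint
+ * pair. foo_pin_supplier() is a hypothetical helper; the consumer/supplier
+ * pointers are assumed to come from the calling driver.
+ *
+ *	static int foo_pin_supplier(struct device *consumer,
+ *				    struct device *supplier)
+ *	{
+ *		struct device_link *link;
+ *
+ *		link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
+ *		if (!link)
+ *			return -EINVAL;
+ *
+ *		// ... consumer makes use of the supplier ...
+ *
+ *		device_link_del(link);
+ *		// equivalently: device_link_remove(consumer, supplier);
+ *		return 0;
+ *	}
+ */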
+
+static void device_links_missing_supplier(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->status != DL_STATE_CONSUMER_PROBE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+ }
+}
+
+static bool dev_is_best_effort(struct device *dev)
+{
+ return (fw_devlink_best_effort && dev->can_match) ||
+ (dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
+}
+
+static struct fwnode_handle *fwnode_links_check_suppliers(
+ struct fwnode_handle *fwnode)
+{
+ struct fwnode_link *link;
+
+ if (!fwnode || fw_devlink_is_permissive())
+ return NULL;
+
+ list_for_each_entry(link, &fwnode->suppliers, c_hook)
+ if (!(link->flags & FWLINK_FLAG_CYCLE))
+ return link->supplier;
+
+ return NULL;
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers. Walk the list of the device's
+ * links to suppliers and see if all of them are available. If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here. It can only go away in __device_release_driver() and
+ * that function checks the device's links to consumers. This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int ret = 0, fwnode_ret = 0;
+ struct fwnode_handle *sup_fw;
+
+ /*
+ * A device waiting for its supplier to become available is not allowed
+ * to probe.
+ */
+ mutex_lock(&fwnode_link_lock);
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
+ if (!dev_is_best_effort(dev)) {
+ fwnode_ret = -EPROBE_DEFER;
+ dev_err_probe(dev, -EPROBE_DEFER,
+ "wait for supplier %pfwP\n", sup_fw);
+ } else {
+ fwnode_ret = -EAGAIN;
+ }
+ }
+ mutex_unlock(&fwnode_link_lock);
+ if (fwnode_ret == -EPROBE_DEFER)
+ return fwnode_ret;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE &&
+ !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
+
+ if (dev_is_best_effort(dev) &&
+ link->flags & DL_FLAG_INFERRED &&
+ !link->supplier->can_match) {
+ ret = -EAGAIN;
+ continue;
+ }
+
+ device_links_missing_supplier(dev);
+ dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n",
+ dev_name(link->supplier));
+ ret = -EPROBE_DEFER;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+
+ return ret ? ret : fwnode_ret;
+}
+
+/**
+ * __device_links_queue_sync_state - Queue a device for sync_state() callback
+ * @dev: Device to call sync_state() on
+ * @list: List head to queue the @dev on
+ *
+ * Queues a device for a sync_state() callback when the device links write lock
+ * isn't held. This allows the sync_state() execution flow to use device links
+ * APIs. The caller must ensure this function is called with
+ * device_links_write_lock() held.
+ *
+ * This function does a get_device() to make sure the device is not freed while
+ * on this list.
+ *
+ * So the caller must also ensure that device_links_flush_sync_list() is called
+ * as soon as the caller releases device_links_write_lock(). This is necessary
+ * to make sure the sync_state() is called in a timely fashion and the
+ * put_device() is called on this device.
+ */
+static void __device_links_queue_sync_state(struct device *dev,
+ struct list_head *list)
+{
+ struct device_link *link;
+
+ if (!dev_has_sync_state(dev))
+ return;
+ if (dev->state_synced)
+ return;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+ if (link->status != DL_STATE_ACTIVE)
+ return;
+ }
+
+ /*
+ * Set the flag here to avoid adding the same device to a list more
+ * than once. This can happen if new consumers get added to the device
+ * and probed before the list is flushed.
+ */
+ dev->state_synced = true;
+
+ if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+ return;
+
+ get_device(dev);
+ list_add_tail(&dev->links.defer_sync, list);
+}
+
+/**
+ * device_links_flush_sync_list - Call sync_state() on a list of devices
+ * @list: List of devices to call sync_state() on
+ * @dont_lock_dev: Device for which lock is already held by the caller
+ *
+ * Calls sync_state() on all the devices that have been queued for it. This
+ * function is used in conjunction with __device_links_queue_sync_state(). The
+ * @dont_lock_dev parameter is useful when this function is called from a
+ * context where a device lock is already held.
+ */
+static void device_links_flush_sync_list(struct list_head *list,
+ struct device *dont_lock_dev)
+{
+ struct device *dev, *tmp;
+
+ list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
+ list_del_init(&dev->links.defer_sync);
+
+ if (dev != dont_lock_dev)
+ device_lock(dev);
+
+ if (dev->bus->sync_state)
+ dev->bus->sync_state(dev);
+ else if (dev->driver && dev->driver->sync_state)
+ dev->driver->sync_state(dev);
+
+ if (dev != dont_lock_dev)
+ device_unlock(dev);
+
+ put_device(dev);
+ }
+}
+
+void device_links_supplier_sync_state_pause(void)
+{
+ device_links_write_lock();
+ defer_sync_state_count++;
+ device_links_write_unlock();
+}
+
+void device_links_supplier_sync_state_resume(void)
+{
+ struct device *dev, *tmp;
+ LIST_HEAD(sync_list);
+
+ device_links_write_lock();
+ if (!defer_sync_state_count) {
+ WARN(true, "Unmatched sync_state pause/resume!");
+ goto out;
+ }
+ defer_sync_state_count--;
+ if (defer_sync_state_count)
+ goto out;
+
+ list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+ /*
+ * Delete from deferred_sync list before queuing it to
+ * sync_list because defer_sync is used for both lists.
+ */
+ list_del_init(&dev->links.defer_sync);
+ __device_links_queue_sync_state(dev, &sync_list);
+ }
+out:
+ device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list, NULL);
+}
+
+static int sync_state_resume_initcall(void)
+{
+ device_links_supplier_sync_state_resume();
+ return 0;
+}
+late_initcall(sync_state_resume_initcall);
+
+static void __device_links_supplier_defer_sync(struct device *sup)
+{
+ if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
+ list_add_tail(&sup->links.defer_sync, &deferred_sync);
+}
+
+static void device_link_drop_managed(struct device_link *link)
+{
+ link->flags &= ~DL_FLAG_MANAGED;
+ WRITE_ONCE(link->status, DL_STATE_NONE);
+ kref_put(&link->kref, __device_link_del);
+}
+
+static ssize_t waiting_for_supplier_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ bool val;
+
+ device_lock(dev);
+ mutex_lock(&fwnode_link_lock);
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
+ mutex_unlock(&fwnode_link_lock);
+ device_unlock(dev);
+ return sysfs_emit(buf, "%u\n", val);
+}
+static DEVICE_ATTR_RO(waiting_for_supplier);
+
+/**
+ * device_links_force_bind - Prepares device to be force bound
+ * @dev: Consumer device.
+ *
+ * device_bind_driver() force binds a device to a driver without calling any
+ * driver probe functions. So the consumer really isn't going to wait for any
+ * supplier before it's bound to the driver. We still want the device link
+ * states to be sensible when this happens.
+ *
+ * In preparation for device_bind_driver(), this function goes through each
+ * supplier device link and checks if the supplier is bound. If it is, then
+ * the device link status is set to CONSUMER_PROBE. Otherwise, the device link
+ * is dropped. Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_force_bind(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ device_links_write_lock();
+
+ list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_link_drop_managed(link);
+ continue;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+ struct device_link *link, *ln;
+ LIST_HEAD(sync_list);
+
+ /*
+ * If a device binds successfully, it's expected to have created all
+ * the device links it needs to or make new device links as it needs
+ * them. So, fw_devlink no longer needs to create device links to any
+ * of the device's suppliers.
+ *
+ * Also, if a child firmware node of this bound device is not added as a
+ * device by now, assume it is never going to be added. Make this bound
+ * device the fallback supplier to the dangling consumers of the child
+ * firmware node because this bound device is probably implementing the
+ * child firmware node functionality and we don't want the dangling
+ * consumers to defer probe indefinitely waiting for a device for the
+ * child firmware node.
+ */
+ if (dev->fwnode && dev->fwnode->dev == dev) {
+ struct fwnode_handle *child;
+ fwnode_links_purge_suppliers(dev->fwnode);
+ mutex_lock(&fwnode_link_lock);
+ fwnode_for_each_available_child_node(dev->fwnode, child)
+ __fw_devlink_pickup_dangling_consumers(child,
+ dev->fwnode);
+ __fw_devlink_link_to_consumers(dev);
+ mutex_unlock(&fwnode_link_lock);
+ }
+ device_remove_file(dev, &dev_attr_waiting_for_supplier);
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ /*
+ * Links created during consumer probe may be in the "consumer
+ * probe" state to start with if the supplier is still probing
+ * when they are created and they may become "active" if the
+ * consumer probe returns first. Skip them here.
+ */
+ if (link->status == DL_STATE_CONSUMER_PROBE ||
+ link->status == DL_STATE_ACTIVE)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_DORMANT);
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+
+ if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER)
+ driver_deferred_probe_add(link->consumer);
+ }
+
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(dev);
+ else
+ __device_links_queue_sync_state(dev, &sync_list);
+
+ list_for_each_entry_safe(link, ln, &dev->links.suppliers, c_node) {
+ struct device *supplier;
+
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ supplier = link->supplier;
+ if (link->flags & DL_FLAG_SYNC_STATE_ONLY) {
+ /*
+ * When DL_FLAG_SYNC_STATE_ONLY is set, it means no
+ * other DL_MANAGED_LINK_FLAGS have been set. So, it's
+ * safe to drop the managed link completely.
+ */
+ device_link_drop_managed(link);
+ } else if (dev_is_best_effort(dev) &&
+ link->flags & DL_FLAG_INFERRED &&
+ link->status != DL_STATE_CONSUMER_PROBE &&
+ !link->supplier->can_match) {
+ /*
+ * When dev_is_best_effort() is true, we ignore device
+ * links to suppliers that don't have a driver. If the
+ * consumer device still managed to probe, there's no
+ * point in maintaining a device link in a weird state
+ * (consumer probed before supplier). So delete it.
+ */
+ device_link_drop_managed(link);
+ } else {
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+
+ /*
+ * This needs to be done even for the deleted
+ * DL_FLAG_SYNC_STATE_ONLY device link in case it was the last
+ * device link that was preventing the supplier from getting a
+ * sync_state() call.
+ */
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(supplier);
+ else
+ __device_links_queue_sync_state(supplier, &sync_list);
+ }
+
+ dev->links.status = DL_DEV_DRIVER_BOUND;
+
+ device_links_write_unlock();
+
+ device_links_flush_sync_list(&sync_list, dev);
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) {
+ device_link_drop_managed(link);
+ continue;
+ }
+
+ if (link->status != DL_STATE_CONSUMER_PROBE &&
+ link->status != DL_STATE_ACTIVE)
+ continue;
+
+ if (link->supplier->links.status == DL_DEV_DRIVER_BOUND) {
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ } else {
+ WARN_ON(!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+/**
+ * device_links_no_driver - Update links after failing driver probe.
+ * @dev: Device whose driver has just failed to probe.
+ *
+ * Clean up leftover links to consumers for @dev and invoke
+ * %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_no_driver(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ /*
+ * The probe has failed, so if the status of the link is
+ * "consumer probe" or "active", it must have been added by
+ * a probing consumer while this device was still probing.
+ * Change its state to "dormant", as it represents a valid
+ * relationship, but it is not functionally meaningful.
+ */
+ if (link->status == DL_STATE_CONSUMER_PROBE ||
+ link->status == DL_STATE_ACTIVE)
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ device_links_write_lock();
+
+ list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE_CONSUMER);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+
+ /*
+ * autoremove the links between this @dev and its consumer
+ * devices that are not active, i.e. where the link state
+ * has moved to DL_STATE_SUPPLIER_UNBIND.
+ */
+ if (link->status == DL_STATE_SUPPLIER_UNBIND &&
+ link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
+ device_link_drop_managed(link);
+
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ list_del_init(&dev->links.defer_sync);
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present). Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = false;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (!(link->flags & DL_FLAG_MANAGED))
+ continue;
+
+ if (link->status == DL_STATE_CONSUMER_PROBE
+ || link->status == DL_STATE_ACTIVE) {
+ ret = true;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ }
+
+ dev->links.status = DL_DEV_UNBINDING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state. If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links without the DL_FLAG_MANAGED flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ start:
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ enum device_link_state status;
+
+ if (!(link->flags & DL_FLAG_MANAGED) ||
+ link->flags & DL_FLAG_SYNC_STATE_ONLY)
+ continue;
+
+ status = link->status;
+ if (status == DL_STATE_CONSUMER_PROBE) {
+ device_links_write_unlock();
+
+ wait_for_device_probe();
+ goto start;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ if (status == DL_STATE_ACTIVE) {
+ struct device *consumer = link->consumer;
+
+ get_device(consumer);
+
+ device_links_write_unlock();
+
+ device_release_driver_internal(consumer, NULL,
+ consumer->parent);
+ put_device(consumer);
+ goto start;
+ }
+ }
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ if (dev->class == &devlink_class)
+ return;
+
+ /*
+ * Delete all of the remaining links from this device to any other
+ * devices (either consumers or suppliers).
+ */
+ device_links_write_lock();
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+ __device_link_del(&link->kref);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+ __device_link_del(&link->kref);
+ }
+
+ device_links_write_unlock();
+}
+
+#define FW_DEVLINK_FLAGS_PERMISSIVE (DL_FLAG_INFERRED | \
+ DL_FLAG_SYNC_STATE_ONLY)
+#define FW_DEVLINK_FLAGS_ON (DL_FLAG_INFERRED | \
+ DL_FLAG_AUTOPROBE_CONSUMER)
+#define FW_DEVLINK_FLAGS_RPM (FW_DEVLINK_FLAGS_ON | \
+ DL_FLAG_PM_RUNTIME)
+
+static u32 fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
+static int __init fw_devlink_setup(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strcmp(arg, "off") == 0) {
+ fw_devlink_flags = 0;
+ } else if (strcmp(arg, "permissive") == 0) {
+ fw_devlink_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+ } else if (strcmp(arg, "on") == 0) {
+ fw_devlink_flags = FW_DEVLINK_FLAGS_ON;
+ } else if (strcmp(arg, "rpm") == 0) {
+ fw_devlink_flags = FW_DEVLINK_FLAGS_RPM;
+ }
+ return 0;
+}
+early_param("fw_devlink", fw_devlink_setup);
+
+static bool fw_devlink_strict;
+static int __init fw_devlink_strict_setup(char *arg)
+{
+ return strtobool(arg, &fw_devlink_strict);
+}
+early_param("fw_devlink.strict", fw_devlink_strict_setup);
+
+static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
+{
+ if (fwlink_flags & FWLINK_FLAG_CYCLE)
+ return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
+
+ return fw_devlink_flags;
+}
+
+static bool fw_devlink_is_permissive(void)
+{
+ return fw_devlink_flags == FW_DEVLINK_FLAGS_PERMISSIVE;
+}
+
+bool fw_devlink_is_strict(void)
+{
+ return fw_devlink_strict && !fw_devlink_is_permissive();
+}
+
+static void fw_devlink_parse_fwnode(struct fwnode_handle *fwnode)
+{
+ if (fwnode->flags & FWNODE_FLAG_LINKS_ADDED)
+ return;
+
+ fwnode_call_int_op(fwnode, add_links);
+ fwnode->flags |= FWNODE_FLAG_LINKS_ADDED;
+}
+
+static void fw_devlink_parse_fwtree(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child = NULL;
+
+ fw_devlink_parse_fwnode(fwnode);
+
+ while ((child = fwnode_get_next_available_child_node(fwnode, child)))
+ fw_devlink_parse_fwtree(child);
+}
+
+static void fw_devlink_relax_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_INFERRED))
+ return;
+
+ if (device_link_flag_is_sync_state_only(link->flags))
+ return;
+
+ pm_runtime_drop_link(link);
+ link->flags = DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE;
+ dev_dbg(link->consumer, "Relaxing link with %s\n",
+ dev_name(link->supplier));
+}
+
+static int fw_devlink_no_driver(struct device *dev, void *data)
+{
+ struct device_link *link = to_devlink(dev);
+
+ if (!link->supplier->can_match)
+ fw_devlink_relax_link(link);
+
+ return 0;
+}
+
+void fw_devlink_drivers_done(void)
+{
+ fw_devlink_drv_reg_done = true;
+ device_links_write_lock();
+ class_for_each_device(&devlink_class, NULL, NULL,
+ fw_devlink_no_driver);
+ device_links_write_unlock();
+}
+
+/**
+ * wait_for_init_devices_probe - Try to probe any device needed for init
+ *
+ * Some devices might need to be probed and bound successfully before the kernel
+ * boot sequence can finish and move on to init/userspace. For example, a
+ * network interface might need to be bound to be able to mount an NFS rootfs.
+ *
+ * With fw_devlink=on by default, some of these devices might be blocked from
+ * probing because they are waiting on an optional supplier that doesn't have a
+ * driver. While fw_devlink will eventually identify such devices and unblock
+ * the probing automatically, it might be too late by the time it unblocks the
+ * probing of devices. For example, the IP4 autoconfig might timeout before
+ * fw_devlink unblocks probing of the network interface.
+ *
+ * This function is available to temporarily try to probe all devices that have
+ * a driver even if some of their suppliers haven't been added or don't have
+ * drivers.
+ *
+ * The drivers can then decide which of the suppliers are optional vs mandatory
+ * and probe the device if possible. By the time this function returns, all such
+ * "best effort" probes are guaranteed to be completed. If a device successfully
+ * probes in this mode, we delete all fw_devlink discovered dependencies of that
+ * device where the supplier hasn't yet probed successfully because they have to
+ * be optional dependencies.
+ *
+ * Any devices that didn't successfully probe go back to being treated as if
+ * this function was never called.
+ *
+ * This also means that some devices that aren't needed for init and could have
+ * waited for their optional supplier to probe (when the supplier's module is
+ * loaded later on) would end up probing prematurely with limited functionality.
+ * So call this function only when boot would fail without it.
+ */
+void __init wait_for_init_devices_probe(void)
+{
+ if (!fw_devlink_flags || fw_devlink_is_permissive())
+ return;
+
+ /*
+ * Wait for all ongoing probes to finish so that the "best effort" is
+ * only applied to devices that can't probe otherwise.
+ */
+ wait_for_device_probe();
+
+ pr_info("Trying to probe devices needed for running init ...\n");
+ fw_devlink_best_effort = true;
+ driver_deferred_probe_trigger();
+
+ /*
+ * Wait for all "best effort" probes to finish before going back to
+ * normal enforcement.
+ */
+ wait_for_device_probe();
+ fw_devlink_best_effort = false;
+}
+
+static void fw_devlink_unblock_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ if (!fw_devlink_flags || fw_devlink_is_permissive())
+ return;
+
+ device_links_write_lock();
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ fw_devlink_relax_link(link);
+ device_links_write_unlock();
+}
+
+static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+ bool ret;
+
+ if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
+ return false;
+
+ dev = get_dev_from_fwnode(fwnode);
+ ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
+ put_device(dev);
+
+ return ret;
+}
+
+static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (fwnode_init_without_drv(parent)) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * __fw_devlink_relax_cycles - Relax and mark dependency cycles.
+ * @con: Potential consumer device.
+ * @sup_handle: Potential supplier's fwnode.
+ *
+ * Needs to be called with fwnode_lock and device link lock held.
+ *
+ * Check if @sup_handle or any of its ancestors or suppliers direct/indirectly
+ * depend on @con. This function can detect multiple cycles between @sup_handle
+ * and @con. When such dependency cycles are found, convert all device links
+ * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark
+ * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are
+ * converted into a device link in the future, they are created as
+ * SYNC_STATE_ONLY device links. This is the equivalent of doing
+ * fw_devlink=permissive just between the devices in the cycle. We need to do
+ * this because, at this point, fw_devlink can't tell which of these
+ * dependencies is not a real dependency.
+ *
+ * Return true if one or more cycles were found. Otherwise, return false.
+ */
+static bool __fw_devlink_relax_cycles(struct device *con,
+ struct fwnode_handle *sup_handle)
+{
+ struct device *sup_dev = NULL, *par_dev = NULL;
+ struct fwnode_link *link;
+ struct device_link *dev_link;
+ bool ret = false;
+
+ if (!sup_handle)
+ return false;
+
+ /*
+ * We aren't trying to find all cycles. Just a cycle between con and
+ * sup_handle.
+ */
+ if (sup_handle->flags & FWNODE_FLAG_VISITED)
+ return false;
+
+ sup_handle->flags |= FWNODE_FLAG_VISITED;
+
+ sup_dev = get_dev_from_fwnode(sup_handle);
+
+ /* Termination condition. */
+ if (sup_dev == con) {
+ ret = true;
+ goto out;
+ }
+
+ /*
+ * If sup_dev is bound to a driver and @con hasn't started binding to a
+ * driver, sup_dev can't be a consumer of @con. So, no need to check
+ * further.
+ */
+ if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
+ con->links.status == DL_DEV_NO_DRIVER) {
+ ret = false;
+ goto out;
+ }
+
+ list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
+ if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ __fwnode_link_cycle(link);
+ ret = true;
+ }
+ }
+
+ /*
+ * Give priority to device parent over fwnode parent to account for any
+ * quirks in how fwnodes are converted to devices.
+ */
+ if (sup_dev)
+ par_dev = get_device(sup_dev->parent);
+ else
+ par_dev = fwnode_get_next_parent_dev(sup_handle);
+
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ ret = true;
+
+ if (!sup_dev)
+ goto out;
+
+ list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
+ /*
+ * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
+ * such due to a cycle.
+ */
+ if (device_link_flag_is_sync_state_only(dev_link->flags) &&
+ !(dev_link->flags & DL_FLAG_CYCLE))
+ continue;
+
+ if (__fw_devlink_relax_cycles(con,
+ dev_link->supplier->fwnode)) {
+ fw_devlink_relax_link(dev_link);
+ dev_link->flags |= DL_FLAG_CYCLE;
+ ret = true;
+ }
+ }
+
+out:
+ sup_handle->flags &= ~FWNODE_FLAG_VISITED;
+ put_device(sup_dev);
+ put_device(par_dev);
+ return ret;
+}
+
+/**
+ * fw_devlink_create_devlink - Create a device link from a consumer to fwnode
+ * @con: consumer device for the device link
+ * @sup_handle: fwnode handle of supplier
+ * @link: fwnode link that's being converted to a device link
+ *
+ * This function will try to create a device link between the consumer device
+ * @con and the supplier device represented by @sup_handle.
+ *
+ * The supplier has to be provided as a fwnode because incorrect cycles in
+ * fwnode links can sometimes cause the supplier device to never be created.
+ * This function detects such cases and returns an error if it cannot create a
+ * device link from the consumer to a missing supplier.
+ *
+ * Returns,
+ * 0 on successfully creating a device link
+ * -EINVAL if the device link cannot be created as expected
+ * -EAGAIN if the device link cannot be created right now, but it may be
+ * possible to do that in the future
+ */
+static int fw_devlink_create_devlink(struct device *con,
+ struct fwnode_handle *sup_handle,
+ struct fwnode_link *link)
+{
+ struct device *sup_dev;
+ int ret = 0;
+ u32 flags;
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
+
+ /*
+ * In some cases, a device P might also be a supplier to its child node
+ * C. However, this would defer the probe of C until the probe of P
+ * completes successfully. This is perfectly fine in the device driver
+ * model. device_add() doesn't guarantee probe completion of the device
+ * by the time it returns.
+ *
+ * However, there are a few drivers that assume C will finish probing
+ * as soon as it's added and before P finishes probing. So, we provide
+ * a flag to let fw_devlink know not to delay the probe of C until the
+ * probe of P completes successfully.
+ *
+ * When such a flag is set, we can't create device links where P is the
+ * supplier of C as that would delay the probe of C.
+ */
+ if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+ fwnode_is_ancestor_of(sup_handle, con->fwnode))
+ return -EINVAL;
+
+ /*
+ * SYNC_STATE_ONLY device links don't block probing and support cycles.
+ * So cycle detection isn't necessary and shouldn't be done.
+ */
+ if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(con, sup_handle)) {
+ __fwnode_link_cycle(link);
+ flags = fw_devlink_get_flags(link->flags);
+ dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
+ sup_handle);
+ }
+ device_links_write_unlock();
+ }
+
+ if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
+ sup_dev = fwnode_get_next_parent_dev(sup_handle);
+ else
+ sup_dev = get_dev_from_fwnode(sup_handle);
+
+ if (sup_dev) {
+ /*
+ * If it's one of those drivers that don't actually bind to
+ * their device using driver core, then don't wait on this
+ * supplier device indefinitely.
+ */
+ if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
+ sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
+ dev_dbg(con,
+ "Not linking %pfwf - dev might never probe\n",
+ sup_handle);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (con != sup_dev && !device_link_add(con, sup_dev, flags)) {
+ dev_err(con, "Failed to create device link (0x%x) with %s\n",
+ flags, dev_name(sup_dev));
+ ret = -EINVAL;
+ }
+
+ goto out;
+ }
+
+ /*
+ * Supplier or supplier's ancestor already initialized without a struct
+ * device or being probed by a driver.
+ */
+ if (fwnode_init_without_drv(sup_handle) ||
+ fwnode_ancestor_init_without_drv(sup_handle)) {
+ dev_dbg(con, "Not linking %pfwf - might never become dev\n",
+ sup_handle);
+ return -EINVAL;
+ }
+
+ ret = -EAGAIN;
+out:
+ put_device(sup_dev);
+ return ret;
+}
+
+/**
+ * __fw_devlink_link_to_consumers - Create device links to consumers of a device
+ * @dev: Device that needs to be linked to its consumers
+ *
+ * This function looks at all the consumer fwnodes of @dev and creates device
+ * links between the consumer device and @dev (supplier).
+ *
+ * If the consumer device has not been added yet, then this function creates a
+ * SYNC_STATE_ONLY link between @dev (supplier) and the closest ancestor device
+ * of the consumer fwnode. This is necessary to make sure @dev doesn't get a
+ * sync_state() callback before the real consumer device gets to be added and
+ * then probed.
+ *
+ * Once device links are created from the real consumer to @dev (supplier), the
+ * fwnode links are deleted.
+ */
+static void __fw_devlink_link_to_consumers(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev->fwnode;
+ struct fwnode_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
+ struct device *con_dev;
+ bool own_link = true;
+ int ret;
+
+ con_dev = get_dev_from_fwnode(link->consumer);
+ /*
+ * If consumer device is not available yet, make a "proxy"
+ * SYNC_STATE_ONLY link from the consumer's parent device to
+ * the supplier device. This is necessary to make sure the
+ * supplier doesn't get a sync_state() callback before the real
+ * consumer can create a device link to the supplier.
+ *
+ * This proxy link step is needed to handle the case where the
+ * consumer's parent device is added before the supplier.
+ */
+ if (!con_dev) {
+ con_dev = fwnode_get_next_parent_dev(link->consumer);
+ /*
+ * However, if the consumer's parent device is also the
+ * parent of the supplier, don't create a
+ * consumer-supplier link from the parent to its child
+ * device. Such a dependency is impossible.
+ */
+ if (con_dev &&
+ fwnode_is_ancestor_of(con_dev->fwnode, fwnode)) {
+ put_device(con_dev);
+ con_dev = NULL;
+ } else {
+ own_link = false;
+ }
+ }
+
+ if (!con_dev)
+ continue;
+
+ ret = fw_devlink_create_devlink(con_dev, fwnode, link);
+ put_device(con_dev);
+ if (!own_link || ret == -EAGAIN)
+ continue;
+
+ __fwnode_link_del(link);
+ }
+}
+
+/**
+ * __fw_devlink_link_to_suppliers - Create device links to suppliers of a device
+ * @dev: The consumer device that needs to be linked to its suppliers
+ * @fwnode: Root of the fwnode tree that is used to create device links
+ *
+ * This function looks at all the supplier fwnodes of fwnode tree rooted at
+ * @fwnode and creates device links between @dev (consumer) and all the
+ * supplier devices of the entire fwnode tree at @fwnode.
+ *
+ * The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
+ * and the real suppliers of @dev. Once these device links are created, the
+ * fwnode links are deleted.
+ *
+ * In addition, it also looks at all the suppliers of the entire fwnode tree
+ * because some of the child devices of @dev that have not been added yet
+ * (because @dev hasn't probed) might already have their suppliers added to
+ * driver core. So, this function creates SYNC_STATE_ONLY device links between
+ * @dev (consumer) and these suppliers to make sure they don't execute their
+ * sync_state() callbacks before these child devices have a chance to create
+ * their device links. The fwnode links that correspond to the child devices
+ * aren't delete because they are needed later to create the device links
+ * between the real consumer and supplier devices.
+ */
+static void __fw_devlink_link_to_suppliers(struct device *dev,
+ struct fwnode_handle *fwnode)
+{
+ bool own_link = (dev->fwnode == fwnode);
+ struct fwnode_link *link, *tmp;
+ struct fwnode_handle *child = NULL;
+
+ list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
+ int ret;
+ struct fwnode_handle *sup = link->supplier;
+
+ ret = fw_devlink_create_devlink(dev, sup, link);
+ if (!own_link || ret == -EAGAIN)
+ continue;
+
+ __fwnode_link_del(link);
+ }
+
+ /*
+ * Make "proxy" SYNC_STATE_ONLY device links to represent the needs of
+ * all the descendants. This proxy link step is needed to handle the
+ * case where the supplier is added before the consumer's parent device
+ * (@dev).
+ */
+ while ((child = fwnode_get_next_available_child_node(fwnode, child)))
+ __fw_devlink_link_to_suppliers(dev, child);
+}
+
+static void fw_devlink_link_device(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev->fwnode;
+
+ if (!fw_devlink_flags)
+ return;
+
+ fw_devlink_parse_fwtree(fwnode);
+
+ mutex_lock(&fwnode_link_lock);
+ __fw_devlink_link_to_consumers(dev);
+ __fw_devlink_link_to_suppliers(dev, fwnode);
+ mutex_unlock(&fwnode_link_lock);
+}
+
+/* Device links support end. */
+
+int (*platform_notify)(struct device *dev) = NULL;
+int (*platform_notify_remove)(struct device *dev) = NULL;
+static struct kobject *dev_kobj;
+struct kobject *sysfs_dev_char_kobj;
+struct kobject *sysfs_dev_block_kobj;
+
+static DEFINE_MUTEX(device_hotplug_lock);
+
+void lock_device_hotplug(void)
+{
+ mutex_lock(&device_hotplug_lock);
+}
+
+void unlock_device_hotplug(void)
+{
+ mutex_unlock(&device_hotplug_lock);
+}
+
+int lock_device_hotplug_sysfs(void)
+{
+ if (mutex_trylock(&device_hotplug_lock))
+ return 0;
+
+ /* Avoid busy looping (5 ms of sleep should do). */
+ msleep(5);
+ return restart_syscall();
+}
+
+#ifdef CONFIG_BLOCK
+static inline int device_is_not_partition(struct device *dev)
+{
+ return !(dev->type == &part_type);
+}
+#else
+static inline int device_is_not_partition(struct device *dev)
+{
+ return 1;
+}
+#endif
+
+static void device_platform_notify(struct device *dev)
+{
+ acpi_device_notify(dev);
+
+ software_node_notify(dev);
+
+ if (platform_notify)
+ platform_notify(dev);
+}
+
+static void device_platform_notify_remove(struct device *dev)
+{
+ acpi_device_notify_remove(dev);
+
+ software_node_notify_remove(dev);
+
+ if (platform_notify_remove)
+ platform_notify_remove(dev);
+}
+
+/**
+ * dev_driver_string - Return a device's driver name, if at all possible
+ * @dev: struct device to get the name of
+ *
+ * Will return the device's driver's name if it is bound to a device. If
+ * the device is not bound to a driver, it will return the name of the bus
+ * it is attached to. If it is not attached to a bus either, an empty
+ * string will be returned.
+ */
+const char *dev_driver_string(const struct device *dev)
+{
+ struct device_driver *drv;
+
+ /* dev->driver can change to NULL underneath us because of unbinding,
+ * so be careful about accessing it. dev->bus and dev->class should
+ * never change once they are set, so they don't need special care.
+ */
+ drv = READ_ONCE(dev->driver);
+ return drv ? drv->name : dev_bus_name(dev);
+}
+EXPORT_SYMBOL(dev_driver_string);
+
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
+static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = kobj_to_dev(kobj);
+ ssize_t ret = -EIO;
+
+ if (dev_attr->show)
+ ret = dev_attr->show(dev, dev_attr, buf);
+ if (ret >= (ssize_t)PAGE_SIZE) {
+ printk("dev_attr_show: %pS returned bad count\n",
+ dev_attr->show);
+ }
+ return ret;
+}
+
+static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = kobj_to_dev(kobj);
+ ssize_t ret = -EIO;
+
+ if (dev_attr->store)
+ ret = dev_attr->store(dev, dev_attr, buf, count);
+ return ret;
+}
+
+static const struct sysfs_ops dev_sysfs_ops = {
+ .show = dev_attr_show,
+ .store = dev_attr_store,
+};
+
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+ssize_t device_store_ulong(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ int ret;
+ unsigned long new;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret)
+ return ret;
+ *(unsigned long *)(ea->var) = new;
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_ulong);
+
+ssize_t device_show_ulong(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_ulong);
+
+ssize_t device_store_int(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ int ret;
+ long new;
+
+ ret = kstrtol(buf, 0, &new);
+ if (ret)
+ return ret;
+
+ if (new > INT_MAX || new < INT_MIN)
+ return -EINVAL;
+ *(int *)(ea->var) = new;
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_int);
+
+ssize_t device_show_int(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_int);
+
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ if (strtobool(buf, ea->var) < 0)
+ return -EINVAL;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_bool);
+
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_bool);
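+
+/*
+ * Minimal usage sketch (hypothetical driver code, not part of this file):
+ * drivers normally reach the helpers above through the DEVICE_ULONG_ATTR(),
+ * DEVICE_INT_ATTR() and DEVICE_BOOL_ATTR() macros from <linux/device.h>:
+ *
+ *	static unsigned long foo_threshold;
+ *	static DEVICE_ULONG_ATTR(threshold, 0644, foo_threshold);
+ *
+ * This defines a struct dev_ext_attribute named dev_attr_threshold whose
+ * ->var points at foo_threshold and whose show/store callbacks are
+ * device_show_ulong()/device_store_ulong().
+ */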
+
+/**
+ * device_release - free device structure.
+ * @kobj: device's kobject.
+ *
+ * This is called once the reference count for the object
+ * reaches 0. We forward the call to the device's release
+ * method, which should handle actually freeing the structure.
+ */
+static void device_release(struct kobject *kobj)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct device_private *p = dev->p;
+
+ /*
+ * Some platform devices are driven without a driver attached,
+ * and managed resources may have been acquired. Make sure
+ * all resources are released.
+ *
+ * Drivers can still add resources to the device after it has
+ * been deleted but is still alive, so release devres here to
+ * avoid a possible memory leak.
+ */
+ devres_release_all(dev);
+
+ kfree(dev->dma_range_map);
+
+ if (dev->release)
+ dev->release(dev);
+ else if (dev->type && dev->type->release)
+ dev->type->release(dev);
+ else if (dev->class && dev->class->dev_release)
+ dev->class->dev_release(dev);
+ else
+ WARN(1, KERN_ERR "Device '%s' does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+ dev_name(dev));
+ kfree(p);
+}
+
+static const void *device_namespace(struct kobject *kobj)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ const void *ns = NULL;
+
+ if (dev->class && dev->class->ns_type)
+ ns = dev->class->namespace(dev);
+
+ return ns;
+}
+
+static void device_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev->class && dev->class->get_ownership)
+ dev->class->get_ownership(dev, uid, gid);
+}
+
+static struct kobj_type device_ktype = {
+ .release = device_release,
+ .sysfs_ops = &dev_sysfs_ops,
+ .namespace = device_namespace,
+ .get_ownership = device_get_ownership,
+};
+
+static int dev_uevent_filter(struct kobject *kobj)
+{
+ const struct kobj_type *ktype = get_ktype(kobj);
+
+ if (ktype == &device_ktype) {
+ struct device *dev = kobj_to_dev(kobj);
+ if (dev->bus)
+ return 1;
+ if (dev->class)
+ return 1;
+ }
+ return 0;
+}
+
+static const char *dev_uevent_name(struct kobject *kobj)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev->bus)
+ return dev->bus->name;
+ if (dev->class)
+ return dev->class->name;
+ return NULL;
+}
+
+static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ int retval = 0;
+
+ /* add device node properties if present */
+ if (MAJOR(dev->devt)) {
+ const char *tmp;
+ const char *name;
+ umode_t mode = 0;
+ kuid_t uid = GLOBAL_ROOT_UID;
+ kgid_t gid = GLOBAL_ROOT_GID;
+
+ add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
+ add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
+ name = device_get_devnode(dev, &mode, &uid, &gid, &tmp);
+ if (name) {
+ add_uevent_var(env, "DEVNAME=%s", name);
+ if (mode)
+ add_uevent_var(env, "DEVMODE=%#o", mode & 0777);
+ if (!uid_eq(uid, GLOBAL_ROOT_UID))
+ add_uevent_var(env, "DEVUID=%u", from_kuid(&init_user_ns, uid));
+ if (!gid_eq(gid, GLOBAL_ROOT_GID))
+ add_uevent_var(env, "DEVGID=%u", from_kgid(&init_user_ns, gid));
+ kfree(tmp);
+ }
+ }
+
+ if (dev->type && dev->type->name)
+ add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
+
+ if (dev->driver)
+ add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+
+ /* Add common DT information about the device */
+ of_device_uevent(dev, env);
+
+ /* have the bus specific function add its stuff */
+ if (dev->bus && dev->bus->uevent) {
+ retval = dev->bus->uevent(dev, env);
+ if (retval)
+ pr_debug("device: '%s': %s: bus uevent() returned %d\n",
+ dev_name(dev), __func__, retval);
+ }
+
+ /* have the class specific function add its stuff */
+ if (dev->class && dev->class->dev_uevent) {
+ retval = dev->class->dev_uevent(dev, env);
+ if (retval)
+ pr_debug("device: '%s': %s: class uevent() "
+ "returned %d\n", dev_name(dev),
+ __func__, retval);
+ }
+
+ /* have the device type specific function add its stuff */
+ if (dev->type && dev->type->uevent) {
+ retval = dev->type->uevent(dev, env);
+ if (retval)
+ pr_debug("device: '%s': %s: dev_type uevent() "
+ "returned %d\n", dev_name(dev),
+ __func__, retval);
+ }
+
+ return retval;
+}
+
+static const struct kset_uevent_ops device_uevent_ops = {
+ .filter = dev_uevent_filter,
+ .name = dev_uevent_name,
+ .uevent = dev_uevent,
+};
+
+static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct kobject *top_kobj;
+ struct kset *kset;
+ struct kobj_uevent_env *env = NULL;
+ int i;
+ int len = 0;
+ int retval;
+
+ /* search the kset the device belongs to */
+ top_kobj = &dev->kobj;
+ while (!top_kobj->kset && top_kobj->parent)
+ top_kobj = top_kobj->parent;
+ if (!top_kobj->kset)
+ goto out;
+
+ kset = top_kobj->kset;
+ if (!kset->uevent_ops || !kset->uevent_ops->uevent)
+ goto out;
+
+ /* respect filter */
+ if (kset->uevent_ops && kset->uevent_ops->filter)
+ if (!kset->uevent_ops->filter(&dev->kobj))
+ goto out;
+
+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ /* let the kset specific function add its keys */
+ retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ if (retval)
+ goto out;
+
+ /* copy keys to file */
+ for (i = 0; i < env->envp_idx; i++)
+ len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
+out:
+ kfree(env);
+ return len;
+}
+
+static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+
+ rc = kobject_synth_uevent(&dev->kobj, buf, count);
+
+ if (rc) {
+ dev_err(dev, "uevent: failed to send synthetic uevent: %d\n", rc);
+ return rc;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(uevent);
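+
+/*
+ * Illustrative note (not part of the upstream file): writing an action name
+ * such as "add" or "change" to this attribute, e.g.
+ *
+ *	echo change > /sys/devices/.../uevent
+ *
+ * is parsed by kobject_synth_uevent() above into a synthetic uevent
+ * (KOBJ_CHANGE in this case) for the device.
+ */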
+
+static ssize_t online_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ bool val;
+
+ device_lock(dev);
+ val = !dev->offline;
+ device_unlock(dev);
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t online_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+ int ret;
+
+ ret = strtobool(buf, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ ret = val ? device_online(dev) : device_offline(dev);
+ unlock_device_hotplug();
+ return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_RW(online);
+
+static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *loc;
+
+ switch (dev->removable) {
+ case DEVICE_REMOVABLE:
+ loc = "removable";
+ break;
+ case DEVICE_FIXED:
+ loc = "fixed";
+ break;
+ default:
+ loc = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", loc);
+}
+static DEVICE_ATTR_RO(removable);
+
+int device_add_groups(struct device *dev, const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&dev->kobj, groups);
+}
+EXPORT_SYMBOL_GPL(device_add_groups);
+
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ sysfs_remove_groups(&dev->kobj, groups);
+}
+EXPORT_SYMBOL_GPL(device_remove_groups);
+
+union device_attr_group_devres {
+ const struct attribute_group *group;
+ const struct attribute_group **groups;
+};
+
+static int devm_attr_group_match(struct device *dev, void *res, void *data)
+{
+ return ((union device_attr_group_devres *)res)->group == data;
+}
+
+static void devm_attr_group_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group *group = devres->group;
+
+ dev_dbg(dev, "%s: removing group %p\n", __func__, group);
+ sysfs_remove_group(&dev->kobj, group);
+}
+
+static void devm_attr_groups_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group **groups = devres->groups;
+
+ dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
+ sysfs_remove_groups(&dev->kobj, groups);
+}
+
+/**
+ * devm_device_add_group - given a device, create a managed attribute group
+ * @dev: The device to create the group for
+ * @grp: The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_group_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_group(&dev->kobj, grp);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->group = grp;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_group);
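+
+/*
+ * Minimal usage sketch (hypothetical names, not part of this file):
+ *
+ *	static struct attribute *foo_attrs[] = {
+ *		&dev_attr_foo.attr,	// some DEVICE_ATTR_*() attribute
+ *		NULL,
+ *	};
+ *	static const struct attribute_group foo_group = {
+ *		.attrs = foo_attrs,
+ *	};
+ *
+ *	// in probe():
+ *	ret = devm_device_add_group(dev, &foo_group);
+ *
+ * The group is torn down automatically when the device is unbound, so an
+ * explicit devm_device_remove_group() call is only needed if the group has
+ * to go away earlier than that.
+ */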
+
+/**
+ * devm_device_remove_group: remove a managed group from a device
+ * @dev: device to remove the group from
+ * @grp: group to remove
+ *
+ * This function removes a group of attributes from a device. The group must
+ * have previously been added with devm_device_add_group(), otherwise the
+ * removal will fail.
+ */
+void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ WARN_ON(devres_release(dev, devm_attr_group_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)grp));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_group);
+
+/**
+ * devm_device_add_groups - create a bunch of managed attribute groups
+ * @dev: The device to create the group for
+ * @groups: The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of managed attribute groups. If an error
+ * occurs when creating a group, all previously created groups will be
+ * removed, unwinding everything back to the original state when this
+ * function was called. It will explicitly warn and error if any of the
+ * attribute files being created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on failure.
+ */
+int devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_groups_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_groups(&dev->kobj, groups);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->groups = groups;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_groups);
+
+/**
+ * devm_device_remove_groups - remove a list of managed groups
+ *
+ * @dev: The device for the groups to be removed from
+ * @groups: NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the device.
+ */
+void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ WARN_ON(devres_release(dev, devm_attr_groups_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)groups));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_groups);
+
+static int device_add_attrs(struct device *dev)
+{
+ struct class *class = dev->class;
+ const struct device_type *type = dev->type;
+ int error;
+
+ if (class) {
+ error = device_add_groups(dev, class->dev_groups);
+ if (error)
+ return error;
+ }
+
+ if (type) {
+ error = device_add_groups(dev, type->groups);
+ if (error)
+ goto err_remove_class_groups;
+ }
+
+ error = device_add_groups(dev, dev->groups);
+ if (error)
+ goto err_remove_type_groups;
+
+ if (device_supports_offline(dev) && !dev->offline_disabled) {
+ error = device_create_file(dev, &dev_attr_online);
+ if (error)
+ goto err_remove_dev_groups;
+ }
+
+ if (fw_devlink_flags && !fw_devlink_is_permissive() && dev->fwnode) {
+ error = device_create_file(dev, &dev_attr_waiting_for_supplier);
+ if (error)
+ goto err_remove_dev_online;
+ }
+
+ if (dev_removable_is_valid(dev)) {
+ error = device_create_file(dev, &dev_attr_removable);
+ if (error)
+ goto err_remove_dev_waiting_for_supplier;
+ }
+
+ if (dev_add_physical_location(dev)) {
+ error = device_add_group(dev,
+ &dev_attr_physical_location_group);
+ if (error)
+ goto err_remove_dev_removable;
+ }
+
+ return 0;
+
+ err_remove_dev_removable:
+ device_remove_file(dev, &dev_attr_removable);
+ err_remove_dev_waiting_for_supplier:
+ device_remove_file(dev, &dev_attr_waiting_for_supplier);
+ err_remove_dev_online:
+ device_remove_file(dev, &dev_attr_online);
+ err_remove_dev_groups:
+ device_remove_groups(dev, dev->groups);
+ err_remove_type_groups:
+ if (type)
+ device_remove_groups(dev, type->groups);
+ err_remove_class_groups:
+ if (class)
+ device_remove_groups(dev, class->dev_groups);
+
+ return error;
+}
+
+static void device_remove_attrs(struct device *dev)
+{
+ struct class *class = dev->class;
+ const struct device_type *type = dev->type;
+
+ if (dev->physical_location) {
+ device_remove_group(dev, &dev_attr_physical_location_group);
+ kfree(dev->physical_location);
+ }
+
+ device_remove_file(dev, &dev_attr_removable);
+ device_remove_file(dev, &dev_attr_waiting_for_supplier);
+ device_remove_file(dev, &dev_attr_online);
+ device_remove_groups(dev, dev->groups);
+
+ if (type)
+ device_remove_groups(dev, type->groups);
+
+ if (class)
+ device_remove_groups(dev, class->dev_groups);
+}
+
+static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return print_dev_t(buf, dev->devt);
+}
+static DEVICE_ATTR_RO(dev);
+
+/* /sys/devices/ */
+struct kset *devices_kset;
+
+/**
+ * devices_kset_move_before - Move device in the devices_kset's list.
+ * @deva: Device to move.
+ * @devb: Device @deva should come before.
+ */
+static void devices_kset_move_before(struct device *deva, struct device *devb)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s before %s\n",
+ dev_name(deva), dev_name(devb));
+ spin_lock(&devices_kset->list_lock);
+ list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_after - Move device in the devices_kset's list.
+ * @deva: Device to move
+ * @devb: Device @deva should come after.
+ */
+static void devices_kset_move_after(struct device *deva, struct device *devb)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s after %s\n",
+ dev_name(deva), dev_name(devb));
+ spin_lock(&devices_kset->list_lock);
+ list_move(&deva->kobj.entry, &devb->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_last - move the device to the end of devices_kset's list.
+ * @dev: device to move
+ */
+void devices_kset_move_last(struct device *dev)
+{
+ if (!devices_kset)
+ return;
+ pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
+ spin_lock(&devices_kset->list_lock);
+ list_move_tail(&dev->kobj.entry, &devices_kset->list);
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * device_create_file - create sysfs attribute file for device.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ */
+int device_create_file(struct device *dev,
+ const struct device_attribute *attr)
+{
+ int error = 0;
+
+ if (dev) {
+ WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
+ "Attribute %s: write permission without 'store'\n",
+ attr->attr.name);
+ WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
+ "Attribute %s: read permission without 'show'\n",
+ attr->attr.name);
+ error = sysfs_create_file(&dev->kobj, &attr->attr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_create_file);
+
+/**
+ * device_remove_file - remove sysfs attribute file.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ */
+void device_remove_file(struct device *dev,
+ const struct device_attribute *attr)
+{
+ if (dev)
+ sysfs_remove_file(&dev->kobj, &attr->attr);
+}
+EXPORT_SYMBOL_GPL(device_remove_file);
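+
+/*
+ * Minimal usage sketch (hypothetical attribute, not part of this file):
+ *
+ *	static ssize_t foo_show(struct device *dev,
+ *				struct device_attribute *attr, char *buf)
+ *	{
+ *		return sysfs_emit(buf, "%d\n", 42);
+ *	}
+ *	static DEVICE_ATTR_RO(foo);
+ *
+ *	// created after the device is registered, removed before it goes away:
+ *	err = device_create_file(dev, &dev_attr_foo);
+ *	...
+ *	device_remove_file(dev, &dev_attr_foo);
+ *
+ * For attributes that are known before registration, static attribute
+ * groups (dev->groups, handled in device_add_attrs() above) are usually
+ * preferable, since they are created before the KOBJ_ADD uevent is sent.
+ */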
+
+/**
+ * device_remove_file_self - remove sysfs attribute file from its own method.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ *
+ * See kernfs_remove_self() for details.
+ */
+bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr)
+{
+ if (dev)
+ return sysfs_remove_file_self(&dev->kobj, &attr->attr);
+ else
+ return false;
+}
+EXPORT_SYMBOL_GPL(device_remove_file_self);
+
+/**
+ * device_create_bin_file - create sysfs binary attribute file for device.
+ * @dev: device.
+ * @attr: device binary attribute descriptor.
+ */
+int device_create_bin_file(struct device *dev,
+ const struct bin_attribute *attr)
+{
+ int error = -EINVAL;
+ if (dev)
+ error = sysfs_create_bin_file(&dev->kobj, attr);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_create_bin_file);
+
+/**
+ * device_remove_bin_file - remove sysfs binary attribute file
+ * @dev: device.
+ * @attr: device binary attribute descriptor.
+ */
+void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr)
+{
+ if (dev)
+ sysfs_remove_bin_file(&dev->kobj, attr);
+}
+EXPORT_SYMBOL_GPL(device_remove_bin_file);
+
+static void klist_children_get(struct klist_node *n)
+{
+ struct device_private *p = to_device_private_parent(n);
+ struct device *dev = p->device;
+
+ get_device(dev);
+}
+
+static void klist_children_put(struct klist_node *n)
+{
+ struct device_private *p = to_device_private_parent(n);
+ struct device *dev = p->device;
+
+ put_device(dev);
+}
+
+/**
+ * device_initialize - init device structure.
+ * @dev: device.
+ *
+ * This prepares the device for use by other layers by initializing
+ * its fields.
+ * It is the first half of device_register(), if called by
+ * that function, though it can also be called separately, so one
+ * may use @dev's fields. In particular, get_device()/put_device()
+ * may be used for reference counting of @dev after calling this
+ * function.
+ *
+ * All fields in @dev must be initialized by the caller to 0, except
+ * for those explicitly set to some other value. The simplest
+ * approach is to use kzalloc() to allocate the structure containing
+ * @dev.
+ *
+ * NOTE: Use put_device() to give up your reference instead of freeing
+ * @dev directly once you have called this function.
+ */
+void device_initialize(struct device *dev)
+{
+ dev->kobj.kset = devices_kset;
+ kobject_init(&dev->kobj, &device_ktype);
+ INIT_LIST_HEAD(&dev->dma_pools);
+ mutex_init(&dev->mutex);
+ lockdep_set_novalidate_class(&dev->mutex);
+ spin_lock_init(&dev->devres_lock);
+ INIT_LIST_HEAD(&dev->devres_head);
+ device_pm_init(dev);
+ set_dev_node(dev, NUMA_NO_NODE);
+ INIT_LIST_HEAD(&dev->links.consumers);
+ INIT_LIST_HEAD(&dev->links.suppliers);
+ INIT_LIST_HEAD(&dev->links.defer_sync);
+ dev->links.status = DL_DEV_NO_DRIVER;
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+ defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+ dev->dma_coherent = dma_default_coherent;
+#endif
+#ifdef CONFIG_SWIOTLB
+ dev->dma_io_tlb_mem = &io_tlb_default_mem;
+#endif
+}
+EXPORT_SYMBOL_GPL(device_initialize);
+
+struct kobject *virtual_device_parent(struct device *dev)
+{
+ static struct kobject *virtual_dir = NULL;
+
+ if (!virtual_dir)
+ virtual_dir = kobject_create_and_add("virtual",
+ &devices_kset->kobj);
+
+ return virtual_dir;
+}
+
+struct class_dir {
+ struct kobject kobj;
+ struct class *class;
+};
+
+#define to_class_dir(obj) container_of(obj, struct class_dir, kobj)
+
+static void class_dir_release(struct kobject *kobj)
+{
+ struct class_dir *dir = to_class_dir(kobj);
+ kfree(dir);
+}
+
+static const
+struct kobj_ns_type_operations *class_dir_child_ns_type(struct kobject *kobj)
+{
+ struct class_dir *dir = to_class_dir(kobj);
+ return dir->class->ns_type;
+}
+
+static struct kobj_type class_dir_ktype = {
+ .release = class_dir_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .child_ns_type = class_dir_child_ns_type
+};
+
+static struct kobject *
+class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
+{
+ struct class_dir *dir;
+ int retval;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return ERR_PTR(-ENOMEM);
+
+ dir->class = class;
+ kobject_init(&dir->kobj, &class_dir_ktype);
+
+ dir->kobj.kset = &class->p->glue_dirs;
+
+ retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
+ if (retval < 0) {
+ kobject_put(&dir->kobj);
+ return ERR_PTR(retval);
+ }
+ return &dir->kobj;
+}
+
+static DEFINE_MUTEX(gdp_mutex);
+
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
+ if (dev->class) {
+ struct kobject *kobj = NULL;
+ struct kobject *parent_kobj;
+ struct kobject *k;
+
+#ifdef CONFIG_BLOCK
+ /* block disks show up in /sys/block */
+ if (sysfs_deprecated && dev->class == &block_class) {
+ if (parent && parent->class == &block_class)
+ return &parent->kobj;
+ return &block_class.p->subsys.kobj;
+ }
+#endif
+
+ /*
+ * If we have no parent, we live in "virtual".
+ * Class-devices with a non class-device as parent live
+ * in a "glue" directory to prevent namespace collisions.
+ */
+ if (parent == NULL)
+ parent_kobj = virtual_device_parent(dev);
+ else if (parent->class && !dev->class->ns_type)
+ return &parent->kobj;
+ else
+ parent_kobj = &parent->kobj;
+
+ mutex_lock(&gdp_mutex);
+
+ /* find our class-directory at the parent and reference it */
+ spin_lock(&dev->class->p->glue_dirs.list_lock);
+ list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry)
+ if (k->parent == parent_kobj) {
+ kobj = kobject_get(k);
+ break;
+ }
+ spin_unlock(&dev->class->p->glue_dirs.list_lock);
+ if (kobj) {
+ mutex_unlock(&gdp_mutex);
+ return kobj;
+ }
+
+ /* or create a new class-directory at the parent device */
+ k = class_dir_create_and_add(dev->class, parent_kobj);
+ /* do not emit an uevent for this simple "glue" directory */
+ mutex_unlock(&gdp_mutex);
+ return k;
+ }
+
+ /* subsystems can specify a default root directory for their devices */
+ if (!parent && dev->bus && dev->bus->dev_root)
+ return &dev->bus->dev_root->kobj;
+
+ if (parent)
+ return &parent->kobj;
+ return NULL;
+}
+
+static inline bool live_in_glue_dir(struct kobject *kobj,
+ struct device *dev)
+{
+ if (!kobj || !dev->class ||
+ kobj->kset != &dev->class->p->glue_dirs)
+ return false;
+ return true;
+}
+
+static inline struct kobject *get_glue_dir(struct device *dev)
+{
+ return dev->kobj.parent;
+}
+
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
+/*
+ * Make sure cleaning up the glue dir is the last step; the kobject's
+ * .release handler needs to run while the global lock is held.
+ */
+static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+{
+ unsigned int ref;
+
+ /* see if we live in a "glue" directory */
+ if (!live_in_glue_dir(glue_dir, dev))
+ return;
+
+ mutex_lock(&gdp_mutex);
+ /**
+ * There is a race condition between removing glue directory
+ * and adding a new device under the glue directory.
+ *
+ * CPU1: CPU2:
+ *
+ * device_add()
+ * get_device_parent()
+ * class_dir_create_and_add()
+ * kobject_add_internal()
+ * create_dir() // create glue_dir
+ *
+ * device_add()
+ * get_device_parent()
+ * kobject_get() // get glue_dir
+ *
+ * device_del()
+ * cleanup_glue_dir()
+ * kobject_del(glue_dir)
+ *
+ * kobject_add()
+ * kobject_add_internal()
+ * create_dir() // in glue_dir
+ * sysfs_create_dir_ns()
+ * kernfs_create_dir_ns(sd)
+ *
+ * sysfs_remove_dir() // glue_dir->sd=NULL
+ * sysfs_put() // free glue_dir->sd
+ *
+ * // sd is freed
+ * kernfs_new_node(sd)
+ * kernfs_get(glue_dir)
+ * kernfs_add_one()
+ * kernfs_put()
+ *
+ * If CPU2 adds a new device under the glue dir before CPU1 removes
+ * the last child device under it, the glue_dir kobject reference
+ * count is raised to 2 in kobject_get(k), and CPU2 has already
+ * called kernfs_create_dir_ns(). Meanwhile, CPU1 calls
+ * sysfs_remove_dir() and sysfs_put(), which frees glue_dir->sd.
+ *
+ * CPU2 will then see a stale "empty" but still potentially used
+ * glue dir around in kernfs_new_node().
+ *
+ * To avoid this, make sure that the kernfs_node for glue_dir is
+ * released on CPU1 only when the refcount of the glue_dir kobject
+ * is 1.
+ */
+ ref = kref_read(&glue_dir->kref);
+ if (!kobject_has_children(glue_dir) && !--ref)
+ kobject_del(glue_dir);
+ kobject_put(glue_dir);
+ mutex_unlock(&gdp_mutex);
+}
+
+static int device_add_class_symlinks(struct device *dev)
+{
+ struct device_node *of_node = dev_of_node(dev);
+ int error;
+
+ if (of_node) {
+ error = sysfs_create_link(&dev->kobj, of_node_kobj(of_node), "of_node");
+ if (error)
+ dev_warn(dev, "Error %d creating of_node link\n",error);
+ /* An error here doesn't warrant bringing down the device */
+ }
+
+ if (!dev->class)
+ return 0;
+
+ error = sysfs_create_link(&dev->kobj,
+ &dev->class->p->subsys.kobj,
+ "subsystem");
+ if (error)
+ goto out_devnode;
+
+ if (dev->parent && device_is_not_partition(dev)) {
+ error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
+ "device");
+ if (error)
+ goto out_subsys;
+ }
+
+#ifdef CONFIG_BLOCK
+ /* /sys/block has directories and does not need symlinks */
+ if (sysfs_deprecated && dev->class == &block_class)
+ return 0;
+#endif
+
+ /* link in the class directory pointing to the device */
+ error = sysfs_create_link(&dev->class->p->subsys.kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_device;
+
+ return 0;
+
+out_device:
+ sysfs_remove_link(&dev->kobj, "device");
+
+out_subsys:
+ sysfs_remove_link(&dev->kobj, "subsystem");
+out_devnode:
+ sysfs_remove_link(&dev->kobj, "of_node");
+ return error;
+}
+
+static void device_remove_class_symlinks(struct device *dev)
+{
+ if (dev_of_node(dev))
+ sysfs_remove_link(&dev->kobj, "of_node");
+
+ if (!dev->class)
+ return;
+
+ if (dev->parent && device_is_not_partition(dev))
+ sysfs_remove_link(&dev->kobj, "device");
+ sysfs_remove_link(&dev->kobj, "subsystem");
+#ifdef CONFIG_BLOCK
+ if (sysfs_deprecated && dev->class == &block_class)
+ return;
+#endif
+ sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev));
+}
+
+/**
+ * dev_set_name - set a device name
+ * @dev: device
+ * @fmt: format string for the device's name
+ */
+int dev_set_name(struct device *dev, const char *fmt, ...)
+{
+ va_list vargs;
+ int err;
+
+ va_start(vargs, fmt);
+ err = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+ va_end(vargs);
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_set_name);
+
+/**
+ * device_to_dev_kobj - select a /sys/dev/ directory for the device
+ * @dev: device
+ *
+ * By default we select char/ for new entries. Setting class->dev_kobj
+ * to NULL prevents an entry from being created. class->dev_kobj must
+ * be set (or cleared) before any devices are registered to the class,
+ * otherwise device_create_sys_dev_entry() and
+ * device_remove_sys_dev_entry() will disagree about the presence of
+ * the link.
+ */
+static struct kobject *device_to_dev_kobj(struct device *dev)
+{
+ struct kobject *kobj;
+
+ if (dev->class)
+ kobj = dev->class->dev_kobj;
+ else
+ kobj = sysfs_dev_char_kobj;
+
+ return kobj;
+}
+
+static int device_create_sys_dev_entry(struct device *dev)
+{
+ struct kobject *kobj = device_to_dev_kobj(dev);
+ int error = 0;
+ char devt_str[15];
+
+ if (kobj) {
+ format_dev_t(devt_str, dev->devt);
+ error = sysfs_create_link(kobj, &dev->kobj, devt_str);
+ }
+
+ return error;
+}
+
+static void device_remove_sys_dev_entry(struct device *dev)
+{
+ struct kobject *kobj = device_to_dev_kobj(dev);
+ char devt_str[15];
+
+ if (kobj) {
+ format_dev_t(devt_str, dev->devt);
+ sysfs_remove_link(kobj, devt_str);
+ }
+}
+
+static int device_private_init(struct device *dev)
+{
+ dev->p = kzalloc(sizeof(*dev->p), GFP_KERNEL);
+ if (!dev->p)
+ return -ENOMEM;
+ dev->p->device = dev;
+ klist_init(&dev->p->klist_children, klist_children_get,
+ klist_children_put);
+ INIT_LIST_HEAD(&dev->p->deferred_probe);
+ return 0;
+}
+
+/**
+ * device_add - add device to device hierarchy.
+ * @dev: device.
+ *
+ * This is part 2 of device_register(), though may be called
+ * separately _iff_ device_initialize() has been called separately.
+ *
+ * This adds @dev to the kobject hierarchy via kobject_add(), adds it
+ * to the global and sibling lists for the device, then
+ * adds it to the other relevant subsystems of the driver model.
+ *
+ * Do not call this routine or device_register() more than once for
+ * any device structure. The driver model core is not designed to work
+ * with devices that get unregistered and then spring back to life.
+ * (Among other things, it's very hard to guarantee that all references
+ * to the previous incarnation of @dev have been dropped.) Allocate
+ * and register a fresh new struct device instead.
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use put_device() to give up your
+ * reference instead.
+ *
+ * Rule of thumb is: if device_add() succeeds, you should call
+ * device_del() when you want to get rid of it. If device_add() has
+ * *not* succeeded, use *only* put_device() to drop the reference
+ * count.
+ */
+int device_add(struct device *dev)
+{
+ struct device *parent;
+ struct kobject *kobj;
+ struct class_interface *class_intf;
+ int error = -EINVAL;
+ struct kobject *glue_dir = NULL;
+
+ dev = get_device(dev);
+ if (!dev)
+ goto done;
+
+ if (!dev->p) {
+ error = device_private_init(dev);
+ if (error)
+ goto done;
+ }
+
+ /*
+ * for statically allocated devices, which should all be converted
+ * some day, we need to initialize the name. We prevent reading back
+ * the name, and force the use of dev_name()
+ */
+ if (dev->init_name) {
+ dev_set_name(dev, "%s", dev->init_name);
+ dev->init_name = NULL;
+ }
+
+ /* subsystems can specify simple device enumeration */
+ if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
+ dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+
+ if (!dev_name(dev)) {
+ error = -EINVAL;
+ goto name_error;
+ }
+
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+
+ parent = get_device(dev->parent);
+ kobj = get_device_parent(dev, parent);
+ if (IS_ERR(kobj)) {
+ error = PTR_ERR(kobj);
+ goto parent_error;
+ }
+ if (kobj)
+ dev->kobj.parent = kobj;
+
+ /* use parent numa_node */
+ if (parent && (dev_to_node(dev) == NUMA_NO_NODE))
+ set_dev_node(dev, dev_to_node(parent));
+
+ /* first, register with generic layer. */
+ /* we require the name to be set before, and pass NULL */
+ error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
+ if (error) {
+ glue_dir = kobj;
+ goto Error;
+ }
+
+ /* notify platform of device entry */
+ device_platform_notify(dev);
+
+ error = device_create_file(dev, &dev_attr_uevent);
+ if (error)
+ goto attrError;
+
+ error = device_add_class_symlinks(dev);
+ if (error)
+ goto SymlinkError;
+ error = device_add_attrs(dev);
+ if (error)
+ goto AttrsError;
+ error = bus_add_device(dev);
+ if (error)
+ goto BusError;
+ error = dpm_sysfs_add(dev);
+ if (error)
+ goto DPMError;
+ device_pm_add(dev);
+
+ if (MAJOR(dev->devt)) {
+ error = device_create_file(dev, &dev_attr_dev);
+ if (error)
+ goto DevAttrError;
+
+ error = device_create_sys_dev_entry(dev);
+ if (error)
+ goto SysEntryError;
+
+ devtmpfs_create_node(dev);
+ }
+
+ /* Notify clients of device addition. This call must come
+ * after dpm_sysfs_add() and before kobject_uevent().
+ */
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_ADD_DEVICE, dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+ /*
+ * Check if any of the other devices (consumers) have been waiting for
+ * this device (supplier) to be added so that they can create a device
+ * link to it.
+ *
+ * This needs to happen after device_pm_add() because device_link_add()
+ * requires the supplier be registered before it's called.
+ *
+ * But this also needs to happen before bus_probe_device() to make sure
+ * waiting consumers can link to it before the driver is bound to the
+ * device and the driver sync_state callback is called for this device.
+ */
+ if (dev->fwnode && !dev->fwnode->dev) {
+ dev->fwnode->dev = dev;
+ fw_devlink_link_device(dev);
+ }
+
+ bus_probe_device(dev);
+
+ /*
+ * If all driver registration is done and a newly added device doesn't
+ * match with any driver, don't block its consumers from probing in
+ * case the consumer device is able to operate without this supplier.
+ */
+ if (dev->fwnode && fw_devlink_drv_reg_done && !dev->can_match)
+ fw_devlink_unblock_consumers(dev);
+
+ if (parent)
+ klist_add_tail(&dev->p->knode_parent,
+ &parent->p->klist_children);
+
+ if (dev->class) {
+ mutex_lock(&dev->class->p->mutex);
+ /* tie the class to the device */
+ klist_add_tail(&dev->p->knode_class,
+ &dev->class->p->klist_devices);
+
+ /* notify any interfaces that the device is here */
+ list_for_each_entry(class_intf,
+ &dev->class->p->interfaces, node)
+ if (class_intf->add_dev)
+ class_intf->add_dev(dev, class_intf);
+ mutex_unlock(&dev->class->p->mutex);
+ }
+done:
+ put_device(dev);
+ return error;
+ SysEntryError:
+ if (MAJOR(dev->devt))
+ device_remove_file(dev, &dev_attr_dev);
+ DevAttrError:
+ device_pm_remove(dev);
+ dpm_sysfs_remove(dev);
+ DPMError:
+ dev->driver = NULL;
+ bus_remove_device(dev);
+ BusError:
+ device_remove_attrs(dev);
+ AttrsError:
+ device_remove_class_symlinks(dev);
+ SymlinkError:
+ device_remove_file(dev, &dev_attr_uevent);
+ attrError:
+ device_platform_notify_remove(dev);
+ kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ glue_dir = get_glue_dir(dev);
+ kobject_del(&dev->kobj);
+ Error:
+ cleanup_glue_dir(dev, glue_dir);
+parent_error:
+ put_device(parent);
+name_error:
+ kfree(dev->p);
+ dev->p = NULL;
+ goto done;
+}
+EXPORT_SYMBOL_GPL(device_add);
+
+/**
+ * device_register - register a device with the system.
+ * @dev: pointer to the device structure
+ *
+ * This happens in two clean steps - initialize the device
+ * and add it to the system. The two steps can be called
+ * separately, but this is the easiest and most common.
+ * I.e. you should only call the two helpers separately if you
+ * have a clearly defined need to use and refcount the device
+ * before it is added to the hierarchy.
+ *
+ * For more information, see the kerneldoc for device_initialize()
+ * and device_add().
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use put_device() to give up the
+ * reference initialized in this function instead.
+ */
+int device_register(struct device *dev)
+{
+ device_initialize(dev);
+ return device_add(dev);
+}
+EXPORT_SYMBOL_GPL(device_register);
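+
+/*
+ * Minimal registration sketch (hypothetical my_dev/my_release names, not
+ * part of this file), following the rules spelled out above:
+ *
+ *	static void my_release(struct device *dev)
+ *	{
+ *		kfree(container_of(dev, struct my_dev, dev));
+ *	}
+ *
+ *	md = kzalloc(sizeof(*md), GFP_KERNEL);
+ *	md->dev.release = my_release;
+ *	dev_set_name(&md->dev, "my_dev%d", id);
+ *	err = device_register(&md->dev);
+ *	if (err) {
+ *		put_device(&md->dev);	// never kfree(md) after this point
+ *		return err;
+ *	}
+ *
+ * The matching teardown is device_unregister(&md->dev), which does
+ * device_del() followed by the final put_device().
+ */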
+
+/**
+ * get_device - increment reference count for device.
+ * @dev: device.
+ *
+ * This simply forwards the call to kobject_get(), though
+ * we do take care to provide for the case that we get a NULL
+ * pointer passed in.
+ */
+struct device *get_device(struct device *dev)
+{
+ return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
+}
+EXPORT_SYMBOL_GPL(get_device);
+
+/**
+ * put_device - decrement reference count.
+ * @dev: device in question.
+ */
+void put_device(struct device *dev)
+{
+ /* might_sleep(); */
+ if (dev)
+ kobject_put(&dev->kobj);
+}
+EXPORT_SYMBOL_GPL(put_device);
+
+bool kill_device(struct device *dev)
+{
+ /*
+ * Require the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ device_lock_assert(dev);
+
+ if (dev->p->dead)
+ return false;
+ dev->p->dead = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
+/**
+ * device_del - delete device from system.
+ * @dev: device.
+ *
+ * This is the first part of the device unregistration
+ * sequence. This removes the device from the lists we control
+ * from here, has it removed from the other driver model
+ * subsystems it was added to in device_add(), and removes it
+ * from the kobject hierarchy.
+ *
+ * NOTE: this should be called manually _iff_ device_add() was
+ * also called manually.
+ */
+void device_del(struct device *dev)
+{
+ struct device *parent = dev->parent;
+ struct kobject *glue_dir = NULL;
+ struct class_interface *class_intf;
+ unsigned int noio_flag;
+
+ device_lock(dev);
+ kill_device(dev);
+ device_unlock(dev);
+
+ if (dev->fwnode && dev->fwnode->dev == dev)
+ dev->fwnode->dev = NULL;
+
+ /* Notify clients of device removal. This call must come
+ * before dpm_sysfs_remove().
+ */
+ noio_flag = memalloc_noio_save();
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DEL_DEVICE, dev);
+
+ dpm_sysfs_remove(dev);
+ if (parent)
+ klist_del(&dev->p->knode_parent);
+ if (MAJOR(dev->devt)) {
+ devtmpfs_delete_node(dev);
+ device_remove_sys_dev_entry(dev);
+ device_remove_file(dev, &dev_attr_dev);
+ }
+ if (dev->class) {
+ device_remove_class_symlinks(dev);
+
+ mutex_lock(&dev->class->p->mutex);
+ /* notify any interfaces that the device is now gone */
+ list_for_each_entry(class_intf,
+ &dev->class->p->interfaces, node)
+ if (class_intf->remove_dev)
+ class_intf->remove_dev(dev, class_intf);
+ /* remove the device from the class list */
+ klist_del(&dev->p->knode_class);
+ mutex_unlock(&dev->class->p->mutex);
+ }
+ device_remove_file(dev, &dev_attr_uevent);
+ device_remove_attrs(dev);
+ bus_remove_device(dev);
+ device_pm_remove(dev);
+ driver_deferred_probe_del(dev);
+ device_platform_notify_remove(dev);
+ device_links_purge(dev);
+
+ /*
+ * If a device does not have a driver attached, we need to clean
+ * up any managed resources. We do this in device_release(), but
+ * it's never called (and we leak the device) if a managed
+ * resource holds a reference to the device. So release all
+ * managed resources here, like we do in driver_detach(). We
+ * still need to do so again in device_release() in case someone
+ * adds a new resource after this point, though.
+ */
+ devres_release_all(dev);
+
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_REMOVED_DEVICE, dev);
+ kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ glue_dir = get_glue_dir(dev);
+ kobject_del(&dev->kobj);
+ cleanup_glue_dir(dev, glue_dir);
+ memalloc_noio_restore(noio_flag);
+ put_device(parent);
+}
+EXPORT_SYMBOL_GPL(device_del);
+
+/**
+ * device_unregister - unregister device from system.
+ * @dev: device going away.
+ *
+ * We do this in two parts, like we do device_register(). First,
+ * we remove it from all the subsystems with device_del(), then
+ * we decrement the reference count via put_device(). If that
+ * is the final reference count, the device will be cleaned up
+ * via device_release() above. Otherwise, the structure will
+ * stick around until the final reference to the device is dropped.
+ */
+void device_unregister(struct device *dev)
+{
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ device_del(dev);
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(device_unregister);
+
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *p;
+
+ if (n) {
+ p = to_device_private_parent(n);
+ dev = p->device;
+ }
+ return dev;
+}
+
+static struct device *next_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_next(i);
+ struct device *dev = NULL;
+ struct device_private *p;
+
+ if (n) {
+ p = to_device_private_parent(n);
+ dev = p->device;
+ }
+ return dev;
+}
+
+/**
+ * device_get_devnode - path of device node file
+ * @dev: device
+ * @mode: returned file access mode
+ * @uid: returned file owner
+ * @gid: returned file group
+ * @tmp: possibly allocated string
+ *
+ * Return the relative path of a possible device node.
+ * Non-default names may need to allocate memory to compose
+ * a name. This memory is returned in @tmp and needs to be
+ * freed by the caller.
+ */
+const char *device_get_devnode(struct device *dev,
+ umode_t *mode, kuid_t *uid, kgid_t *gid,
+ const char **tmp)
+{
+ char *s;
+
+ *tmp = NULL;
+
+ /* the device type may provide a specific name */
+ if (dev->type && dev->type->devnode)
+ *tmp = dev->type->devnode(dev, mode, uid, gid);
+ if (*tmp)
+ return *tmp;
+
+ /* the class may provide a specific name */
+ if (dev->class && dev->class->devnode)
+ *tmp = dev->class->devnode(dev, mode);
+ if (*tmp)
+ return *tmp;
+
+ /* return name without allocation, tmp == NULL */
+ if (strchr(dev_name(dev), '!') == NULL)
+ return dev_name(dev);
+
+ /* replace '!' in the name with '/' */
+ s = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!s)
+ return NULL;
+ strreplace(s, '!', '/');
+ return *tmp = s;
+}
+
+/**
+ * device_for_each_child - device child iterator.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child(struct device *parent, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+ int error = 0;
+
+ if (!parent->p)
+ return 0;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while (!error && (child = next_device(&i)))
+ error = fn(child, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child);
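+
+/*
+ * Hypothetical sketch of a device_for_each_child() callback (names are
+ * illustrative): count the children of @parent.
+ *
+ *	static int count_child(struct device *dev, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *		return 0;	(returning 0 keeps the iteration going)
+ *	}
+ *
+ *	int count = 0;
+ *	device_for_each_child(parent, &count, count_child);
+ */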
+
+/**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+ int error = 0;
+
+ if (!parent->p)
+ return 0;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = prev_device(&i)) && !error)
+ error = fn(child, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
+/**
+ * device_find_child - device iterator for locating a particular device.
+ * @parent: parent struct device
+ * @match: Callback function to check device
+ * @data: Data to pass to match function
+ *
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_child(struct device *parent, void *data,
+ int (*match)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+
+ if (!parent)
+ return NULL;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = next_device(&i)))
+ if (match(child, data) && get_device(child))
+ break;
+ klist_iter_exit(&i);
+ return child;
+}
+EXPORT_SYMBOL_GPL(device_find_child);
+
+/**
+ * device_find_child_by_name - device iterator for locating a child device.
+ * @parent: parent struct device
+ * @name: name of the child device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a device that has the name @name.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_child_by_name(struct device *parent,
+ const char *name)
+{
+ struct klist_iter i;
+ struct device *child;
+
+ if (!parent)
+ return NULL;
+
+ klist_iter_init(&parent->p->klist_children, &i);
+ while ((child = next_device(&i)))
+ if (sysfs_streq(dev_name(child), name) && get_device(child))
+ break;
+ klist_iter_exit(&i);
+ return child;
+}
+EXPORT_SYMBOL_GPL(device_find_child_by_name);
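+
+/*
+ * Hypothetical sketch ("port0" is an illustrative name): look up a child
+ * and remember to drop the reference taken by the lookup.
+ *
+ *	child = device_find_child_by_name(parent, "port0");
+ *	if (child) {
+ *		...use child...
+ *		put_device(child);
+ *	}
+ */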
+
+static int match_any(struct device *dev, void *unused)
+{
+ return 1;
+}
+
+/**
+ * device_find_any_child - device iterator for locating a child device, if any.
+ * @parent: parent struct device
+ *
+ * This is similar to the device_find_child() function above, but it
+ * returns a reference to a child device, if any.
+ *
+ * NOTE: you will need to drop the reference with put_device() after use.
+ */
+struct device *device_find_any_child(struct device *parent)
+{
+ return device_find_child(parent, NULL, match_any);
+}
+EXPORT_SYMBOL_GPL(device_find_any_child);
+
+int __init devices_init(void)
+{
+ devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
+ if (!devices_kset)
+ return -ENOMEM;
+ dev_kobj = kobject_create_and_add("dev", NULL);
+ if (!dev_kobj)
+ goto dev_kobj_err;
+ sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
+ if (!sysfs_dev_block_kobj)
+ goto block_kobj_err;
+ sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
+ if (!sysfs_dev_char_kobj)
+ goto char_kobj_err;
+
+ return 0;
+
+ char_kobj_err:
+ kobject_put(sysfs_dev_block_kobj);
+ block_kobj_err:
+ kobject_put(dev_kobj);
+ dev_kobj_err:
+ kset_unregister(devices_kset);
+ return -ENOMEM;
+}
+
+static int device_check_offline(struct device *dev, void *not_used)
+{
+ int ret;
+
+ ret = device_for_each_child(dev, NULL, device_check_offline);
+ if (ret)
+ return ret;
+
+ return device_supports_offline(dev) && !dev->offline ? -EBUSY : 0;
+}
+
+/**
+ * device_offline - Prepare the device for hot-removal.
+ * @dev: Device to be put offline.
+ *
+ * Execute the device bus type's .offline() callback, if present, to prepare
+ * the device for a subsequent hot-removal. If that succeeds, the device must
+ * not be used until either it is removed or its bus type's .online() callback
+ * is executed.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_offline(struct device *dev)
+{
+ int ret;
+
+ if (dev->offline_disabled)
+ return -EPERM;
+
+ ret = device_for_each_child(dev, NULL, device_check_offline);
+ if (ret)
+ return ret;
+
+ device_lock(dev);
+ if (device_supports_offline(dev)) {
+ if (dev->offline) {
+ ret = 1;
+ } else {
+ ret = dev->bus->offline(dev);
+ if (!ret) {
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ dev->offline = true;
+ }
+ }
+ }
+ device_unlock(dev);
+
+ return ret;
+}
+
+/**
+ * device_online - Put the device back online after successful device_offline().
+ * @dev: Device to be put back online.
+ *
+ * If device_offline() has been successfully executed for @dev, but the device
+ * has not been removed subsequently, execute its bus type's .online() callback
+ * to indicate that the device can be used again.
+ *
+ * Call under device_hotplug_lock.
+ */
+int device_online(struct device *dev)
+{
+ int ret = 0;
+
+ device_lock(dev);
+ if (device_supports_offline(dev)) {
+ if (dev->offline) {
+ ret = dev->bus->online(dev);
+ if (!ret) {
+ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+ dev->offline = false;
+ }
+ } else {
+ ret = 1;
+ }
+ }
+ device_unlock(dev);
+
+ return ret;
+}
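+
+/*
+ * Hypothetical sketch of the expected calling pattern for device_offline()
+ * and device_online(); both must run under device_hotplug_lock:
+ *
+ *	lock_device_hotplug();
+ *	ret = device_offline(dev);	(1 means it was already offline)
+ *	unlock_device_hotplug();
+ */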
+
+struct root_device {
+ struct device dev;
+ struct module *owner;
+};
+
+static inline struct root_device *to_root_device(struct device *d)
+{
+ return container_of(d, struct root_device, dev);
+}
+
+static void root_device_release(struct device *dev)
+{
+ kfree(to_root_device(dev));
+}
+
+/**
+ * __root_device_register - allocate and register a root device
+ * @name: root device name
+ * @owner: owner module of the root device, usually THIS_MODULE
+ *
+ * This function allocates a root device and registers it
+ * using device_register(). In order to free the returned
+ * device, use root_device_unregister().
+ *
+ * Root devices are dummy devices which allow other devices
+ * to be grouped under /sys/devices. Use this function to
+ * allocate a root device and then use it as the parent of
+ * any device which should appear under /sys/devices/{name}.
+ *
+ * The /sys/devices/{name} directory will also contain a
+ * 'module' symlink which points to the @owner directory
+ * in sysfs.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: You probably want to use root_device_register().
+ */
+struct device *__root_device_register(const char *name, struct module *owner)
+{
+ struct root_device *root;
+ int err = -ENOMEM;
+
+ root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(err);
+
+ err = dev_set_name(&root->dev, "%s", name);
+ if (err) {
+ kfree(root);
+ return ERR_PTR(err);
+ }
+
+ root->dev.release = root_device_release;
+
+ err = device_register(&root->dev);
+ if (err) {
+ put_device(&root->dev);
+ return ERR_PTR(err);
+ }
+
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
+ if (owner) {
+ struct module_kobject *mk = &owner->mkobj;
+
+ err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
+ if (err) {
+ device_unregister(&root->dev);
+ return ERR_PTR(err);
+ }
+ root->owner = owner;
+ }
+#endif
+
+ return &root->dev;
+}
+EXPORT_SYMBOL_GPL(__root_device_register);
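+
+/*
+ * Hypothetical usage sketch ("my_root" is an illustrative name): create
+ * /sys/devices/my_root and hang other devices off it.
+ *
+ *	root = root_device_register("my_root");
+ *	if (IS_ERR(root))
+ *		return PTR_ERR(root);
+ *	child->parent = root;	(then register the child as usual)
+ *	...
+ *	root_device_unregister(root);
+ */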
+
+/**
+ * root_device_unregister - unregister and free a root device
+ * @dev: device going away
+ *
+ * This function unregisters and cleans up a device that was created by
+ * root_device_register().
+ */
+void root_device_unregister(struct device *dev)
+{
+ struct root_device *root = to_root_device(dev);
+
+ if (root->owner)
+ sysfs_remove_link(&root->dev.kobj, "module");
+
+ device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(root_device_unregister);
+
+
+static void device_create_release(struct device *dev)
+{
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ kfree(dev);
+}
+
+static __printf(6, 0) struct device *
+device_create_groups_vargs(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, va_list args)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ if (IS_ERR_OR_NULL(class))
+ goto error;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->devt = devt;
+ dev->class = class;
+ dev->parent = parent;
+ dev->groups = groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, drvdata);
+
+ retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+/**
+ * device_create - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @fmt: string for the device's name
+ *
+ * This function can be used by char device classes. A struct device
+ * will be created in sysfs, registered to the specified class.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = device_create_groups_vargs(class, parent, devt, drvdata, NULL,
+ fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(device_create);
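+
+/*
+ * Hypothetical usage sketch for a char device class ("foo" and foo_major
+ * are illustrative):
+ *
+ *	cls = class_create(THIS_MODULE, "foo");
+ *	if (IS_ERR(cls))
+ *		return PTR_ERR(cls);
+ *	dev = device_create(cls, NULL, MKDEV(foo_major, 0), NULL, "foo%d", 0);
+ *	if (IS_ERR(dev))
+ *		return PTR_ERR(dev);
+ *	...
+ *	device_destroy(cls, MKDEV(foo_major, 0));
+ *	class_destroy(cls);
+ */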
+
+/**
+ * device_create_with_groups - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @groups: NULL-terminated list of attribute groups to be created
+ * @fmt: string for the device's name
+ *
+ * This function can be used by char device classes. A struct device
+ * will be created in sysfs, registered to the specified class.
+ * Additional attributes specified in the groups parameter will also
+ * be created automatically.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create_with_groups(struct class *class,
+ struct device *parent, dev_t devt,
+ void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = device_create_groups_vargs(class, parent, devt, drvdata, groups,
+ fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(device_create_with_groups);
+
+/**
+ * device_destroy - removes a device that was created with device_create()
+ * @class: pointer to the struct class that this device was registered with
+ * @devt: the dev_t of the device that was previously registered
+ *
+ * This call unregisters and cleans up a device that was created with a
+ * call to device_create().
+ */
+void device_destroy(struct class *class, dev_t devt)
+{
+ struct device *dev;
+
+ dev = class_find_device_by_devt(class, devt);
+ if (dev) {
+ put_device(dev);
+ device_unregister(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(device_destroy);
+
+/**
+ * device_rename - renames a device
+ * @dev: the pointer to the struct device to be renamed
+ * @new_name: the new name of the device
+ *
+ * It is the responsibility of the caller to provide mutual
+ * exclusion between two different calls of device_rename
+ * on the same device to ensure that new_name is valid and
+ * won't conflict with other devices.
+ *
+ * Note: Don't call this function. Currently, the networking layer calls this
+ * function, but that will change. The following text from Kay Sievers offers
+ * some insight:
+ *
+ * Renaming devices is racy at many levels, symlinks and other stuff are not
+ * replaced atomically, and you get a "move" uevent, but it's not easy to
+ * connect the event to the old and new device. Device nodes are not renamed at
+ * all, there isn't even support for that in the kernel now.
+ *
+ * In the meantime, during renaming, your target name might be taken by another
+ * driver, creating conflicts. Or the old name is taken directly after you
+ * renamed it -- then you get events for the same DEVPATH, before you even see
+ * the "move" event. It's just a mess, and nothing new should ever rely on
+ * kernel device renaming. Besides that, it's not even implemented now for
+ * other things than (driver-core wise very simple) network devices.
+ *
+ * We are currently about to change network renaming in udev to completely
+ * disallow renaming of devices in the same namespace as the kernel uses,
+ * because we can't solve the problems properly, that arise with swapping names
+ * of multiple interfaces without races. Means, renaming of eth[0-9]* will only
+ * be allowed to some other name than eth[0-9]*, for the aforementioned
+ * reasons.
+ *
+ * Make up a "real" name in the driver before you register anything, or add
+ * some other attributes for userspace to find the device, or use udev to add
+ * symlinks -- but never rename kernel devices later, it's a complete mess. We
+ * don't even want to get into that and try to implement the missing pieces in
+ * the core. We really have other pieces to fix in the driver core mess. :)
+ */
+int device_rename(struct device *dev, const char *new_name)
+{
+ struct kobject *kobj = &dev->kobj;
+ char *old_device_name = NULL;
+ int error;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ dev_dbg(dev, "renaming to %s\n", new_name);
+
+ old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
+ if (!old_device_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ if (dev->class) {
+ error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
+ kobj, old_device_name,
+ new_name, kobject_namespace(kobj));
+ if (error)
+ goto out;
+ }
+
+ error = kobject_rename(kobj, new_name);
+ if (error)
+ goto out;
+
+out:
+ put_device(dev);
+
+ kfree(old_device_name);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_rename);
+
+static int device_move_class_links(struct device *dev,
+ struct device *old_parent,
+ struct device *new_parent)
+{
+ int error = 0;
+
+ if (old_parent)
+ sysfs_remove_link(&dev->kobj, "device");
+ if (new_parent)
+ error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
+ "device");
+ return error;
+}
+
+/**
+ * device_move - moves a device to a new parent
+ * @dev: the pointer to the struct device to be moved
+ * @new_parent: the new parent of the device (can be NULL)
+ * @dpm_order: how to reorder the dpm_list
+ */
+int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order)
+{
+ int error;
+ struct device *old_parent;
+ struct kobject *new_parent_kobj;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ device_pm_lock();
+ new_parent = get_device(new_parent);
+ new_parent_kobj = get_device_parent(dev, new_parent);
+ if (IS_ERR(new_parent_kobj)) {
+ error = PTR_ERR(new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
+
+ pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+ __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+ error = kobject_move(&dev->kobj, new_parent_kobj);
+ if (error) {
+ cleanup_glue_dir(dev, new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
+ old_parent = dev->parent;
+ dev->parent = new_parent;
+ if (old_parent)
+ klist_remove(&dev->p->knode_parent);
+ if (new_parent) {
+ klist_add_tail(&dev->p->knode_parent,
+ &new_parent->p->klist_children);
+ set_dev_node(dev, dev_to_node(new_parent));
+ }
+
+ if (dev->class) {
+ error = device_move_class_links(dev, old_parent, new_parent);
+ if (error) {
+ /* We ignore errors on cleanup since we're hosed anyway... */
+ device_move_class_links(dev, new_parent, old_parent);
+ if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
+ if (new_parent)
+ klist_remove(&dev->p->knode_parent);
+ dev->parent = old_parent;
+ if (old_parent) {
+ klist_add_tail(&dev->p->knode_parent,
+ &old_parent->p->klist_children);
+ set_dev_node(dev, dev_to_node(old_parent));
+ }
+ }
+ cleanup_glue_dir(dev, new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
+ }
+ switch (dpm_order) {
+ case DPM_ORDER_NONE:
+ break;
+ case DPM_ORDER_DEV_AFTER_PARENT:
+ device_pm_move_after(dev, new_parent);
+ devices_kset_move_after(dev, new_parent);
+ break;
+ case DPM_ORDER_PARENT_BEFORE_DEV:
+ device_pm_move_before(new_parent, dev);
+ devices_kset_move_before(new_parent, dev);
+ break;
+ case DPM_ORDER_DEV_LAST:
+ device_pm_move_last(dev);
+ devices_kset_move_last(dev);
+ break;
+ }
+
+ put_device(old_parent);
+out:
+ device_pm_unlock();
+ put_device(dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_move);
+
+static int device_attrs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ struct kobject *kobj = &dev->kobj;
+ struct class *class = dev->class;
+ const struct device_type *type = dev->type;
+ int error;
+
+ if (class) {
+ /*
+ * Change the device groups of the device class for @dev to
+ * @kuid/@kgid.
+ */
+ error = sysfs_groups_change_owner(kobj, class->dev_groups, kuid,
+ kgid);
+ if (error)
+ return error;
+ }
+
+ if (type) {
+ /*
+ * Change the device groups of the device type for @dev to
+ * @kuid/@kgid.
+ */
+ error = sysfs_groups_change_owner(kobj, type->groups, kuid,
+ kgid);
+ if (error)
+ return error;
+ }
+
+ /* Change the device groups of @dev to @kuid/@kgid. */
+ error = sysfs_groups_change_owner(kobj, dev->groups, kuid, kgid);
+ if (error)
+ return error;
+
+ if (device_supports_offline(dev) && !dev->offline_disabled) {
+ /* Change online device attributes of @dev to @kuid/@kgid. */
+ error = sysfs_file_change_owner(kobj, dev_attr_online.attr.name,
+ kuid, kgid);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+/**
+ * device_change_owner - change the owner of an existing device.
+ * @dev: device.
+ * @kuid: new owner's kuid
+ * @kgid: new owner's kgid
+ *
+ * This changes the owner of @dev and its corresponding sysfs entries to
+ * @kuid/@kgid. This function closely mirrors how @dev was added via driver
+ * core.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int error;
+ struct kobject *kobj = &dev->kobj;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ /*
+ * Change the kobject and the default attributes and groups of the
+ * ktype associated with it to @kuid/@kgid.
+ */
+ error = sysfs_change_owner(kobj, kuid, kgid);
+ if (error)
+ goto out;
+
+ /*
+ * Change the uevent file for @dev to the new owner. The uevent file
+ * was created in a separate step when @dev got added and we mirror
+ * that step here.
+ */
+ error = sysfs_file_change_owner(kobj, dev_attr_uevent.attr.name, kuid,
+ kgid);
+ if (error)
+ goto out;
+
+ /*
+ * Change the device groups, the device groups associated with the
+ * device class, and the groups associated with the device type of @dev
+ * to @kuid/@kgid.
+ */
+ error = device_attrs_change_owner(dev, kuid, kgid);
+ if (error)
+ goto out;
+
+ error = dpm_sysfs_change_owner(dev, kuid, kgid);
+ if (error)
+ goto out;
+
+#ifdef CONFIG_BLOCK
+ if (sysfs_deprecated && dev->class == &block_class)
+ goto out;
+#endif
+
+ /*
+ * Change the owner of the symlink located in the class directory of
+ * the device class associated with @dev which points to the actual
+ * directory entry for @dev to @kuid/@kgid. This ensures that the
+ * symlink shows the same permissions as its target.
+ */
+ error = sysfs_link_change_owner(&dev->class->p->subsys.kobj, &dev->kobj,
+ dev_name(dev), kuid, kgid);
+ if (error)
+ goto out;
+
+out:
+ put_device(dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_change_owner);
+
+/**
+ * device_shutdown - call ->shutdown() on each device to shutdown.
+ */
+void device_shutdown(void)
+{
+ struct device *dev, *parent;
+
+ wait_for_device_probe();
+ device_block_probing();
+
+ cpufreq_suspend();
+
+ spin_lock(&devices_kset->list_lock);
+ /*
+ * Walk the devices list backward, shutting down each in turn.
+ * Beware that device unplug events may also start pulling
+ * devices offline, even as the system is shutting down.
+ */
+ while (!list_empty(&devices_kset->list)) {
+ dev = list_entry(devices_kset->list.prev, struct device,
+ kobj.entry);
+
+ /*
+		 * Hold a reference on the device's parent so that it
+		 * cannot be freed while the parent's lock is taken
+		 * below.
+ */
+ parent = get_device(dev->parent);
+ get_device(dev);
+ /*
+ * Make sure the device is off the kset list, in the
+ * event that dev->*->shutdown() doesn't remove it.
+ */
+ list_del_init(&dev->kobj.entry);
+ spin_unlock(&devices_kset->list_lock);
+
+ /* hold lock to avoid race with probe/release */
+ if (parent)
+ device_lock(parent);
+ device_lock(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
+
+ if (dev->class && dev->class->shutdown_pre) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown_pre\n");
+ dev->class->shutdown_pre(dev);
+ }
+ if (dev->bus && dev->bus->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->bus->shutdown(dev);
+ } else if (dev->driver && dev->driver->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->driver->shutdown(dev);
+ }
+
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+
+ put_device(dev);
+ put_device(parent);
+
+ spin_lock(&devices_kset->list_lock);
+ }
+ spin_unlock(&devices_kset->list_lock);
+}
+
+/*
+ * Device logging functions
+ */
+
+#ifdef CONFIG_PRINTK
+static void
+set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
+{
+ const char *subsys;
+
+ memset(dev_info, 0, sizeof(*dev_info));
+
+ if (dev->class)
+ subsys = dev->class->name;
+ else if (dev->bus)
+ subsys = dev->bus->name;
+ else
+ return;
+
+ strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
+
+ /*
+ * Add device identifier DEVICE=:
+ * b12:8 block dev_t
+ * c127:3 char dev_t
+ * n8 netdev ifindex
+ * +sound:card0 subsystem:devname
+ */
+ if (MAJOR(dev->devt)) {
+ char c;
+
+ if (strcmp(subsys, "block") == 0)
+ c = 'b';
+ else
+ c = 'c';
+
+ snprintf(dev_info->device, sizeof(dev_info->device),
+ "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
+ } else if (strcmp(subsys, "net") == 0) {
+ struct net_device *net = to_net_dev(dev);
+
+ snprintf(dev_info->device, sizeof(dev_info->device),
+ "n%u", net->ifindex);
+ } else {
+ snprintf(dev_info->device, sizeof(dev_info->device),
+ "+%s:%s", subsys, dev_name(dev));
+ }
+}
+
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+{
+ struct dev_printk_info dev_info;
+
+ set_dev_info(dev, &dev_info);
+
+ return vprintk_emit(0, level, &dev_info, fmt, args);
+}
+EXPORT_SYMBOL(dev_vprintk_emit);
+
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ r = dev_vprintk_emit(level, dev, fmt, args);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(dev_printk_emit);
+
+static void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{
+ if (dev)
+ dev_printk_emit(level[1] - '0', dev, "%s %s: %pV",
+ dev_driver_string(dev), dev_name(dev), vaf);
+ else
+ printk("%s(NULL device *): %pV", level, vaf);
+}
+
+void _dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ __dev_printk(level, dev, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(_dev_printk);
+
+#define define_dev_printk_level(func, kern_level) \
+void func(const struct device *dev, const char *fmt, ...) \
+{ \
+ struct va_format vaf; \
+ va_list args; \
+ \
+ va_start(args, fmt); \
+ \
+ vaf.fmt = fmt; \
+ vaf.va = &args; \
+ \
+ __dev_printk(kern_level, dev, &vaf); \
+ \
+ va_end(args); \
+} \
+EXPORT_SYMBOL(func);
+
+define_dev_printk_level(_dev_emerg, KERN_EMERG);
+define_dev_printk_level(_dev_alert, KERN_ALERT);
+define_dev_printk_level(_dev_crit, KERN_CRIT);
+define_dev_printk_level(_dev_err, KERN_ERR);
+define_dev_printk_level(_dev_warn, KERN_WARNING);
+define_dev_printk_level(_dev_notice, KERN_NOTICE);
+define_dev_printk_level(_dev_info, KERN_INFO);
+
+#endif
+
+/**
+ * dev_err_probe - probe error check and log helper
+ * @dev: the pointer to the struct device
+ * @err: error value to test
+ * @fmt: printf-style format string
+ * @...: arguments as specified in the format string
+ *
+ * This helper implements a common pattern present in probe functions for error
+ * checking: it prints a debug or error message depending on whether the error
+ * value is -EPROBE_DEFER and propagates the error upwards.
+ * In case of -EPROBE_DEFER it also sets the deferred probe reason, which can be
+ * checked later by reading the devices_deferred debugfs attribute.
+ * It replaces code sequence::
+ *
+ * if (err != -EPROBE_DEFER)
+ * dev_err(dev, ...);
+ * else
+ * dev_dbg(dev, ...);
+ * return err;
+ *
+ * with::
+ *
+ * return dev_err_probe(dev, err, ...);
+ *
+ * Note that it is deemed acceptable to use this function for error
+ * prints during probe even if the @err is known to never be -EPROBE_DEFER.
+ * The benefit compared to a normal dev_err() is the standardized format
+ * of the error code and the fact that the error code is returned.
+ *
+ * Returns @err.
+ *
+ */
+int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (err != -EPROBE_DEFER) {
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ } else {
+ device_set_deferred_probe_reason(dev, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
+ }
+
+ va_end(args);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_err_probe);
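+
+/*
+ * Hypothetical usage sketch in a probe routine ("core" is an illustrative
+ * clock name):
+ *
+ *	clk = devm_clk_get(dev, "core");
+ *	if (IS_ERR(clk))
+ *		return dev_err_probe(dev, PTR_ERR(clk),
+ *			 	     "failed to get core clock\n");
+ */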
+
+static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
+{
+ return fwnode && !IS_ERR(fwnode->secondary);
+}
+
+/**
+ * set_primary_fwnode - Change the primary firmware node of a given device.
+ * @dev: Device to handle.
+ * @fwnode: New primary firmware node of the device.
+ *
+ * Set the device's firmware node pointer to @fwnode, but if a secondary
+ * firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
+ */
+void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+{
+ struct device *parent = dev->parent;
+ struct fwnode_handle *fn = dev->fwnode;
+
+ if (fwnode) {
+ if (fwnode_is_primary(fn))
+ fn = fn->secondary;
+
+ if (fn) {
+ WARN_ON(fwnode->secondary);
+ fwnode->secondary = fn;
+ }
+ dev->fwnode = fwnode;
+ } else {
+ if (fwnode_is_primary(fn)) {
+ dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
+ if (!(parent && fn == parent->fwnode))
+ fn->secondary = NULL;
+ } else {
+ dev->fwnode = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(set_primary_fwnode);
+
+/**
+ * set_secondary_fwnode - Change the secondary firmware node of a given device.
+ * @dev: Device to handle.
+ * @fwnode: New secondary firmware node of the device.
+ *
+ * If a primary firmware node of the device is present, set its secondary
+ * pointer to @fwnode. Otherwise, set the device's firmware node pointer to
+ * @fwnode.
+ */
+void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
+{
+ if (fwnode)
+ fwnode->secondary = ERR_PTR(-ENODEV);
+
+ if (fwnode_is_primary(dev->fwnode))
+ dev->fwnode->secondary = fwnode;
+ else
+ dev->fwnode = fwnode;
+}
+EXPORT_SYMBOL_GPL(set_secondary_fwnode);
+
+/**
+ * device_set_of_node_from_dev - reuse device-tree node of another device
+ * @dev: device whose device-tree node is being set
+ * @dev2: device whose device-tree node is being reused
+ *
+ * Takes another reference to the new device-tree node after first dropping
+ * any reference held to the old node.
+ */
+void device_set_of_node_from_dev(struct device *dev, const struct device *dev2)
+{
+ of_node_put(dev->of_node);
+ dev->of_node = of_node_get(dev2->of_node);
+ dev->of_node_reused = true;
+}
+EXPORT_SYMBOL_GPL(device_set_of_node_from_dev);
+
+void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
+{
+ dev->fwnode = fwnode;
+ dev->of_node = to_of_node(fwnode);
+}
+EXPORT_SYMBOL_GPL(device_set_node);
+
+int device_match_name(struct device *dev, const void *name)
+{
+ return sysfs_streq(dev_name(dev), name);
+}
+EXPORT_SYMBOL_GPL(device_match_name);
+
+int device_match_of_node(struct device *dev, const void *np)
+{
+ return dev->of_node == np;
+}
+EXPORT_SYMBOL_GPL(device_match_of_node);
+
+int device_match_fwnode(struct device *dev, const void *fwnode)
+{
+ return dev_fwnode(dev) == fwnode;
+}
+EXPORT_SYMBOL_GPL(device_match_fwnode);
+
+int device_match_devt(struct device *dev, const void *pdevt)
+{
+ return dev->devt == *(dev_t *)pdevt;
+}
+EXPORT_SYMBOL_GPL(device_match_devt);
+
+int device_match_acpi_dev(struct device *dev, const void *adev)
+{
+ return ACPI_COMPANION(dev) == adev;
+}
+EXPORT_SYMBOL(device_match_acpi_dev);
+
+int device_match_acpi_handle(struct device *dev, const void *handle)
+{
+ return ACPI_HANDLE(dev) == handle;
+}
+EXPORT_SYMBOL(device_match_acpi_handle);
+
+int device_match_any(struct device *dev, const void *unused)
+{
+ return 1;
+}
+EXPORT_SYMBOL_GPL(device_match_any);
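+
+/*
+ * The device_match_*() helpers above are meant to be passed to lookup
+ * functions such as bus_find_device() or class_find_device().  Hypothetical
+ * sketch (platform_bus_type and np are illustrative choices):
+ *
+ *	dev = bus_find_device(&platform_bus_type, NULL, np,
+ *			      device_match_of_node);
+ *	if (dev) {
+ *		...use dev...
+ *		put_device(dev);
+ *	}
+ */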
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
new file mode 100644
index 000000000..dab70a653
--- /dev/null
+++ b/drivers/base/cpu.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CPU subsystem support
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/device.h>
+#include <linux/node.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/cpufeature.h>
+#include <linux/tick.h>
+#include <linux/pm_qos.h>
+#include <linux/sched/isolation.h>
+
+#include "base.h"
+
+static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
+
+static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+{
+ /* ACPI style match is the only one that may succeed. */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void change_cpu_under_node(struct cpu *cpu,
+ unsigned int from_nid, unsigned int to_nid)
+{
+ int cpuid = cpu->dev.id;
+ unregister_cpu_under_node(cpuid, from_nid);
+ register_cpu_under_node(cpuid, to_nid);
+ cpu->node_id = to_nid;
+}
+
+static int cpu_subsys_online(struct device *dev)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = dev->id;
+ int from_nid, to_nid;
+ int ret;
+
+ from_nid = cpu_to_node(cpuid);
+ if (from_nid == NUMA_NO_NODE)
+ return -ENODEV;
+
+ ret = cpu_device_up(dev);
+ /*
+	 * When hot-adding memory to a memoryless node and enabling a CPU
+	 * on that node, the CPU's node number may change internally.
+ */
+ to_nid = cpu_to_node(cpuid);
+ if (from_nid != to_nid)
+ change_cpu_under_node(cpu, from_nid, to_nid);
+
+ return ret;
+}
+
+static int cpu_subsys_offline(struct device *dev)
+{
+ return cpu_device_down(dev);
+}
+
+void unregister_cpu(struct cpu *cpu)
+{
+ int logical_cpu = cpu->dev.id;
+
+ unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
+
+ device_unregister(&cpu->dev);
+ per_cpu(cpu_sys_devices, logical_cpu) = NULL;
+ return;
+}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+static ssize_t cpu_probe_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_probe(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
+}
+
+static ssize_t cpu_release_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ ssize_t cnt;
+ int ret;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ cnt = arch_cpu_release(buf, count);
+
+ unlock_device_hotplug();
+ return cnt;
+}
+
+static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+#endif /* CONFIG_HOTPLUG_CPU */
+
+struct bus_type cpu_subsys = {
+ .name = "cpu",
+ .dev_name = "cpu",
+ .match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+ .online = cpu_subsys_online,
+ .offline = cpu_subsys_offline,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
+#ifdef CONFIG_KEXEC
+#include <linux/kexec.h>
+
+static ssize_t crash_notes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ unsigned long long addr;
+ int cpunum;
+
+ cpunum = cpu->dev.id;
+
+ /*
+	 * We might be reading another CPU's data depending on which CPU the
+	 * reading thread has been scheduled on. But CPU data (memory) is
+	 * allocated once during boot and does not change thereafter. Hence this
+ * operation should be safe. No locking required.
+ */
+ addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
+
+ return sysfs_emit(buf, "%llx\n", addr);
+}
+static DEVICE_ATTR_ADMIN_RO(crash_notes);
+
+static ssize_t crash_notes_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t));
+}
+static DEVICE_ATTR_ADMIN_RO(crash_notes_size);
+
+static struct attribute *crash_note_cpu_attrs[] = {
+ &dev_attr_crash_notes.attr,
+ &dev_attr_crash_notes_size.attr,
+ NULL
+};
+
+static const struct attribute_group crash_note_cpu_attr_group = {
+ .attrs = crash_note_cpu_attrs,
+};
+#endif
+
+static const struct attribute_group *common_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+ &crash_note_cpu_attr_group,
+#endif
+ NULL
+};
+
+static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
+#ifdef CONFIG_KEXEC
+ &crash_note_cpu_attr_group,
+#endif
+ NULL
+};
+
+/*
+ * Print cpu online, possible, present, and system maps
+ */
+
+struct cpu_attr {
+ struct device_attribute attr;
+ const struct cpumask *const map;
+};
+
+static ssize_t show_cpus_attr(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
+
+ return cpumap_print_to_pagebuf(true, buf, ca->map);
+}
+
+#define _CPU_ATTR(name, map) \
+ { __ATTR(name, 0444, show_cpus_attr, NULL), map }
+
+/* Keep in sync with cpu_subsys_attrs */
+static struct cpu_attr cpu_attrs[] = {
+ _CPU_ATTR(online, &__cpu_online_mask),
+ _CPU_ATTR(possible, &__cpu_possible_mask),
+ _CPU_ATTR(present, &__cpu_present_mask),
+};
+
+/*
+ * Print values for NR_CPUS and offlined cpus
+ */
+static ssize_t print_cpus_kernel_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", NR_CPUS - 1);
+}
+static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+
+/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
+unsigned int total_cpus;
+
+static ssize_t print_cpus_offline(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int len = 0;
+ cpumask_var_t offline;
+
+ /* display offline cpus < nr_cpu_ids */
+ if (!alloc_cpumask_var(&offline, GFP_KERNEL))
+ return -ENOMEM;
+ cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
+ len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline));
+ free_cpumask_var(offline);
+
+ /* display offline cpus >= nr_cpu_ids */
+ if (total_cpus && nr_cpu_ids < total_cpus) {
+ len += sysfs_emit_at(buf, len, ",");
+
+ if (nr_cpu_ids == total_cpus-1)
+ len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);
+ else
+ len += sysfs_emit_at(buf, len, "%u-%d",
+ nr_cpu_ids, total_cpus - 1);
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+
+static ssize_t print_cpus_isolated(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int len;
+ cpumask_var_t isolated;
+
+ if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_andnot(isolated, cpu_possible_mask,
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+ len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));
+
+ free_cpumask_var(isolated);
+
+ return len;
+}
+static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+
+#ifdef CONFIG_NO_HZ_FULL
+static ssize_t print_cpus_nohz_full(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+}
+static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+#endif
+
+static void cpu_device_release(struct device *dev)
+{
+ /*
+ * This is an empty function to prevent the driver core from spitting a
+ * warning at us. Yes, I know this is directly opposite of what the
+ * documentation for the driver core and kobjects say, and the author
+	 * of this code has already been publicly ridiculed for doing
+ * something as foolish as this. However, at this point in time, it is
+ * the only way to handle the issue of statically allocated cpu
+ * devices. The different architectures will have their cpu device
+ * code reworked to properly handle this in the near future, so this
+ * function will then be changed to correctly free up the memory held
+ * by the cpu device.
+ *
+ * Never copy this way of doing things, or you too will be made fun of
+ * on the linux-kernel list, you have been warned.
+ */
+}
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static ssize_t print_cpu_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ u32 i;
+
+ len += sysfs_emit_at(buf, len,
+ "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
+ CPU_FEATURE_TYPEVAL);
+
+ for (i = 0; i < MAX_CPU_FEATURES; i++)
+ if (cpu_have_feature(i)) {
+ if (len + sizeof(",XXXX\n") >= PAGE_SIZE) {
+ WARN(1, "CPU features overflow page\n");
+ break;
+ }
+ len += sysfs_emit_at(buf, len, ",%04X", i);
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
+}
+
+static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buf) {
+ print_cpu_modalias(NULL, NULL, buf);
+ add_uevent_var(env, "MODALIAS=%s", buf);
+ kfree(buf);
+ }
+ return 0;
+}
+#endif
+
+/*
+ * register_cpu - Setup a sysfs device for a CPU.
+ * @cpu - the CPU structure to register; setting cpu->hotpluggable to 1 will
+ *        generate a control file in sysfs for this CPU.
+ * @num - CPU number to use when creating the device.
+ *
+ * Initialize and register the CPU device.
+ */
+int register_cpu(struct cpu *cpu, int num)
+{
+ int error;
+
+ cpu->node_id = cpu_to_node(num);
+ memset(&cpu->dev, 0x00, sizeof(struct device));
+ cpu->dev.id = num;
+ cpu->dev.bus = &cpu_subsys;
+ cpu->dev.release = cpu_device_release;
+ cpu->dev.offline_disabled = !cpu->hotpluggable;
+ cpu->dev.offline = !cpu_online(num);
+ cpu->dev.of_node = of_get_cpu_node(num, NULL);
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = cpu_uevent;
+#endif
+ cpu->dev.groups = common_cpu_attr_groups;
+ if (cpu->hotpluggable)
+ cpu->dev.groups = hotplugable_cpu_attr_groups;
+ error = device_register(&cpu->dev);
+ if (error) {
+ put_device(&cpu->dev);
+ return error;
+ }
+
+ per_cpu(cpu_sys_devices, num) = &cpu->dev;
+ register_cpu_under_node(num, cpu_to_node(num));
+ dev_pm_qos_expose_latency_limit(&cpu->dev,
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+
+ return 0;
+}
+
+struct device *get_cpu_device(unsigned int cpu)
+{
+ if (cpu < nr_cpu_ids && cpu_possible(cpu))
+ return per_cpu(cpu_sys_devices, cpu);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(get_cpu_device);
+
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+__printf(4, 0)
+static struct device *
+__cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, va_list args)
+{
+ struct device *dev = NULL;
+ int retval = -ENOMEM;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto error;
+
+ device_initialize(dev);
+ dev->parent = parent;
+ dev->groups = groups;
+ dev->release = device_create_release;
+ device_set_pm_not_required(dev);
+ dev_set_drvdata(dev, drvdata);
+
+ retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(cpu_device_create);
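+
+/*
+ * Hypothetical usage sketch ("cache" is an illustrative name): create a
+ * child device under a CPU's sysfs node.
+ *
+ *	cpu_dev = get_cpu_device(cpu);
+ *	if (!cpu_dev)
+ *		return -ENODEV;
+ *	dev = cpu_device_create(cpu_dev, NULL, NULL, "cache");
+ *	if (IS_ERR(dev))
+ *		return PTR_ERR(dev);
+ */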
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
+#endif
+
+static struct attribute *cpu_root_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ &dev_attr_probe.attr,
+ &dev_attr_release.attr,
+#endif
+ &cpu_attrs[0].attr.attr,
+ &cpu_attrs[1].attr.attr,
+ &cpu_attrs[2].attr.attr,
+ &dev_attr_kernel_max.attr,
+ &dev_attr_offline.attr,
+ &dev_attr_isolated.attr,
+#ifdef CONFIG_NO_HZ_FULL
+ &dev_attr_nohz_full.attr,
+#endif
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ &dev_attr_modalias.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group cpu_root_attr_group = {
+ .attrs = cpu_root_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &cpu_root_attr_group,
+ NULL,
+};
+
+bool cpu_is_hotpluggable(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable
+ && tick_nohz_cpu_hotpluggable(cpu);
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
+
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+#endif
+
+static void __init cpu_dev_register_generic(void)
+{
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+ int i;
+
+ for_each_possible_cpu(i) {
+ if (register_cpu(&per_cpu(cpu_devices, i), i))
+ panic("Failed to register CPU device");
+ }
+#endif
+}
+
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+
+ssize_t __weak cpu_show_meltdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_srbds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_retbleed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_gds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "Not affected\n");
+}
+
+static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
+static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
+static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+
+static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
+ &dev_attr_mds.attr,
+ &dev_attr_tsx_async_abort.attr,
+ &dev_attr_itlb_multihit.attr,
+ &dev_attr_srbds.attr,
+ &dev_attr_mmio_stale_data.attr,
+ &dev_attr_retbleed.attr,
+ &dev_attr_gather_data_sampling.attr,
+ &dev_attr_spec_rstack_overflow.attr,
+ NULL
+};
+
+static const struct attribute_group cpu_root_vulnerabilities_group = {
+ .name = "vulnerabilities",
+ .attrs = cpu_root_vulnerabilities_attrs,
+};
+
+static void __init cpu_register_vulnerabilities(void)
+{
+ if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+}
+
+#else
+static inline void cpu_register_vulnerabilities(void) { }
+#endif
+
+void __init cpu_dev_init(void)
+{
+ if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
+ panic("Failed to register CPU subsystem");
+
+ cpu_dev_register_generic();
+ cpu_register_vulnerabilities();
+}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
new file mode 100644
index 000000000..dbbe2cebb
--- /dev/null
+++ b/drivers/base/dd.c
@@ -0,0 +1,1352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/dd.c - The core device/driver interactions.
+ *
+ * This file contains the (sometimes tricky) code that controls the
+ * interactions between devices and drivers, which primarily includes
+ * driver binding and unbinding.
+ *
+ * All of this code used to exist in drivers/base/bus.c, but was
+ * relocated here in the name of compartmentalization (since it wasn't
+ * strictly code just for the 'struct bus_type').
+ *
+ * Copyright (c) 2002-5 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007-2009 Novell Inc.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-map-ops.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/async.h>
+#include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/slab.h>
+
+#include "base.h"
+#include "power/power.h"
+
+/*
+ * Deferred Probe infrastructure.
+ *
+ * Sometimes driver probe order matters, but the kernel doesn't always have
+ * dependency information which means some drivers will get probed before a
+ * resource it depends on is available. For example, an SDHCI driver may
+ * first need a GPIO line from an i2c GPIO controller before it can be
+ * initialized. If a required resource is not available yet, a driver can
+ * request probing to be deferred by returning -EPROBE_DEFER from its probe hook.
+ *
+ * Deferred probe maintains two lists of devices, a pending list and an active
+ * list. A driver returning -EPROBE_DEFER causes the device to be added to the
+ * pending list. A successful driver probe will trigger moving all devices
+ * from the pending to the active list so that the workqueue will eventually
+ * retry them.
+ *
+ * The deferred_probe_mutex must be held any time the deferred_probe_*_lists
+ * or the (struct device *)->p->deferred_probe pointers are manipulated.
+ */
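+
+/*
+ * Hypothetical sketch of a probe hook deferring on a missing resource
+ * ("foo" is an illustrative GPIO function name):
+ *
+ *	gpiod = devm_gpiod_get(dev, "foo", GPIOD_OUT_LOW);
+ *	if (IS_ERR(gpiod))
+ *		return dev_err_probe(dev, PTR_ERR(gpiod),
+ *			 	     "failed to get foo GPIO\n");
+ */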
+static DEFINE_MUTEX(deferred_probe_mutex);
+static LIST_HEAD(deferred_probe_pending_list);
+static LIST_HEAD(deferred_probe_active_list);
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static bool initcalls_done;
+
+/* Save the async probe drivers' name from kernel cmdline */
+#define ASYNC_DRV_NAMES_MAX_LEN 256
+static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN];
+static bool async_probe_default;
+
+/*
+ * In some cases, like suspend to RAM or hibernation, it might be reasonable
+ * to prohibit probing of devices as it could be unsafe.
+ * Once defer_all_probes is true, all driver probes will be forcibly deferred.
+ */
+static bool defer_all_probes;
+
+static void __device_set_deferred_probe_reason(const struct device *dev, char *reason)
+{
+ kfree(dev->p->deferred_probe_reason);
+ dev->p->deferred_probe_reason = reason;
+}
+
+/*
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+ */
+static void deferred_probe_work_func(struct work_struct *work)
+{
+ struct device *dev;
+ struct device_private *private;
+ /*
+ * This block processes every device in the deferred 'active' list.
+ * Each device is removed from the active list and passed to
+ * bus_probe_device() to re-attempt the probe. The loop continues
+ * until every device in the active list is removed and retried.
+ *
+ * Note: Once the device is removed from the list and the mutex is
+ * released, it is possible for the device to get freed by another thread
+ * and cause an illegal pointer dereference. This code uses
+ * get/put_device() to ensure the device structure cannot disappear
+ * from under our feet.
+ */
+ mutex_lock(&deferred_probe_mutex);
+ while (!list_empty(&deferred_probe_active_list)) {
+ private = list_first_entry(&deferred_probe_active_list,
+ typeof(*dev->p), deferred_probe);
+ dev = private->device;
+ list_del_init(&private->deferred_probe);
+
+ get_device(dev);
+
+ __device_set_deferred_probe_reason(dev, NULL);
+
+ /*
+ * Drop the mutex while probing each device; the probe path may
+ * manipulate the deferred list
+ */
+ mutex_unlock(&deferred_probe_mutex);
+
+ /*
+ * Force the device to the end of the dpm_list since
+ * the PM code assumes that the order we add things to
+ * the list is a good order for suspend but deferred
+ * probe makes that very unsafe.
+ */
+ device_pm_move_to_tail(dev);
+
+ dev_dbg(dev, "Retrying from deferred list\n");
+ bus_probe_device(dev);
+ mutex_lock(&deferred_probe_mutex);
+
+ put_device(dev);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+
+void driver_deferred_probe_add(struct device *dev)
+{
+ if (!dev->can_match)
+ return;
+
+ mutex_lock(&deferred_probe_mutex);
+ if (list_empty(&dev->p->deferred_probe)) {
+ dev_dbg(dev, "Added to deferred list\n");
+ list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+
+void driver_deferred_probe_del(struct device *dev)
+{
+ mutex_lock(&deferred_probe_mutex);
+ if (!list_empty(&dev->p->deferred_probe)) {
+ dev_dbg(dev, "Removed from deferred list\n");
+ list_del_init(&dev->p->deferred_probe);
+ __device_set_deferred_probe_reason(dev, NULL);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+
+static bool driver_deferred_probe_enable;
+/**
+ * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
+ *
+ * This function moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them. It
+ * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
+ */
+void driver_deferred_probe_trigger(void)
+{
+ if (!driver_deferred_probe_enable)
+ return;
+
+ /*
+ * A successful probe means that all the devices in the pending list
+ * should be triggered to be reprobed. Move all the deferred devices
+ * into the active list so they can be retried by the workqueue
+ */
+ mutex_lock(&deferred_probe_mutex);
+ atomic_inc(&deferred_trigger_count);
+ list_splice_tail_init(&deferred_probe_pending_list,
+ &deferred_probe_active_list);
+ mutex_unlock(&deferred_probe_mutex);
+
+ /*
+ * Kick the re-probe thread. It may already be scheduled, but it is
+ * safe to kick it again.
+ */
+ queue_work(system_unbound_wq, &deferred_probe_work);
+}
+
+/**
+ * device_block_probing() - Block/defer devices' probes
+ *
+ * It will disable probing of devices and defer their probes instead.
+ */
+void device_block_probing(void)
+{
+ defer_all_probes = true;
+ /* sync with probes to avoid races. */
+ wait_for_device_probe();
+}
+
+/**
+ * device_unblock_probing() - Unblock/enable devices' probes
+ *
+ * It will restore normal behavior and trigger re-probing of deferred
+ * devices.
+ */
+void device_unblock_probing(void)
+{
+ defer_all_probes = false;
+ driver_deferred_probe_trigger();
+}
+
+/**
+ * device_set_deferred_probe_reason() - Set defer probe reason message for device
+ * @dev: the pointer to the struct device
+ * @vaf: the pointer to va_format structure with message
+ */
+void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf)
+{
+ const char *drv = dev_driver_string(dev);
+ char *reason;
+
+ mutex_lock(&deferred_probe_mutex);
+
+ reason = kasprintf(GFP_KERNEL, "%s: %pV", drv, vaf);
+ __device_set_deferred_probe_reason(dev, reason);
+
+ mutex_unlock(&deferred_probe_mutex);
+}
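+
+/*
+ * Illustrative sketch only (not part of the original file): a hypothetical
+ * consumer driver's probe hook that defers when a required clock is not yet
+ * available. In the deferral case, dev_err_probe() both returns the error and
+ * records the reason string that device_set_deferred_probe_reason() stores
+ * for the device:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+ *
+ *		if (IS_ERR(clk))
+ *			return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ *					     "clock not ready\n");
+ *		return 0;
+ *	}
+ */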
+
+/*
+ * deferred_devs_show() - Show the devices in the deferred probe pending list.
+ */
+static int deferred_devs_show(struct seq_file *s, void *data)
+{
+ struct device_private *curr;
+
+ mutex_lock(&deferred_probe_mutex);
+
+ list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
+ seq_printf(s, "%s\t%s", dev_name(curr->device),
+ curr->device->p->deferred_probe_reason ?: "\n");
+
+ mutex_unlock(&deferred_probe_mutex);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(deferred_devs);
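+
+/*
+ * Illustrative (made-up) output of /sys/kernel/debug/devices_deferred; the
+ * second column is the recorded deferred_probe_reason, when one exists:
+ *
+ *	1-0051
+ *	soc:foo@1000	platform: supplier soc:bar@2000 not ready
+ */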
+
+#ifdef CONFIG_MODULES
+int driver_deferred_probe_timeout = 10;
+#else
+int driver_deferred_probe_timeout;
+#endif
+
+EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
+
+static int __init deferred_probe_timeout_setup(char *str)
+{
+ int timeout;
+
+ if (!kstrtoint(str, 10, &timeout))
+ driver_deferred_probe_timeout = timeout;
+ return 1;
+}
+__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Return:
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
+ *
+ * Drivers or subsystems can opt in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
+ return -ENODEV;
+ }
+
+ if (!driver_deferred_probe_timeout && initcalls_done) {
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
+ return -ETIMEDOUT;
+ }
+
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
+
+static void deferred_probe_timeout_work_func(struct work_struct *work)
+{
+ struct device_private *p;
+
+ fw_devlink_drivers_done();
+
+ driver_deferred_probe_timeout = 0;
+ driver_deferred_probe_trigger();
+ flush_work(&deferred_probe_work);
+
+ mutex_lock(&deferred_probe_mutex);
+ list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+ dev_info(p->device, "deferred probe pending\n");
+ mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+
+void deferred_probe_extend_timeout(void)
+{
+ /*
+ * If the work hasn't been queued yet or if it has already expired, don't
+ * start a new one.
+ */
+ if (cancel_delayed_work(&deferred_probe_timeout_work)) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ driver_deferred_probe_timeout * HZ);
+ pr_debug("Extended deferred probe timeout by %d secs\n",
+ driver_deferred_probe_timeout);
+ }
+}
+
+/**
+ * deferred_probe_initcall() - Enable probing of deferred devices
+ *
+ * We don't want to get in the way when the bulk of drivers are getting probed.
+ * Instead, this initcall makes sure that deferred probing is delayed until
+ * late_initcall time.
+ */
+static int deferred_probe_initcall(void)
+{
+ debugfs_create_file("devices_deferred", 0444, NULL, NULL,
+ &deferred_devs_fops);
+
+ driver_deferred_probe_enable = true;
+ driver_deferred_probe_trigger();
+ /* Sort as many dependencies as possible before exiting initcalls */
+ flush_work(&deferred_probe_work);
+ initcalls_done = true;
+
+ if (!IS_ENABLED(CONFIG_MODULES))
+ fw_devlink_drivers_done();
+
+ /*
+ * Trigger deferred probe again; this time we won't defer anything
+ * that is optional.
+ */
+ driver_deferred_probe_trigger();
+ flush_work(&deferred_probe_work);
+
+ if (driver_deferred_probe_timeout > 0) {
+ schedule_delayed_work(&deferred_probe_timeout_work,
+ driver_deferred_probe_timeout * HZ);
+ }
+ return 0;
+}
+late_initcall(deferred_probe_initcall);
+
+static void __exit deferred_probe_exit(void)
+{
+ debugfs_lookup_and_remove("devices_deferred", NULL);
+}
+__exitcall(deferred_probe_exit);
+
+/**
+ * device_is_bound() - Check if device is bound to a driver
+ * @dev: device to check
+ *
+ * Returns true if the passed device has already finished probing successfully
+ * against a driver.
+ *
+ * This function must be called with the device lock held.
+ */
+bool device_is_bound(struct device *dev)
+{
+ return dev->p && klist_node_attached(&dev->p->knode_driver);
+}
+
+static void driver_bound(struct device *dev)
+{
+ if (device_is_bound(dev)) {
+ pr_warn("%s: device %s already bound\n",
+ __func__, kobject_name(&dev->kobj));
+ return;
+ }
+
+ pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
+ __func__, dev_name(dev));
+
+ klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_links_driver_bound(dev);
+
+ device_pm_check_callbacks(dev);
+
+ /*
+ * Make sure the device is no longer in one of the deferred lists and
+ * kick off retrying all pending devices
+ */
+ driver_deferred_probe_del(dev);
+ driver_deferred_probe_trigger();
+
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_BOUND_DRIVER, dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_BIND);
+}
+
+static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ device_lock(dev);
+ dev->driver->coredump(dev);
+ device_unlock(dev);
+
+ return count;
+}
+static DEVICE_ATTR_WO(coredump);
+
+static int driver_sysfs_add(struct device *dev)
+{
+ int ret;
+
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_BIND_DRIVER, dev);
+
+ ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
+ kobject_name(&dev->kobj));
+ if (ret)
+ goto fail;
+
+ ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
+ "driver");
+ if (ret)
+ goto rm_dev;
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump)
+ return 0;
+
+ ret = device_create_file(dev, &dev_attr_coredump);
+ if (!ret)
+ return 0;
+
+ sysfs_remove_link(&dev->kobj, "driver");
+
+rm_dev:
+ sysfs_remove_link(&dev->driver->p->kobj,
+ kobject_name(&dev->kobj));
+
+fail:
+ return ret;
+}
+
+static void driver_sysfs_remove(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv) {
+ if (drv->coredump)
+ device_remove_file(dev, &dev_attr_coredump);
+ sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
+ sysfs_remove_link(&dev->kobj, "driver");
+ }
+}
+
+/**
+ * device_bind_driver - bind a driver to one device.
+ * @dev: device.
+ *
+ * Allow manual attachment of a driver to a device.
+ * Caller must have already set @dev->driver.
+ *
+ * Note that this does not modify the bus reference count.
+ * Please verify that is accounted for before calling this.
+ * (It is ok to call with no other effort from a driver's probe() method.)
+ *
+ * This function must be called with the device lock held.
+ *
+ * Callers should prefer to use device_driver_attach() instead.
+ */
+int device_bind_driver(struct device *dev)
+{
+ int ret;
+
+ ret = driver_sysfs_add(dev);
+ if (!ret) {
+ device_links_force_bind(dev);
+ driver_bound(dev);
+ }
+ else if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_bind_driver);
+
+static atomic_t probe_count = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
+
+static ssize_t state_synced_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool val;
+
+ device_lock(dev);
+ val = dev->state_synced;
+ device_unlock(dev);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+static DEVICE_ATTR_RO(state_synced);
+
+static void device_unbind_cleanup(struct device *dev)
+{
+ devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
+ dev->driver = NULL;
+ dev_set_drvdata(dev, NULL);
+ if (dev->pm_domain && dev->pm_domain->dismiss)
+ dev->pm_domain->dismiss(dev);
+ pm_runtime_reinit(dev);
+ dev_pm_set_driver_flags(dev, 0);
+}
+
+static void device_remove(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_state_synced);
+ device_remove_groups(dev, dev->driver->dev_groups);
+
+ if (dev->bus && dev->bus->remove)
+ dev->bus->remove(dev);
+ else if (dev->driver->remove)
+ dev->driver->remove(dev);
+}
+
+static int call_driver_probe(struct device *dev, struct device_driver *drv)
+{
+ int ret = 0;
+
+ if (dev->bus->probe)
+ ret = dev->bus->probe(dev);
+ else if (drv->probe)
+ ret = drv->probe(dev);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EPROBE_DEFER:
+ /* Driver requested deferred probing */
+ dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
+ break;
+ case -ENODEV:
+ case -ENXIO:
+ pr_debug("%s: probe of %s rejects match %d\n",
+ drv->name, dev_name(dev), ret);
+ break;
+ default:
+ /* driver matched but the probe failed */
+ pr_warn("%s: probe of %s failed with error %d\n",
+ drv->name, dev_name(dev), ret);
+ break;
+ }
+
+ return ret;
+}
+
+static int really_probe(struct device *dev, struct device_driver *drv)
+{
+ bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
+ !drv->suppress_bind_attrs;
+ int ret, link_ret;
+
+ if (defer_all_probes) {
+ /*
+ * Value of defer_all_probes can be set only by
+ * device_block_probing() which, in turn, will call
+ * wait_for_device_probe() right after that to avoid any races.
+ */
+ dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
+ return -EPROBE_DEFER;
+ }
+
+ link_ret = device_links_check_suppliers(dev);
+ if (link_ret == -EPROBE_DEFER)
+ return link_ret;
+
+ pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
+ drv->bus->name, __func__, drv->name, dev_name(dev));
+ if (!list_empty(&dev->devres_head)) {
+ dev_crit(dev, "Resources present before probing\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+re_probe:
+ dev->driver = drv;
+
+ /* If using pinctrl, bind pins now before probing */
+ ret = pinctrl_bind_pins(dev);
+ if (ret)
+ goto pinctrl_bind_failed;
+
+ if (dev->bus->dma_configure) {
+ ret = dev->bus->dma_configure(dev);
+ if (ret)
+ goto pinctrl_bind_failed;
+ }
+
+ ret = driver_sysfs_add(dev);
+ if (ret) {
+ pr_err("%s: driver_sysfs_add(%s) failed\n",
+ __func__, dev_name(dev));
+ goto sysfs_failed;
+ }
+
+ if (dev->pm_domain && dev->pm_domain->activate) {
+ ret = dev->pm_domain->activate(dev);
+ if (ret)
+ goto probe_failed;
+ }
+
+ ret = call_driver_probe(dev, drv);
+ if (ret) {
+ /*
+ * If fw_devlink_best_effort is active (denoted by -EAGAIN), the
+ * device might actually probe properly once some of its missing
+ * suppliers have probed. So, treat this as if the driver
+ * returned -EPROBE_DEFER.
+ */
+ if (link_ret == -EAGAIN)
+ ret = -EPROBE_DEFER;
+
+ /*
+ * Return probe errors as positive values so that the callers
+ * can distinguish them from other errors.
+ */
+ ret = -ret;
+ goto probe_failed;
+ }
+
+ ret = device_add_groups(dev, drv->dev_groups);
+ if (ret) {
+ dev_err(dev, "device_add_groups() failed\n");
+ goto dev_groups_failed;
+ }
+
+ if (dev_has_sync_state(dev)) {
+ ret = device_create_file(dev, &dev_attr_state_synced);
+ if (ret) {
+ dev_err(dev, "state_synced sysfs add failed\n");
+ goto dev_sysfs_state_synced_failed;
+ }
+ }
+
+ if (test_remove) {
+ test_remove = false;
+
+ device_remove(dev);
+ driver_sysfs_remove(dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+ device_unbind_cleanup(dev);
+
+ goto re_probe;
+ }
+
+ pinctrl_init_done(dev);
+
+ if (dev->pm_domain && dev->pm_domain->sync)
+ dev->pm_domain->sync(dev);
+
+ driver_bound(dev);
+ pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
+ drv->bus->name, __func__, dev_name(dev), drv->name);
+ goto done;
+
+dev_sysfs_state_synced_failed:
+dev_groups_failed:
+ device_remove(dev);
+probe_failed:
+ driver_sysfs_remove(dev);
+sysfs_failed:
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+pinctrl_bind_failed:
+ device_links_no_driver(dev);
+ device_unbind_cleanup(dev);
+done:
+ return ret;
+}
+
+/*
+ * For initcall_debug, show the driver probe time.
+ */
+static int really_probe_debug(struct device *dev, struct device_driver *drv)
+{
+ ktime_t calltime, rettime;
+ int ret;
+
+ calltime = ktime_get();
+ ret = really_probe(dev, drv);
+ rettime = ktime_get();
+ /*
+ * Don't change this to pr_debug() because that requires
+ * CONFIG_DYNAMIC_DEBUG and we want a simple 'initcall_debug' on the
+ * kernel commandline to print this all the time at the debug level.
+ */
+ printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
+ dev_name(dev), ret, ktime_us_delta(rettime, calltime));
+ return ret;
+}
+
+/**
+ * driver_probe_done - Determine if the probe sequence is finished or not.
+ *
+ * Should somehow figure out how to use a semaphore, not an atomic variable...
+ */
+int driver_probe_done(void)
+{
+ int local_probe_count = atomic_read(&probe_count);
+
+ pr_debug("%s: probe_count = %d\n", __func__, local_probe_count);
+ if (local_probe_count)
+ return -EBUSY;
+ return 0;
+}
+
+/**
+ * wait_for_device_probe - Wait for device probing to be completed.
+ */
+void wait_for_device_probe(void)
+{
+ /* wait for the deferred probe workqueue to finish */
+ flush_work(&deferred_probe_work);
+
+ /* wait for the known devices to complete their probing */
+ wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
+ async_synchronize_full();
+}
+EXPORT_SYMBOL_GPL(wait_for_device_probe);
+
+static int __driver_probe_device(struct device_driver *drv, struct device *dev)
+{
+ int ret = 0;
+
+ if (dev->p->dead || !device_is_registered(dev))
+ return -ENODEV;
+ if (dev->driver)
+ return -EBUSY;
+
+ dev->can_match = true;
+ pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
+ drv->bus->name, __func__, dev_name(dev), drv->name);
+
+ pm_runtime_get_suppliers(dev);
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ pm_runtime_barrier(dev);
+ if (initcall_debug)
+ ret = really_probe_debug(dev, drv);
+ else
+ ret = really_probe(dev, drv);
+ pm_request_idle(dev);
+
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+
+ pm_runtime_put_suppliers(dev);
+ return ret;
+}
+
+/**
+ * driver_probe_device - attempt to bind device & driver together
+ * @drv: driver to bind a device to
+ * @dev: device to try to bind to the driver
+ *
+ * This function returns -ENODEV if the device is not registered, -EBUSY if it
+ * already has a driver, 0 if the device is bound successfully and a positive
+ * (inverted) error code for failures from the ->probe method.
+ *
+ * This function must be called with @dev lock held. When called for a
+ * USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
+ */
+static int driver_probe_device(struct device_driver *drv, struct device *dev)
+{
+ int trigger_count = atomic_read(&deferred_trigger_count);
+ int ret;
+
+ atomic_inc(&probe_count);
+ ret = __driver_probe_device(drv, dev);
+ if (ret == -EPROBE_DEFER || ret == EPROBE_DEFER) {
+ driver_deferred_probe_add(dev);
+
+ /*
+ * Did a trigger occur while probing? Need to re-trigger if yes
+ */
+ if (trigger_count != atomic_read(&deferred_trigger_count) &&
+ !defer_all_probes)
+ driver_deferred_probe_trigger();
+ }
+ atomic_dec(&probe_count);
+ wake_up_all(&probe_waitqueue);
+ return ret;
+}
+
+static inline bool cmdline_requested_async_probing(const char *drv_name)
+{
+ bool async_drv;
+
+ async_drv = parse_option_str(async_probe_drv_names, drv_name);
+
+ return (async_probe_default != async_drv);
+}
+
+/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */
+static int __init save_async_options(char *buf)
+{
+ if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN)
+ pr_warn("Too long list of driver names for 'driver_async_probe'!\n");
+
+ strscpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN);
+ async_probe_default = parse_option_str(async_probe_drv_names, "*");
+
+ return 1;
+}
+__setup("driver_async_probe=", save_async_options);
+
+bool driver_allows_async_probing(struct device_driver *drv)
+{
+ switch (drv->probe_type) {
+ case PROBE_PREFER_ASYNCHRONOUS:
+ return true;
+
+ case PROBE_FORCE_SYNCHRONOUS:
+ return false;
+
+ default:
+ if (cmdline_requested_async_probing(drv->name))
+ return true;
+
+ if (module_requested_async_probing(drv->owner))
+ return true;
+
+ return false;
+ }
+}
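+
+/*
+ * Illustrative sketch (names are placeholders, not from this file): a driver
+ * opts in to asynchronous probing by setting probe_type in its
+ * struct device_driver, e.g. via a platform_driver:
+ *
+ *	static struct platform_driver foo_driver = {
+ *		.probe	= foo_probe,
+ *		.driver	= {
+ *			.name		= "foo",
+ *			.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
+ *		},
+ *	};
+ *
+ * Alternatively, "driver_async_probe=foo" on the kernel command line requests
+ * the same for a driver named foo, and "driver_async_probe=*" flips the
+ * default for every driver that did not force a probe_type.
+ */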
+
+struct device_attach_data {
+ struct device *dev;
+
+ /*
+ * Indicates whether we are considering asynchronous probing or
+ * not. Only initial binding after device or driver registration
+ * (including deferral processing) may be done asynchronously; the
+ * rest is always synchronous, as we expect it is being done by
+ * request from userspace.
+ */
+ bool check_async;
+
+ /*
+ * Indicates if we are binding synchronous or asynchronous drivers.
+ * When asynchronous probing is enabled we'll execute 2 passes
+ * over drivers: the first pass does synchronous probing and the second
+ * does asynchronous probing (if synchronous probing did not succeed -
+ * most likely because there was no driver requiring synchronous
+ * probing - and we found an asynchronous driver during the first pass).
+ * The 2 passes are done because we can't issue an asynchronous
+ * probe for a given device and driver from bus_for_each_drv() since the
+ * driver pointer is not guaranteed to stay valid once
+ * bus_for_each_drv() iterates to the next driver on the bus.
+ */
+ bool want_async;
+
+ /*
+ * We'll set have_async to 'true' if, while scanning for a matching
+ * driver, we encounter one that requests asynchronous probing.
+ */
+ bool have_async;
+};
+
+static int __device_attach_driver(struct device_driver *drv, void *_data)
+{
+ struct device_attach_data *data = _data;
+ struct device *dev = data->dev;
+ bool async_allowed;
+ int ret;
+
+ ret = driver_match_device(drv, dev);
+ if (ret == 0) {
+ /* no match */
+ return 0;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ dev->can_match = true;
+ driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
+ return ret;
+ } /* ret > 0 means positive match */
+
+ async_allowed = driver_allows_async_probing(drv);
+
+ if (async_allowed)
+ data->have_async = true;
+
+ if (data->check_async && async_allowed != data->want_async)
+ return 0;
+
+ /*
+ * Ignore errors returned by ->probe so that the next driver can try
+ * its luck.
+ */
+ ret = driver_probe_device(drv, dev);
+ if (ret < 0)
+ return ret;
+ return ret == 0;
+}
+
+static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
+{
+ struct device *dev = _dev;
+ struct device_attach_data data = {
+ .dev = dev,
+ .check_async = true,
+ .want_async = true,
+ };
+
+ device_lock(dev);
+
+ /*
+ * Check if the device has already been removed or claimed. This may
+ * happen when driver loading, device discovery/registration,
+ * and deferred probe processing all happen at once across
+ * multiple threads.
+ */
+ if (dev->p->dead || dev->driver)
+ goto out_unlock;
+
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
+ dev_dbg(dev, "async probe completed\n");
+
+ pm_request_idle(dev);
+
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+out_unlock:
+ device_unlock(dev);
+
+ put_device(dev);
+}
+
+static int __device_attach(struct device *dev, bool allow_async)
+{
+ int ret = 0;
+ bool async = false;
+
+ device_lock(dev);
+ if (dev->p->dead) {
+ goto out_unlock;
+ } else if (dev->driver) {
+ if (device_is_bound(dev)) {
+ ret = 1;
+ goto out_unlock;
+ }
+ ret = device_bind_driver(dev);
+ if (ret == 0)
+ ret = 1;
+ else {
+ dev->driver = NULL;
+ ret = 0;
+ }
+ } else {
+ struct device_attach_data data = {
+ .dev = dev,
+ .check_async = allow_async,
+ .want_async = false,
+ };
+
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+
+ ret = bus_for_each_drv(dev->bus, NULL, &data,
+ __device_attach_driver);
+ if (!ret && allow_async && data.have_async) {
+ /*
+ * If we could not find an appropriate driver
+ * synchronously and we are allowed to do
+ * async probes and there are drivers that
+ * want to probe asynchronously, we'll
+ * try them.
+ */
+ dev_dbg(dev, "scheduling asynchronous probe\n");
+ get_device(dev);
+ async = true;
+ } else {
+ pm_request_idle(dev);
+ }
+
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+ }
+out_unlock:
+ device_unlock(dev);
+ if (async)
+ async_schedule_dev(__device_attach_async_helper, dev);
+ return ret;
+}
+
+/**
+ * device_attach - try to attach device to a driver.
+ * @dev: device.
+ *
+ * Walk the list of drivers that the bus has and call
+ * driver_probe_device() for each pair. If a compatible
+ * pair is found, break out and return.
+ *
+ * Returns 1 if the device was bound to a driver;
+ * 0 if no matching driver was found;
+ * -ENODEV if the device is not registered.
+ *
+ * When called for a USB interface, @dev->parent lock must be held.
+ */
+int device_attach(struct device *dev)
+{
+ return __device_attach(dev, false);
+}
+EXPORT_SYMBOL_GPL(device_attach);
+
+void device_initial_probe(struct device *dev)
+{
+ __device_attach(dev, true);
+}
+
+/*
+ * __device_driver_lock - acquire locks needed to manipulate dev->drv
+ * @dev: Device we will update driver info for
+ * @parent: Parent device. Needed if the bus requires parent lock
+ *
+ * This function will take the required locks for manipulating dev->drv.
+ * Normally this will just be the @dev lock, but when called for a USB
+ * interface, @parent lock will be held as well.
+ */
+static void __device_driver_lock(struct device *dev, struct device *parent)
+{
+ if (parent && dev->bus->need_parent_lock)
+ device_lock(parent);
+ device_lock(dev);
+}
+
+/*
+ * __device_driver_unlock - release locks needed to manipulate dev->drv
+ * @dev: Device we will update driver info for
+ * @parent: Parent device. Needed if the bus requires parent lock
+ *
+ * This function will release the required locks for manipulating dev->drv.
+ * Normally this will just be the @dev lock, but when called for a
+ * USB interface, @parent lock will be released as well.
+ */
+static void __device_driver_unlock(struct device *dev, struct device *parent)
+{
+ device_unlock(dev);
+ if (parent && dev->bus->need_parent_lock)
+ device_unlock(parent);
+}
+
+/**
+ * device_driver_attach - attach a specific driver to a specific device
+ * @drv: Driver to attach
+ * @dev: Device to attach it to
+ *
+ * Manually attach driver to a device. Will acquire both @dev lock and
+ * @dev->parent lock if needed. Returns 0 on success, -ERR on failure.
+ */
+int device_driver_attach(struct device_driver *drv, struct device *dev)
+{
+ int ret;
+
+ __device_driver_lock(dev, dev->parent);
+ ret = __driver_probe_device(drv, dev);
+ __device_driver_unlock(dev, dev->parent);
+
+ /* also return probe errors as normal negative errnos */
+ if (ret > 0)
+ ret = -ret;
+ if (ret == -EPROBE_DEFER)
+ return -EAGAIN;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_driver_attach);
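+
+/*
+ * Illustrative usage (device and driver names are examples only): userspace
+ * reaches device_driver_attach() through a driver's sysfs "bind" attribute:
+ *
+ *	echo 0000:00:1f.3 > /sys/bus/pci/drivers/snd_hda_intel/bind
+ */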
+
+static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie)
+{
+ struct device *dev = _dev;
+ struct device_driver *drv;
+ int ret;
+
+ __device_driver_lock(dev, dev->parent);
+ drv = dev->p->async_driver;
+ dev->p->async_driver = NULL;
+ ret = driver_probe_device(drv, dev);
+ __device_driver_unlock(dev, dev->parent);
+
+ dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret);
+
+ put_device(dev);
+}
+
+static int __driver_attach(struct device *dev, void *data)
+{
+ struct device_driver *drv = data;
+ bool async = false;
+ int ret;
+
+ /*
+ * Lock device and try to bind to it. We drop the error
+ * here and always return 0, because we need to keep trying
+ * to bind to devices and some drivers will return an error
+ * simply because they do not support the device.
+ *
+ * driver_probe_device() will spit a warning if there
+ * is an error.
+ */
+
+ ret = driver_match_device(drv, dev);
+ if (ret == 0) {
+ /* no match */
+ return 0;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_dbg(dev, "Device match requests probe deferral\n");
+ dev->can_match = true;
+ driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
+ } else if (ret < 0) {
+ dev_dbg(dev, "Bus failed to match device: %d\n", ret);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
+ } /* ret > 0 means positive match */
+
+ if (driver_allows_async_probing(drv)) {
+ /*
+ * Instead of probing the device synchronously we will
+ * probe it asynchronously to allow for more parallelism.
+ *
+ * We only take the device lock here in order to guarantee
+ * that the dev->driver and async_driver fields are protected
+ */
+ dev_dbg(dev, "probing driver %s asynchronously\n", drv->name);
+ device_lock(dev);
+ if (!dev->driver && !dev->p->async_driver) {
+ get_device(dev);
+ dev->p->async_driver = drv;
+ async = true;
+ }
+ device_unlock(dev);
+ if (async)
+ async_schedule_dev(__driver_attach_async_helper, dev);
+ return 0;
+ }
+
+ __device_driver_lock(dev, dev->parent);
+ driver_probe_device(drv, dev);
+ __device_driver_unlock(dev, dev->parent);
+
+ return 0;
+}
+
+/**
+ * driver_attach - try to bind driver to devices.
+ * @drv: driver.
+ *
+ * Walk the list of devices that the bus has on it and try to
+ * match the driver with each one. If driver_probe_device()
+ * returns 0 and the @dev->driver is set, we've found a
+ * compatible pair.
+ */
+int driver_attach(struct device_driver *drv)
+{
+ return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
+}
+EXPORT_SYMBOL_GPL(driver_attach);
+
+/*
+ * __device_release_driver() must be called with @dev lock held.
+ * When called for a USB interface, @dev->parent lock must be held as well.
+ */
+static void __device_release_driver(struct device *dev, struct device *parent)
+{
+ struct device_driver *drv;
+
+ drv = dev->driver;
+ if (drv) {
+ pm_runtime_get_sync(dev);
+
+ while (device_links_busy(dev)) {
+ __device_driver_unlock(dev, parent);
+
+ device_links_unbind_consumers(dev);
+
+ __device_driver_lock(dev, parent);
+ /*
+ * A concurrent invocation of the same function might
+ * have released the driver successfully while this one
+ * was waiting, so check for that.
+ */
+ if (dev->driver != drv) {
+ pm_runtime_put(dev);
+ return;
+ }
+ }
+
+ driver_sysfs_remove(dev);
+
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_UNBIND_DRIVER,
+ dev);
+
+ pm_runtime_put_sync(dev);
+
+ device_remove(dev);
+
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
+
+ device_unbind_cleanup(dev);
+ device_links_driver_cleanup(dev);
+
+ klist_remove(&dev->p->knode_driver);
+ device_pm_check_callbacks(dev);
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_UNBOUND_DRIVER,
+ dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_UNBIND);
+ }
+}
+
+void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent)
+{
+ __device_driver_lock(dev, parent);
+
+ if (!drv || drv == dev->driver)
+ __device_release_driver(dev, parent);
+
+ __device_driver_unlock(dev, parent);
+}
+
+/**
+ * device_release_driver - manually detach device from driver.
+ * @dev: device.
+ *
+ * Manually detach device from driver.
+ * When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
+ */
+void device_release_driver(struct device *dev)
+{
+ /*
+ * If anyone calls device_release_driver() recursively from
+ * within their ->remove callback for the same device, they
+ * will deadlock right here.
+ */
+ device_release_driver_internal(dev, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(device_release_driver);
+
+/**
+ * device_driver_detach - detach driver from a specific device
+ * @dev: device to detach driver from
+ *
+ * Detach driver from device. Will acquire both @dev lock and @dev->parent
+ * lock if needed.
+ */
+void device_driver_detach(struct device *dev)
+{
+ device_release_driver_internal(dev, NULL, dev->parent);
+}
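+
+/*
+ * Illustrative usage: the matching sysfs "unbind" attribute ends up in
+ * device_driver_detach(), e.g.
+ *
+ *	echo 0000:00:1f.3 > /sys/bus/pci/drivers/snd_hda_intel/unbind
+ *
+ * (again, the device and driver names are examples only).
+ */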
+
+/**
+ * driver_detach - detach driver from all devices it controls.
+ * @drv: driver.
+ */
+void driver_detach(struct device_driver *drv)
+{
+ struct device_private *dev_prv;
+ struct device *dev;
+
+ if (driver_allows_async_probing(drv))
+ async_synchronize_full();
+
+ for (;;) {
+ spin_lock(&drv->p->klist_devices.k_lock);
+ if (list_empty(&drv->p->klist_devices.k_list)) {
+ spin_unlock(&drv->p->klist_devices.k_lock);
+ break;
+ }
+ dev_prv = list_last_entry(&drv->p->klist_devices.k_list,
+ struct device_private,
+ knode_driver.n_node);
+ dev = dev_prv->device;
+ get_device(dev);
+ spin_unlock(&drv->p->klist_devices.k_lock);
+ device_release_driver_internal(dev, drv, dev->parent);
+ put_device(dev);
+ }
+}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
new file mode 100644
index 000000000..f3bd9f104
--- /dev/null
+++ b/drivers/base/devcoredump.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devcoredump.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/workqueue.h>
+
+static struct class devcd_class;
+
+/* global disable flag, for security purposes */
+static bool devcd_disabled;
+
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT (HZ * 60 * 5)
+
+struct devcd_entry {
+ struct device devcd_dev;
+ void *data;
+ size_t datalen;
+ /*
+ * Here, the mutex is required to serialize the calls to the del_wk work
+ * between user and kernel space, which happens when devcd is added with
+ * device_add() and a uevent is sent to user space. User space reads the
+ * uevent and calls devcd_data_write(), which tries to modify the work
+ * before it has even been initialized/queued by devcoredump.
+ *
+ * cpu0(X) cpu1(Y)
+ *
+ * dev_coredump() uevent sent to user space
+ * device_add() ======================> user space process Y reads the
+ * uevents writes to devcd fd
+ * which results into writes to
+ *
+ * devcd_data_write()
+ * mod_delayed_work()
+ * try_to_grab_pending()
+ * del_timer()
+ * debug_assert_init()
+ * INIT_DELAYED_WORK()
+ * schedule_delayed_work()
+ *
+ *
+ * Also, the mutex alone would not be enough to avoid scheduling of the
+ * del_wk work after it gets flushed by a call to devcd_free(),
+ * as shown below.
+ *
+ * disabled_store()
+ * devcd_free()
+ * mutex_lock() devcd_data_write()
+ * flush_delayed_work()
+ * mutex_unlock()
+ * mutex_lock()
+ * mod_delayed_work()
+ * mutex_unlock()
+ * So, delete_work flag is required.
+ */
+ struct mutex mutex;
+ bool delete_work;
+ struct module *owner;
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen);
+ void (*free)(void *data);
+ struct delayed_work del_wk;
+ struct device *failing_dev;
+};
+
+static struct devcd_entry *dev_to_devcd(struct device *dev)
+{
+ return container_of(dev, struct devcd_entry, devcd_dev);
+}
+
+static void devcd_dev_release(struct device *dev)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ devcd->free(devcd->data);
+ module_put(devcd->owner);
+
+ /*
+ * this seems racy, but I don't see a notifier or such on
+ * a struct device to know when it goes away?
+ */
+ if (devcd->failing_dev->kobj.sd)
+ sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
+ "devcoredump");
+
+ put_device(devcd->failing_dev);
+ kfree(devcd);
+}
+
+static void devcd_del(struct work_struct *wk)
+{
+ struct devcd_entry *devcd;
+
+ devcd = container_of(wk, struct devcd_entry, del_wk.work);
+
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
+static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
+}
+
+static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work) {
+ devcd->delete_work = true;
+ mod_delayed_work(system_wq, &devcd->del_wk, 0);
+ }
+ mutex_unlock(&devcd->mutex);
+
+ return count;
+}
+
+static struct bin_attribute devcd_attr_data = {
+ .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
+ .size = 0,
+ .read = devcd_data_read,
+ .write = devcd_data_write,
+};
+
+static struct bin_attribute *devcd_dev_bin_attrs[] = {
+ &devcd_attr_data, NULL,
+};
+
+static const struct attribute_group devcd_dev_group = {
+ .bin_attrs = devcd_dev_bin_attrs,
+};
+
+static const struct attribute_group *devcd_dev_groups[] = {
+ &devcd_dev_group, NULL,
+};
+
+static int devcd_free(struct device *dev, void *data)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ mutex_lock(&devcd->mutex);
+ if (!devcd->delete_work)
+ devcd->delete_work = true;
+
+ flush_delayed_work(&devcd->del_wk);
+ mutex_unlock(&devcd->mutex);
+ return 0;
+}
+
+static ssize_t disabled_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", devcd_disabled);
+}
+
+/*
+ *
+ * disabled_store() worker()
+ * class_for_each_device(&devcd_class,
+ * NULL, NULL, devcd_free)
+ * ...
+ * ...
+ * while ((dev = class_dev_iter_next(&iter))
+ * devcd_del()
+ * device_del()
+ * put_device() <- last reference
+ * error = fn(dev, data) devcd_dev_release()
+ * devcd_free(dev, data) kfree(devcd)
+ * mutex_lock(&devcd->mutex);
+ *
+ *
+ * In the above diagram, it looks like disabled_store() would race with a
+ * parallel devcd_del() and trigger a memory abort while acquiring
+ * devcd->mutex, since that happens after the devcd memory is kfree'd once
+ * its last reference is dropped with put_device(). However, this will not
+ * happen, because fn(dev, data) runs with its own reference to the device
+ * via klist_node, so that is not the last reference and the race cannot occur.
+ */
+
+static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ long tmp = simple_strtol(buf, NULL, 10);
+
+ /*
+ * This essentially makes the attribute write-once, since you can't
+ * go back to not having it disabled. This is intentional, it serves
+ * as a system lockdown feature.
+ */
+ if (tmp != 1)
+ return -EINVAL;
+
+ devcd_disabled = true;
+
+ class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+
+ return count;
+}
+static CLASS_ATTR_RW(disabled);
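+
+/*
+ * Illustrative usage: writing 1 to the class attribute, e.g.
+ *
+ *	echo 1 > /sys/class/devcoredump/disabled
+ *
+ * frees any pending dumps and keeps new ones from being created; by design
+ * there is no way to re-enable it at runtime.
+ */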
+
+static struct attribute *devcd_class_attrs[] = {
+ &class_attr_disabled.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(devcd_class);
+
+static struct class devcd_class = {
+ .name = "devcoredump",
+ .owner = THIS_MODULE,
+ .dev_release = devcd_dev_release,
+ .dev_groups = devcd_dev_groups,
+ .class_groups = devcd_class_groups,
+};
+
+static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen)
+{
+ return memory_read_from_buffer(buffer, count, &offset, data, datalen);
+}
+
+static void devcd_freev(void *data)
+{
+ vfree(data);
+}
+
+/**
+ * dev_coredumpv - create device coredump with vmalloc data
+ * @dev: the struct device for the crashed device
+ * @data: vmalloc data containing the device coredump
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * This function takes ownership of the vmalloc'ed data and will free
+ * it when it is no longer used. See dev_coredumpm() for more information.
+ */
+void dev_coredumpv(struct device *dev, void *data, size_t datalen,
+ gfp_t gfp)
+{
+ dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpv);
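+
+/*
+ * Illustrative sketch (hypothetical driver code; fw_dump and fw_dump_len are
+ * assumptions): handing a copy of a firmware crash buffer to devcoredump.
+ * Ownership of the vmalloc'ed copy passes to the framework, which vfree()s it
+ * once it has been read or the DEVCD_TIMEOUT expires:
+ *
+ *	void *dump = vmalloc(fw_dump_len);
+ *
+ *	if (dump) {
+ *		memcpy(dump, fw_dump, fw_dump_len);
+ *		dev_coredumpv(dev, dump, fw_dump_len, GFP_KERNEL);
+ *	}
+ */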
+
+static int devcd_match_failing(struct device *dev, const void *failing)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ return devcd->failing_dev == failing;
+}
+
+/**
+ * devcd_free_sgtable - free all the memory of the given scatterlist table
+ * (i.e. both pages and scatterlist instances)
+ * NOTE: if two tables are allocated with devcd_alloc_sgtable and then chained
+ * using the sg_chain function, then this function should be called only once,
+ * on the chained table
+ * @data: pointer to sg_table to free
+ */
+static void devcd_free_sgtable(void *data)
+{
+ _devcd_free_sgtable(data);
+}
+
+/**
+ * devcd_read_from_sgtable - copy data from sg_table to a given buffer
+ * and return the number of bytes read
+ * @buffer: the buffer to copy the data to
+ * @buf_len: the length of the buffer
+ * @data: the scatterlist table to copy from
+ * @offset: start copying from @offset bytes from the head of the data
+ * in the given scatterlist
+ * @data_len: the length of the data in the sg_table
+ */
+static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
+ size_t buf_len, void *data,
+ size_t data_len)
+{
+ struct scatterlist *table = data;
+
+ if (offset > data_len)
+ return -EINVAL;
+
+ if (offset + buf_len > data_len)
+ buf_len = data_len - offset;
+ return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
+ offset);
+}
+
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+void dev_coredumpm(struct device *dev, struct module *owner,
+ void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen),
+ void (*free)(void *data))
+{
+ static atomic_t devcd_count = ATOMIC_INIT(0);
+ struct devcd_entry *devcd;
+ struct device *existing;
+
+ if (devcd_disabled)
+ goto free;
+
+ existing = class_find_device(&devcd_class, NULL, dev,
+ devcd_match_failing);
+ if (existing) {
+ put_device(existing);
+ goto free;
+ }
+
+ if (!try_module_get(owner))
+ goto free;
+
+ devcd = kzalloc(sizeof(*devcd), gfp);
+ if (!devcd)
+ goto put_module;
+
+ devcd->owner = owner;
+ devcd->data = data;
+ devcd->datalen = datalen;
+ devcd->read = read;
+ devcd->free = free;
+ devcd->failing_dev = get_device(dev);
+ devcd->delete_work = false;
+
+ mutex_init(&devcd->mutex);
+ device_initialize(&devcd->devcd_dev);
+
+ dev_set_name(&devcd->devcd_dev, "devcd%d",
+ atomic_inc_return(&devcd_count));
+ devcd->devcd_dev.class = &devcd_class;
+
+ mutex_lock(&devcd->mutex);
+ dev_set_uevent_suppress(&devcd->devcd_dev, true);
+ if (device_add(&devcd->devcd_dev))
+ goto put_device;
+
+ /*
+ * These should normally not fail, but there is no problem
+ * continuing without the links, so just warn instead of
+ * failing.
+ */
+ if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
+ "failing_device") ||
+ sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
+ "devcoredump"))
+ dev_warn(dev, "devcoredump create_link failed\n");
+
+ dev_set_uevent_suppress(&devcd->devcd_dev, false);
+ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+ mutex_unlock(&devcd->mutex);
+ return;
+ put_device:
+ put_device(&devcd->devcd_dev);
+ mutex_unlock(&devcd->mutex);
+ put_module:
+ module_put(owner);
+ free:
+ free(data);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpm);
+
+/**
+ * dev_coredumpsg - create device coredump that uses scatterlist as data
+ * parameter
+ * @dev: the struct device for the crashed device
+ * @table: the dump data
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed
+ * it will free the data.
+ */
+void dev_coredumpsg(struct device *dev, struct scatterlist *table,
+ size_t datalen, gfp_t gfp)
+{
+ dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
+ devcd_free_sgtable);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpsg);
+
+static int __init devcoredump_init(void)
+{
+ return class_register(&devcd_class);
+}
+__initcall(devcoredump_init);
+
+static void __exit devcoredump_exit(void)
+{
+ class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+ class_unregister(&devcd_class);
+}
+__exitcall(devcoredump_exit);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
new file mode 100644
index 000000000..4ab2b50ee
--- /dev/null
+++ b/drivers/base/devres.c
@@ -0,0 +1,1224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/devres.c - device resource management
+ *
+ * Copyright (c) 2006 SUSE Linux Products GmbH
+ * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/percpu.h>
+
+#include <asm/sections.h>
+
+#include "base.h"
+#include "trace.h"
+
+struct devres_node {
+ struct list_head entry;
+ dr_release_t release;
+ const char *name;
+ size_t size;
+};
+
+struct devres {
+ struct devres_node node;
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+ * buffer alignment as if it was allocated by plain kmalloc().
+ */
+ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+};
+
+struct devres_group {
+ struct devres_node node[2];
+ void *id;
+ int color;
+ /* -- 8 pointers */
+};
+
+static void set_node_dbginfo(struct devres_node *node, const char *name,
+ size_t size)
+{
+ node->name = name;
+ node->size = size;
+}
+
+#ifdef CONFIG_DEBUG_DEVRES
+static int log_devres = 0;
+module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
+
+static void devres_dbg(struct device *dev, struct devres_node *node,
+ const char *op)
+{
+ if (unlikely(log_devres))
+ dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
+ op, node, node->name, node->size);
+}
+#else /* CONFIG_DEBUG_DEVRES */
+#define devres_dbg(dev, node, op) do {} while (0)
+#endif /* CONFIG_DEBUG_DEVRES */
+
+static void devres_log(struct device *dev, struct devres_node *node,
+ const char *op)
+{
+ trace_devres_log(dev, op, node, node->name, node->size);
+ devres_dbg(dev, node, op);
+}
+
+/*
+ * Release functions for devres group. These callbacks are used only
+ * for identification.
+ */
+static void group_open_release(struct device *dev, void *res)
+{
+ /* noop */
+}
+
+static void group_close_release(struct device *dev, void *res)
+{
+ /* noop */
+}
+
+static struct devres_group * node_to_group(struct devres_node *node)
+{
+ if (node->release == &group_open_release)
+ return container_of(node, struct devres_group, node[0]);
+ if (node->release == &group_close_release)
+ return container_of(node, struct devres_group, node[1]);
+ return NULL;
+}
+
+static bool check_dr_size(size_t size, size_t *tot_size)
+{
+ /* We must catch any near-SIZE_MAX cases that could overflow. */
+ if (unlikely(check_add_overflow(sizeof(struct devres),
+ size, tot_size)))
+ return false;
+
+ return true;
+}
+
+static __always_inline struct devres * alloc_dr(dr_release_t release,
+ size_t size, gfp_t gfp, int nid)
+{
+ size_t tot_size;
+ struct devres *dr;
+
+ if (!check_dr_size(size, &tot_size))
+ return NULL;
+
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+ if (unlikely(!dr))
+ return NULL;
+
+ /* No need to clear memory twice */
+ if (!(gfp & __GFP_ZERO))
+ memset(dr, 0, offsetof(struct devres, data));
+
+ INIT_LIST_HEAD(&dr->node.entry);
+ dr->node.release = release;
+ return dr;
+}
+
+static void add_dr(struct device *dev, struct devres_node *node)
+{
+ devres_log(dev, node, "ADD");
+ BUG_ON(!list_empty(&node->entry));
+ list_add_tail(&node->entry, &dev->devres_head);
+}
+
+static void replace_dr(struct device *dev,
+ struct devres_node *old, struct devres_node *new)
+{
+ devres_log(dev, old, "REPLACE");
+ BUG_ON(!list_empty(&new->entry));
+ list_replace(&old->entry, &new->entry);
+}
+
+/**
+ * __devres_alloc_node - Allocate device resource data
+ * @release: Release function devres will be associated with
+ * @size: Allocation size
+ * @gfp: Allocation flags
+ * @nid: NUMA node
+ * @name: Name of the resource
+ *
+ * Allocate devres of @size bytes. The allocated area is zeroed, then
+ * associated with @release. The returned pointer can be passed to
+ * other devres_*() functions.
+ *
+ * RETURNS:
+ * Pointer to allocated devres on success, NULL on failure.
+ */
+void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
+ const char *name)
+{
+ struct devres *dr;
+
+ dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
+ if (unlikely(!dr))
+ return NULL;
+ set_node_dbginfo(&dr->node, name, size);
+ return dr->data;
+}
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
+
+/**
+ * devres_for_each_res - Resource iterator
+ * @dev: Device to iterate resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ * @fn: Function to be called for each matched resource.
+ * @data: Data for @fn, the 3rd parameter of @fn
+ *
+ * Call @fn for each devres of @dev which is associated with @release
+ * and for which @match returns 1.
+ *
+ * RETURNS:
+ * void
+ */
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data)
+{
+ struct devres_node *node;
+ struct devres_node *tmp;
+ unsigned long flags;
+
+ if (!fn)
+ return;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ list_for_each_entry_safe_reverse(node, tmp,
+ &dev->devres_head, entry) {
+ struct devres *dr = container_of(node, struct devres, node);
+
+ if (node->release != release)
+ continue;
+ if (match && !match(dev, dr->data, match_data))
+ continue;
+ fn(dev, dr->data, data);
+ }
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_for_each_res);
+
+/**
+ * devres_free - Free device resource data
+ * @res: Pointer to devres data to free
+ *
+ * Free devres created with devres_alloc().
+ */
+void devres_free(void *res)
+{
+ if (res) {
+ struct devres *dr = container_of(res, struct devres, data);
+
+ BUG_ON(!list_empty(&dr->node.entry));
+ kfree(dr);
+ }
+}
+EXPORT_SYMBOL_GPL(devres_free);
+
+/**
+ * devres_add - Register device resource
+ * @dev: Device to add resource to
+ * @res: Resource to register
+ *
+ * Register devres @res to @dev. @res should have been allocated
+ * using devres_alloc(). On driver detach, the associated release
+ * function will be invoked and devres will be freed automatically.
+ */
+void devres_add(struct device *dev, void *res)
+{
+ struct devres *dr = container_of(res, struct devres, data);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ add_dr(dev, &dr->node);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_add);
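+
+/*
+ * Illustrative sketch of the usual devm_*() wrapper pattern built on
+ * devres_alloc()/devres_add(); struct foo, foo_get() and foo_put() are
+ * hypothetical and not part of this file:
+ *
+ *	struct foo_devres { struct foo *foo; };
+ *
+ *	static void devm_foo_release(struct device *dev, void *res)
+ *	{
+ *		foo_put(((struct foo_devres *)res)->foo);
+ *	}
+ *
+ *	struct foo *devm_foo_get(struct device *dev)
+ *	{
+ *		struct foo_devres *dr;
+ *		struct foo *foo;
+ *
+ *		dr = devres_alloc(devm_foo_release, sizeof(*dr), GFP_KERNEL);
+ *		if (!dr)
+ *			return ERR_PTR(-ENOMEM);
+ *
+ *		foo = foo_get(dev);
+ *		if (IS_ERR(foo)) {
+ *			devres_free(dr);
+ *			return foo;
+ *		}
+ *
+ *		dr->foo = foo;
+ *		devres_add(dev, dr);
+ *		return foo;
+ *	}
+ */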
+
+static struct devres *find_dr(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
+{
+ struct devres_node *node;
+
+ list_for_each_entry_reverse(node, &dev->devres_head, entry) {
+ struct devres *dr = container_of(node, struct devres, node);
+
+ if (node->release != release)
+ continue;
+ if (match && !match(dev, dr->data, match_data))
+ continue;
+ return dr;
+ }
+
+ return NULL;
+}
+
+/**
+ * devres_find - Find device resource
+ * @dev: Device to lookup resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev which is associated with @release
+ * and for which @match returns 1. If @match is NULL, it's considered
+ * to match all.
+ *
+ * RETURNS:
+ * Pointer to found devres, NULL if not found.
+ */
+void * devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
+{
+ struct devres *dr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ dr = find_dr(dev, release, match, match_data);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ if (dr)
+ return dr->data;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(devres_find);
+
+/**
+ * devres_get - Find devres, if non-existent, add one atomically
+ * @dev: Device to lookup or add devres for
+ * @new_res: Pointer to new initialized devres to add if not found
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev which has the same release function
+ * as @new_res and for which @match returns 1. If found, @new_res is
+ * freed; otherwise, @new_res is added atomically.
+ *
+ * RETURNS:
+ * Pointer to found or added devres.
+ */
+void * devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data)
+{
+ struct devres *new_dr = container_of(new_res, struct devres, data);
+ struct devres *dr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ dr = find_dr(dev, new_dr->node.release, match, match_data);
+ if (!dr) {
+ add_dr(dev, &new_dr->node);
+ dr = new_dr;
+ new_res = NULL;
+ }
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+ devres_free(new_res);
+
+ return dr->data;
+}
+EXPORT_SYMBOL_GPL(devres_get);
+
+/**
+ * devres_remove - Find a device resource and remove it
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1. If @match is NULL, it's considered to
+ * match all. If found, the resource is removed atomically and
+ * returned.
+ *
+ * RETURNS:
+ * Pointer to removed devres on success, NULL if not found.
+ */
+void * devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
+{
+ struct devres *dr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ dr = find_dr(dev, release, match, match_data);
+ if (dr) {
+ list_del_init(&dr->node.entry);
+ devres_log(dev, &dr->node, "REM");
+ }
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ if (dr)
+ return dr->data;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(devres_remove);
+
+/**
+ * devres_destroy - Find a device resource and destroy it
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1. If @match is NULL, it's considered to
+ * match all. If found, the resource is removed atomically and freed.
+ *
+ * Note that the release function for the resource will not be called,
+ * only the devres-allocated data will be freed. The caller becomes
+ * responsible for freeing any other data.
+ *
+ * RETURNS:
+ * 0 if devres is found and freed, -ENOENT if not found.
+ */
+int devres_destroy(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
+{
+ void *res;
+
+ res = devres_remove(dev, release, match, match_data);
+ if (unlikely(!res))
+ return -ENOENT;
+
+ devres_free(res);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devres_destroy);
+
+
+/**
+ * devres_release - Find a device resource and destroy it, calling release
+ * @dev: Device to find resource from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ *
+ * Find the latest devres of @dev associated with @release and for
+ * which @match returns 1. If @match is NULL, it's considered to
+ * match all. If found, the resource is removed atomically, the
+ * release function called and the resource freed.
+ *
+ * RETURNS:
+ * 0 if devres is found and freed, -ENOENT if not found.
+ */
+int devres_release(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data)
+{
+ void *res;
+
+ res = devres_remove(dev, release, match, match_data);
+ if (unlikely(!res))
+ return -ENOENT;
+
+ (*release)(dev, res);
+ devres_free(res);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devres_release);
+
+static int remove_nodes(struct device *dev,
+ struct list_head *first, struct list_head *end,
+ struct list_head *todo)
+{
+ struct devres_node *node, *n;
+ int cnt = 0, nr_groups = 0;
+
+ /* First pass - move normal devres entries to @todo and clear
+ * devres_group colors.
+ */
+ node = list_entry(first, struct devres_node, entry);
+ list_for_each_entry_safe_from(node, n, end, entry) {
+ struct devres_group *grp;
+
+ grp = node_to_group(node);
+ if (grp) {
+ /* clear color of group markers in the first pass */
+ grp->color = 0;
+ nr_groups++;
+ } else {
+ /* regular devres entry */
+ if (&node->entry == first)
+ first = first->next;
+ list_move_tail(&node->entry, todo);
+ cnt++;
+ }
+ }
+
+ if (!nr_groups)
+ return cnt;
+
+ /* Second pass - Scan groups and color them. A group gets
+ * color value of two iff the group is wholly contained in
+ * [current node, end). That is, for a closed group, both opening
+ * and closing markers should be in the range, while just the
+ * opening marker is enough for an open group.
+ */
+ node = list_entry(first, struct devres_node, entry);
+ list_for_each_entry_safe_from(node, n, end, entry) {
+ struct devres_group *grp;
+
+ grp = node_to_group(node);
+ BUG_ON(!grp || list_empty(&grp->node[0].entry));
+
+ grp->color++;
+ if (list_empty(&grp->node[1].entry))
+ grp->color++;
+
+ BUG_ON(grp->color <= 0 || grp->color > 2);
+ if (grp->color == 2) {
+ /* No need to update current node or end. The removed
+ * nodes are always before both.
+ */
+ list_move_tail(&grp->node[0].entry, todo);
+ list_del_init(&grp->node[1].entry);
+ }
+ }
+
+ return cnt;
+}
+
+static void release_nodes(struct device *dev, struct list_head *todo)
+{
+ struct devres *dr, *tmp;
+
+ /* Release. Note that both devres and devres_group are
+ * handled as devres in the following loop. This is safe.
+ */
+ list_for_each_entry_safe_reverse(dr, tmp, todo, node.entry) {
+ devres_log(dev, &dr->node, "REL");
+ dr->node.release(dev, dr->data);
+ kfree(dr);
+ }
+}
+
+/**
+ * devres_release_all - Release all managed resources
+ * @dev: Device to release resources for
+ *
+ * Release all resources associated with @dev. This function is
+ * called on driver detach.
+ */
+int devres_release_all(struct device *dev)
+{
+ unsigned long flags;
+ LIST_HEAD(todo);
+ int cnt;
+
+ /* Looks like an uninitialized device structure */
+ if (WARN_ON(dev->devres_head.next == NULL))
+ return -ENODEV;
+
+ /* Nothing to release if list is empty */
+ if (list_empty(&dev->devres_head))
+ return 0;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ cnt = remove_nodes(dev, dev->devres_head.next, &dev->devres_head, &todo);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ release_nodes(dev, &todo);
+ return cnt;
+}
+
+/**
+ * devres_open_group - Open a new devres group
+ * @dev: Device to open devres group for
+ * @id: Separator ID
+ * @gfp: Allocation flags
+ *
+ * Open a new devres group for @dev with @id. For @id, using a
+ * pointer to an object which won't be used for another group is
+ * recommended. If @id is NULL, an address-wise unique ID is created.
+ *
+ * RETURNS:
+ * ID of the new group, NULL on failure.
+ */
+void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
+{
+ struct devres_group *grp;
+ unsigned long flags;
+
+ grp = kmalloc(sizeof(*grp), gfp);
+ if (unlikely(!grp))
+ return NULL;
+
+ grp->node[0].release = &group_open_release;
+ grp->node[1].release = &group_close_release;
+ INIT_LIST_HEAD(&grp->node[0].entry);
+ INIT_LIST_HEAD(&grp->node[1].entry);
+ set_node_dbginfo(&grp->node[0], "grp<", 0);
+ set_node_dbginfo(&grp->node[1], "grp>", 0);
+ grp->id = grp;
+ if (id)
+ grp->id = id;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ add_dr(dev, &grp->node[0]);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+ return grp->id;
+}
+EXPORT_SYMBOL_GPL(devres_open_group);
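+
+/*
+ * Illustrative sketch (not part of the upstream file): groups let a caller
+ * roll back only the managed resources acquired after a given point, e.g. on
+ * a partial-failure path. The foo_* helper is hypothetical.
+ *
+ *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
+ *		return -ENOMEM;
+ *
+ *	err = foo_setup_optional_bits(dev);	// uses devm_* internally
+ *	if (err) {
+ *		devres_release_group(dev, NULL);	// undo just this group
+ *		return err;
+ *	}
+ *	devres_close_group(dev, NULL);
+ */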
+
+/* Find devres group with ID @id. If @id is NULL, look for the latest. */
+static struct devres_group * find_group(struct device *dev, void *id)
+{
+ struct devres_node *node;
+
+ list_for_each_entry_reverse(node, &dev->devres_head, entry) {
+ struct devres_group *grp;
+
+ if (node->release != &group_open_release)
+ continue;
+
+ grp = container_of(node, struct devres_group, node[0]);
+
+ if (id) {
+ if (grp->id == id)
+ return grp;
+ } else if (list_empty(&grp->node[1].entry))
+ return grp;
+ }
+
+ return NULL;
+}
+
+/**
+ * devres_close_group - Close a devres group
+ * @dev: Device to close devres group for
+ * @id: ID of target group, can be NULL
+ *
+ * Close the group identified by @id. If @id is NULL, the latest open
+ * group is selected.
+ */
+void devres_close_group(struct device *dev, void *id)
+{
+ struct devres_group *grp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+
+ grp = find_group(dev, id);
+ if (grp)
+ add_dr(dev, &grp->node[1]);
+ else
+ WARN_ON(1);
+
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_close_group);
+
+/**
+ * devres_remove_group - Remove a devres group
+ * @dev: Device to remove group for
+ * @id: ID of target group, can be NULL
+ *
+ * Remove the group identified by @id. If @id is NULL, the latest
+ * open group is selected. Note that removing a group doesn't affect
+ * any other resources.
+ */
+void devres_remove_group(struct device *dev, void *id)
+{
+ struct devres_group *grp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+
+ grp = find_group(dev, id);
+ if (grp) {
+ list_del_init(&grp->node[0].entry);
+ list_del_init(&grp->node[1].entry);
+ devres_log(dev, &grp->node[0], "REM");
+ } else
+ WARN_ON(1);
+
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ kfree(grp);
+}
+EXPORT_SYMBOL_GPL(devres_remove_group);
+
+/**
+ * devres_release_group - Release resources in a devres group
+ * @dev: Device to release group for
+ * @id: ID of target group, can be NULL
+ *
+ * Release all resources in the group identified by @id. If @id is
+ * NULL, the latest open group is selected. The selected group and
+ * groups properly nested inside the selected group are removed.
+ *
+ * RETURNS:
+ * The number of released non-group resources.
+ */
+int devres_release_group(struct device *dev, void *id)
+{
+ struct devres_group *grp;
+ unsigned long flags;
+ LIST_HEAD(todo);
+ int cnt = 0;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+
+ grp = find_group(dev, id);
+ if (grp) {
+ struct list_head *first = &grp->node[0].entry;
+ struct list_head *end = &dev->devres_head;
+
+ if (!list_empty(&grp->node[1].entry))
+ end = grp->node[1].entry.next;
+
+ cnt = remove_nodes(dev, first, end, &todo);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ release_nodes(dev, &todo);
+ } else {
+ WARN_ON(1);
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+ }
+
+ return cnt;
+}
+EXPORT_SYMBOL_GPL(devres_release_group);
+
+/*
+ * Custom devres actions allow inserting a simple function call
+ * into the teardown sequence.
+ */
+
+struct action_devres {
+ void *data;
+ void (*action)(void *);
+};
+
+static int devm_action_match(struct device *dev, void *res, void *p)
+{
+ struct action_devres *devres = res;
+ struct action_devres *target = p;
+
+ return devres->action == target->action &&
+ devres->data == target->data;
+}
+
+static void devm_action_release(struct device *dev, void *res)
+{
+ struct action_devres *devres = res;
+
+ devres->action(devres->data);
+}
+
+/**
+ * devm_add_action() - add a custom action to list of managed resources
+ * @dev: Device that owns the action
+ * @action: Function that should be called
+ * @data: Pointer to data passed to @action implementation
+ *
+ * This adds a custom action to the list of managed resources so that
+ * it gets executed as part of standard resource unwinding.
+ */
+int devm_add_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres *devres;
+
+ devres = devres_alloc(devm_action_release,
+ sizeof(struct action_devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ devres->data = data;
+ devres->action = action;
+
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_add_action);
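+
+/*
+ * Illustrative sketch (not part of the upstream file): registering a custom
+ * undo step so it runs automatically during resource unwinding. The foo_*
+ * names are hypothetical.
+ *
+ *	static void foo_disable(void *data)
+ *	{
+ *		struct foo_hw *hw = data;
+ *
+ *		foo_hw_disable(hw);
+ *	}
+ *
+ *	// in probe(), after foo_hw_enable(hw) succeeded:
+ *	ret = devm_add_action(dev, foo_disable, hw);
+ *	if (ret) {
+ *		foo_disable(hw);	// devres could not take ownership
+ *		return ret;
+ *	}
+ */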
+
+/**
+ * devm_remove_action() - removes previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Removes instance of @action previously added by devm_add_action().
+ * Both action and data should match one of the existing entries.
+ */
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
+ &devres));
+}
+EXPORT_SYMBOL_GPL(devm_remove_action);
+
+/**
+ * devm_release_action() - release previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Releases and removes instance of @action previously added by
+ * devm_add_action(). Both action and data should match one of the
+ * existing entries.
+ */
+void devm_release_action(struct device *dev, void (*action)(void *), void *data)
+{
+ struct action_devres devres = {
+ .data = data,
+ .action = action,
+ };
+
+ WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
+ &devres));
+
+}
+EXPORT_SYMBOL_GPL(devm_release_action);
+
+/*
+ * Managed kmalloc/kfree
+ */
+static void devm_kmalloc_release(struct device *dev, void *res)
+{
+ /* noop */
+}
+
+static int devm_kmalloc_match(struct device *dev, void *res, void *data)
+{
+ return res == data;
+}
+
+/**
+ * devm_kmalloc - Resource-managed kmalloc
+ * @dev: Device to allocate memory for
+ * @size: Allocation size
+ * @gfp: Allocation gfp flags
+ *
+ * Managed kmalloc. Memory allocated with this function is
+ * automatically freed on driver detach. Like all other devres
+ * resources, the guaranteed alignment is that of unsigned long long.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ struct devres *dr;
+
+ if (unlikely(!size))
+ return ZERO_SIZE_PTR;
+
+ /* use raw alloc_dr for kmalloc caller tracing */
+ dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
+ if (unlikely(!dr))
+ return NULL;
+
+ /*
+ * This is named devm_kzalloc_release for historical reasons
+ * The initial implementation did not support kmalloc, only kzalloc
+ */
+ set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
+ devres_add(dev, dr->data);
+ return dr->data;
+}
+EXPORT_SYMBOL_GPL(devm_kmalloc);
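+
+/*
+ * Illustrative sketch (not part of the upstream file): per-device state
+ * allocated in probe() with devm_kmalloc() needs no explicit kfree() in
+ * error paths or in remove(). The foo_* names are hypothetical.
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo_priv *priv;
+ *
+ *		priv = devm_kmalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ *		if (!priv)
+ *			return -ENOMEM;
+ *
+ *		platform_set_drvdata(pdev, priv);
+ *		return 0;
+ *	}
+ */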
+
+/**
+ * devm_krealloc - Resource-managed krealloc()
+ * @dev: Device to re-allocate memory for
+ * @ptr: Pointer to the memory chunk to re-allocate
+ * @new_size: New allocation size
+ * @gfp: Allocation gfp flags
+ *
+ * Managed krealloc(). Resizes the memory chunk allocated with devm_kmalloc().
+ * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
+ * it's the equivalent of devm_kmalloc(). If new_size is zero, it frees the
+ * previously allocated memory and returns ZERO_SIZE_PTR. This function doesn't
+ * change the order in which the release callback for the re-alloc'ed devres
+ * will be called (except when falling back to devm_kmalloc() or when freeing
+ * resources when new_size is zero). The contents of the memory are preserved
+ * up to the lesser of new and old sizes.
+ */
+void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
+{
+ size_t total_new_size, total_old_size;
+ struct devres *old_dr, *new_dr;
+ unsigned long flags;
+
+ if (unlikely(!new_size)) {
+ devm_kfree(dev, ptr);
+ return ZERO_SIZE_PTR;
+ }
+
+ if (unlikely(ZERO_OR_NULL_PTR(ptr)))
+ return devm_kmalloc(dev, new_size, gfp);
+
+ if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
+ /*
+ * We cannot reliably realloc a const string returned by
+ * devm_kstrdup_const().
+ */
+ return NULL;
+
+ if (!check_dr_size(new_size, &total_new_size))
+ return NULL;
+
+ total_old_size = ksize(container_of(ptr, struct devres, data));
+ if (total_old_size == 0) {
+ WARN(1, "Pointer doesn't point to dynamically allocated memory.");
+ return NULL;
+ }
+
+ /*
+ * If new size is smaller or equal to the actual number of bytes
+ * allocated previously - just return the same pointer.
+ */
+ if (total_new_size <= total_old_size)
+ return ptr;
+
+ /*
+ * Otherwise: allocate new, larger chunk. We need to allocate before
+ * taking the lock as most probably the caller uses GFP_KERNEL.
+ */
+ new_dr = alloc_dr(devm_kmalloc_release,
+ total_new_size, gfp, dev_to_node(dev));
+ if (!new_dr)
+ return NULL;
+
+ /*
+ * The spinlock protects the linked list against concurrent
+ * modifications but not the resource itself.
+ */
+ spin_lock_irqsave(&dev->devres_lock, flags);
+
+ old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
+ if (!old_dr) {
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+ kfree(new_dr);
+ WARN(1, "Memory chunk not managed or managed by a different device.");
+ return NULL;
+ }
+
+ replace_dr(dev, &old_dr->node, &new_dr->node);
+
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+
+ /*
+ * We can copy the memory contents after releasing the lock as we're
+ * no longer modifying the list links.
+ */
+ memcpy(new_dr->data, old_dr->data,
+ total_old_size - offsetof(struct devres, data));
+ /*
+ * Same for releasing the old devres - it's now been removed from the
+ * list. This is also the reason why we must not use devm_kfree() - the
+ * links are no longer valid.
+ */
+ kfree(old_dr);
+
+ return new_dr->data;
+}
+EXPORT_SYMBOL_GPL(devm_krealloc);
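+
+/*
+ * Illustrative sketch (not part of the upstream file): growing a managed
+ * buffer, using the usual krealloc() idiom of keeping the old pointer until
+ * the call succeeds. The names are hypothetical.
+ *
+ *	u32 *bigger;
+ *
+ *	bigger = devm_krealloc(dev, priv->samples,
+ *			       new_count * sizeof(*bigger), GFP_KERNEL);
+ *	if (!bigger)
+ *		return -ENOMEM;	// priv->samples is still valid and managed
+ *	priv->samples = bigger;
+ */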
+
+/**
+ * devm_kstrdup - Allocate resource managed space and
+ * copy an existing string into that.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = devm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
+/**
+ * devm_kstrdup_const - resource managed conditional string duplication
+ * @dev: device for which to duplicate the string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Strings allocated by devm_kstrdup_const will be automatically freed when
+ * the associated device is detached.
+ *
+ * RETURNS:
+ * Source string if it is in .rodata section otherwise it falls back to
+ * devm_kstrdup.
+ */
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)s))
+ return s;
+
+ return devm_kstrdup(dev, s, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup_const);
+
+/**
+ * devm_kvasprintf - Allocate resource managed space and format a string
+ * into that.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * @fmt: The printf()-style format string
+ * @ap: Arguments for the format string
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+ va_list ap)
+{
+ unsigned int len;
+ char *p;
+ va_list aq;
+
+ va_copy(aq, ap);
+ len = vsnprintf(NULL, 0, fmt, aq);
+ va_end(aq);
+
+ p = devm_kmalloc(dev, len+1, gfp);
+ if (!p)
+ return NULL;
+
+ vsnprintf(p, len+1, fmt, ap);
+
+ return p;
+}
+EXPORT_SYMBOL(devm_kvasprintf);
+
+/**
+ * devm_kasprintf - Allocate resource managed space and format a string
+ * into that.
+ * @dev: Device to allocate memory for
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ * allocating memory
+ * @fmt: The printf()-style format string
+ * @...: Arguments for the format string
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
+{
+ va_list ap;
+ char *p;
+
+ va_start(ap, fmt);
+ p = devm_kvasprintf(dev, gfp, fmt, ap);
+ va_end(ap);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(devm_kasprintf);
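+
+/*
+ * Illustrative sketch (not part of the upstream file): building a managed,
+ * formatted string such as a derived interrupt name. The names are
+ * hypothetical.
+ *
+ *	priv->irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq%d",
+ *					dev_name(dev), index);
+ *	if (!priv->irq_name)
+ *		return -ENOMEM;
+ */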
+
+/**
+ * devm_kfree - Resource-managed kfree
+ * @dev: Device this memory belongs to
+ * @p: Memory to free
+ *
+ * Free memory allocated with devm_kmalloc().
+ */
+void devm_kfree(struct device *dev, const void *p)
+{
+ int rc;
+
+ /*
+ * Special cases: pointer to a string in .rodata returned by
+ * devm_kstrdup_const() or NULL/ZERO ptr.
+ */
+ if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
+ return;
+
+ rc = devres_destroy(dev, devm_kmalloc_release,
+ devm_kmalloc_match, (void *)p);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_kfree);
+
+/**
+ * devm_kmemdup - Resource-managed kmemdup
+ * @dev: Device this memory belongs to
+ * @src: Memory region to duplicate
+ * @len: Memory region length
+ * @gfp: GFP mask to use
+ *
+ * Duplicate region of a memory using resource managed kmalloc
+ */
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ void *p;
+
+ p = devm_kmalloc(dev, len, gfp);
+ if (p)
+ memcpy(p, src, len);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup);
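+
+/*
+ * Illustrative sketch (not part of the upstream file): keeping a managed
+ * copy of caller-provided data, e.g. platform data passed at probe time.
+ * The names are hypothetical.
+ *
+ *	priv->pdata = devm_kmemdup(dev, pdata, sizeof(*pdata), GFP_KERNEL);
+ *	if (!priv->pdata)
+ *		return -ENOMEM;
+ */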
+
+struct pages_devres {
+ unsigned long addr;
+ unsigned int order;
+};
+
+static int devm_pages_match(struct device *dev, void *res, void *p)
+{
+ struct pages_devres *devres = res;
+ struct pages_devres *target = p;
+
+ return devres->addr == target->addr;
+}
+
+static void devm_pages_release(struct device *dev, void *res)
+{
+ struct pages_devres *devres = res;
+
+ free_pages(devres->addr, devres->order);
+}
+
+/**
+ * devm_get_free_pages - Resource-managed __get_free_pages
+ * @dev: Device to allocate memory for
+ * @gfp_mask: Allocation gfp flags
+ * @order: Allocation size is (1 << order) pages
+ *
+ * Managed get_free_pages. Memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Address of allocated memory on success, 0 on failure.
+ */
+
+unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order)
+{
+ struct pages_devres *devres;
+ unsigned long addr;
+
+ addr = __get_free_pages(gfp_mask, order);
+
+ if (unlikely(!addr))
+ return 0;
+
+ devres = devres_alloc(devm_pages_release,
+ sizeof(struct pages_devres), GFP_KERNEL);
+ if (unlikely(!devres)) {
+ free_pages(addr, order);
+ return 0;
+ }
+
+ devres->addr = addr;
+ devres->order = order;
+
+ devres_add(dev, devres);
+ return addr;
+}
+EXPORT_SYMBOL_GPL(devm_get_free_pages);
+
+/**
+ * devm_free_pages - Resource-managed free_pages
+ * @dev: Device this memory belongs to
+ * @addr: Memory to free
+ *
+ * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
+ * there is no need to supply the @order.
+ */
+void devm_free_pages(struct device *dev, unsigned long addr)
+{
+ struct pages_devres devres = { .addr = addr };
+
+ WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
+ &devres));
+}
+EXPORT_SYMBOL_GPL(devm_free_pages);
+
+static void devm_percpu_release(struct device *dev, void *pdata)
+{
+ void __percpu *p;
+
+ p = *(void __percpu **)pdata;
+ free_percpu(p);
+}
+
+static int devm_percpu_match(struct device *dev, void *data, void *p)
+{
+ struct devres *devr = container_of(data, struct devres, data);
+
+ return *(void **)devr->data == p;
+}
+
+/**
+ * __devm_alloc_percpu - Resource-managed alloc_percpu
+ * @dev: Device to allocate per-cpu memory for
+ * @size: Size of per-cpu memory to allocate
+ * @align: Alignment of per-cpu memory to allocate
+ *
+ * Managed alloc_percpu. Per-cpu memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
+ size_t align)
+{
+ void *p;
+ void __percpu *pcpu;
+
+ pcpu = __alloc_percpu(size, align);
+ if (!pcpu)
+ return NULL;
+
+ p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
+ if (!p) {
+ free_percpu(pcpu);
+ return NULL;
+ }
+
+ *(void __percpu **)p = pcpu;
+
+ devres_add(dev, p);
+
+ return pcpu;
+}
+EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
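+
+/*
+ * Illustrative sketch (not part of the upstream file): callers normally use
+ * the devm_alloc_percpu() helper referenced in devm_free_percpu()'s kerneldoc
+ * below, which derives size and alignment from a type. The foo_* names are
+ * hypothetical.
+ *
+ *	struct foo_stats __percpu *stats;
+ *
+ *	stats = devm_alloc_percpu(dev, struct foo_stats);
+ *	if (!stats)
+ *		return -ENOMEM;
+ *	this_cpu_inc(stats->rx_packets);
+ */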
+
+/**
+ * devm_free_percpu - Resource-managed free_percpu
+ * @dev: Device this memory belongs to
+ * @pdata: Per-cpu memory to free
+ *
+ * Free memory allocated with devm_alloc_percpu().
+ */
+void devm_free_percpu(struct device *dev, void __percpu *pdata)
+{
+ WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ (__force void *)pdata));
+}
+EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
new file mode 100644
index 000000000..e4bffeabf
--- /dev/null
+++ b/drivers/base/devtmpfs.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * devtmpfs - kernel-maintained tmpfs-based /dev
+ *
+ * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
+ *
+ * During bootup, before any driver core device is registered,
+ * devtmpfs, a tmpfs-based filesystem, is created. Every driver-core
+ * device which requests a device node will add a node in this
+ * filesystem.
+ * By default, device nodes are named after the device,
+ * owned by root and have a default mode of 0600. Subsystems can
+ * overwrite the default setting if needed.
+ */
+
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/device.h>
+#include <linux/blkdev.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
+#include <linux/ramfs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/init_syscalls.h>
+#include <uapi/linux/mount.h>
+#include "base.h"
+
+#ifdef CONFIG_DEVTMPFS_SAFE
+#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
+#else
+#define DEVTMPFS_MFLAGS (MS_SILENT)
+#endif
+
+static struct task_struct *thread;
+
+static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
+
+static DEFINE_SPINLOCK(req_lock);
+
+static struct req {
+ struct req *next;
+ struct completion done;
+ int err;
+ const char *name;
+ umode_t mode; /* 0 => delete */
+ kuid_t uid;
+ kgid_t gid;
+ struct device *dev;
+} *requests;
+
+static int __init mount_param(char *str)
+{
+ mount_dev = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+__setup("devtmpfs.mount=", mount_param);
+
+static struct vfsmount *mnt;
+
+static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ struct super_block *s = mnt->mnt_sb;
+ int err;
+
+ atomic_inc(&s->s_active);
+ down_write(&s->s_umount);
+ err = reconfigure_single(s, flags, data);
+ if (err < 0) {
+ deactivate_locked_super(s);
+ return ERR_PTR(err);
+ }
+ return dget(s->s_root);
+}
+
+static struct file_system_type internal_fs_type = {
+ .name = "devtmpfs",
+#ifdef CONFIG_TMPFS
+ .init_fs_context = shmem_init_fs_context,
+#else
+ .init_fs_context = ramfs_init_fs_context,
+#endif
+ .kill_sb = kill_litter_super,
+};
+
+static struct file_system_type dev_fs_type = {
+ .name = "devtmpfs",
+ .mount = public_dev_mount,
+};
+
+#ifdef CONFIG_BLOCK
+static inline int is_blockdev(struct device *dev)
+{
+ return dev->class == &block_class;
+}
+#else
+static inline int is_blockdev(struct device *dev) { return 0; }
+#endif
+
+static int devtmpfs_submit_req(struct req *req, const char *tmp)
+{
+ init_completion(&req->done);
+
+ spin_lock(&req_lock);
+ req->next = requests;
+ requests = req;
+ spin_unlock(&req_lock);
+
+ wake_up_process(thread);
+ wait_for_completion(&req->done);
+
+ kfree(tmp);
+
+ return req->err;
+}
+
+int devtmpfs_create_node(struct device *dev)
+{
+ const char *tmp = NULL;
+ struct req req;
+
+ if (!thread)
+ return 0;
+
+ req.mode = 0;
+ req.uid = GLOBAL_ROOT_UID;
+ req.gid = GLOBAL_ROOT_GID;
+ req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
+ if (!req.name)
+ return -ENOMEM;
+
+ if (req.mode == 0)
+ req.mode = 0600;
+ if (is_blockdev(dev))
+ req.mode |= S_IFBLK;
+ else
+ req.mode |= S_IFCHR;
+
+ req.dev = dev;
+
+ return devtmpfs_submit_req(&req, tmp);
+}
+
+int devtmpfs_delete_node(struct device *dev)
+{
+ const char *tmp = NULL;
+ struct req req;
+
+ if (!thread)
+ return 0;
+
+ req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
+ if (!req.name)
+ return -ENOMEM;
+
+ req.mode = 0;
+ req.dev = dev;
+
+ return devtmpfs_submit_req(&req, tmp);
+}
+
+static int dev_mkdir(const char *name, umode_t mode)
+{
+ struct dentry *dentry;
+ struct path path;
+ int err;
+
+ dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode);
+ if (!err)
+ /* mark as kernel-created inode */
+ d_inode(dentry)->i_private = &thread;
+ done_path_create(&path, dentry);
+ return err;
+}
+
+static int create_path(const char *nodepath)
+{
+ char *path;
+ char *s;
+ int err = 0;
+
+ /* parent directories do not exist, create them */
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ s = path;
+ for (;;) {
+ s = strchr(s, '/');
+ if (!s)
+ break;
+ s[0] = '\0';
+ err = dev_mkdir(path, 0755);
+ if (err && err != -EEXIST)
+ break;
+ s[0] = '/';
+ s++;
+ }
+ kfree(path);
+ return err;
+}
+
+static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
+ kgid_t gid, struct device *dev)
+{
+ struct dentry *dentry;
+ struct path path;
+ int err;
+
+ dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ if (dentry == ERR_PTR(-ENOENT)) {
+ create_path(nodename);
+ dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ }
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode,
+ dev->devt);
+ if (!err) {
+ struct iattr newattrs;
+
+ newattrs.ia_mode = mode;
+ newattrs.ia_uid = uid;
+ newattrs.ia_gid = gid;
+ newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
+ inode_lock(d_inode(dentry));
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+
+ /* mark as kernel-created inode */
+ d_inode(dentry)->i_private = &thread;
+ }
+ done_path_create(&path, dentry);
+ return err;
+}
+
+static int dev_rmdir(const char *name)
+{
+ struct path parent;
+ struct dentry *dentry;
+ int err;
+
+ dentry = kern_path_locked(name, &parent);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ if (d_really_is_positive(dentry)) {
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry),
+ dentry);
+ else
+ err = -EPERM;
+ } else {
+ err = -ENOENT;
+ }
+ dput(dentry);
+ inode_unlock(d_inode(parent.dentry));
+ path_put(&parent);
+ return err;
+}
+
+static int delete_path(const char *nodepath)
+{
+ char *path;
+ int err = 0;
+
+ path = kstrdup(nodepath, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ for (;;) {
+ char *base;
+
+ base = strrchr(path, '/');
+ if (!base)
+ break;
+ base[0] = '\0';
+ err = dev_rmdir(path);
+ if (err)
+ break;
+ }
+
+ kfree(path);
+ return err;
+}
+
+static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
+{
+ /* did we create it */
+ if (inode->i_private != &thread)
+ return 0;
+
+ /* does the dev_t match */
+ if (is_blockdev(dev)) {
+ if (!S_ISBLK(stat->mode))
+ return 0;
+ } else {
+ if (!S_ISCHR(stat->mode))
+ return 0;
+ }
+ if (stat->rdev != dev->devt)
+ return 0;
+
+ /* ours */
+ return 1;
+}
+
+static int handle_remove(const char *nodename, struct device *dev)
+{
+ struct path parent;
+ struct dentry *dentry;
+ int deleted = 0;
+ int err;
+
+ dentry = kern_path_locked(nodename, &parent);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ if (d_really_is_positive(dentry)) {
+ struct kstat stat;
+ struct path p = {.mnt = parent.mnt, .dentry = dentry};
+ err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
+ AT_STATX_SYNC_AS_STAT);
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ inode_lock(d_inode(dentry));
+ notify_change(&init_user_ns, dentry, &newattrs, NULL);
+ inode_unlock(d_inode(dentry));
+ err = vfs_unlink(&init_user_ns, d_inode(parent.dentry),
+ dentry, NULL);
+ if (!err || err == -ENOENT)
+ deleted = 1;
+ }
+ } else {
+ err = -ENOENT;
+ }
+ dput(dentry);
+ inode_unlock(d_inode(parent.dentry));
+
+ path_put(&parent);
+ if (deleted && strchr(nodename, '/'))
+ delete_path(nodename);
+ return err;
+}
+
+/*
+ * If configured, or requested by the commandline, devtmpfs will be
+ * auto-mounted after the kernel has mounted the root filesystem.
+ */
+int __init devtmpfs_mount(void)
+{
+ int err;
+
+ if (!mount_dev)
+ return 0;
+
+ if (!thread)
+ return 0;
+
+ err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
+ if (err)
+ printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+ else
+ printk(KERN_INFO "devtmpfs: mounted\n");
+ return err;
+}
+
+static __initdata DECLARE_COMPLETION(setup_done);
+
+static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
+ struct device *dev)
+{
+ if (mode)
+ return handle_create(name, mode, uid, gid, dev);
+ else
+ return handle_remove(name, dev);
+}
+
+static void __noreturn devtmpfs_work_loop(void)
+{
+ while (1) {
+ spin_lock(&req_lock);
+ while (requests) {
+ struct req *req = requests;
+ requests = NULL;
+ spin_unlock(&req_lock);
+ while (req) {
+ struct req *next = req->next;
+ req->err = handle(req->name, req->mode,
+ req->uid, req->gid, req->dev);
+ complete(&req->done);
+ req = next;
+ }
+ spin_lock(&req_lock);
+ }
+ __set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock(&req_lock);
+ schedule();
+ }
+}
+
+static noinline int __init devtmpfs_setup(void *p)
+{
+ int err;
+
+ err = ksys_unshare(CLONE_NEWNS);
+ if (err)
+ goto out;
+ err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
+ if (err)
+ goto out;
+ init_chdir("/.."); /* will traverse into overmounted root */
+ init_chroot(".");
+out:
+ *(int *)p = err;
+ return err;
+}
+
+/*
+ * The __ref is because devtmpfs_setup needs to be __init for the routines it
+ * calls. That call is done while devtmpfs_init, which is marked __init,
+ * synchronously waits for it to complete.
+ */
+static int __ref devtmpfsd(void *p)
+{
+ int err = devtmpfs_setup(p);
+
+ complete(&setup_done);
+ if (err)
+ return err;
+ devtmpfs_work_loop();
+ return 0;
+}
+
+/*
+ * Create devtmpfs instance, driver-core devices will add their device
+ * nodes here.
+ */
+int __init devtmpfs_init(void)
+{
+ char opts[] = "mode=0755";
+ int err;
+
+ mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
+ if (IS_ERR(mnt)) {
+ printk(KERN_ERR "devtmpfs: unable to create devtmpfs %ld\n",
+ PTR_ERR(mnt));
+ return PTR_ERR(mnt);
+ }
+ err = register_filesystem(&dev_fs_type);
+ if (err) {
+ printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
+ "type %i\n", err);
+ return err;
+ }
+
+ thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
+ if (!IS_ERR(thread)) {
+ wait_for_completion(&setup_done);
+ } else {
+ err = PTR_ERR(thread);
+ thread = NULL;
+ }
+
+ if (err) {
+ printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
+ unregister_filesystem(&dev_fs_type);
+ thread = NULL;
+ return err;
+ }
+
+ printk(KERN_INFO "devtmpfs: initialized\n");
+ return 0;
+}
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
new file mode 100644
index 000000000..676b6275d
--- /dev/null
+++ b/drivers/base/driver.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * driver.c - centralized device driver management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007 Novell Inc.
+ */
+
+#include <linux/device/driver.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "base.h"
+
+static struct device *next_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_next(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_driver(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
+/**
+ * driver_set_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @override: Address of string to change (e.g. &device->driver_override);
+ * The current contents will be freed and replaced with the newly allocated override.
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ * string to clear it ("" or "\n", where the latter is only for sysfs
+ * interface).
+ * @len: length of @s
+ *
+ * Helper to set or clear driver override in a device, intended for the cases
+ * when the driver_override field is allocated by driver/bus code.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int driver_set_override(struct device *dev, const char **override,
+ const char *s, size_t len)
+{
+ const char *new, *old;
+ char *cp;
+
+ if (!override || !s)
+ return -EINVAL;
+
+ /*
+ * The stored value will be used in sysfs show callback (sysfs_emit()),
+ * which has a length limit of PAGE_SIZE and adds a trailing newline.
+ * Thus we can store one character less to avoid truncation during sysfs
+ * show.
+ */
+ if (len >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ /*
+ * Compute the real length of the string in case userspace sends us a
+ * bunch of \0 characters like python likes to do.
+ */
+ len = strlen(s);
+
+ if (!len) {
+ /* Empty string passed - clear override */
+ device_lock(dev);
+ old = *override;
+ *override = NULL;
+ device_unlock(dev);
+ kfree(old);
+
+ return 0;
+ }
+
+ cp = strnchr(s, len, '\n');
+ if (cp)
+ len = cp - s;
+
+ new = kstrndup(s, len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ device_lock(dev);
+ old = *override;
+ if (cp != s) {
+ *override = new;
+ } else {
+ /* "\n" passed - clear override */
+ kfree(new);
+ *override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(driver_set_override);
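+
+/*
+ * Illustrative sketch (not part of the upstream file): bus code typically
+ * calls this from a driver_override sysfs store callback, roughly as below.
+ * The foo_* names and to_foo_device() are hypothetical.
+ *
+ *	static ssize_t driver_override_store(struct device *dev,
+ *					     struct device_attribute *attr,
+ *					     const char *buf, size_t count)
+ *	{
+ *		struct foo_device *fdev = to_foo_device(dev);
+ *		int ret;
+ *
+ *		ret = driver_set_override(dev, &fdev->driver_override,
+ *					  buf, count);
+ *		if (ret)
+ *			return ret;
+ *		return count;
+ *	}
+ */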
+
+/**
+ * driver_for_each_device - Iterator for devices bound to a driver.
+ * @drv: Driver we're iterating.
+ * @start: Device to begin with
+ * @data: Data to pass to the callback.
+ * @fn: Function to call for each device.
+ *
+ * Iterate over the @drv's list of devices calling @fn for each one.
+ */
+int driver_for_each_device(struct device_driver *drv, struct device *start,
+ void *data, int (*fn)(struct device *, void *))
+{
+ struct klist_iter i;
+ struct device *dev;
+ int error = 0;
+
+ if (!drv)
+ return -EINVAL;
+
+ klist_iter_init_node(&drv->p->klist_devices, &i,
+ start ? &start->p->knode_driver : NULL);
+ while (!error && (dev = next_device(&i)))
+ error = fn(dev, data);
+ klist_iter_exit(&i);
+ return error;
+}
+EXPORT_SYMBOL_GPL(driver_for_each_device);
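+
+/*
+ * Illustrative sketch (not part of the upstream file): the callback returning
+ * non-zero stops the iteration and that value is propagated to the caller.
+ * The foo_* names are hypothetical.
+ *
+ *	static int foo_resume_one(struct device *dev, void *data)
+ *	{
+ *		return foo_device_resume(dev);	// hypothetical helper
+ *	}
+ *
+ *	err = driver_for_each_device(&foo_drv, NULL, NULL, foo_resume_one);
+ */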
+
+/**
+ * driver_find_device - device iterator for locating a particular device.
+ * @drv: The device's driver
+ * @start: Device to begin with
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the driver_for_each_device() function above, but
+ * it returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ */
+struct device *driver_find_device(struct device_driver *drv,
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!drv || !drv->p)
+ return NULL;
+
+ klist_iter_init_node(&drv->p->klist_devices, &i,
+ (start ? &start->p->knode_driver : NULL));
+ while ((dev = next_device(&i)))
+ if (match(dev, data) && get_device(dev))
+ break;
+ klist_iter_exit(&i);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(driver_find_device);
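+
+/*
+ * Illustrative sketch (not part of the upstream file): a match callback that
+ * looks up a bound device by its of_node. The returned device carries a
+ * reference that must be dropped with put_device(). The foo_* name is
+ * hypothetical.
+ *
+ *	static int foo_match_of_node(struct device *dev, const void *data)
+ *	{
+ *		return dev->of_node == data;
+ *	}
+ *
+ *	dev = driver_find_device(drv, NULL, np, foo_match_of_node);
+ *	if (dev) {
+ *		// use the device
+ *		put_device(dev);
+ *	}
+ */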
+
+/**
+ * driver_create_file - create sysfs file for driver.
+ * @drv: driver.
+ * @attr: driver attribute descriptor.
+ */
+int driver_create_file(struct device_driver *drv,
+ const struct driver_attribute *attr)
+{
+ int error;
+
+ if (drv)
+ error = sysfs_create_file(&drv->p->kobj, &attr->attr);
+ else
+ error = -EINVAL;
+ return error;
+}
+EXPORT_SYMBOL_GPL(driver_create_file);
+
+/**
+ * driver_remove_file - remove sysfs file for driver.
+ * @drv: driver.
+ * @attr: driver attribute descriptor.
+ */
+void driver_remove_file(struct device_driver *drv,
+ const struct driver_attribute *attr)
+{
+ if (drv)
+ sysfs_remove_file(&drv->p->kobj, &attr->attr);
+}
+EXPORT_SYMBOL_GPL(driver_remove_file);
+
+int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&drv->p->kobj, groups);
+}
+
+void driver_remove_groups(struct device_driver *drv,
+ const struct attribute_group **groups)
+{
+ sysfs_remove_groups(&drv->p->kobj, groups);
+}
+
+/**
+ * driver_register - register driver with bus
+ * @drv: driver to register
+ *
+ * We pass off most of the work to the bus_add_driver() call,
+ * since most of the things we have to do deal with the bus
+ * structures.
+ */
+int driver_register(struct device_driver *drv)
+{
+ int ret;
+ struct device_driver *other;
+
+ if (!drv->bus->p) {
+ pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n",
+ drv->name, drv->bus->name);
+ return -EINVAL;
+ }
+
+ if ((drv->bus->probe && drv->probe) ||
+ (drv->bus->remove && drv->remove) ||
+ (drv->bus->shutdown && drv->shutdown))
+ pr_warn("Driver '%s' needs updating - please use "
+ "bus_type methods\n", drv->name);
+
+ other = driver_find(drv->name, drv->bus);
+ if (other) {
+ pr_err("Error: Driver '%s' is already registered, "
+ "aborting...\n", drv->name);
+ return -EBUSY;
+ }
+
+ ret = bus_add_driver(drv);
+ if (ret)
+ return ret;
+ ret = driver_add_groups(drv, drv->groups);
+ if (ret) {
+ bus_remove_driver(drv);
+ return ret;
+ }
+ kobject_uevent(&drv->p->kobj, KOBJ_ADD);
+ deferred_probe_extend_timeout();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(driver_register);
+
+/**
+ * driver_unregister - remove driver from system.
+ * @drv: driver.
+ *
+ * Again, we pass off most of the work to the bus-level call.
+ */
+void driver_unregister(struct device_driver *drv)
+{
+ if (!drv || !drv->p) {
+ WARN(1, "Unexpected driver unregister!\n");
+ return;
+ }
+ driver_remove_groups(drv, drv->groups);
+ bus_remove_driver(drv);
+}
+EXPORT_SYMBOL_GPL(driver_unregister);
+
+/**
+ * driver_find - locate driver on a bus by its name.
+ * @name: name of the driver.
+ * @bus: bus to scan for the driver.
+ *
+ * Call kset_find_obj() to iterate over list of drivers on
+ * a bus to find driver by name. Return driver if found.
+ *
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
+ */
+struct device_driver *driver_find(const char *name, struct bus_type *bus)
+{
+ struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
+ struct driver_private *priv;
+
+ if (k) {
+ /* Drop reference added by kset_find_obj() */
+ kobject_put(k);
+ priv = to_driver(k);
+ return priv->driver;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(driver_find);
diff --git a/drivers/base/firmware.c b/drivers/base/firmware.c
new file mode 100644
index 000000000..8dff940e0
--- /dev/null
+++ b/drivers/base/firmware.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * firmware.c - firmware subsystem hoohaw.
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2007 Novell Inc.
+ */
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include "base.h"
+
+struct kobject *firmware_kobj;
+EXPORT_SYMBOL_GPL(firmware_kobj);
+
+int __init firmware_init(void)
+{
+ firmware_kobj = kobject_create_and_add("firmware", NULL);
+ if (!firmware_kobj)
+ return -ENOMEM;
+ return 0;
+}
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
new file mode 100644
index 000000000..5166b323a
--- /dev/null
+++ b/drivers/base/firmware_loader/Kconfig
@@ -0,0 +1,221 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Firmware loader"
+
+config FW_LOADER
+ tristate "Firmware loading facility" if EXPERT
+ default y
+ help
+ This enables the firmware loading facility in the kernel. The kernel
+ will first look for built-in firmware, if it has any. Next, it will
+ look for the requested firmware in a series of filesystem paths:
+
+ o firmware_class path module parameter or kernel boot param
+ o /lib/firmware/updates/UTS_RELEASE
+ o /lib/firmware/updates
+ o /lib/firmware/UTS_RELEASE
+ o /lib/firmware
+
+ Enabling this feature only increases your kernel image by about
+ 828 bytes, so enable this option unless you are certain you don't
+ need firmware.
+
+ You typically want this built-in (=y) but you can also enable this
+ as a module, in which case the firmware_class module will be built.
+ You also want to be sure to enable this built-in if you are going to
+ enable built-in firmware (CONFIG_EXTRA_FIRMWARE).
+
+if FW_LOADER
+
+config FW_LOADER_PAGED_BUF
+ bool
+
+config FW_LOADER_SYSFS
+ bool
+
+config EXTRA_FIRMWARE
+ string "Build named firmware blobs into the kernel binary"
+ help
+ Device drivers which require firmware can typically deal with
+ having the kernel load firmware from the various supported
+ /lib/firmware/ paths. This option enables you to build firmware
+ files into the kernel image. Built-in firmware is searched before
+ any firmware lookup on your filesystem in the supported
+ /lib/firmware paths documented on CONFIG_FW_LOADER.
+
+ This may be useful for testing or if the firmware is required early on
+ in boot and cannot rely on the firmware being placed in an initrd or
+ initramfs.
+
+ This option is a string and takes the (space-separated) names of the
+ firmware files -- the same names that appear in MODULE_FIRMWARE()
+ and request_firmware() in the source. These files should exist under
+ the directory specified by the EXTRA_FIRMWARE_DIR option, which is
+ /lib/firmware by default.
+
+ For example, you might set CONFIG_EXTRA_FIRMWARE="usb8388.bin", copy
+ the usb8388.bin file into /lib/firmware, and build the kernel. Then
+ any request_firmware("usb8388.bin") will be satisfied internally
+ inside the kernel without ever looking at your filesystem at runtime.
+
+ WARNING: If you include additional firmware files into your binary
+ kernel image that are not available under the terms of the GPL,
+ then it may be a violation of the GPL to distribute the resulting
+ image since it combines both GPL and non-GPL work. You should
+ consult a lawyer of your own before distributing such an image.
+
+ NOTE: Compressed files are not supported in EXTRA_FIRMWARE.
+
+config EXTRA_FIRMWARE_DIR
+ string "Firmware blobs root directory"
+ depends on EXTRA_FIRMWARE != ""
+ default "/lib/firmware"
+ help
+ This option controls the directory in which the kernel build system
+ looks for the firmware files listed in the EXTRA_FIRMWARE option.
+
+config FW_LOADER_USER_HELPER
+ bool "Enable the firmware sysfs fallback mechanism"
+ select FW_LOADER_SYSFS
+ select FW_LOADER_PAGED_BUF
+ help
+ This option enables a sysfs loading facility to enable firmware
+ loading to the kernel through userspace as a fallback mechanism
+ if and only if the kernel's direct filesystem lookup for the
+ firmware failed using the different /lib/firmware/ paths, or the
+ path specified in the firmware_class path module parameter, or the
+ firmware_class path kernel boot parameter if the firmware_class is
+ built-in. For details on how to work with the sysfs fallback mechanism
+ refer to Documentation/driver-api/firmware/fallback-mechanisms.rst.
+
+ The direct filesystem lookup for firmware is always used first now.
+
+ If the kernel's direct filesystem lookup for firmware fails to find
+ the requested firmware a sysfs fallback loading facility is made
+ available and userspace is informed about this through uevents.
+ The uevent can be suppressed if the driver explicitly requested it;
+ this is known as the driver using the custom fallback mechanism.
+ If the custom fallback mechanism is used, userspace must always
+ acknowledge failure to find firmware as the timeout for the fallback
+ mechanism is disabled, and failed requests will linger forever.
+
+ This used to be the default firmware loading facility, and udev used
+ to listen for uevents to load firmware for the kernel. The firmware
+ loading functionality in udev has since been removed, so it
+ can no longer be relied upon as a fallback mechanism. Linux no longer
+ relies on or uses a fallback mechanism in userspace. If you need to
+ rely on one refer to the permissively licensed firmwared:
+
+ https://github.com/teg/firmwared
+
+ Since this was the default firmware loading facility at one point,
+ old userspace may exist which relies upon it, and as such this
+ mechanism can never be removed from the kernel.
+
+ You should only enable this functionality if you are certain you
+ require a fallback mechanism and have a userspace mechanism ready to
+ load firmware in case it is not found. One main reason for this may
+ be if you have drivers which require firmware built-in and for
+ whatever reason cannot place the required firmware in initramfs.
+ Another reason kernels may have this feature enabled is to support a
+ driver which explicitly relies on this fallback mechanism. Only two
+ drivers need this today:
+
+ o CONFIG_LEDS_LP55XX_COMMON
+ o CONFIG_DELL_RBU
+
+ Outside of supporting the above drivers, another reason for needing
+ this may be that your firmware resides outside of the paths the kernel
+ looks for and cannot possibly be specified using the firmware_class
+ path module parameter or kernel firmware_class path boot parameter
+ if firmware_class is built-in.
+
+ A modern use case may be to temporarily mount a custom partition
+ during provisioning which is only accessible to userspace, and then
+ to use it to look for and fetch the required firmware. Such type of
+ driver functionality may not even ever be desirable upstream by
+ vendors, and as such is only required to be supported as an interface
+ for provisioning. Since udev's firmware loading facility has been
+ removed you can use firmwared or a fork of it to customize how you
+ want to load firmware based on uevents issued.
+
+ Enabling this option will increase your kernel image size by about
+ 13436 bytes.
+
+ If you are unsure about this, say N here, unless you are a Linux
+ distribution and need to support the above two drivers, or you are
+ certain you need to support some really custom firmware loading
+ facility in userspace.
+
+config FW_LOADER_USER_HELPER_FALLBACK
+ bool "Force the firmware sysfs fallback mechanism when possible"
+ depends on FW_LOADER_USER_HELPER
+ help
+ Enabling this option forces a sysfs userspace fallback mechanism
+ to be used for all firmware requests which do not explicitly disable
+ the fallback mechanism. The only firmware call which prohibits the
+ fallback mechanism is request_firmware_direct(). This option is kept for
+ backward compatibility purposes given this precise mechanism can also
+ be enabled by setting the proc sysctl value to true:
+
+ /proc/sys/kernel/firmware_config/force_sysfs_fallback
+
+ If you are unsure about this, say N here.
+
+config FW_LOADER_COMPRESS
+ bool "Enable compressed firmware support"
+ help
+ This option enables the support for loading compressed firmware
+ files. The caller of firmware API receives the decompressed file
+ content. The compressed file is only loaded as a fallback, after
+ loading the raw file has failed.
+
+ Compressed firmware support does not apply to firmware images
+ that are built into the kernel image (CONFIG_EXTRA_FIRMWARE).
+
+if FW_LOADER_COMPRESS
+config FW_LOADER_COMPRESS_XZ
+ bool "Enable XZ-compressed firmware support"
+ select FW_LOADER_PAGED_BUF
+ select XZ_DEC
+ default y
+ help
+ This option adds the support for XZ-compressed files.
+ The files have to be compressed with either none or crc32
+ integrity check type (pass "-C crc32" option to xz command).
+
+config FW_LOADER_COMPRESS_ZSTD
+ bool "Enable ZSTD-compressed firmware support"
+ select ZSTD_DECOMPRESS
+ help
+ This option adds the support for ZSTD-compressed files.
+
+endif # FW_LOADER_COMPRESS
+
+config FW_CACHE
+ bool "Enable firmware caching during suspend"
+ depends on PM_SLEEP
+ default y if PM_SLEEP
+ help
+ Because firmware caching generates uevent messages that are sent
+ over a netlink socket, it can prevent suspend on many platforms.
+ It is also not always useful, so on such platforms we have the
+ option to disable the feature.
+
+ If unsure, say Y.
+
+config FW_UPLOAD
+ bool "Enable users to initiate firmware updates using sysfs"
+ select FW_LOADER_SYSFS
+ select FW_LOADER_PAGED_BUF
+ help
+ Enabling this option will allow device drivers to expose a persistent
+ sysfs interface that allows firmware updates to be initiated from
+ userspace. For example, FPGA based PCIe cards load firmware and FPGA
+ images from local FLASH when the card boots. The images in FLASH may
+ be updated with new images provided by the user. Enable this option
+ to support cards that rely on user-initiated updates for firmware files.
+
+ If unsure, say N.
+
+endif # FW_LOADER
+endmenu
diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile
new file mode 100644
index 000000000..60d19f9e0
--- /dev/null
+++ b/drivers/base/firmware_loader/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the Linux firmware loader
+
+obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o
+obj-$(CONFIG_FW_LOADER) += firmware_class.o
+firmware_class-objs := main.o
+firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o
+firmware_class-$(CONFIG_EFI_EMBEDDED_FIRMWARE) += fallback_platform.o
+firmware_class-$(CONFIG_FW_LOADER_SYSFS) += sysfs.o
+firmware_class-$(CONFIG_FW_UPLOAD) += sysfs_upload.o
+
+obj-y += builtin/
diff --git a/drivers/base/firmware_loader/builtin/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore
new file mode 100644
index 000000000..166f76b43
--- /dev/null
+++ b/drivers/base/firmware_loader/builtin/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+*.gen.S
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
new file mode 100644
index 000000000..6c067dedc
--- /dev/null
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += main.o
+
+# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
+# leading /, it's relative to $(srctree).
+fwdir := $(CONFIG_EXTRA_FIRMWARE_DIR)
+fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
+
+firmware := $(addsuffix .gen.o, $(CONFIG_EXTRA_FIRMWARE))
+obj-y += $(firmware)
+
+FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
+FWSTR = $(subst $(comma),_,$(subst /,_,$(subst .,_,$(subst -,_,$(FWNAME)))))
+ASM_WORD = $(if $(CONFIG_64BIT),.quad,.long)
+ASM_ALIGN = $(if $(CONFIG_64BIT),3,2)
+PROGBITS = $(if $(CONFIG_ARM),%,@)progbits
+
+filechk_fwbin = \
+ echo "/* Generated by $(src)/Makefile */" ;\
+ echo " .section .rodata" ;\
+ echo " .p2align 4" ;\
+ echo "_fw_$(FWSTR)_bin:" ;\
+ echo " .incbin \"$(fwdir)/$(FWNAME)\"" ;\
+ echo "_fw_end:" ;\
+ echo " .section .rodata.str,\"aMS\",$(PROGBITS),1" ;\
+ echo " .p2align $(ASM_ALIGN)" ;\
+ echo "_fw_$(FWSTR)_name:" ;\
+ echo " .string \"$(FWNAME)\"" ;\
+ echo " .section .builtin_fw,\"a\",$(PROGBITS)" ;\
+ echo " .p2align $(ASM_ALIGN)" ;\
+ echo " $(ASM_WORD) _fw_$(FWSTR)_name" ;\
+ echo " $(ASM_WORD) _fw_$(FWSTR)_bin" ;\
+ echo " $(ASM_WORD) _fw_end - _fw_$(FWSTR)_bin"
+
+$(obj)/%.gen.S: FORCE
+ $(call filechk,fwbin)
+
+# The .o files depend on the binaries directly; the .S files don't.
+$(addprefix $(obj)/, $(firmware)): $(obj)/%.gen.o: $(fwdir)/%
+
+targets := $(patsubst $(obj)/%,%, \
+ $(shell find $(obj) -name \*.gen.S 2>/dev/null))
diff --git a/drivers/base/firmware_loader/builtin/main.c b/drivers/base/firmware_loader/builtin/main.c
new file mode 100644
index 000000000..a065c3150
--- /dev/null
+++ b/drivers/base/firmware_loader/builtin/main.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Builtin firmware support */
+
+#include <linux/firmware.h>
+#include "../firmware.h"
+
+/* Only if FW_LOADER=y */
+#ifdef CONFIG_FW_LOADER
+
+struct builtin_fw {
+ char *name;
+ void *data;
+ unsigned long size;
+};
+
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+static bool fw_copy_to_prealloc_buf(struct firmware *fw,
+ void *buf, size_t size)
+{
+ if (!buf)
+ return true;
+ if (size < fw->size)
+ return false;
+ memcpy(buf, fw->data, fw->size);
+ return true;
+}
+
+/**
+ * firmware_request_builtin() - load builtin firmware
+ * @fw: pointer to firmware struct
+ * @name: name of firmware file
+ *
+ * Some use cases in the kernel require that no memory allocator is involved,
+ * as these calls take place early in the boot process. An example is
+ * the x86 CPU microcode loader. In these cases all the caller wants is to see
+ * if the firmware was built-in and if so use it right away. This can be used
+ * for such cases.
+ *
+ * This looks for the firmware built into the kernel. Only if the kernel was
+ * built with the firmware you are looking for will this return successfully.
+ *
+ * Callers of this API do not need to use release_firmware() as the pointer to
+ * the firmware is expected to be provided locally on the stack of the caller.
+ **/
+bool firmware_request_builtin(struct firmware *fw, const char *name)
+{
+ struct builtin_fw *b_fw;
+
+ if (!fw)
+ return false;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+ if (strcmp(name, b_fw->name) == 0) {
+ fw->size = b_fw->size;
+ fw->data = b_fw->data;
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, TEST_FIRMWARE);
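+
+/*
+ * Illustrative sketch (not part of the upstream file): an early-boot caller
+ * can keep the struct firmware on the stack and skip release_firmware().
+ * The blob name and foo_apply_blob() consumer are hypothetical.
+ *
+ *	struct firmware fw;
+ *
+ *	if (firmware_request_builtin(&fw, "foo/early_ucode.bin"))
+ *		foo_apply_blob(fw.data, fw.size);
+ */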
+
+/**
+ * firmware_request_builtin_buf() - load builtin firmware into optional buffer
+ * @fw: pointer to firmware struct
+ * @name: name of firmware file
+ * @buf: If set this lets you use a pre-allocated buffer so that the built-in
+ * firmware is copied into it. This field can be NULL. It is used by
+ * callers such as request_firmware_into_buf() and
+ * request_partial_firmware_into_buf()
+ * @size: if buf was provided, the max size of the allocated buffer available.
+ * If the built-in firmware does not fit into the pre-allocated @buf this
+ * call will fail.
+ *
+ * This looks for the firmware built into the kernel. Only if the kernel was
+ * built with the firmware you are looking for will this call possibly
+ * succeed. If you passed a @buf the firmware will be copied into it *iff* the
+ * built-in firmware fits into the pre-allocated buffer size specified in
+ * @size.
+ *
+ * This caller is to be used internally by the firmware_loader only.
+ **/
+bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
+ void *buf, size_t size)
+{
+ if (!firmware_request_builtin(fw, name))
+ return false;
+
+ return fw_copy_to_prealloc_buf(fw, buf, size);
+}
+
+bool firmware_is_builtin(const struct firmware *fw)
+{
+ struct builtin_fw *b_fw;
+
+ for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
+ if (fw->data == b_fw->data)
+ return true;
+
+ return false;
+}
+
+#endif
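/*
 * Hedged usage sketch, not part of the patch above: an early-boot caller
 * such as a microcode loader can probe for a built-in image without touching
 * the memory allocator and without calling release_firmware().  The firmware
 * name and the example_probe_builtin_fw() helper are hypothetical.
 */
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/printk.h>

static void __init example_probe_builtin_fw(void)
{
	struct firmware fw;	/* lives on the caller's stack */

	/* Succeeds only if "example/fw.bin" was built into this kernel. */
	if (firmware_request_builtin(&fw, "example/fw.bin"))
		pr_info("built-in firmware found (%zu bytes)\n", fw.size);
	else
		pr_info("example/fw.bin is not built in\n");
}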
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
new file mode 100644
index 000000000..bf68e3947
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/security.h>
+#include <linux/umh.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+/*
+ * firmware fallback mechanism
+ */
+
+/*
+ * Use a small loading timeout when caching devices' firmware: all of these
+ * firmware images have already been loaded successfully at least once, and
+ * the system is ready to complete firmware loading now. The largest firmware
+ * in current distributions is about 2 MB, so 10 seconds should be enough.
+ */
+void fw_fallback_set_cache_timeout(void)
+{
+ fw_fallback_config.old_timeout = __firmware_loading_timeout();
+ __fw_fallback_set_timeout(10);
+}
+
+/* Restores the timeout to the value last configured during normal operation */
+void fw_fallback_set_default_timeout(void)
+{
+ __fw_fallback_set_timeout(fw_fallback_config.old_timeout);
+}
+
+static long firmware_loading_timeout(void)
+{
+ return __firmware_loading_timeout() > 0 ?
+ __firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
+}
+
+static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
+{
+ return __fw_state_wait_common(fw_priv, timeout);
+}
+
+static LIST_HEAD(pending_fw_head);
+
+void kill_pending_fw_fallback_reqs(bool only_kill_custom)
+{
+ struct fw_priv *fw_priv;
+ struct fw_priv *next;
+
+ mutex_lock(&fw_lock);
+ list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
+ pending_list) {
+ if (!fw_priv->need_uevent || !only_kill_custom)
+ __fw_load_abort(fw_priv);
+ }
+ mutex_unlock(&fw_lock);
+}
+
+/**
+ * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
+ * @fw_sysfs: firmware sysfs information for the firmware to load
+ * @timeout: timeout to wait for the load
+ *
+ * In charge of constructing a sysfs fallback interface for firmware loading.
+ **/
+static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
+{
+ int retval = 0;
+ struct device *f_dev = &fw_sysfs->dev;
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ /* fall back on userspace loading */
+ if (!fw_priv->data)
+ fw_priv->is_paged_buf = true;
+
+ dev_set_uevent_suppress(f_dev, true);
+
+ retval = device_add(f_dev);
+ if (retval) {
+ dev_err(f_dev, "%s: device_register failed\n", __func__);
+ goto err_put_dev;
+ }
+
+ mutex_lock(&fw_lock);
+ if (fw_state_is_aborted(fw_priv)) {
+ mutex_unlock(&fw_lock);
+ retval = -EINTR;
+ goto out;
+ }
+ list_add(&fw_priv->pending_list, &pending_fw_head);
+ mutex_unlock(&fw_lock);
+
+ if (fw_priv->opt_flags & FW_OPT_UEVENT) {
+ fw_priv->need_uevent = true;
+ dev_set_uevent_suppress(f_dev, false);
+ dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
+ kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
+ } else {
+ timeout = MAX_JIFFY_OFFSET;
+ }
+
+ retval = fw_sysfs_wait_timeout(fw_priv, timeout);
+ if (retval < 0 && retval != -ENOENT) {
+ mutex_lock(&fw_lock);
+ fw_load_abort(fw_sysfs);
+ mutex_unlock(&fw_lock);
+ }
+
+ if (fw_state_is_aborted(fw_priv)) {
+ if (retval == -ERESTARTSYS)
+ retval = -EINTR;
+ } else if (fw_priv->is_paged_buf && !fw_priv->data)
+ retval = -ENOMEM;
+
+out:
+ device_del(f_dev);
+err_put_dev:
+ put_device(f_dev);
+ return retval;
+}
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+ const char *name, struct device *device,
+ u32 opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ long timeout;
+ int ret;
+
+ timeout = firmware_loading_timeout();
+ if (opt_flags & FW_OPT_NOWAIT) {
+ timeout = usermodehelper_read_lock_wait(timeout);
+ if (!timeout) {
+ dev_dbg(device, "firmware: %s loading timed out\n",
+ name);
+ return -EBUSY;
+ }
+ } else {
+ ret = usermodehelper_read_trylock();
+ if (WARN_ON(ret)) {
+ dev_err(device, "firmware: %s will not be loaded\n",
+ name);
+ return ret;
+ }
+ }
+
+ fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
+ goto out_unlock;
+ }
+
+ fw_sysfs->fw_priv = firmware->priv;
+ ret = fw_load_sysfs_fallback(fw_sysfs, timeout);
+
+ if (!ret)
+ ret = assign_fw(firmware, device);
+
+out_unlock:
+ usermodehelper_read_unlock();
+
+ return ret;
+}
+
+static bool fw_force_sysfs_fallback(u32 opt_flags)
+{
+ if (fw_fallback_config.force_sysfs_fallback)
+ return true;
+ if (!(opt_flags & FW_OPT_USERHELPER))
+ return false;
+ return true;
+}
+
+static bool fw_run_sysfs_fallback(u32 opt_flags)
+{
+ int ret;
+
+ if (fw_fallback_config.ignore_sysfs_fallback) {
+ pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
+ return false;
+ }
+
+ if ((opt_flags & FW_OPT_NOFALLBACK_SYSFS))
+ return false;
+
+ /* Also permit LSMs and IMA to fail firmware sysfs fallback */
+ ret = security_kernel_load_data(LOADING_FIRMWARE, true);
+ if (ret < 0)
+ return false;
+
+ return fw_force_sysfs_fallback(opt_flags);
+}
+
+/**
+ * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
+ * @fw: pointer to firmware image
+ * @name: name of firmware file to look for
+ * @device: device for which firmware is being loaded
+ * @opt_flags: options to control firmware loading behaviour, as defined by
+ * &enum fw_opt
+ * @ret: return value from direct lookup which triggered the fallback mechanism
+ *
+ * This function is called when direct lookup for the firmware has failed. It
+ * enables a fallback mechanism through userspace by exposing a sysfs loading
+ * interface; userspace is then in charge of loading the firmware through that
+ * interface. The sysfs fallback mechanism may be disabled completely on a
+ * system by setting the proc sysctl value ignore_sysfs_fallback to true.
+ * If that is false, we also check whether the internal API caller set the
+ * @FW_OPT_NOFALLBACK_SYSFS flag, which likewise disables the fallback
+ * mechanism. A system that wants to enforce the sysfs fallback mechanism at
+ * all times can do so by setting ignore_sysfs_fallback to false and
+ * force_sysfs_fallback to true.
+ * Enabling force_sysfs_fallback is functionally equivalent to building a
+ * kernel with CONFIG_FW_LOADER_USER_HELPER_FALLBACK.
+ **/
+int firmware_fallback_sysfs(struct firmware *fw, const char *name,
+ struct device *device,
+ u32 opt_flags,
+ int ret)
+{
+ if (!fw_run_sysfs_fallback(opt_flags))
+ return ret;
+
+ if (!(opt_flags & FW_OPT_NO_WARN))
+ dev_warn(device, "Falling back to sysfs fallback for: %s\n",
+ name);
+ else
+ dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
+ name);
+ return fw_load_from_user_helper(fw, name, device, opt_flags);
+}
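/*
 * Hedged illustration, not part of the patch: the ignore_sysfs_fallback and
 * force_sysfs_fallback knobs described above are plain sysctls (registered
 * under kernel/firmware_config by fallback_table.c further below).  This is
 * a userspace sketch of flipping one of them; it assumes CONFIG_SYSCTL and a
 * mounted procfs.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/firmware_config/force_sysfs_fallback", "w");

	if (!f)
		return 1;	/* knob absent or insufficient permissions */
	fputs("1\n", f);	/* behaves like CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y */
	return fclose(f) ? 1 : 0;
}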
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
new file mode 100644
index 000000000..144148595
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_FALLBACK_H
+#define __FIRMWARE_FALLBACK_H
+
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+#include "firmware.h"
+#include "sysfs.h"
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+int firmware_fallback_sysfs(struct firmware *fw, const char *name,
+ struct device *device,
+ u32 opt_flags,
+ int ret);
+void kill_pending_fw_fallback_reqs(bool only_kill_custom);
+
+void fw_fallback_set_cache_timeout(void);
+void fw_fallback_set_default_timeout(void);
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
+ struct device *device,
+ u32 opt_flags,
+ int ret)
+{
+ /* Keep carrying over the same error */
+ return ret;
+}
+
+static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline void fw_fallback_set_cache_timeout(void) { }
+static inline void fw_fallback_set_default_timeout(void) { }
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+int firmware_fallback_platform(struct fw_priv *fw_priv);
+#else
+static inline int firmware_fallback_platform(struct fw_priv *fw_priv)
+{
+ return -ENOENT;
+}
+#endif
+
+#endif /* __FIRMWARE_FALLBACK_H */
diff --git a/drivers/base/firmware_loader/fallback_platform.c b/drivers/base/firmware_loader/fallback_platform.c
new file mode 100644
index 000000000..00af99f0a
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback_platform.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/efi_embedded_fw.h>
+#include <linux/property.h>
+#include <linux/security.h>
+#include <linux/vmalloc.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+int firmware_fallback_platform(struct fw_priv *fw_priv)
+{
+ const u8 *data;
+ size_t size;
+ int rc;
+
+ if (!(fw_priv->opt_flags & FW_OPT_FALLBACK_PLATFORM))
+ return -ENOENT;
+
+ rc = security_kernel_load_data(LOADING_FIRMWARE, true);
+ if (rc)
+ return rc;
+
+ rc = efi_get_embedded_fw(fw_priv->fw_name, &data, &size);
+ if (rc)
+ return rc; /* rc == -ENOENT when the fw was not found */
+
+ if (fw_priv->data && size > fw_priv->allocated_size)
+ return -ENOMEM;
+
+ rc = security_kernel_post_load_data((u8 *)data, size, LOADING_FIRMWARE,
+ "platform");
+ if (rc)
+ return rc;
+
+ if (!fw_priv->data)
+ fw_priv->data = vmalloc(size);
+ if (!fw_priv->data)
+ return -ENOMEM;
+
+ memcpy(fw_priv->data, data, size);
+ fw_priv->size = size;
+ fw_state_done(fw_priv);
+ return 0;
+}
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
new file mode 100644
index 000000000..e5ac098d0
--- /dev/null
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/security.h>
+#include <linux/highmem.h>
+#include <linux/umh.h>
+#include <linux/sysctl.h>
+
+#include "fallback.h"
+#include "firmware.h"
+
+/*
+ * firmware fallback configuration table
+ */
+
+struct firmware_fallback_config fw_fallback_config = {
+ .force_sysfs_fallback = IS_ENABLED(CONFIG_FW_LOADER_USER_HELPER_FALLBACK),
+ .loading_timeout = 60,
+ .old_timeout = 60,
+};
+EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table firmware_config_table[] = {
+ {
+ .procname = "force_sysfs_fallback",
+ .data = &fw_fallback_config.force_sysfs_fallback,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "ignore_sysfs_fallback",
+ .data = &fw_fallback_config.ignore_sysfs_fallback,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static struct ctl_table_header *firmware_config_sysct_table_header;
+int register_firmware_config_sysctl(void)
+{
+ firmware_config_sysct_table_header =
+ register_sysctl("kernel/firmware_config",
+ firmware_config_table);
+ if (!firmware_config_sysct_table_header)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+void unregister_firmware_config_sysctl(void)
+{
+ unregister_sysctl_table(firmware_config_sysct_table_header);
+ firmware_config_sysct_table_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
new file mode 100644
index 000000000..fe77e91c3
--- /dev/null
+++ b/drivers/base/firmware_loader/firmware.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_LOADER_H
+#define __FIRMWARE_LOADER_H
+
+#include <linux/bitops.h>
+#include <linux/firmware.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+
+#include <generated/utsrelease.h>
+
+/**
+ * enum fw_opt - options to control firmware loading behaviour
+ *
+ * @FW_OPT_UEVENT: Enables the fallback mechanism to send a kobject uevent
+ * when the firmware is not found. Userspace is in charge to load the
+ * firmware using the sysfs loading facility.
+ * @FW_OPT_NOWAIT: Used to describe the firmware request is asynchronous.
+ * @FW_OPT_USERHELPER: Enable the fallback mechanism, in case the direct
+ * filesystem lookup fails at finding the firmware. For details refer to
+ * firmware_fallback_sysfs().
+ * @FW_OPT_NO_WARN: Quiet, avoid printing warning messages.
+ * @FW_OPT_NOCACHE: Disables firmware caching. Firmware caching is used to
+ * cache the firmware upon suspend, so that upon resume races against the
+ *	firmware file lookup on storage are avoided. Used for calls where the
+ * file may be too big, or where the driver takes charge of its own
+ * firmware caching mechanism.
+ * @FW_OPT_NOFALLBACK_SYSFS: Disable the sysfs fallback mechanism. Takes
+ * precedence over &FW_OPT_UEVENT and &FW_OPT_USERHELPER.
+ * @FW_OPT_FALLBACK_PLATFORM: Enable fallback to device fw copy embedded in
+ * the platform's main firmware. If both this fallback and the sysfs
+ * fallback are enabled, then this fallback will be tried first.
+ * @FW_OPT_PARTIAL: Allow partial read of firmware instead of needing to read
+ * entire file.
+ */
+enum fw_opt {
+ FW_OPT_UEVENT = BIT(0),
+ FW_OPT_NOWAIT = BIT(1),
+ FW_OPT_USERHELPER = BIT(2),
+ FW_OPT_NO_WARN = BIT(3),
+ FW_OPT_NOCACHE = BIT(4),
+ FW_OPT_NOFALLBACK_SYSFS = BIT(5),
+ FW_OPT_FALLBACK_PLATFORM = BIT(6),
+ FW_OPT_PARTIAL = BIT(7),
+};
+
+enum fw_status {
+ FW_STATUS_UNKNOWN,
+ FW_STATUS_LOADING,
+ FW_STATUS_DONE,
+ FW_STATUS_ABORTED,
+};
+
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct completion completion;
+ enum fw_status status;
+};
+
+struct fw_priv {
+ struct kref ref;
+ struct list_head list;
+ struct firmware_cache *fwc;
+ struct fw_state fw_st;
+ void *data;
+ size_t size;
+ size_t allocated_size;
+ size_t offset;
+ u32 opt_flags;
+#ifdef CONFIG_FW_LOADER_PAGED_BUF
+ bool is_paged_buf;
+ struct page **pages;
+ int nr_pages;
+ int page_array_size;
+#endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ bool need_uevent;
+ struct list_head pending_list;
+#endif
+ const char *fw_name;
+};
+
+extern struct mutex fw_lock;
+extern struct firmware_cache fw_cache;
+
+static inline bool __fw_state_check(struct fw_priv *fw_priv,
+ enum fw_status status)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
+ return fw_st->status == status;
+}
+
+static inline int __fw_state_wait_common(struct fw_priv *fw_priv, long timeout)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+ long ret;
+
+ ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static inline void __fw_state_set(struct fw_priv *fw_priv,
+ enum fw_status status)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ /*
+ * Doing this here ensures that the fw_priv is deleted from
+ * the pending list in all abort/done paths.
+ */
+ list_del_init(&fw_priv->pending_list);
+#endif
+ complete_all(&fw_st->completion);
+ }
+}
+
+static inline void fw_state_aborted(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_ABORTED);
+}
+
+static inline bool fw_state_is_aborted(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_ABORTED);
+}
+
+static inline void fw_state_start(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_LOADING);
+}
+
+static inline void fw_state_done(struct fw_priv *fw_priv)
+{
+ __fw_state_set(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_state_is_done(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_DONE);
+}
+
+static inline bool fw_state_is_loading(struct fw_priv *fw_priv)
+{
+ return __fw_state_check(fw_priv, FW_STATUS_LOADING);
+}
+
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags);
+int assign_fw(struct firmware *fw, struct device *device);
+void free_fw_priv(struct fw_priv *fw_priv);
+void fw_state_init(struct fw_priv *fw_priv);
+
+#ifdef CONFIG_FW_LOADER
+bool firmware_is_builtin(const struct firmware *fw);
+bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
+ void *buf, size_t size);
+#else /* module case */
+static inline bool firmware_is_builtin(const struct firmware *fw)
+{
+ return false;
+}
+static inline bool firmware_request_builtin_buf(struct firmware *fw,
+ const char *name,
+ void *buf, size_t size)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_FW_LOADER_PAGED_BUF
+void fw_free_paged_buf(struct fw_priv *fw_priv);
+int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
+int fw_map_paged_buf(struct fw_priv *fw_priv);
+bool fw_is_paged_buf(struct fw_priv *fw_priv);
+#else
+static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
+static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
+static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
+#endif
+
+#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
new file mode 100644
index 000000000..7c3590fd9
--- /dev/null
+++ b/drivers/base/firmware_loader/main.c
@@ -0,0 +1,1599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * main.c - Multi purpose firmware loading support
+ *
+ * Copyright (c) 2003 Manuel Estrada Sainz
+ *
+ * Please see Documentation/driver-api/firmware/ for more information.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/kernel_read_file.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/highmem.h>
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/async.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+#include <linux/reboot.h>
+#include <linux/security.h>
+#include <linux/zstd.h>
+#include <linux/xz.h>
+
+#include <generated/utsrelease.h>
+
+#include "../base.h"
+#include "firmware.h"
+#include "fallback.h"
+
+MODULE_AUTHOR("Manuel Estrada Sainz");
+MODULE_DESCRIPTION("Multi purpose firmware loading support");
+MODULE_LICENSE("GPL");
+
+struct firmware_cache {
+	/* fw_priv instances will be added to the list below */
+ spinlock_t lock;
+ struct list_head head;
+ int state;
+
+#ifdef CONFIG_FW_CACHE
+ /*
+	 * Names of firmware images which have been cached successfully
+	 * will be added to the list below, so that the device uncache
+	 * helper can track which firmware images have been cached
+	 * before.
+ */
+ spinlock_t name_lock;
+ struct list_head fw_names;
+
+ struct delayed_work work;
+
+ struct notifier_block pm_notify;
+#endif
+};
+
+struct fw_cache_entry {
+ struct list_head list;
+ const char *name;
+};
+
+struct fw_name_devm {
+ unsigned long magic;
+ const char *name;
+};
+
+static inline struct fw_priv *to_fw_priv(struct kref *ref)
+{
+ return container_of(ref, struct fw_priv, ref);
+}
+
+#define FW_LOADER_NO_CACHE 0
+#define FW_LOADER_START_CACHE 1
+
+/* fw_lock could be moved to 'struct fw_sysfs' but since it only
+ * guards a few corner cases a global lock should be OK */
+DEFINE_MUTEX(fw_lock);
+
+struct firmware_cache fw_cache;
+
+void fw_state_init(struct fw_priv *fw_priv)
+{
+ struct fw_state *fw_st = &fw_priv->fw_st;
+
+ init_completion(&fw_st->completion);
+ fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline int fw_state_wait(struct fw_priv *fw_priv)
+{
+ return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
+}
+
+static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
+
+static struct fw_priv *__allocate_fw_priv(const char *fw_name,
+ struct firmware_cache *fwc,
+ void *dbuf,
+ size_t size,
+ size_t offset,
+ u32 opt_flags)
+{
+ struct fw_priv *fw_priv;
+
+ /* For a partial read, the buffer must be preallocated. */
+ if ((opt_flags & FW_OPT_PARTIAL) && !dbuf)
+ return NULL;
+
+ /* Only partial reads are allowed to use an offset. */
+ if (offset != 0 && !(opt_flags & FW_OPT_PARTIAL))
+ return NULL;
+
+ fw_priv = kzalloc(sizeof(*fw_priv), GFP_ATOMIC);
+ if (!fw_priv)
+ return NULL;
+
+ fw_priv->fw_name = kstrdup_const(fw_name, GFP_ATOMIC);
+ if (!fw_priv->fw_name) {
+ kfree(fw_priv);
+ return NULL;
+ }
+
+ kref_init(&fw_priv->ref);
+ fw_priv->fwc = fwc;
+ fw_priv->data = dbuf;
+ fw_priv->allocated_size = size;
+ fw_priv->offset = offset;
+ fw_priv->opt_flags = opt_flags;
+ fw_state_init(fw_priv);
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ INIT_LIST_HEAD(&fw_priv->pending_list);
+#endif
+
+ pr_debug("%s: fw-%s fw_priv=%p\n", __func__, fw_name, fw_priv);
+
+ return fw_priv;
+}
+
+static struct fw_priv *__lookup_fw_priv(const char *fw_name)
+{
+ struct fw_priv *tmp;
+ struct firmware_cache *fwc = &fw_cache;
+
+ list_for_each_entry(tmp, &fwc->head, list)
+ if (!strcmp(tmp->fw_name, fw_name))
+ return tmp;
+ return NULL;
+}
+
+/* Returns 1 for batching firmware requests with the same name */
+int alloc_lookup_fw_priv(const char *fw_name, struct firmware_cache *fwc,
+ struct fw_priv **fw_priv, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags)
+{
+ struct fw_priv *tmp;
+
+ spin_lock(&fwc->lock);
+ /*
+ * Do not merge requests that are marked to be non-cached or
+ * are performing partial reads.
+ */
+ if (!(opt_flags & (FW_OPT_NOCACHE | FW_OPT_PARTIAL))) {
+ tmp = __lookup_fw_priv(fw_name);
+ if (tmp) {
+ kref_get(&tmp->ref);
+ spin_unlock(&fwc->lock);
+ *fw_priv = tmp;
+ pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+ return 1;
+ }
+ }
+
+ tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size, offset, opt_flags);
+ if (tmp) {
+ INIT_LIST_HEAD(&tmp->list);
+ if (!(opt_flags & FW_OPT_NOCACHE))
+ list_add(&tmp->list, &fwc->head);
+ }
+ spin_unlock(&fwc->lock);
+
+ *fw_priv = tmp;
+
+ return tmp ? 0 : -ENOMEM;
+}
+
+static void __free_fw_priv(struct kref *ref)
+ __releases(&fwc->lock)
+{
+ struct fw_priv *fw_priv = to_fw_priv(ref);
+ struct firmware_cache *fwc = fw_priv->fwc;
+
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
+
+ list_del(&fw_priv->list);
+ spin_unlock(&fwc->lock);
+
+ if (fw_is_paged_buf(fw_priv))
+ fw_free_paged_buf(fw_priv);
+ else if (!fw_priv->allocated_size)
+ vfree(fw_priv->data);
+
+ kfree_const(fw_priv->fw_name);
+ kfree(fw_priv);
+}
+
+void free_fw_priv(struct fw_priv *fw_priv)
+{
+ struct firmware_cache *fwc = fw_priv->fwc;
+ spin_lock(&fwc->lock);
+ if (!kref_put(&fw_priv->ref, __free_fw_priv))
+ spin_unlock(&fwc->lock);
+}
+
+#ifdef CONFIG_FW_LOADER_PAGED_BUF
+bool fw_is_paged_buf(struct fw_priv *fw_priv)
+{
+ return fw_priv->is_paged_buf;
+}
+
+void fw_free_paged_buf(struct fw_priv *fw_priv)
+{
+ int i;
+
+ if (!fw_priv->pages)
+ return;
+
+ vunmap(fw_priv->data);
+
+ for (i = 0; i < fw_priv->nr_pages; i++)
+ __free_page(fw_priv->pages[i]);
+ kvfree(fw_priv->pages);
+ fw_priv->pages = NULL;
+ fw_priv->page_array_size = 0;
+ fw_priv->nr_pages = 0;
+ fw_priv->data = NULL;
+ fw_priv->size = 0;
+}
+
+int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed)
+{
+ /* If the array of pages is too small, grow it */
+ if (fw_priv->page_array_size < pages_needed) {
+ int new_array_size = max(pages_needed,
+ fw_priv->page_array_size * 2);
+ struct page **new_pages;
+
+ new_pages = kvmalloc_array(new_array_size, sizeof(void *),
+ GFP_KERNEL);
+ if (!new_pages)
+ return -ENOMEM;
+ memcpy(new_pages, fw_priv->pages,
+ fw_priv->page_array_size * sizeof(void *));
+ memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
+ (new_array_size - fw_priv->page_array_size));
+ kvfree(fw_priv->pages);
+ fw_priv->pages = new_pages;
+ fw_priv->page_array_size = new_array_size;
+ }
+
+ while (fw_priv->nr_pages < pages_needed) {
+ fw_priv->pages[fw_priv->nr_pages] =
+ alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+
+ if (!fw_priv->pages[fw_priv->nr_pages])
+ return -ENOMEM;
+ fw_priv->nr_pages++;
+ }
+
+ return 0;
+}
+
+int fw_map_paged_buf(struct fw_priv *fw_priv)
+{
+	/* a paged buffer should be mapped/unmapped only once */
+ if (!fw_priv->pages)
+ return 0;
+
+ vunmap(fw_priv->data);
+ fw_priv->data = vmap(fw_priv->pages, fw_priv->nr_pages, 0,
+ PAGE_KERNEL_RO);
+ if (!fw_priv->data)
+ return -ENOMEM;
+
+ return 0;
+}
+#endif
+
+/*
+ * ZSTD-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+static int fw_decompress_zstd(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ size_t len, out_size, workspace_size;
+ void *workspace, *out_buf;
+ zstd_dctx *ctx;
+ int err;
+
+ if (fw_priv->allocated_size) {
+ out_size = fw_priv->allocated_size;
+ out_buf = fw_priv->data;
+ } else {
+ zstd_frame_header params;
+
+ if (zstd_get_frame_header(&params, in_buffer, in_size) ||
+ params.frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN) {
+ dev_dbg(dev, "%s: invalid zstd header\n", __func__);
+ return -EINVAL;
+ }
+ out_size = params.frameContentSize;
+ out_buf = vzalloc(out_size);
+ if (!out_buf)
+ return -ENOMEM;
+ }
+
+ workspace_size = zstd_dctx_workspace_bound();
+ workspace = kvzalloc(workspace_size, GFP_KERNEL);
+ if (!workspace) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ctx = zstd_init_dctx(workspace, workspace_size);
+ if (!ctx) {
+ dev_dbg(dev, "%s: failed to initialize context\n", __func__);
+ err = -EINVAL;
+ goto error;
+ }
+
+ len = zstd_decompress_dctx(ctx, out_buf, out_size, in_buffer, in_size);
+ if (zstd_is_error(len)) {
+ dev_dbg(dev, "%s: failed to decompress: %d\n", __func__,
+ zstd_get_error_code(len));
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!fw_priv->allocated_size)
+ fw_priv->data = out_buf;
+ fw_priv->size = len;
+ err = 0;
+
+ error:
+ kvfree(workspace);
+ if (err && !fw_priv->allocated_size)
+ vfree(out_buf);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS_ZSTD */
+
+/*
+ * XZ-compressed firmware support
+ */
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
+/* show an error and return the standard error code */
+static int fw_decompress_xz_error(struct device *dev, enum xz_ret xz_ret)
+{
+ if (xz_ret != XZ_STREAM_END) {
+ dev_warn(dev, "xz decompression failed (xz_ret=%d)\n", xz_ret);
+ return xz_ret == XZ_MEM_ERROR ? -ENOMEM : -EINVAL;
+ }
+ return 0;
+}
+
+/* single-shot decompression onto the pre-allocated buffer */
+static int fw_decompress_xz_single(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf xz_buf;
+ enum xz_ret xz_ret;
+
+ xz_dec = xz_dec_init(XZ_SINGLE, (u32)-1);
+ if (!xz_dec)
+ return -ENOMEM;
+
+ xz_buf.in_size = in_size;
+ xz_buf.in = in_buffer;
+ xz_buf.in_pos = 0;
+ xz_buf.out_size = fw_priv->allocated_size;
+ xz_buf.out = fw_priv->data;
+ xz_buf.out_pos = 0;
+
+ xz_ret = xz_dec_run(xz_dec, &xz_buf);
+ xz_dec_end(xz_dec);
+
+ fw_priv->size = xz_buf.out_pos;
+ return fw_decompress_xz_error(dev, xz_ret);
+}
+
+/* decompression on paged buffer and map it */
+static int fw_decompress_xz_pages(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+ struct xz_dec *xz_dec;
+ struct xz_buf xz_buf;
+ enum xz_ret xz_ret;
+ struct page *page;
+ int err = 0;
+
+ xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
+ if (!xz_dec)
+ return -ENOMEM;
+
+ xz_buf.in_size = in_size;
+ xz_buf.in = in_buffer;
+ xz_buf.in_pos = 0;
+
+ fw_priv->is_paged_buf = true;
+ fw_priv->size = 0;
+ do {
+ if (fw_grow_paged_buf(fw_priv, fw_priv->nr_pages + 1)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* decompress onto the new allocated page */
+ page = fw_priv->pages[fw_priv->nr_pages - 1];
+ xz_buf.out = kmap_local_page(page);
+ xz_buf.out_pos = 0;
+ xz_buf.out_size = PAGE_SIZE;
+ xz_ret = xz_dec_run(xz_dec, &xz_buf);
+ kunmap_local(xz_buf.out);
+ fw_priv->size += xz_buf.out_pos;
+ /* partial decompression means either end or error */
+ if (xz_buf.out_pos != PAGE_SIZE)
+ break;
+ } while (xz_ret == XZ_OK);
+
+ err = fw_decompress_xz_error(dev, xz_ret);
+ if (!err)
+ err = fw_map_paged_buf(fw_priv);
+
+ out:
+ xz_dec_end(xz_dec);
+ return err;
+}
+
+static int fw_decompress_xz(struct device *dev, struct fw_priv *fw_priv,
+ size_t in_size, const void *in_buffer)
+{
+	/* if the buffer is pre-allocated, we can decompress in single-shot mode */
+ if (fw_priv->data)
+ return fw_decompress_xz_single(dev, fw_priv, in_size, in_buffer);
+ else
+ return fw_decompress_xz_pages(dev, fw_priv, in_size, in_buffer);
+}
+#endif /* CONFIG_FW_LOADER_COMPRESS_XZ */
+
+/* direct firmware loading support */
+static char fw_path_para[256];
+static const char * const fw_path[] = {
+ fw_path_para,
+ "/lib/firmware/updates/" UTS_RELEASE,
+ "/lib/firmware/updates",
+ "/lib/firmware/" UTS_RELEASE,
+ "/lib/firmware"
+};
+
+/*
+ * Typical usage is to pass 'firmware_class.path=$CUSTOMIZED_PATH' on the
+ * kernel command line, because firmware_class is generally built into the
+ * kernel rather than built as a module.
+ */
+module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
+MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
+
+static int
+fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
+ const char *suffix,
+ int (*decompress)(struct device *dev,
+ struct fw_priv *fw_priv,
+ size_t in_size,
+ const void *in_buffer))
+{
+ size_t size;
+ int i, len;
+ int rc = -ENOENT;
+ char *path;
+ size_t msize = INT_MAX;
+ void *buffer = NULL;
+
+ /* Already populated data member means we're loading into a buffer */
+ if (!decompress && fw_priv->data) {
+ buffer = fw_priv->data;
+ msize = fw_priv->allocated_size;
+ }
+
+ path = __getname();
+ if (!path)
+ return -ENOMEM;
+
+ wait_for_initramfs();
+ for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+ size_t file_size = 0;
+ size_t *file_size_ptr = NULL;
+
+ /* skip the unset customized path */
+ if (!fw_path[i][0])
+ continue;
+
+ len = snprintf(path, PATH_MAX, "%s/%s%s",
+ fw_path[i], fw_priv->fw_name, suffix);
+ if (len >= PATH_MAX) {
+ rc = -ENAMETOOLONG;
+ break;
+ }
+
+ fw_priv->size = 0;
+
+ /*
+ * The total file size is only examined when doing a partial
+ * read; the "full read" case needs to fail if the whole
+ * firmware was not completely loaded.
+ */
+ if ((fw_priv->opt_flags & FW_OPT_PARTIAL) && buffer)
+ file_size_ptr = &file_size;
+
+ /* load firmware files from the mount namespace of init */
+ rc = kernel_read_file_from_path_initns(path, fw_priv->offset,
+ &buffer, msize,
+ file_size_ptr,
+ READING_FIRMWARE);
+ if (rc < 0) {
+ if (rc != -ENOENT)
+ dev_warn(device, "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_dbg(device, "loading %s failed for no such file or directory.\n",
+ path);
+ continue;
+ }
+ size = rc;
+ rc = 0;
+
+ dev_dbg(device, "Loading firmware from %s\n", path);
+ if (decompress) {
+ dev_dbg(device, "f/w decompressing %s\n",
+ fw_priv->fw_name);
+ rc = decompress(device, fw_priv, size, buffer);
+ /* discard the superfluous original content */
+ vfree(buffer);
+ buffer = NULL;
+ if (rc) {
+ fw_free_paged_buf(fw_priv);
+ continue;
+ }
+ } else {
+ dev_dbg(device, "direct-loading %s\n",
+ fw_priv->fw_name);
+ if (!fw_priv->data)
+ fw_priv->data = buffer;
+ fw_priv->size = size;
+ }
+ fw_state_done(fw_priv);
+ break;
+ }
+ __putname(path);
+
+ return rc;
+}
+
+/* firmware holds the ownership of pages */
+static void firmware_free_data(const struct firmware *fw)
+{
+ /* Loaded directly? */
+ if (!fw->priv) {
+ vfree(fw->data);
+ return;
+ }
+ free_fw_priv(fw->priv);
+}
+
+/* store the firmware data and size from fw_priv into the firmware struct */
+static void fw_set_page_data(struct fw_priv *fw_priv, struct firmware *fw)
+{
+ fw->priv = fw_priv;
+ fw->size = fw_priv->size;
+ fw->data = fw_priv->data;
+
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name, fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
+}
+
+#ifdef CONFIG_FW_CACHE
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+ struct fw_name_devm *fwn = res;
+
+ if (fwn->magic == (unsigned long)&fw_cache)
+ pr_debug("%s: fw_name-%s devm-%p released\n",
+ __func__, fwn->name, res);
+ kfree_const(fwn->name);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+
+ return (fwn->magic == (unsigned long)&fw_cache) &&
+ !strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+ const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = devres_find(dev, fw_name_devm_release,
+ fw_devm_match, (void *)name);
+ return fwn;
+}
+
+static bool fw_cache_is_setup(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = fw_find_devm_name(dev, name);
+ if (fwn)
+ return true;
+
+ return false;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ if (fw_cache_is_setup(dev, name))
+ return 0;
+
+ fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm),
+ GFP_KERNEL);
+ if (!fwn)
+ return -ENOMEM;
+ fwn->name = kstrdup_const(name, GFP_KERNEL);
+ if (!fwn->name) {
+ devres_free(fwn);
+ return -ENOMEM;
+ }
+
+ fwn->magic = (unsigned long)&fw_cache;
+ devres_add(dev, fwn);
+
+ return 0;
+}
+#else
+static bool fw_cache_is_setup(struct device *dev, const char *name)
+{
+ return false;
+}
+
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ return 0;
+}
+#endif
+
+int assign_fw(struct firmware *fw, struct device *device)
+{
+ struct fw_priv *fw_priv = fw->priv;
+ int ret;
+
+ mutex_lock(&fw_lock);
+ if (!fw_priv->size || fw_state_is_aborted(fw_priv)) {
+ mutex_unlock(&fw_lock);
+ return -ENOENT;
+ }
+
+ /*
+	 * Add the firmware name to the devres list so that we can auto-cache
+	 * and uncache the firmware for the device.
+	 *
+	 * The device may have been deleted already, but the problem
+	 * should be fixed in devres or the driver core.
+ */
+ /* don't cache firmware handled without uevent */
+ if (device && (fw_priv->opt_flags & FW_OPT_UEVENT) &&
+ !(fw_priv->opt_flags & FW_OPT_NOCACHE)) {
+ ret = fw_add_devm_name(device, fw_priv->fw_name);
+ if (ret) {
+ mutex_unlock(&fw_lock);
+ return ret;
+ }
+ }
+
+ /*
+ * After caching firmware image is started, let it piggyback
+ * on request firmware.
+ */
+ if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
+ fw_priv->fwc->state == FW_LOADER_START_CACHE)
+ fw_cache_piggyback_on_request(fw_priv);
+
+ /* pass the pages buffer to driver at the last minute */
+ fw_set_page_data(fw_priv, fw);
+ mutex_unlock(&fw_lock);
+ return 0;
+}
+
+/* prepare the firmware and fw_priv structs;
+ * return 0 if a firmware is already assigned, 1 if one needs to be loaded,
+ * or a negative error code
+ */
+static int
+_request_firmware_prepare(struct firmware **firmware_p, const char *name,
+ struct device *device, void *dbuf, size_t size,
+ size_t offset, u32 opt_flags)
+{
+ struct firmware *firmware;
+ struct fw_priv *fw_priv;
+ int ret;
+
+ *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+ if (!firmware) {
+ dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (firmware_request_builtin_buf(firmware, name, dbuf, size)) {
+ dev_dbg(device, "using built-in %s\n", name);
+ return 0; /* assigned */
+ }
+
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+ offset, opt_flags);
+
+ /*
+ * bind with 'priv' now to avoid warning in failure path
+ * of requesting firmware.
+ */
+ firmware->priv = fw_priv;
+
+ if (ret > 0) {
+ ret = fw_state_wait(fw_priv);
+ if (!ret) {
+ fw_set_page_data(fw_priv, firmware);
+ return 0; /* assigned */
+ }
+ }
+
+ if (ret < 0)
+ return ret;
+ return 1; /* need to load */
+}
+
+/*
+ * Batched requests need only one wake; we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct fw_priv and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+ struct fw_priv *fw_priv;
+
+ /* Loaded directly? */
+ if (!fw || !fw->priv)
+ return;
+
+ fw_priv = fw->priv;
+ mutex_lock(&fw_lock);
+ if (!fw_state_is_aborted(fw_priv))
+ fw_state_aborted(fw_priv);
+ mutex_unlock(&fw_lock);
+}
+
+/* called from request_firmware() and request_firmware_work_func() */
+static int
+_request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device, void *buf, size_t size,
+ size_t offset, u32 opt_flags)
+{
+ struct firmware *fw = NULL;
+ struct cred *kern_cred = NULL;
+ const struct cred *old_cred;
+ bool nondirect = false;
+ int ret;
+
+ if (!firmware_p)
+ return -EINVAL;
+
+ if (!name || name[0] == '\0') {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = _request_firmware_prepare(&fw, name, device, buf, size,
+ offset, opt_flags);
+ if (ret <= 0) /* error or already assigned */
+ goto out;
+
+ /*
+ * We are about to try to access the firmware file. Because we may have been
+ * called by a driver when serving an unrelated request from userland, we use
+ * the kernel credentials to read the file.
+ */
+ kern_cred = prepare_kernel_cred(NULL);
+ if (!kern_cred) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ old_cred = override_creds(kern_cred);
+
+ ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
+
+ /* Only full reads can support decompression, platform, and sysfs. */
+ if (!(opt_flags & FW_OPT_PARTIAL))
+ nondirect = true;
+
+#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
+#endif
+#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
+ fw_decompress_xz);
+#endif
+ if (ret == -ENOENT && nondirect)
+ ret = firmware_fallback_platform(fw->priv);
+
+ if (ret) {
+ if (!(opt_flags & FW_OPT_NO_WARN))
+ dev_warn(device,
+ "Direct firmware load for %s failed with error %d\n",
+ name, ret);
+ if (nondirect)
+ ret = firmware_fallback_sysfs(fw, name, device,
+ opt_flags, ret);
+ } else
+ ret = assign_fw(fw, device);
+
+ revert_creds(old_cred);
+ put_cred(kern_cred);
+
+ out:
+ if (ret < 0) {
+ fw_abort_batch_reqs(fw);
+ release_firmware(fw);
+ fw = NULL;
+ }
+
+ *firmware_p = fw;
+ return ret;
+}
+
+/**
+ * request_firmware() - send firmware request and wait for it
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * @firmware_p will be used to return a firmware image by the name
+ * of @name for device @device.
+ *
+ * Should be called from user context where sleeping is allowed.
+ *
+ * @name will be used as $FIRMWARE in the uevent environment and
+ * should be distinctive enough not to be confused with any other
+ * firmware image for this or any other device.
+ *
+ * Caller must hold the reference count of @device.
+ *
+ * The function can be called safely inside device's suspend and
+ * resume callback.
+ **/
+int
+request_firmware(const struct firmware **firmware_p, const char *name,
+ struct device *device)
+{
+ int ret;
+
+ /* Need to pin this module until return */
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
+ FW_OPT_UEVENT);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_firmware);
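/*
 * Hedged driver-side sketch, not part of the patch: the usual
 * request/consume/release pattern from a probe() path.  The firmware name
 * and the example_load_fw() helper are hypothetical.
 */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example/board.bin", dev);
	if (ret)
		return ret;	/* not found, aborted, or out of memory */

	dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);
	/* ... push fw->data / fw->size to the hardware here ... */

	release_firmware(fw);	/* drop the kernel's copy when done */
	return 0;
}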
+
+/**
+ * firmware_request_nowarn() - request for an optional fw module
+ * @firmware: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function is similar in behaviour to request_firmware(), except it
+ * doesn't produce warning messages when the file is not found. The sysfs
+ * fallback mechanism is enabled if direct filesystem lookup fails. However,
+ * failures to find the firmware file with it are still suppressed. It is
+ * therefore up to the driver to check for the return value of this call and to
+ * decide when to inform the users of errors.
+ **/
+int firmware_request_nowarn(const struct firmware **firmware, const char *name,
+ struct device *device)
+{
+ int ret;
+
+ /* Need to pin this module until return */
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware, name, device, NULL, 0, 0,
+ FW_OPT_UEVENT | FW_OPT_NO_WARN);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_nowarn);
+
+/**
+ * request_firmware_direct() - load firmware directly without usermode helper
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function works pretty much like request_firmware(), but this doesn't
+ * fall back to usermode helper even if the firmware couldn't be loaded
+ * directly from the filesystem. Hence it's useful for loading optional
+ * firmware, which isn't always present, without the extra long udev timeouts.
+ **/
+int request_firmware_direct(const struct firmware **firmware_p,
+ const char *name, struct device *device)
+{
+ int ret;
+
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, NULL, 0, 0,
+ FW_OPT_UEVENT | FW_OPT_NO_WARN |
+ FW_OPT_NOFALLBACK_SYSFS);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(request_firmware_direct);
+
+/**
+ * firmware_request_platform() - request firmware with platform-fw fallback
+ * @firmware: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ *
+ * This function is similar in behaviour to request_firmware, except that if
+ * direct filesystem lookup fails, it will fallback to looking for a copy of the
+ * requested firmware embedded in the platform's main (e.g. UEFI) firmware.
+ **/
+int firmware_request_platform(const struct firmware **firmware,
+ const char *name, struct device *device)
+{
+ int ret;
+
+ /* Need to pin this module until return */
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware, name, device, NULL, 0, 0,
+ FW_OPT_UEVENT | FW_OPT_FALLBACK_PLATFORM);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_platform);
+
+/**
+ * firmware_request_cache() - cache firmware for suspend so resume can use it
+ * @name: name of firmware file
+ * @device: device for which the firmware should be cached
+ *
+ * Some devices have an optimization that lets them avoid loading firmware on
+ * every system reboot. That optimization may still require the firmware to be
+ * present on resume from suspend. This routine can be used to ensure the
+ * firmware is present on resume from suspend in these situations. This helper
+ * is not compatible with drivers which use
+ * request_firmware_into_buf() or request_firmware_nowait() with no uevent set.
+ **/
+int firmware_request_cache(struct device *device, const char *name)
+{
+ int ret;
+
+ mutex_lock(&fw_lock);
+ ret = fw_add_devm_name(device, name);
+ mutex_unlock(&fw_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(firmware_request_cache);
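/*
 * Hedged sketch, not part of the patch: a driver that normally skips
 * firmware loading across reboots can still ask for the image to be cached
 * so it is available on resume from suspend.  The name and helper are
 * hypothetical.
 */
#include <linux/device.h>
#include <linux/firmware.h>

static int example_prepare_for_suspend(struct device *dev)
{
	/* Only records the name in devres; no file I/O happens here. */
	return firmware_request_cache(dev, "example/keepme.bin");
}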
+
+/**
+ * request_firmware_into_buf() - load firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * allocate a buffer to hold the firmware data. Instead, the firmware
+ * is loaded directly into the buffer pointed to by @buf and the @firmware_p
+ * data member is pointed at @buf.
+ *
+ * This function doesn't cache firmware either.
+ */
+int
+request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
+ struct device *device, void *buf, size_t size)
+{
+ int ret;
+
+ if (fw_cache_is_setup(device, name))
+ return -EOPNOTSUPP;
+
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, buf, size, 0,
+ FW_OPT_UEVENT | FW_OPT_NOCACHE);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_firmware_into_buf);
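/*
 * Hedged sketch, not part of the patch: loading into a caller-owned,
 * pre-allocated buffer (for example one that is already DMA-mapped).  The
 * buffer size, firmware name and helper are made up for illustration.
 */
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/sizes.h>

#define EXAMPLE_FW_MAX	SZ_64K	/* assumed upper bound for the image */

static int example_load_into_buf(struct device *dev, void *dma_buf)
{
	const struct firmware *fw;
	int ret;

	/* No caching, no extra allocation: the data lands directly in dma_buf. */
	ret = request_firmware_into_buf(&fw, "example/patch.bin", dev,
					dma_buf, EXAMPLE_FW_MAX);
	if (ret)
		return ret;

	/* Here fw->data == dma_buf and fw->size <= EXAMPLE_FW_MAX. */
	release_firmware(fw);
	return 0;
}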
+
+/**
+ * request_partial_firmware_into_buf() - load partial firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ * @offset: offset into file to read
+ *
+ * This function works pretty much like request_firmware_into_buf except
+ * it allows a partial read of the file.
+ */
+int
+request_partial_firmware_into_buf(const struct firmware **firmware_p,
+ const char *name, struct device *device,
+ void *buf, size_t size, size_t offset)
+{
+ int ret;
+
+ if (fw_cache_is_setup(device, name))
+ return -EOPNOTSUPP;
+
+ __module_get(THIS_MODULE);
+ ret = _request_firmware(firmware_p, name, device, buf, size, offset,
+ FW_OPT_UEVENT | FW_OPT_NOCACHE |
+ FW_OPT_PARTIAL);
+ module_put(THIS_MODULE);
+ return ret;
+}
+EXPORT_SYMBOL(request_partial_firmware_into_buf);
+
+/**
+ * release_firmware() - release the resource associated with a firmware image
+ * @fw: firmware resource to release
+ **/
+void release_firmware(const struct firmware *fw)
+{
+ if (fw) {
+ if (!firmware_is_builtin(fw))
+ firmware_free_data(fw);
+ kfree(fw);
+ }
+}
+EXPORT_SYMBOL(release_firmware);
+
+/* Async support */
+struct firmware_work {
+ struct work_struct work;
+ struct module *module;
+ const char *name;
+ struct device *device;
+ void *context;
+ void (*cont)(const struct firmware *fw, void *context);
+ u32 opt_flags;
+};
+
+static void request_firmware_work_func(struct work_struct *work)
+{
+ struct firmware_work *fw_work;
+ const struct firmware *fw;
+
+ fw_work = container_of(work, struct firmware_work, work);
+
+ _request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0, 0,
+ fw_work->opt_flags);
+ fw_work->cont(fw, fw_work->context);
+ put_device(fw_work->device); /* taken in request_firmware_nowait() */
+
+ module_put(fw_work->module);
+ kfree_const(fw_work->name);
+ kfree(fw_work);
+}
+
+/**
+ * request_firmware_nowait() - asynchronous version of request_firmware
+ * @module: module requesting the firmware
+ * @uevent: send a uevent so userspace can copy the firmware image if this
+ *        flag is non-zero; otherwise the firmware copy must be done manually.
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
+ * @context: will be passed over to @cont, and
+ * @fw may be %NULL if firmware request fails.
+ * @cont: function will be called asynchronously when the firmware
+ * request is over.
+ *
+ * Caller must hold the reference count of @device.
+ *
+ * Asynchronous variant of request_firmware() for user contexts:
+ *	- sleeps for as short periods as possible, since sleeping may
+ *	  increase the kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
+ *
+ * - can't sleep at all if @gfp is GFP_ATOMIC.
+ **/
+int
+request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ struct firmware_work *fw_work;
+
+ fw_work = kzalloc(sizeof(struct firmware_work), gfp);
+ if (!fw_work)
+ return -ENOMEM;
+
+ fw_work->module = module;
+ fw_work->name = kstrdup_const(name, gfp);
+ if (!fw_work->name) {
+ kfree(fw_work);
+ return -ENOMEM;
+ }
+ fw_work->device = device;
+ fw_work->context = context;
+ fw_work->cont = cont;
+ fw_work->opt_flags = FW_OPT_NOWAIT |
+ (uevent ? FW_OPT_UEVENT : FW_OPT_USERHELPER);
+
+ if (!uevent && fw_cache_is_setup(device, name)) {
+ kfree_const(fw_work->name);
+ kfree(fw_work);
+ return -EOPNOTSUPP;
+ }
+
+ if (!try_module_get(module)) {
+ kfree_const(fw_work->name);
+ kfree(fw_work);
+ return -EFAULT;
+ }
+
+ get_device(fw_work->device);
+ INIT_WORK(&fw_work->work, request_firmware_work_func);
+ schedule_work(&fw_work->work);
+ return 0;
+}
+EXPORT_SYMBOL(request_firmware_nowait);
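/*
 * Hedged sketch, not part of the patch: asynchronous loading so probe() does
 * not block on firmware availability.  The callback, firmware name and
 * helper are hypothetical.
 */
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static void example_fw_ready(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {	/* request failed or was aborted */
		dev_warn(dev, "example firmware not available\n");
		return;
	}
	dev_info(dev, "got %zu firmware bytes asynchronously\n", fw->size);
	release_firmware(fw);	/* the callback owns this reference */
}

static int example_async_request(struct device *dev)
{
	/* uevent = FW_ACTION_UEVENT: let udev/userspace satisfy the request */
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
				       "example/async.bin", dev, GFP_KERNEL,
				       dev, example_fw_ready);
}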
+
+#ifdef CONFIG_FW_CACHE
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
+
+/**
+ * cache_firmware() - cache one firmware image in kernel memory space
+ * @fw_name: the firmware image name
+ *
+ * Cache firmware in kernel memory so that drivers can use it when the
+ * system isn't ready for them to request the firmware image from userspace.
+ * Once this returns successfully, drivers can use request_firmware() or its
+ * nowait version to get the cached firmware without interacting
+ * with userspace.
+ *
+ * Return 0 if the firmware image has been cached successfully
+ * Return !0 otherwise
+ *
+ */
+static int cache_firmware(const char *fw_name)
+{
+ int ret;
+ const struct firmware *fw;
+
+ pr_debug("%s: %s\n", __func__, fw_name);
+
+ ret = request_firmware(&fw, fw_name, NULL);
+ if (!ret)
+ kfree(fw);
+
+ pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
+
+ return ret;
+}
+
+static struct fw_priv *lookup_fw_priv(const char *fw_name)
+{
+ struct fw_priv *tmp;
+ struct firmware_cache *fwc = &fw_cache;
+
+ spin_lock(&fwc->lock);
+ tmp = __lookup_fw_priv(fw_name);
+ spin_unlock(&fwc->lock);
+
+ return tmp;
+}
+
+/**
+ * uncache_firmware() - remove one cached firmware image
+ * @fw_name: the firmware image name
+ *
+ * Uncache one firmware image which has been cached successfully
+ * before.
+ *
+ * Return 0 if the firmware cache has been removed successfully
+ * Return !0 otherwise
+ *
+ */
+static int uncache_firmware(const char *fw_name)
+{
+ struct fw_priv *fw_priv;
+ struct firmware fw;
+
+ pr_debug("%s: %s\n", __func__, fw_name);
+
+ if (firmware_request_builtin(&fw, fw_name))
+ return 0;
+
+ fw_priv = lookup_fw_priv(fw_name);
+ if (fw_priv) {
+ free_fw_priv(fw_priv);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
+{
+ struct fw_cache_entry *fce;
+
+ fce = kzalloc(sizeof(*fce), GFP_ATOMIC);
+ if (!fce)
+ goto exit;
+
+ fce->name = kstrdup_const(name, GFP_ATOMIC);
+ if (!fce->name) {
+ kfree(fce);
+ fce = NULL;
+ goto exit;
+ }
+exit:
+ return fce;
+}
+
+static int __fw_entry_found(const char *name)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+
+ list_for_each_entry(fce, &fwc->fw_names, list) {
+ if (!strcmp(fce->name, name))
+ return 1;
+ }
+ return 0;
+}
+
+static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
+{
+ const char *name = fw_priv->fw_name;
+ struct firmware_cache *fwc = fw_priv->fwc;
+ struct fw_cache_entry *fce;
+
+ spin_lock(&fwc->name_lock);
+ if (__fw_entry_found(name))
+ goto found;
+
+ fce = alloc_fw_cache_entry(name);
+ if (fce) {
+ list_add(&fce->list, &fwc->fw_names);
+ kref_get(&fw_priv->ref);
+ pr_debug("%s: fw: %s\n", __func__, name);
+ }
+found:
+ spin_unlock(&fwc->name_lock);
+}
+
+static void free_fw_cache_entry(struct fw_cache_entry *fce)
+{
+ kfree_const(fce->name);
+ kfree(fce);
+}
+
+static void __async_dev_cache_fw_image(void *fw_entry,
+ async_cookie_t cookie)
+{
+ struct fw_cache_entry *fce = fw_entry;
+ struct firmware_cache *fwc = &fw_cache;
+ int ret;
+
+ ret = cache_firmware(fce->name);
+ if (ret) {
+ spin_lock(&fwc->name_lock);
+ list_del(&fce->list);
+ spin_unlock(&fwc->name_lock);
+
+ free_fw_cache_entry(fce);
+ }
+}
+
+/* called with dev->devres_lock held */
+static void dev_create_fw_entry(struct device *dev, void *res,
+ void *data)
+{
+ struct fw_name_devm *fwn = res;
+ const char *fw_name = fwn->name;
+ struct list_head *head = data;
+ struct fw_cache_entry *fce;
+
+ fce = alloc_fw_cache_entry(fw_name);
+ if (fce)
+ list_add(&fce->list, head);
+}
+
+static int devm_name_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+ return (fwn->magic == (unsigned long)match_data);
+}
+
+static void dev_cache_fw_image(struct device *dev, void *data)
+{
+ LIST_HEAD(todo);
+ struct fw_cache_entry *fce;
+ struct fw_cache_entry *fce_next;
+ struct firmware_cache *fwc = &fw_cache;
+
+ devres_for_each_res(dev, fw_name_devm_release,
+ devm_name_match, &fw_cache,
+ dev_create_fw_entry, &todo);
+
+ list_for_each_entry_safe(fce, fce_next, &todo, list) {
+ list_del(&fce->list);
+
+ spin_lock(&fwc->name_lock);
+ /* only one cache entry for one firmware */
+ if (!__fw_entry_found(fce->name)) {
+ list_add(&fce->list, &fwc->fw_names);
+ } else {
+ free_fw_cache_entry(fce);
+ fce = NULL;
+ }
+ spin_unlock(&fwc->name_lock);
+
+ if (fce)
+ async_schedule_domain(__async_dev_cache_fw_image,
+ (void *)fce,
+ &fw_cache_domain);
+ }
+}
+
+static void __device_uncache_fw_images(void)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+
+ spin_lock(&fwc->name_lock);
+ while (!list_empty(&fwc->fw_names)) {
+ fce = list_entry(fwc->fw_names.next,
+ struct fw_cache_entry, list);
+ list_del(&fce->list);
+ spin_unlock(&fwc->name_lock);
+
+ uncache_firmware(fce->name);
+ free_fw_cache_entry(fce);
+
+ spin_lock(&fwc->name_lock);
+ }
+ spin_unlock(&fwc->name_lock);
+}
+
+/**
+ * device_cache_fw_images() - cache devices' firmware
+ *
+ * If a device has called request_firmware() or its nowait version
+ * successfully before, the firmware names are recorded in the
+ * device's devres linked list, so device_cache_fw_images() can call
+ * cache_firmware() to cache those firmware images for the device;
+ * the device driver can then load its firmware easily even at a
+ * time when the system is not ready to complete firmware loading.
+ */
+static void device_cache_fw_images(void)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ DEFINE_WAIT(wait);
+
+ pr_debug("%s\n", __func__);
+
+ /* cancel uncache work */
+ cancel_delayed_work_sync(&fwc->work);
+
+ fw_fallback_set_cache_timeout();
+
+ mutex_lock(&fw_lock);
+ fwc->state = FW_LOADER_START_CACHE;
+ dpm_for_each_dev(NULL, dev_cache_fw_image);
+ mutex_unlock(&fw_lock);
+
+ /* wait for completion of caching firmware for all devices */
+ async_synchronize_full_domain(&fw_cache_domain);
+
+ fw_fallback_set_default_timeout();
+}
+
+/**
+ * device_uncache_fw_images() - uncache devices' firmware
+ *
+ * Uncache all firmware images which have been cached successfully
+ * by device_cache_fw_images() earlier.
+ */
+static void device_uncache_fw_images(void)
+{
+ pr_debug("%s\n", __func__);
+ __device_uncache_fw_images();
+}
+
+static void device_uncache_fw_images_work(struct work_struct *work)
+{
+ device_uncache_fw_images();
+}
+
+/**
+ * device_uncache_fw_images_delay() - uncache devices firmwares
+ * @delay: number of milliseconds to delay uncache device firmwares
+ *
+ * Uncache all devices' firmware images which have been cached successfully
+ * by device_cache_fw_images() after @delay milliseconds.
+ */
+static void device_uncache_fw_images_delay(unsigned long delay)
+{
+ queue_delayed_work(system_power_efficient_wq, &fw_cache.work,
+ msecs_to_jiffies(delay));
+}
+
+static int fw_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
+		/*
+		 * Kill pending fallback requests that use a custom fallback
+		 * mechanism, to avoid stalling suspend.
+		 */
+ kill_pending_fw_fallback_reqs(true);
+ device_cache_fw_images();
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ /*
+		 * In case system sleep failed and syscore_suspend() was
+		 * not called.
+ */
+ mutex_lock(&fw_lock);
+ fw_cache.state = FW_LOADER_NO_CACHE;
+ mutex_unlock(&fw_lock);
+
+ device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
+ break;
+ }
+
+ return 0;
+}
+
+/* stop caching firmware once syscore_suspend is reached */
+static int fw_suspend(void)
+{
+ fw_cache.state = FW_LOADER_NO_CACHE;
+ return 0;
+}
+
+static struct syscore_ops fw_syscore_ops = {
+ .suspend = fw_suspend,
+};
+
+static int __init register_fw_pm_ops(void)
+{
+ int ret;
+
+ spin_lock_init(&fw_cache.name_lock);
+ INIT_LIST_HEAD(&fw_cache.fw_names);
+
+ INIT_DELAYED_WORK(&fw_cache.work,
+ device_uncache_fw_images_work);
+
+ fw_cache.pm_notify.notifier_call = fw_pm_notify;
+ ret = register_pm_notifier(&fw_cache.pm_notify);
+ if (ret)
+ return ret;
+
+ register_syscore_ops(&fw_syscore_ops);
+
+ return ret;
+}
+
+static inline void unregister_fw_pm_ops(void)
+{
+ unregister_syscore_ops(&fw_syscore_ops);
+ unregister_pm_notifier(&fw_cache.pm_notify);
+}
+#else
+static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
+{
+}
+static inline int register_fw_pm_ops(void)
+{
+ return 0;
+}
+static inline void unregister_fw_pm_ops(void)
+{
+}
+#endif
+
+static void __init fw_cache_init(void)
+{
+ spin_lock_init(&fw_cache.lock);
+ INIT_LIST_HEAD(&fw_cache.head);
+ fw_cache.state = FW_LOADER_NO_CACHE;
+}
+
+static int fw_shutdown_notify(struct notifier_block *unused1,
+ unsigned long unused2, void *unused3)
+{
+	/*
+	 * Kill all pending fallback requests to avoid stalling shutdown and
+	 * to avoid a deadlock with the usermode_lock.
+	 */
+ kill_pending_fw_fallback_reqs(false);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block fw_shutdown_nb = {
+ .notifier_call = fw_shutdown_notify,
+};
+
+static int __init firmware_class_init(void)
+{
+ int ret;
+
+ /* No need to unfold these on exit */
+ fw_cache_init();
+
+ ret = register_fw_pm_ops();
+ if (ret)
+ return ret;
+
+ ret = register_reboot_notifier(&fw_shutdown_nb);
+ if (ret)
+ goto out;
+
+ return register_sysfs_loader();
+
+out:
+ unregister_fw_pm_ops();
+ return ret;
+}
+
+static void __exit firmware_class_exit(void)
+{
+ unregister_fw_pm_ops();
+ unregister_reboot_notifier(&fw_shutdown_nb);
+ unregister_sysfs_loader();
+}
+
+fs_initcall(firmware_class_init);
+module_exit(firmware_class_exit);
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
new file mode 100644
index 000000000..5b66b3d1f
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "sysfs.h"
+
+/*
+ * sysfs support for firmware loader
+ */
+
+void __fw_load_abort(struct fw_priv *fw_priv)
+{
+	/*
+	 * There is a small window in which userspace can write to 'loading'
+	 * between the load being done/aborted and the 'loading' attribute
+	 * disappearing.
+	 */
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ return;
+
+ fw_state_aborted(fw_priv);
+}
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", __firmware_loading_timeout());
+}
+
+/**
+ * timeout_store() - set number of seconds to wait for firmware
+ * @class: device class pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for timeout value
+ * @count: number of bytes in @buf
+ *
+ * Sets the number of seconds to wait for the firmware. Once
+ * this expires an error will be returned to the driver and no
+ * firmware will be provided.
+ *
+ * Note: zero means 'wait forever'.
+ **/
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+
+ if (tmp_loading_timeout < 0)
+ tmp_loading_timeout = 0;
+
+ __fw_fallback_set_timeout(tmp_loading_timeout);
+
+ return count;
+}
+static CLASS_ATTR_RW(timeout);
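+
+/*
+ * For example (a usage sketch, assuming sysfs is mounted at /sys), user
+ * space can extend the fallback timeout with something like:
+ *
+ *	echo 60 > /sys/class/firmware/timeout	# wait up to 60s; 0 = forever
+ */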
+
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(firmware_class);
+
+static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
+ return -ENOMEM;
+ if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
+ return -ENOMEM;
+ if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int err = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ err = do_firmware_uevent(fw_sysfs, env);
+ mutex_unlock(&fw_lock);
+ return err;
+}
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+static void fw_dev_release(struct device *dev)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+
+ if (fw_sysfs->fw_upload_priv)
+ fw_upload_free(fw_sysfs);
+
+ kfree(fw_sysfs);
+}
+
+static struct class firmware_class = {
+ .name = "firmware",
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+ .class_groups = firmware_class_groups,
+ .dev_uevent = firmware_uevent,
+#endif
+ .dev_release = fw_dev_release,
+};
+
+int register_sysfs_loader(void)
+{
+ int ret = class_register(&firmware_class);
+
+ if (ret != 0)
+ return ret;
+ return register_firmware_config_sysctl();
+}
+
+void unregister_sysfs_loader(void)
+{
+ unregister_firmware_config_sysctl();
+ class_unregister(&firmware_class);
+}
+
+static ssize_t firmware_loading_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ int loading = 0;
+
+ mutex_lock(&fw_lock);
+ if (fw_sysfs->fw_priv)
+ loading = fw_state_is_loading(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+
+ return sysfs_emit(buf, "%d\n", loading);
+}
+
+/**
+ * firmware_loading_store() - set value in the 'loading' control file
+ * @dev: device pointer
+ * @attr: device attribute pointer
+ * @buf: buffer to scan for loading control value
+ * @count: number of bytes in @buf
+ *
+ * The relevant values are:
+ *
+ * 1: Start a load, discarding any previous partial load.
+ * 0: Conclude the load and hand the data to the driver code.
+ * -1: Conclude the load with an error and discard any written data.
+ **/
+static ssize_t firmware_loading_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t written = count;
+ int loading = simple_strtol(buf, NULL, 10);
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (fw_state_is_aborted(fw_priv) || fw_state_is_done(fw_priv))
+ goto out;
+
+ switch (loading) {
+ case 1:
+ /* discarding any previous partial load */
+ fw_free_paged_buf(fw_priv);
+ fw_state_start(fw_priv);
+ break;
+ case 0:
+ if (fw_state_is_loading(fw_priv)) {
+ int rc;
+
+			/*
+			 * Several loading requests may be pending on
+			 * the same firmware buffer, so let all requests
+			 * see the mapped 'buf->data' once the loading
+			 * has completed.
+			 */
+ rc = fw_map_paged_buf(fw_priv);
+ if (rc)
+ dev_err(dev, "%s: map pages failed\n",
+ __func__);
+ else
+ rc = security_kernel_post_load_data(fw_priv->data,
+ fw_priv->size,
+ LOADING_FIRMWARE,
+ "blob");
+
+ /*
+ * Same logic as fw_load_abort, only the DONE bit
+ * is ignored and we set ABORT only on failure.
+ */
+ if (rc) {
+ fw_state_aborted(fw_priv);
+ written = rc;
+ } else {
+ fw_state_done(fw_priv);
+
+ /*
+ * If this is a user-initiated firmware upload
+ * then start the upload in a worker thread now.
+ */
+ rc = fw_upload_start(fw_sysfs);
+ if (rc)
+ written = rc;
+ }
+ break;
+ }
+ fallthrough;
+ default:
+ dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
+ fallthrough;
+ case -1:
+ fw_load_abort(fw_sysfs);
+ if (fw_sysfs->fw_upload_priv)
+ fw_state_init(fw_sysfs->fw_priv);
+
+ break;
+ }
+out:
+ mutex_unlock(&fw_lock);
+ return written;
+}
+
+DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
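+
+/*
+ * A minimal user-space sketch of driving 'loading' together with the
+ * 'data' attribute defined below ($DEVPATH stands for the sysfs path of
+ * the pending firmware device, e.g. as reported in its uevent):
+ *
+ *	echo 1 > /sys/$DEVPATH/loading		# start, discard partial data
+ *	cat fw_image > /sys/$DEVPATH/data	# write the firmware image
+ *	echo 0 > /sys/$DEVPATH/loading		# hand the data to the driver
+ *
+ * Writing -1 to 'loading' instead aborts the load with an error.
+ */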
+
+static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ if (read)
+ memcpy(buffer, fw_priv->data + offset, count);
+ else
+ memcpy(fw_priv->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
+ loff_t offset, size_t count, bool read)
+{
+ while (count) {
+ int page_nr = offset >> PAGE_SHIFT;
+ int page_ofs = offset & (PAGE_SIZE - 1);
+ int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+ if (read)
+ memcpy_from_page(buffer, fw_priv->pages[page_nr],
+ page_ofs, page_cnt);
+ else
+ memcpy_to_page(fw_priv->pages[page_nr], page_ofs,
+ buffer, page_cnt);
+
+ buffer += page_cnt;
+ offset += page_cnt;
+ count -= page_cnt;
+ }
+}
+
+static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t ret_count;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ ret_count = -ENODEV;
+ goto out;
+ }
+ if (offset > fw_priv->size) {
+ ret_count = 0;
+ goto out;
+ }
+ if (count > fw_priv->size - offset)
+ count = fw_priv->size - offset;
+
+ ret_count = count;
+
+ if (fw_priv->data)
+ firmware_rw_data(fw_priv, buffer, offset, count, true);
+ else
+ firmware_rw(fw_priv, buffer, offset, count, true);
+
+out:
+ mutex_unlock(&fw_lock);
+ return ret_count;
+}
+
+static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
+{
+ int err;
+
+ err = fw_grow_paged_buf(fw_sysfs->fw_priv,
+ PAGE_ALIGN(min_size) >> PAGE_SHIFT);
+ if (err)
+ fw_load_abort(fw_sysfs);
+ return err;
+}
+
+/**
+ * firmware_data_write() - write method for firmware
+ * @filp: open sysfs file
+ * @kobj: kobject for the device
+ * @bin_attr: bin_attr structure
+ * @buffer: buffer being written
+ * @offset: buffer offset for write in total data store area
+ * @count: buffer size
+ *
+ * Data written to the 'data' attribute will be later handed to
+ * the driver as a firmware image.
+ **/
+static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
+ struct fw_priv *fw_priv;
+ ssize_t retval;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ mutex_lock(&fw_lock);
+ fw_priv = fw_sysfs->fw_priv;
+ if (!fw_priv || fw_state_is_done(fw_priv)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ if (fw_priv->data) {
+ if (offset + count > fw_priv->allocated_size) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ firmware_rw_data(fw_priv, buffer, offset, count, false);
+ retval = count;
+ } else {
+ retval = fw_realloc_pages(fw_sysfs, offset + count);
+ if (retval)
+ goto out;
+
+ retval = count;
+ firmware_rw(fw_priv, buffer, offset, count, false);
+ }
+
+ fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
+out:
+ mutex_unlock(&fw_lock);
+ return retval;
+}
+
+static struct bin_attribute firmware_attr_data = {
+ .attr = { .name = "data", .mode = 0644 },
+ .size = 0,
+ .read = firmware_data_read,
+ .write = firmware_data_write,
+};
+
+static struct attribute *fw_dev_attrs[] = {
+ &dev_attr_loading.attr,
+#ifdef CONFIG_FW_UPLOAD
+ &dev_attr_cancel.attr,
+ &dev_attr_status.attr,
+ &dev_attr_error.attr,
+ &dev_attr_remaining_size.attr,
+#endif
+ NULL
+};
+
+static struct bin_attribute *fw_dev_bin_attrs[] = {
+ &firmware_attr_data,
+ NULL
+};
+
+static const struct attribute_group fw_dev_attr_group = {
+ .attrs = fw_dev_attrs,
+ .bin_attrs = fw_dev_bin_attrs,
+#ifdef CONFIG_FW_UPLOAD
+ .is_visible = fw_upload_is_visible,
+#endif
+};
+
+static const struct attribute_group *fw_dev_attr_groups[] = {
+ &fw_dev_attr_group,
+ NULL
+};
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags)
+{
+ struct fw_sysfs *fw_sysfs;
+ struct device *f_dev;
+
+ fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
+ if (!fw_sysfs) {
+ fw_sysfs = ERR_PTR(-ENOMEM);
+ goto exit;
+ }
+
+ fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
+ fw_sysfs->fw = firmware;
+ f_dev = &fw_sysfs->dev;
+
+ device_initialize(f_dev);
+ dev_set_name(f_dev, "%s", fw_name);
+ f_dev->parent = device;
+ f_dev->class = &firmware_class;
+ f_dev->groups = fw_dev_attr_groups;
+exit:
+ return fw_sysfs;
+}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
new file mode 100644
index 000000000..df1d5add6
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __FIRMWARE_SYSFS_H
+#define __FIRMWARE_SYSFS_H
+
+#include <linux/device.h>
+
+#include "firmware.h"
+
+MODULE_IMPORT_NS(FIRMWARE_LOADER_PRIVATE);
+
+extern struct firmware_fallback_config fw_fallback_config;
+extern struct device_attribute dev_attr_loading;
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+/**
+ * struct firmware_fallback_config - firmware fallback configuration settings
+ *
+ * Helps describe and fine tune the fallback mechanism.
+ *
+ * @force_sysfs_fallback: force the sysfs fallback mechanism to be used
+ * as if one had enabled CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y.
+ * Useful for debugging CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+ * functionality on a kernel where that config entry has been disabled.
+ * @ignore_sysfs_fallback: forcibly disable the sysfs fallback mechanism.
+ * This emulates the behaviour as if the kernel had been built with
+ * CONFIG_FW_LOADER_USER_HELPER=n.
+ * @old_timeout: for internal use
+ * @loading_timeout: the timeout to wait for the fallback mechanism before
+ * giving up, in seconds.
+ */
+struct firmware_fallback_config {
+ unsigned int force_sysfs_fallback;
+ unsigned int ignore_sysfs_fallback;
+ int old_timeout;
+ int loading_timeout;
+};
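+
+/*
+ * A small usage sketch (assuming CONFIG_SYSCTL): the two fallback knobs
+ * above are exposed under /proc/sys/kernel/firmware_config/, so e.g.
+ *
+ *	echo 1 > /proc/sys/kernel/firmware_config/force_sysfs_fallback
+ *
+ * forces the sysfs fallback mechanism to be used for firmware requests.
+ */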
+
+/* These getters are vetted to use int properly */
+static inline int __firmware_loading_timeout(void)
+{
+ return fw_fallback_config.loading_timeout;
+}
+
+/* These setters are vetted to use int properly */
+static inline void __fw_fallback_set_timeout(int timeout)
+{
+ fw_fallback_config.loading_timeout = timeout;
+}
+#endif
+
+#ifdef CONFIG_FW_LOADER_SYSFS
+int register_sysfs_loader(void);
+void unregister_sysfs_loader(void);
+#if defined(CONFIG_FW_LOADER_USER_HELPER) && defined(CONFIG_SYSCTL)
+int register_firmware_config_sysctl(void);
+void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+ return 0;
+}
+
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_FW_LOADER_USER_HELPER && CONFIG_SYSCTL */
+#else /* CONFIG_FW_LOADER_SYSFS */
+static inline int register_sysfs_loader(void)
+{
+ return 0;
+}
+
+static inline void unregister_sysfs_loader(void)
+{
+}
+#endif /* CONFIG_FW_LOADER_SYSFS */
+
+struct fw_sysfs {
+ bool nowait;
+ struct device dev;
+ struct fw_priv *fw_priv;
+ struct firmware *fw;
+ void *fw_upload_priv;
+};
+
+static inline struct fw_sysfs *to_fw_sysfs(struct device *dev)
+{
+ return container_of(dev, struct fw_sysfs, dev);
+}
+
+void __fw_load_abort(struct fw_priv *fw_priv);
+
+static inline void fw_load_abort(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+
+ __fw_load_abort(fw_priv);
+}
+
+struct fw_sysfs *
+fw_create_instance(struct firmware *firmware, const char *fw_name,
+ struct device *device, u32 opt_flags);
+
+#ifdef CONFIG_FW_UPLOAD
+extern struct device_attribute dev_attr_status;
+extern struct device_attribute dev_attr_error;
+extern struct device_attribute dev_attr_cancel;
+extern struct device_attribute dev_attr_remaining_size;
+
+int fw_upload_start(struct fw_sysfs *fw_sysfs);
+void fw_upload_free(struct fw_sysfs *fw_sysfs);
+umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+#else
+static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ return 0;
+}
+
+static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+}
+#endif
+
+#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
new file mode 100644
index 000000000..a0af8f5f1
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "sysfs_upload.h"
+
+/*
+ * Support for user-space to initiate a firmware upload to a device.
+ */
+
+static const char * const fw_upload_prog_str[] = {
+ [FW_UPLOAD_PROG_IDLE] = "idle",
+ [FW_UPLOAD_PROG_RECEIVING] = "receiving",
+ [FW_UPLOAD_PROG_PREPARING] = "preparing",
+ [FW_UPLOAD_PROG_TRANSFERRING] = "transferring",
+ [FW_UPLOAD_PROG_PROGRAMMING] = "programming"
+};
+
+static const char * const fw_upload_err_str[] = {
+ [FW_UPLOAD_ERR_NONE] = "none",
+ [FW_UPLOAD_ERR_HW_ERROR] = "hw-error",
+ [FW_UPLOAD_ERR_TIMEOUT] = "timeout",
+ [FW_UPLOAD_ERR_CANCELED] = "user-abort",
+ [FW_UPLOAD_ERR_BUSY] = "device-busy",
+ [FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
+ [FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
+ [FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+};
+
+static const char *fw_upload_progress(struct device *dev,
+ enum fw_upload_prog prog)
+{
+ const char *status = "unknown-status";
+
+ if (prog < FW_UPLOAD_PROG_MAX)
+ status = fw_upload_prog_str[prog];
+ else
+ dev_err(dev, "Invalid status during secure update: %d\n", prog);
+
+ return status;
+}
+
+static const char *fw_upload_error(struct device *dev,
+ enum fw_upload_err err_code)
+{
+ const char *error = "unknown-error";
+
+ if (err_code < FW_UPLOAD_ERR_MAX)
+ error = fw_upload_err_str[err_code];
+ else
+ dev_err(dev, "Invalid error code during secure update: %d\n",
+ err_code);
+
+ return error;
+}
+
+static ssize_t
+status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%s\n", fw_upload_progress(dev, fwlp->progress));
+}
+DEVICE_ATTR_RO(status);
+
+static ssize_t
+error_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret;
+
+ mutex_lock(&fwlp->lock);
+
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE)
+ ret = -EBUSY;
+ else if (!fwlp->err_code)
+ ret = 0;
+ else
+ ret = sysfs_emit(buf, "%s:%s\n",
+ fw_upload_progress(dev, fwlp->err_progress),
+ fw_upload_error(dev, fwlp->err_code));
+
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_RO(error);
+
+static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+ int ret = count;
+ bool cancel;
+
+ if (kstrtobool(buf, &cancel) || !cancel)
+ return -EINVAL;
+
+ mutex_lock(&fwlp->lock);
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
+ ret = -ENODEV;
+
+ fwlp->ops->cancel(fwlp->fw_upload);
+ mutex_unlock(&fwlp->lock);
+
+ return ret;
+}
+DEVICE_ATTR_WO(cancel);
+
+static ssize_t remaining_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fw_upload_priv *fwlp = to_fw_sysfs(dev)->fw_upload_priv;
+
+ return sysfs_emit(buf, "%u\n", fwlp->remaining_size);
+}
+DEVICE_ATTR_RO(remaining_size);
+
+umode_t
+fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ static struct fw_sysfs *fw_sysfs;
+
+ fw_sysfs = to_fw_sysfs(kobj_to_dev(kobj));
+
+ if (fw_sysfs->fw_upload_priv || attr == &dev_attr_loading.attr)
+ return attr->mode;
+
+ return 0;
+}
+
+static void fw_upload_update_progress(struct fw_upload_priv *fwlp,
+ enum fw_upload_prog new_progress)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = new_progress;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_set_error(struct fw_upload_priv *fwlp,
+ enum fw_upload_err err_code)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->err_progress = fwlp->progress;
+ fwlp->err_code = err_code;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_prog_complete(struct fw_upload_priv *fwlp)
+{
+ mutex_lock(&fwlp->lock);
+ fwlp->progress = FW_UPLOAD_PROG_IDLE;
+ mutex_unlock(&fwlp->lock);
+}
+
+static void fw_upload_main(struct work_struct *work)
+{
+ struct fw_upload_priv *fwlp;
+ struct fw_sysfs *fw_sysfs;
+ u32 written = 0, offset = 0;
+ enum fw_upload_err ret;
+ struct device *fw_dev;
+ struct fw_upload *fwl;
+
+ fwlp = container_of(work, struct fw_upload_priv, work);
+ fwl = fwlp->fw_upload;
+ fw_sysfs = (struct fw_sysfs *)fwl->priv;
+ fw_dev = &fw_sysfs->dev;
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PREPARING);
+ ret = fwlp->ops->prepare(fwl, fwlp->data, fwlp->remaining_size);
+ if (ret != FW_UPLOAD_ERR_NONE) {
+ fw_upload_set_error(fwlp, ret);
+ goto putdev_exit;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_TRANSFERRING);
+ while (fwlp->remaining_size) {
+ ret = fwlp->ops->write(fwl, fwlp->data, offset,
+ fwlp->remaining_size, &written);
+ if (ret != FW_UPLOAD_ERR_NONE || !written) {
+ if (ret == FW_UPLOAD_ERR_NONE) {
+ dev_warn(fw_dev, "write-op wrote zero data\n");
+ ret = FW_UPLOAD_ERR_RW_ERROR;
+ }
+ fw_upload_set_error(fwlp, ret);
+ goto done;
+ }
+
+ fwlp->remaining_size -= written;
+ offset += written;
+ }
+
+ fw_upload_update_progress(fwlp, FW_UPLOAD_PROG_PROGRAMMING);
+ ret = fwlp->ops->poll_complete(fwl);
+ if (ret != FW_UPLOAD_ERR_NONE)
+ fw_upload_set_error(fwlp, ret);
+
+done:
+ if (fwlp->ops->cleanup)
+ fwlp->ops->cleanup(fwl);
+
+putdev_exit:
+ put_device(fw_dev->parent);
+
+ /*
+ * Note: fwlp->remaining_size is left unmodified here to provide
+ * additional information on errors. It will be reinitialized when
+	 * the next firmware upload begins.
+ */
+ mutex_lock(&fw_lock);
+ fw_free_paged_buf(fw_sysfs->fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ mutex_unlock(&fw_lock);
+ fwlp->data = NULL;
+ fw_upload_prog_complete(fwlp);
+}
+
+/*
+ * Start a worker thread to upload data to the parent driver.
+ * Must be called with fw_lock held.
+ */
+int fw_upload_start(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_priv *fw_priv = fw_sysfs->fw_priv;
+ struct device *fw_dev = &fw_sysfs->dev;
+ struct fw_upload_priv *fwlp;
+
+ if (!fw_sysfs->fw_upload_priv)
+ return 0;
+
+ if (!fw_priv->size) {
+ fw_free_paged_buf(fw_priv);
+ fw_state_init(fw_sysfs->fw_priv);
+ return 0;
+ }
+
+ fwlp = fw_sysfs->fw_upload_priv;
+ mutex_lock(&fwlp->lock);
+
+ /* Do not interfere with an on-going fw_upload */
+ if (fwlp->progress != FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -EBUSY;
+ }
+
+ get_device(fw_dev->parent); /* released in fw_upload_main */
+
+ fwlp->progress = FW_UPLOAD_PROG_RECEIVING;
+ fwlp->err_code = 0;
+ fwlp->remaining_size = fw_priv->size;
+ fwlp->data = fw_priv->data;
+
+ pr_debug("%s: fw-%s fw_priv=%p data=%p size=%u\n",
+ __func__, fw_priv->fw_name,
+ fw_priv, fw_priv->data,
+ (unsigned int)fw_priv->size);
+
+ queue_work(system_long_wq, &fwlp->work);
+ mutex_unlock(&fwlp->lock);
+
+ return 0;
+}
+
+void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_upload_priv->fw_upload);
+ kfree(fw_upload_priv);
+}
+
+/**
+ * firmware_upload_register() - register for the firmware upload sysfs API
+ * @module: kernel module of this device
+ * @parent: parent device instantiating firmware upload
+ * @name: firmware name to be associated with this device
+ * @ops: pointer to structure of firmware upload ops
+ * @dd_handle: pointer to parent driver private data
+ *
+ * @name must be unique among all users of firmware upload. The firmware
+ * sysfs files for this device will be found at /sys/class/firmware/@name.
+ *
+ * Return: struct fw_upload pointer or ERR_PTR()
+ *
+ **/
+struct fw_upload *
+firmware_upload_register(struct module *module, struct device *parent,
+ const char *name, const struct fw_upload_ops *ops,
+ void *dd_handle)
+{
+ u32 opt_flags = FW_OPT_NOCACHE;
+ struct fw_upload *fw_upload;
+ struct fw_upload_priv *fw_upload_priv;
+ struct fw_sysfs *fw_sysfs;
+ struct fw_priv *fw_priv;
+ struct device *fw_dev;
+ int ret;
+
+ if (!name || name[0] == '\0')
+ return ERR_PTR(-EINVAL);
+
+ if (!ops || !ops->cancel || !ops->prepare ||
+ !ops->write || !ops->poll_complete) {
+ dev_err(parent, "Attempt to register without all required ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!try_module_get(module))
+ return ERR_PTR(-EFAULT);
+
+ fw_upload = kzalloc(sizeof(*fw_upload), GFP_KERNEL);
+ if (!fw_upload) {
+ ret = -ENOMEM;
+ goto exit_module_put;
+ }
+
+ fw_upload_priv = kzalloc(sizeof(*fw_upload_priv), GFP_KERNEL);
+ if (!fw_upload_priv) {
+ ret = -ENOMEM;
+ goto free_fw_upload;
+ }
+
+ fw_upload_priv->fw_upload = fw_upload;
+ fw_upload_priv->ops = ops;
+ mutex_init(&fw_upload_priv->lock);
+ fw_upload_priv->module = module;
+ fw_upload_priv->name = name;
+ fw_upload_priv->err_code = 0;
+ fw_upload_priv->progress = FW_UPLOAD_PROG_IDLE;
+ INIT_WORK(&fw_upload_priv->work, fw_upload_main);
+ fw_upload->dd_handle = dd_handle;
+
+ fw_sysfs = fw_create_instance(NULL, name, parent, opt_flags);
+ if (IS_ERR(fw_sysfs)) {
+ ret = PTR_ERR(fw_sysfs);
+ goto free_fw_upload_priv;
+ }
+ fw_upload->priv = fw_sysfs;
+ fw_sysfs->fw_upload_priv = fw_upload_priv;
+ fw_dev = &fw_sysfs->dev;
+
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, NULL, 0, 0,
+ FW_OPT_NOCACHE);
+ if (ret != 0) {
+ if (ret > 0)
+ ret = -EINVAL;
+ goto free_fw_sysfs;
+ }
+ fw_priv->is_paged_buf = true;
+ fw_sysfs->fw_priv = fw_priv;
+
+ ret = device_add(fw_dev);
+ if (ret) {
+ dev_err(fw_dev, "%s: device_register failed\n", __func__);
+ put_device(fw_dev);
+ goto exit_module_put;
+ }
+
+ return fw_upload;
+
+free_fw_sysfs:
+ kfree(fw_sysfs);
+
+free_fw_upload_priv:
+ kfree(fw_upload_priv);
+
+free_fw_upload:
+ kfree(fw_upload);
+
+exit_module_put:
+ module_put(module);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_register);
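+
+/*
+ * Probe-time usage sketch for a lower-level driver (names such as
+ * "my_ops", "my-fw" and "my_priv" are illustrative only):
+ *
+ *	fwl = firmware_upload_register(THIS_MODULE, dev, "my-fw",
+ *				       &my_ops, my_priv);
+ *	if (IS_ERR(fwl))
+ *		return PTR_ERR(fwl);
+ *
+ * where my_ops provides at least .prepare, .write, .poll_complete and
+ * .cancel, as checked above; firmware_upload_unregister(fwl) undoes the
+ * registration on driver removal.
+ */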
+
+/**
+ * firmware_upload_unregister() - Unregister firmware upload interface
+ * @fw_upload: pointer to struct fw_upload
+ **/
+void firmware_upload_unregister(struct fw_upload *fw_upload)
+{
+ struct fw_sysfs *fw_sysfs = fw_upload->priv;
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+ struct module *module = fw_upload_priv->module;
+
+ mutex_lock(&fw_upload_priv->lock);
+ if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fw_upload_priv->lock);
+ goto unregister;
+ }
+
+ fw_upload_priv->ops->cancel(fw_upload);
+ mutex_unlock(&fw_upload_priv->lock);
+
+ /* Ensure lower-level device-driver is finished */
+ flush_work(&fw_upload_priv->work);
+
+unregister:
+ device_unregister(&fw_sysfs->dev);
+ module_put(module);
+}
+EXPORT_SYMBOL_GPL(firmware_upload_unregister);
diff --git a/drivers/base/firmware_loader/sysfs_upload.h b/drivers/base/firmware_loader/sysfs_upload.h
new file mode 100644
index 000000000..31931ff78
--- /dev/null
+++ b/drivers/base/firmware_loader/sysfs_upload.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __SYSFS_UPLOAD_H
+#define __SYSFS_UPLOAD_H
+
+#include <linux/device.h>
+
+#include "sysfs.h"
+
+/**
+ * enum fw_upload_prog - firmware upload progress codes
+ * @FW_UPLOAD_PROG_IDLE: there is no firmware upload in progress
+ * @FW_UPLOAD_PROG_RECEIVING: worker thread is receiving firmware data
+ * @FW_UPLOAD_PROG_PREPARING: target device is preparing for firmware upload
+ * @FW_UPLOAD_PROG_TRANSFERRING: data is being copied to the device
+ * @FW_UPLOAD_PROG_PROGRAMMING: device is performing the firmware update
+ * @FW_UPLOAD_PROG_MAX: Maximum progress code marker
+ */
+enum fw_upload_prog {
+ FW_UPLOAD_PROG_IDLE,
+ FW_UPLOAD_PROG_RECEIVING,
+ FW_UPLOAD_PROG_PREPARING,
+ FW_UPLOAD_PROG_TRANSFERRING,
+ FW_UPLOAD_PROG_PROGRAMMING,
+ FW_UPLOAD_PROG_MAX
+};
+
+struct fw_upload_priv {
+ struct fw_upload *fw_upload;
+ struct module *module;
+ const char *name;
+ const struct fw_upload_ops *ops;
+ struct mutex lock; /* protect data structure contents */
+ struct work_struct work;
+ const u8 *data; /* pointer to update data */
+ u32 remaining_size; /* size remaining to transfer */
+ enum fw_upload_prog progress;
+ enum fw_upload_prog err_progress; /* progress at time of failure */
+ enum fw_upload_err err_code; /* security manager error code */
+};
+
+#endif /* __SYSFS_UPLOAD_H */
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
new file mode 100644
index 000000000..1ce59b4b5
--- /dev/null
+++ b/drivers/base/hypervisor.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * hypervisor.c - /sys/hypervisor subsystem.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2007 Novell Inc.
+ */
+
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include "base.h"
+
+struct kobject *hypervisor_kobj;
+EXPORT_SYMBOL_GPL(hypervisor_kobj);
+
+int __init hypervisor_init(void)
+{
+ hypervisor_kobj = kobject_create_and_add("hypervisor", NULL);
+ if (!hypervisor_kobj)
+ return -ENOMEM;
+ return 0;
+}
diff --git a/drivers/base/init.c b/drivers/base/init.c
new file mode 100644
index 000000000..397eb9880
--- /dev/null
+++ b/drivers/base/init.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/memory.h>
+#include <linux/of.h>
+#include <linux/backing-dev.h>
+
+#include "base.h"
+
+/**
+ * driver_init - initialize driver model.
+ *
+ * Call the driver model init functions to initialize their
+ * subsystems. Called early from init/main.c.
+ */
+void __init driver_init(void)
+{
+ /* These are the core pieces */
+ bdi_init(&noop_backing_dev_info);
+ devtmpfs_init();
+ devices_init();
+ buses_init();
+ classes_init();
+ firmware_init();
+ hypervisor_init();
+
+ /* These are also core pieces, but must come after the
+ * core core pieces.
+ */
+ of_core_init();
+ platform_bus_init();
+ auxiliary_bus_init();
+ cpu_dev_init();
+ memory_dev_init();
+ node_dev_init();
+ container_dev_init();
+}
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
new file mode 100644
index 000000000..55e3ee2da
--- /dev/null
+++ b/drivers/base/isa.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISA bus.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/isa.h>
+
+static struct device isa_bus = {
+ .init_name = "isa"
+};
+
+struct isa_dev {
+ struct device dev;
+ struct device *next;
+ unsigned int id;
+};
+
+#define to_isa_dev(x) container_of((x), struct isa_dev, dev)
+
+static int isa_bus_match(struct device *dev, struct device_driver *driver)
+{
+ struct isa_driver *isa_driver = to_isa_driver(driver);
+
+ if (dev->platform_data == isa_driver) {
+ if (!isa_driver->match ||
+ isa_driver->match(dev, to_isa_dev(dev)->id))
+ return 1;
+ dev->platform_data = NULL;
+ }
+ return 0;
+}
+
+static int isa_bus_probe(struct device *dev)
+{
+ struct isa_driver *isa_driver = dev->platform_data;
+
+ if (isa_driver && isa_driver->probe)
+ return isa_driver->probe(dev, to_isa_dev(dev)->id);
+
+ return 0;
+}
+
+static void isa_bus_remove(struct device *dev)
+{
+ struct isa_driver *isa_driver = dev->platform_data;
+
+ if (isa_driver && isa_driver->remove)
+ isa_driver->remove(dev, to_isa_dev(dev)->id);
+}
+
+static void isa_bus_shutdown(struct device *dev)
+{
+ struct isa_driver *isa_driver = dev->platform_data;
+
+ if (isa_driver && isa_driver->shutdown)
+ isa_driver->shutdown(dev, to_isa_dev(dev)->id);
+}
+
+static int isa_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct isa_driver *isa_driver = dev->platform_data;
+
+ if (isa_driver && isa_driver->suspend)
+ return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
+
+ return 0;
+}
+
+static int isa_bus_resume(struct device *dev)
+{
+ struct isa_driver *isa_driver = dev->platform_data;
+
+ if (isa_driver && isa_driver->resume)
+ return isa_driver->resume(dev, to_isa_dev(dev)->id);
+
+ return 0;
+}
+
+static struct bus_type isa_bus_type = {
+ .name = "isa",
+ .match = isa_bus_match,
+ .probe = isa_bus_probe,
+ .remove = isa_bus_remove,
+ .shutdown = isa_bus_shutdown,
+ .suspend = isa_bus_suspend,
+ .resume = isa_bus_resume
+};
+
+static void isa_dev_release(struct device *dev)
+{
+ kfree(to_isa_dev(dev));
+}
+
+void isa_unregister_driver(struct isa_driver *isa_driver)
+{
+ struct device *dev = isa_driver->devices;
+
+ while (dev) {
+ struct device *tmp = to_isa_dev(dev)->next;
+ device_unregister(dev);
+ dev = tmp;
+ }
+ driver_unregister(&isa_driver->driver);
+}
+EXPORT_SYMBOL_GPL(isa_unregister_driver);
+
+int isa_register_driver(struct isa_driver *isa_driver, unsigned int ndev)
+{
+ int error;
+ unsigned int id;
+
+ isa_driver->driver.bus = &isa_bus_type;
+ isa_driver->devices = NULL;
+
+ error = driver_register(&isa_driver->driver);
+ if (error)
+ return error;
+
+ for (id = 0; id < ndev; id++) {
+ struct isa_dev *isa_dev;
+
+ isa_dev = kzalloc(sizeof *isa_dev, GFP_KERNEL);
+ if (!isa_dev) {
+ error = -ENOMEM;
+ break;
+ }
+
+ isa_dev->dev.parent = &isa_bus;
+ isa_dev->dev.bus = &isa_bus_type;
+
+ dev_set_name(&isa_dev->dev, "%s.%u",
+ isa_driver->driver.name, id);
+ isa_dev->dev.platform_data = isa_driver;
+ isa_dev->dev.release = isa_dev_release;
+ isa_dev->id = id;
+
+ isa_dev->dev.coherent_dma_mask = DMA_BIT_MASK(24);
+ isa_dev->dev.dma_mask = &isa_dev->dev.coherent_dma_mask;
+
+ error = device_register(&isa_dev->dev);
+ if (error) {
+ put_device(&isa_dev->dev);
+ break;
+ }
+
+ if (isa_dev->dev.platform_data) {
+ isa_dev->next = isa_driver->devices;
+ isa_driver->devices = &isa_dev->dev;
+ } else
+ device_unregister(&isa_dev->dev);
+ }
+
+ if (!error && !isa_driver->devices)
+ error = -ENODEV;
+
+ if (error)
+ isa_unregister_driver(isa_driver);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(isa_register_driver);
+
+static int __init isa_bus_init(void)
+{
+ int error;
+
+ error = bus_register(&isa_bus_type);
+ if (!error) {
+ error = device_register(&isa_bus);
+ if (error)
+ bus_unregister(&isa_bus_type);
+ }
+ return error;
+}
+
+postcore_initcall(isa_bus_init);
diff --git a/drivers/base/map.c b/drivers/base/map.c
new file mode 100644
index 000000000..83aeb09ca
--- /dev/null
+++ b/drivers/base/map.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/base/map.c
+ *
+ * (C) Copyright Al Viro 2002,2003
+ *
+ * NOTE: data structure needs to be changed. It works, but for large dev_t
+ * it will be too slow. It is isolated, though, so these changes will be
+ * local to that file.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/kobj_map.h>
+
+struct kobj_map {
+ struct probe {
+ struct probe *next;
+ dev_t dev;
+ unsigned long range;
+ struct module *owner;
+ kobj_probe_t *get;
+ int (*lock)(dev_t, void *);
+ void *data;
+ } *probes[255];
+ struct mutex *lock;
+};
+
+int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range,
+ struct module *module, kobj_probe_t *probe,
+ int (*lock)(dev_t, void *), void *data)
+{
+ unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
+ unsigned int index = MAJOR(dev);
+ unsigned int i;
+ struct probe *p;
+
+ if (n > 255)
+ n = 255;
+
+ p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL);
+ if (p == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, p++) {
+ p->owner = module;
+ p->get = probe;
+ p->lock = lock;
+ p->dev = dev;
+ p->range = range;
+ p->data = data;
+ }
+ mutex_lock(domain->lock);
+ for (i = 0, p -= n; i < n; i++, p++, index++) {
+ struct probe **s = &domain->probes[index % 255];
+ while (*s && (*s)->range < range)
+ s = &(*s)->next;
+ p->next = *s;
+ *s = p;
+ }
+ mutex_unlock(domain->lock);
+ return 0;
+}
+
+void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range)
+{
+ unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
+ unsigned int index = MAJOR(dev);
+ unsigned int i;
+ struct probe *found = NULL;
+
+ if (n > 255)
+ n = 255;
+
+ mutex_lock(domain->lock);
+ for (i = 0; i < n; i++, index++) {
+ struct probe **s;
+ for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) {
+ struct probe *p = *s;
+ if (p->dev == dev && p->range == range) {
+ *s = p->next;
+ if (!found)
+ found = p;
+ break;
+ }
+ }
+ }
+ mutex_unlock(domain->lock);
+ kfree(found);
+}
+
+struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index)
+{
+ struct kobject *kobj;
+ struct probe *p;
+ unsigned long best = ~0UL;
+
+retry:
+ mutex_lock(domain->lock);
+ for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) {
+ struct kobject *(*probe)(dev_t, int *, void *);
+ struct module *owner;
+ void *data;
+
+ if (p->dev > dev || p->dev + p->range - 1 < dev)
+ continue;
+ if (p->range - 1 >= best)
+ break;
+ if (!try_module_get(p->owner))
+ continue;
+ owner = p->owner;
+ data = p->data;
+ probe = p->get;
+ best = p->range - 1;
+ *index = dev - p->dev;
+ if (p->lock && p->lock(dev, data) < 0) {
+ module_put(owner);
+ continue;
+ }
+ mutex_unlock(domain->lock);
+ kobj = probe(dev, index, data);
+ /* Currently ->owner protects _only_ ->probe() itself. */
+ module_put(owner);
+ if (kobj)
+ return kobj;
+ goto retry;
+ }
+ mutex_unlock(domain->lock);
+ return NULL;
+}
+
+struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock)
+{
+ struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL);
+ struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL);
+ int i;
+
+ if ((p == NULL) || (base == NULL)) {
+ kfree(p);
+ kfree(base);
+ return NULL;
+ }
+
+ base->dev = 1;
+ base->range = ~0;
+ base->get = base_probe;
+ for (i = 0; i < 255; i++)
+ p->probes[i] = base;
+ p->lock = lock;
+ return p;
+}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
new file mode 100644
index 000000000..5d39f3e37
--- /dev/null
+++ b/drivers/base/memory.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory subsystem support
+ *
+ * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
+ * Dave Hansen <haveblue@us.ibm.com>
+ *
+ * This file provides the necessary infrastructure to represent
+ * a SPARSEMEM-memory-model system's physical memory in /sysfs.
+ * All arch-independent code that assumes MEMORY_HOTPLUG requires
+ * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/topology.h>
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+#define MEMORY_CLASS_NAME "memory"
+
+static const char *const online_type_to_str[] = {
+ [MMOP_OFFLINE] = "offline",
+ [MMOP_ONLINE] = "online",
+ [MMOP_ONLINE_KERNEL] = "online_kernel",
+ [MMOP_ONLINE_MOVABLE] = "online_movable",
+};
+
+int mhp_online_type_from_str(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
+ if (sysfs_streq(str, online_type_to_str[i]))
+ return i;
+ }
+ return -EINVAL;
+}
+
+#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
+
+static int sections_per_block;
+
+static inline unsigned long memory_block_id(unsigned long section_nr)
+{
+ return section_nr / sections_per_block;
+}
+
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
+static int memory_subsys_online(struct device *dev);
+static int memory_subsys_offline(struct device *dev);
+
+static struct bus_type memory_subsys = {
+ .name = MEMORY_CLASS_NAME,
+ .dev_name = MEMORY_CLASS_NAME,
+ .online = memory_subsys_online,
+ .offline = memory_subsys_offline,
+};
+
+/*
+ * Memory blocks are cached in a local radix tree to avoid
+ * a costly linear search for the corresponding device on
+ * the subsystem bus.
+ */
+static DEFINE_XARRAY(memory_blocks);
+
+/*
+ * Memory groups, indexed by memory group id (mgid).
+ */
+static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
+#define MEMORY_GROUP_MARK_DYNAMIC XA_MARK_1
+
+static BLOCKING_NOTIFIER_HEAD(memory_chain);
+
+int register_memory_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&memory_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_notifier);
+
+void unregister_memory_notifier(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&memory_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_notifier);
+
+static void memory_block_release(struct device *dev)
+{
+ struct memory_block *mem = to_memory_block(dev);
+
+ kfree(mem);
+}
+
+unsigned long __weak memory_block_size_bytes(void)
+{
+ return MIN_MEMORY_BLOCK_SIZE;
+}
+EXPORT_SYMBOL_GPL(memory_block_size_bytes);
+
+/*
+ * Show the first physical section index (number) of this memory block.
+ */
+static ssize_t phys_index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long phys_index;
+
+ phys_index = mem->start_section_nr / sections_per_block;
+
+ return sysfs_emit(buf, "%08lx\n", phys_index);
+}
+
+/*
+ * Legacy interface that we cannot remove. Always indicate "removable"
+ * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
+ */
+static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
+}
+
+/*
+ * online, offline, going offline, etc.
+ */
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ const char *output;
+
+ /*
+ * We can probably put these states in a nice little array
+ * so that they're not open-coded
+ */
+ switch (mem->state) {
+ case MEM_ONLINE:
+ output = "online";
+ break;
+ case MEM_OFFLINE:
+ output = "offline";
+ break;
+ case MEM_GOING_OFFLINE:
+ output = "going-offline";
+ break;
+ default:
+ WARN_ON(1);
+ return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
+ }
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+int memory_notify(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(&memory_chain, val, v);
+}
+
+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
+static int memory_block_online(struct memory_block *mem)
+{
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+ struct zone *zone;
+ int ret;
+
+ zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
+ start_pfn, nr_pages);
+
+ /*
+	 * Although vmemmap pages have a different lifecycle than the pages
+	 * they describe (they remain until the memory is unplugged), doing
+	 * their initialization and accounting at the memory onlining/offlining
+	 * stage helps keep the accounting easier to follow - e.g. vmemmap
+	 * pages belong to the same zone as the memory they describe.
+ */
+ mem_hotplug_begin();
+ if (nr_vmemmap_pages) {
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
+ if (ret)
+ goto out;
+ }
+
+ ret = online_pages(start_pfn + nr_vmemmap_pages,
+ nr_pages - nr_vmemmap_pages, zone, mem->group);
+ if (ret) {
+ if (nr_vmemmap_pages)
+ mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+ goto out;
+ }
+
+ /*
+ * Account once onlining succeeded. If the zone was unpopulated, it is
+ * now already properly populated.
+ */
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ nr_vmemmap_pages);
+
+ mem->zone = zone;
+out:
+ mem_hotplug_done();
+ return ret;
+}
+
+/*
+ * Must acquire mem_hotplug_lock in write mode.
+ */
+static int memory_block_offline(struct memory_block *mem)
+{
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
+ int ret;
+
+ if (!mem->zone)
+ return -EINVAL;
+
+ /*
+ * Unaccount before offlining, such that unpopulated zone and kthreads
+ * can properly be torn down in offline_pages().
+ */
+ mem_hotplug_begin();
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
+ -nr_vmemmap_pages);
+
+ ret = offline_pages(start_pfn + nr_vmemmap_pages,
+ nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
+ if (ret) {
+ /* offline_pages() failed. Account back. */
+ if (nr_vmemmap_pages)
+ adjust_present_page_count(pfn_to_page(start_pfn),
+ mem->group, nr_vmemmap_pages);
+ goto out;
+ }
+
+ if (nr_vmemmap_pages)
+ mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
+
+ mem->zone = NULL;
+out:
+ mem_hotplug_done();
+ return ret;
+}
+
+/*
+ * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
+ * OK to have direct references to sparsemem variables in here.
+ */
+static int
+memory_block_action(struct memory_block *mem, unsigned long action)
+{
+ int ret;
+
+ switch (action) {
+ case MEM_ONLINE:
+ ret = memory_block_online(mem);
+ break;
+ case MEM_OFFLINE:
+ ret = memory_block_offline(mem);
+ break;
+ default:
+ WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
+ "%ld\n", __func__, mem->start_section_nr, action, action);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req)
+{
+ int ret = 0;
+
+ if (mem->state != from_state_req)
+ return -EINVAL;
+
+ if (to_state == MEM_OFFLINE)
+ mem->state = MEM_GOING_OFFLINE;
+
+ ret = memory_block_action(mem, to_state);
+ mem->state = ret ? from_state_req : to_state;
+
+ return ret;
+}
+
+/* The device lock serializes operations on memory_subsys_[online|offline] */
+static int memory_subsys_online(struct device *dev)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ int ret;
+
+ if (mem->state == MEM_ONLINE)
+ return 0;
+
+ /*
+ * When called via device_online() without configuring the online_type,
+ * we want to default to MMOP_ONLINE.
+ */
+ if (mem->online_type == MMOP_OFFLINE)
+ mem->online_type = MMOP_ONLINE;
+
+ ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
+ mem->online_type = MMOP_OFFLINE;
+
+ return ret;
+}
+
+static int memory_subsys_offline(struct device *dev)
+{
+ struct memory_block *mem = to_memory_block(dev);
+
+ if (mem->state == MEM_OFFLINE)
+ return 0;
+
+ return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ const int online_type = mhp_online_type_from_str(buf);
+ struct memory_block *mem = to_memory_block(dev);
+ int ret;
+
+ if (online_type < 0)
+ return -EINVAL;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ switch (online_type) {
+ case MMOP_ONLINE_KERNEL:
+ case MMOP_ONLINE_MOVABLE:
+ case MMOP_ONLINE:
+ /* mem->online_type is protected by device_hotplug_lock */
+ mem->online_type = online_type;
+ ret = device_online(&mem->dev);
+ break;
+ case MMOP_OFFLINE:
+ ret = device_offline(&mem->dev);
+ break;
+ default:
+ ret = -EINVAL; /* should never happen */
+ }
+
+ unlock_device_hotplug();
+
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return -EINVAL;
+
+ return count;
+}
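+
+/*
+ * Example of the sysfs interface built above (a sketch; N is a memory
+ * block id):
+ *
+ *	cat  /sys/devices/system/memory/memoryN/state
+ *	echo online_movable > /sys/devices/system/memory/memoryN/state
+ *	echo offline        > /sys/devices/system/memory/memoryN/state
+ */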
+
+/*
+ * Legacy interface that we cannot remove: s390x exposes the storage increment
+ * covered by a memory block, allowing for identifying which memory blocks
+ * comprise a storage increment. Since a memory block spans complete
+ * storage increments nowadays, this interface is basically unused. Other
+ * architectures have never exposed a value other than 0.
+ */
+static ssize_t phys_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+
+ return sysfs_emit(buf, "%d\n",
+ arch_get_memory_phys_device(start_pfn));
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static int print_allowed_zone(char *buf, int len, int nid,
+ struct memory_group *group,
+ unsigned long start_pfn, unsigned long nr_pages,
+ int online_type, struct zone *default_zone)
+{
+ struct zone *zone;
+
+ zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
+ if (zone == default_zone)
+ return 0;
+
+ return sysfs_emit_at(buf, len, " %s", zone->name);
+}
+
+static ssize_t valid_zones_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct memory_group *group = mem->group;
+ struct zone *default_zone;
+ int nid = mem->nid;
+ int len = 0;
+
+ /*
+ * Check the existing zone. Make sure that we do that only on the
+ * online nodes otherwise the page_zone is not reliable
+ */
+ if (mem->state == MEM_ONLINE) {
+ /*
+ * If !mem->zone, the memory block spans multiple zones and
+ * cannot get offlined.
+ */
+ default_zone = mem->zone;
+ if (!default_zone)
+ return sysfs_emit(buf, "%s\n", "none");
+ len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ goto out;
+ }
+
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
+ start_pfn, nr_pages);
+
+ len += sysfs_emit_at(buf, len, "%s", default_zone->name);
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
+ MMOP_ONLINE_KERNEL, default_zone);
+ len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
+ MMOP_ONLINE_MOVABLE, default_zone);
+out:
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
+}
+static DEVICE_ATTR_RO(valid_zones);
+#endif
+
+static DEVICE_ATTR_RO(phys_index);
+static DEVICE_ATTR_RW(state);
+static DEVICE_ATTR_RO(phys_device);
+static DEVICE_ATTR_RO(removable);
+
+/*
+ * Show the memory block size (shared by all memory blocks).
+ */
+static ssize_t block_size_bytes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
+}
+
+static DEVICE_ATTR_RO(block_size_bytes);
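+
+/*
+ * E.g. "cat /sys/devices/system/memory/block_size_bytes" reports the
+ * block size in hex (typically 8000000, i.e. 128 MiB blocks).
+ */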
+
+/*
+ * Memory auto online policy.
+ */
+
+static ssize_t auto_online_blocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ online_type_to_str[mhp_default_online_type]);
+}
+
+static ssize_t auto_online_blocks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ const int online_type = mhp_online_type_from_str(buf);
+
+ if (online_type < 0)
+ return -EINVAL;
+
+ mhp_default_online_type = online_type;
+ return count;
+}
+
+static DEVICE_ATTR_RW(auto_online_blocks);
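+
+/*
+ * Usage sketch: the policy applies to subsequently hot-added memory, e.g.
+ *
+ *	echo online_movable > /sys/devices/system/memory/auto_online_blocks
+ *
+ * makes newly added blocks come up online in ZONE_MOVABLE, while writing
+ * "offline" leaves newly added blocks offline for user space to online.
+ */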
+
+/*
+ * Some architectures will have custom drivers to do this, and
+ * will not need to do it from userspace. The fake hot-add code
+ * as well as ppc64 will do all of their discovery in userspace
+ * and will require this interface.
+ */
+#ifdef CONFIG_ARCH_MEMORY_PROBE
+static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 phys_addr;
+ int nid, ret;
+ unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
+
+ ret = kstrtoull(buf, 0, &phys_addr);
+ if (ret)
+ return ret;
+
+ if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+ return -EINVAL;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ nid = memory_add_physaddr_to_nid(phys_addr);
+ ret = __add_memory(nid, phys_addr,
+ MIN_MEMORY_BLOCK_SIZE * sections_per_block,
+ MHP_NONE);
+
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ unlock_device_hotplug();
+ return ret;
+}
+
+static DEVICE_ATTR_WO(probe);
+#endif
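+
+/*
+ * Probe sketch (architectures with CONFIG_ARCH_MEMORY_PROBE only): the
+ * written value is the block-aligned physical start address, e.g.
+ *
+ *	echo 0x100000000 > /sys/devices/system/memory/probe
+ *
+ * which hot-adds one memory block starting at 4 GiB.
+ */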
+
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t soft_offline_page_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (kstrtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = soft_offline_page(pfn, 0);
+ return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t hard_offline_page_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (kstrtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = memory_failure(pfn, MF_SW_SIMULATED);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_WO(soft_offline_page);
+static DEVICE_ATTR_WO(hard_offline_page);
+#endif
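+
+/*
+ * Offlining sketch (CONFIG_MEMORY_FAILURE): both attributes take the
+ * physical address of the page to act on, e.g.
+ *
+ *	echo 0x6012000 > /sys/devices/system/memory/soft_offline_page
+ *	echo 0x6012000 > /sys/devices/system/memory/hard_offline_page
+ *
+ * Soft offlining migrates the page contents first; hard offlining
+ * injects a simulated memory-failure event and may kill affected
+ * processes.
+ */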
+
+/* See phys_device_show(). */
+int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+{
+ return 0;
+}
+
+/*
+ * A reference for the returned memory block device is acquired.
+ *
+ * Called under device_hotplug_lock.
+ */
+static struct memory_block *find_memory_block_by_id(unsigned long block_id)
+{
+ struct memory_block *mem;
+
+ mem = xa_load(&memory_blocks, block_id);
+ if (mem)
+ get_device(&mem->dev);
+ return mem;
+}
+
+/*
+ * Called under device_hotplug_lock.
+ */
+struct memory_block *find_memory_block(unsigned long section_nr)
+{
+ unsigned long block_id = memory_block_id(section_nr);
+
+ return find_memory_block_by_id(block_id);
+}
+
+static struct attribute *memory_memblk_attrs[] = {
+ &dev_attr_phys_index.attr,
+ &dev_attr_state.attr,
+ &dev_attr_phys_device.attr,
+ &dev_attr_removable.attr,
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ &dev_attr_valid_zones.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group memory_memblk_attr_group = {
+ .attrs = memory_memblk_attrs,
+};
+
+static const struct attribute_group *memory_memblk_attr_groups[] = {
+ &memory_memblk_attr_group,
+ NULL,
+};
+
+static int __add_memory_block(struct memory_block *memory)
+{
+ int ret;
+
+ memory->dev.bus = &memory_subsys;
+ memory->dev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.release = memory_block_release;
+ memory->dev.groups = memory_memblk_attr_groups;
+ memory->dev.offline = memory->state == MEM_OFFLINE;
+
+ ret = device_register(&memory->dev);
+ if (ret) {
+ put_device(&memory->dev);
+ return ret;
+ }
+ ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
+ GFP_KERNEL));
+ if (ret)
+ device_unregister(&memory->dev);
+
+ return ret;
+}
+
+static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
+ int nid)
+{
+ const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct zone *zone, *matching_zone = NULL;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int i;
+
+ /*
+ * This logic only works for early memory, when the applicable zones
+ * already span the memory block. We don't expect overlapping zones on
+ * a single node for early memory. So if we're told that some PFNs
+ * of a node fall into this memory block, we can assume that all node
+ * zones that intersect with the memory block are actually applicable.
+ * No need to look at the memmap.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (!populated_zone(zone))
+ continue;
+ if (!zone_intersects(zone, start_pfn, nr_pages))
+ continue;
+ if (!matching_zone) {
+ matching_zone = zone;
+ continue;
+ }
+ /* Spans multiple zones ... */
+ matching_zone = NULL;
+ break;
+ }
+ return matching_zone;
+}
+
+#ifdef CONFIG_NUMA
+/**
+ * memory_block_add_nid() - Indicate that system RAM falling into this memory
+ * block device (partially) belongs to the given node.
+ * @mem: The memory block device.
+ * @nid: The node id.
+ * @context: The memory initialization context.
+ *
+ * Indicate that system RAM falling into this memory block (partially) belongs
+ * to the given node. If the context indicates ("early") that we are adding the
+ * node during node device subsystem initialization, this will also properly
+ * set/adjust mem->zone based on the zone ranges of the given node.
+ */
+void memory_block_add_nid(struct memory_block *mem, int nid,
+ enum meminit_context context)
+{
+ if (context == MEMINIT_EARLY && mem->nid != nid) {
+ /*
+ * For early memory we have to determine the zone when setting
+ * the node id and handle multiple nodes spanning a single
+		 * memory block by indicating via zone == NULL that we're not
+ * dealing with a single zone. So if we're setting the node id
+ * the first time, determine if there is a single zone. If we're
+ * setting the node id a second time to a different node,
+ * invalidate the single detected zone.
+ */
+ if (mem->nid == NUMA_NO_NODE)
+ mem->zone = early_node_zone_for_memory_block(mem, nid);
+ else
+ mem->zone = NULL;
+ }
+
+ /*
+	 * If this memory block spans multiple nodes, we only indicate
+	 * the last processed node. If the block spans multiple nodes (which
+	 * cannot happen for hotplugged memory), zone == NULL will prohibit
+	 * memory offlining and consequently memory unplug.
+ */
+ mem->nid = nid;
+}
+#endif
+
+static int add_memory_block(unsigned long block_id, unsigned long state,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
+{
+ struct memory_block *mem;
+ int ret = 0;
+
+ mem = find_memory_block_by_id(block_id);
+ if (mem) {
+ put_device(&mem->dev);
+ return -EEXIST;
+ }
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ mem->start_section_nr = block_id * sections_per_block;
+ mem->state = state;
+ mem->nid = NUMA_NO_NODE;
+ mem->nr_vmemmap_pages = nr_vmemmap_pages;
+ INIT_LIST_HEAD(&mem->group_next);
+
+#ifndef CONFIG_NUMA
+ if (state == MEM_ONLINE)
+ /*
+ * MEM_ONLINE at this point implies early memory. With NUMA,
+ * we'll determine the zone when setting the node id via
+		 * memory_block_add_nid(). Memory hotplug updates the zone
+ * manually when memory onlining/offlining succeeds.
+ */
+ mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
+#endif /* CONFIG_NUMA */
+
+ ret = __add_memory_block(mem);
+ if (ret)
+ return ret;
+
+ if (group) {
+ mem->group = group;
+ list_add(&mem->group_next, &group->memory_blocks);
+ }
+
+ return 0;
+}
+
+static int __init add_boot_memory_block(unsigned long base_section_nr)
+{
+ int section_count = 0;
+ unsigned long nr;
+
+ for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
+ nr++)
+ if (present_section_nr(nr))
+ section_count++;
+
+ if (section_count == 0)
+ return 0;
+ return add_memory_block(memory_block_id(base_section_nr),
+ MEM_ONLINE, 0, NULL);
+}
+
+static int add_hotplug_memory_block(unsigned long block_id,
+ unsigned long nr_vmemmap_pages,
+ struct memory_group *group)
+{
+ return add_memory_block(block_id, MEM_OFFLINE, nr_vmemmap_pages, group);
+}
+
+static void remove_memory_block(struct memory_block *memory)
+{
+ if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
+ return;
+
+ WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);
+
+ if (memory->group) {
+ list_del(&memory->group_next);
+ memory->group = NULL;
+ }
+
+ /* drop the ref. we got via find_memory_block() */
+ put_device(&memory->dev);
+ device_unregister(&memory->dev);
+}
+
+/*
+ * Create memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * will be initialized as offline.
+ *
+ * Called under device_hotplug_lock.
+ */
+int create_memory_block_devices(unsigned long start, unsigned long size,
+ unsigned long vmemmap_pages,
+ struct memory_group *group)
+{
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
+ struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
+
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
+ return -EINVAL;
+
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ ret = add_hotplug_memory_block(block_id, vmemmap_pages, group);
+ if (ret)
+ break;
+ }
+ if (ret) {
+ end_block_id = block_id;
+ for (block_id = start_block_id; block_id != end_block_id;
+ block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ remove_memory_block(mem);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Remove memory block devices for the given memory area. Start and size
+ * have to be aligned to memory block granularity. Memory block devices
+ * have to be offline.
+ *
+ * Called under device_hotplug_lock.
+ */
+void remove_memory_block_devices(unsigned long start, unsigned long size)
+{
+ const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
+ const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
+ struct memory_block *mem;
+ unsigned long block_id;
+
+ if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
+ !IS_ALIGNED(size, memory_block_size_bytes())))
+ return;
+
+ for (block_id = start_block_id; block_id != end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (WARN_ON_ONCE(!mem))
+ continue;
+ unregister_memory_block_under_nodes(mem);
+ remove_memory_block(mem);
+ }
+}
+
+static struct attribute *memory_root_attrs[] = {
+#ifdef CONFIG_ARCH_MEMORY_PROBE
+ &dev_attr_probe.attr,
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+ &dev_attr_soft_offline_page.attr,
+ &dev_attr_hard_offline_page.attr,
+#endif
+
+ &dev_attr_block_size_bytes.attr,
+ &dev_attr_auto_online_blocks.attr,
+ NULL
+};
+
+static const struct attribute_group memory_root_attr_group = {
+ .attrs = memory_root_attrs,
+};
+
+static const struct attribute_group *memory_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
+/*
+ * Initialize the sysfs support for memory devices. At the time this function
+ * is called, there can be no concurrent creation/deletion of memory block
+ * devices, so the device_hotplug_lock is not needed.
+ */
+void __init memory_dev_init(void)
+{
+ int ret;
+ unsigned long block_sz, nr;
+
+ /* Validate the configured memory block size */
+ block_sz = memory_block_size_bytes();
+ if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
+ panic("Memory block size not suitable: 0x%lx\n", block_sz);
+ sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+
+ ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
+ if (ret)
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
+
+ /*
+ * Create entries for memory sections that were found
+ * during boot and have been initialized
+ */
+ for (nr = 0; nr <= __highest_present_section_nr;
+ nr += sections_per_block) {
+ ret = add_boot_memory_block(nr);
+ if (ret)
+ panic("%s() failed to add memory block: %d\n", __func__,
+ ret);
+ }
+}
+
+/**
+ * walk_memory_blocks - walk through all present memory blocks overlapped
+ * by the range [start, start + size)
+ *
+ * @start: start address of the memory range
+ * @size: size of the memory range
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks overlapped by the
+ * range [start, start + size), calling func on each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ *
+ * Called under device_hotplug_lock.
+ */
+int walk_memory_blocks(unsigned long start, unsigned long size,
+ void *arg, walk_memory_blocks_func_t func)
+{
+ const unsigned long start_block_id = phys_to_block_id(start);
+ const unsigned long end_block_id = phys_to_block_id(start + size - 1);
+ struct memory_block *mem;
+ unsigned long block_id;
+ int ret = 0;
+
+ if (!size)
+ return 0;
+
+ for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
+ mem = find_memory_block_by_id(block_id);
+ if (!mem)
+ continue;
+
+ ret = func(mem, arg);
+ put_device(&mem->dev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
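+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * counting the online memory blocks in a physical range with the walker
+ * above. The example_ names are made up; the callback matches
+ * walk_memory_blocks_func_t and returns 0 so the walk never aborts.
+ */
+static int example_count_online(struct memory_block *mem, void *arg)
+{
+	unsigned long *count = arg;
+
+	if (mem->state == MEM_ONLINE)
+		(*count)++;
+	return 0;	/* a non-zero return would abort the walk */
+}
+
+static unsigned long example_online_blocks(unsigned long start, unsigned long size)
+{
+	unsigned long count = 0;
+
+	/* Caller is expected to hold device_hotplug_lock. */
+	walk_memory_blocks(start, size, &count, example_count_online);
+	return count;
+}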
+
+struct for_each_memory_block_cb_data {
+ walk_memory_blocks_func_t func;
+ void *arg;
+};
+
+static int for_each_memory_block_cb(struct device *dev, void *data)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ struct for_each_memory_block_cb_data *cb_data = data;
+
+ return cb_data->func(mem, cb_data->arg);
+}
+
+/**
+ * for_each_memory_block - walk through all present memory blocks
+ *
+ * @arg: argument passed to func
+ * @func: callback for each memory block walked
+ *
+ * This function walks through all present memory blocks, calling func on
+ * each memory block.
+ *
+ * In case func() returns an error, walking is aborted and the error is
+ * returned.
+ */
+int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
+{
+ struct for_each_memory_block_cb_data cb_data = {
+ .func = func,
+ .arg = arg,
+ };
+
+ return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
+ for_each_memory_block_cb);
+}
+
+/*
+ * This is an internal helper to unify allocation and initialization of
+ * memory groups. Note that the passed memory group will be copied to a
+ * dynamically allocated memory group. After this call, the passed
+ * memory group should no longer be used.
+ */
+static int memory_group_register(struct memory_group group)
+{
+ struct memory_group *new_group;
+ uint32_t mgid;
+ int ret;
+
+ if (!node_possible(group.nid))
+ return -EINVAL;
+
+ new_group = kzalloc(sizeof(group), GFP_KERNEL);
+ if (!new_group)
+ return -ENOMEM;
+ *new_group = group;
+ INIT_LIST_HEAD(&new_group->memory_blocks);
+
+ ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(new_group);
+ return ret;
+ } else if (group.is_dynamic) {
+ xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
+ }
+ return mgid;
+}
+
+/**
+ * memory_group_register_static() - Register a static memory group.
+ * @nid: The node id.
+ * @max_pages: The maximum number of pages we'll have in this static memory
+ * group.
+ *
+ * Register a new static memory group and return the memory group id.
+ * All memory in the group belongs to a single unit, such as a DIMM. All
+ * memory belonging to a static memory group is added in one go and removed
+ * in one go -- it's static.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
+ * returns the new memory group id.
+ */
+int memory_group_register_static(int nid, unsigned long max_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .s = {
+ .max_pages = max_pages,
+ },
+ };
+
+ if (!max_pages)
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_static);
+
+/**
+ * memory_group_register_dynamic() - Register a dynamic memory group.
+ * @nid: The node id.
+ * @unit_pages: Unit in pages in which memory is added/removed in this dynamic
+ * memory group.
+ *
+ * Register a new dynamic memory group and return the memory group id.
+ * Memory within a dynamic memory group is added/removed dynamically
+ * in unit_pages.
+ *
+ * Returns an error if out of memory, if the node id is invalid, if no new
+ * memory groups can be registered, or if unit_pages is invalid (0, not a
+ * power of two, smaller than a single memory block). Otherwise, returns the
+ * new memory group id.
+ */
+int memory_group_register_dynamic(int nid, unsigned long unit_pages)
+{
+ struct memory_group group = {
+ .nid = nid,
+ .is_dynamic = true,
+ .d = {
+ .unit_pages = unit_pages,
+ },
+ };
+
+ if (!unit_pages || !is_power_of_2(unit_pages) ||
+ unit_pages < PHYS_PFN(memory_block_size_bytes()))
+ return -EINVAL;
+ return memory_group_register(group);
+}
+EXPORT_SYMBOL_GPL(memory_group_register_dynamic);
+
+/**
+ * memory_group_unregister() - Unregister a memory group.
+ * @mgid: the memory group id
+ *
+ * Unregister a memory group. If any memory block still belongs to this
+ * memory group, unregistering will fail.
+ *
+ * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
+ * memory blocks still belong to this memory group and returns 0 if
+ * unregistering succeeded.
+ */
+int memory_group_unregister(int mgid)
+{
+ struct memory_group *group;
+
+ if (mgid < 0)
+ return -EINVAL;
+
+ group = xa_load(&memory_groups, mgid);
+ if (!group)
+ return -EINVAL;
+ if (!list_empty(&group->memory_blocks))
+ return -EBUSY;
+ xa_erase(&memory_groups, mgid);
+ kfree(group);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(memory_group_unregister);
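+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * a hypothetical driver registering a static memory group for a DIMM on
+ * node 0 and unregistering it again. In real code the returned group id
+ * would be handed to the memory hotplug core while adding the DIMM's
+ * memory; that step is omitted here.
+ */
+static int example_dimm_group(unsigned long dimm_pages)
+{
+	int mgid = memory_group_register_static(0, dimm_pages);
+
+	if (mgid < 0)
+		return mgid;			/* -EINVAL or -ENOMEM */
+
+	/* ... add and later remove the DIMM's memory, referencing mgid ... */
+
+	return memory_group_unregister(mgid);	/* -EBUSY while blocks remain */
+}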
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * lookup a memory group. We don't care about locking, as we don't expect a
+ * memory group to get unregistered while adding memory to it -- because
+ * the group and the memory are managed by the same driver.
+ */
+struct memory_group *memory_group_find_by_id(int mgid)
+{
+ return xa_load(&memory_groups, mgid);
+}
+
+/*
+ * This is an internal helper only to be used in core memory hotplug code to
+ * walk all dynamic memory groups excluding a given memory group, either
+ * belonging to a specific node, or belonging to any node.
+ */
+int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
+ struct memory_group *excluded, void *arg)
+{
+ struct memory_group *group;
+ unsigned long index;
+ int ret = 0;
+
+ xa_for_each_marked(&memory_groups, index, group,
+ MEMORY_GROUP_MARK_DYNAMIC) {
+ if (group == excluded)
+ continue;
+#ifdef CONFIG_NUMA
+ if (nid != NUMA_NO_NODE && group->nid != nid)
+ continue;
+#endif /* CONFIG_NUMA */
+ ret = func(group, arg);
+ if (ret)
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/base/module.c b/drivers/base/module.c
new file mode 100644
index 000000000..46ad4d636
--- /dev/null
+++ b/drivers/base/module.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * module.c - module sysfs fun for drivers
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "base.h"
+
+static char *make_driver_name(struct device_driver *drv)
+{
+ char *driver_name;
+
+ driver_name = kasprintf(GFP_KERNEL, "%s:%s", drv->bus->name, drv->name);
+ if (!driver_name)
+ return NULL;
+
+ return driver_name;
+}
+
+static void module_create_drivers_dir(struct module_kobject *mk)
+{
+ static DEFINE_MUTEX(drivers_dir_mutex);
+
+ mutex_lock(&drivers_dir_mutex);
+ if (mk && !mk->drivers_dir)
+ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_unlock(&drivers_dir_mutex);
+}
+
+void module_add_driver(struct module *mod, struct device_driver *drv)
+{
+ char *driver_name;
+ int no_warn;
+ struct module_kobject *mk = NULL;
+
+ if (!drv)
+ return;
+
+ if (mod)
+ mk = &mod->mkobj;
+ else if (drv->mod_name) {
+ struct kobject *mkobj;
+
+		/* Lookup built-in module entry in /sys/module */
+ mkobj = kset_find_obj(module_kset, drv->mod_name);
+ if (mkobj) {
+ mk = container_of(mkobj, struct module_kobject, kobj);
+ /* remember our module structure */
+ drv->p->mkobj = mk;
+ /* kset_find_obj took a reference */
+ kobject_put(mkobj);
+ }
+ }
+
+ if (!mk)
+ return;
+
+ /* Don't check return codes; these calls are idempotent */
+ no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+ driver_name = make_driver_name(drv);
+ if (driver_name) {
+ module_create_drivers_dir(mk);
+ no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
+ driver_name);
+ kfree(driver_name);
+ }
+}
+
+void module_remove_driver(struct device_driver *drv)
+{
+ struct module_kobject *mk = NULL;
+ char *driver_name;
+
+ if (!drv)
+ return;
+
+ sysfs_remove_link(&drv->p->kobj, "module");
+
+ if (drv->owner)
+ mk = &drv->owner->mkobj;
+ else if (drv->p->mkobj)
+ mk = drv->p->mkobj;
+ if (mk && mk->drivers_dir) {
+ driver_name = make_driver_name(drv);
+ if (driver_name) {
+ sysfs_remove_link(mk->drivers_dir, driver_name);
+ kfree(driver_name);
+ }
+ }
+}
diff --git a/drivers/base/node.c b/drivers/base/node.c
new file mode 100644
index 000000000..a4141b57b
--- /dev/null
+++ b/drivers/base/node.c
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic Node interface support
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memory.h>
+#include <linux/vmstat.h>
+#include <linux/notifier.h>
+#include <linux/node.h>
+#include <linux/hugetlb.h>
+#include <linux/compaction.h>
+#include <linux/cpumask.h>
+#include <linux/topology.h>
+#include <linux/nodemask.h>
+#include <linux/cpu.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/hugetlb.h>
+
+static struct bus_type node_subsys = {
+ .name = "node",
+ .dev_name = "node",
+};
+
+static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct node *node_dev = to_node(dev);
+ cpumask_var_t mask;
+ ssize_t n;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return 0;
+
+ cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+ n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
+ free_cpumask_var(mask);
+
+ return n;
+}
+
+static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
+
+static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct node *node_dev = to_node(dev);
+ cpumask_var_t mask;
+ ssize_t n;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return 0;
+
+ cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
+ n = cpumap_print_list_to_buf(buf, mask, off, count);
+ free_cpumask_var(mask);
+
+ return n;
+}
+
+static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
+
+/**
+ * struct node_access_nodes - Access class device to hold user visible
+ * relationships to other nodes.
+ * @dev: Device for this memory access class
+ * @list_node: List element in the node's access list
+ * @access: The access class rank
+ * @hmem_attrs: Heterogeneous memory performance attributes
+ */
+struct node_access_nodes {
+ struct device dev;
+ struct list_head list_node;
+ unsigned int access;
+#ifdef CONFIG_HMEM_REPORTING
+ struct node_hmem_attrs hmem_attrs;
+#endif
+};
+#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
+
+static struct attribute *node_init_access_node_attrs[] = {
+ NULL,
+};
+
+static struct attribute *node_targ_access_node_attrs[] = {
+ NULL,
+};
+
+static const struct attribute_group initiators = {
+ .name = "initiators",
+ .attrs = node_init_access_node_attrs,
+};
+
+static const struct attribute_group targets = {
+ .name = "targets",
+ .attrs = node_targ_access_node_attrs,
+};
+
+static const struct attribute_group *node_access_node_groups[] = {
+ &initiators,
+ &targets,
+ NULL,
+};
+
+static void node_remove_accesses(struct node *node)
+{
+ struct node_access_nodes *c, *cnext;
+
+ list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
+ list_del(&c->list_node);
+ device_unregister(&c->dev);
+ }
+}
+
+static void node_access_release(struct device *dev)
+{
+ kfree(to_access_nodes(dev));
+}
+
+static struct node_access_nodes *node_init_node_access(struct node *node,
+ unsigned int access)
+{
+ struct node_access_nodes *access_node;
+ struct device *dev;
+
+ list_for_each_entry(access_node, &node->access_list, list_node)
+ if (access_node->access == access)
+ return access_node;
+
+ access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
+ if (!access_node)
+ return NULL;
+
+ access_node->access = access;
+ dev = &access_node->dev;
+ dev->parent = &node->dev;
+ dev->release = node_access_release;
+ dev->groups = node_access_node_groups;
+ if (dev_set_name(dev, "access%u", access))
+ goto free;
+
+ if (device_register(dev))
+ goto free_name;
+
+ pm_runtime_no_callbacks(dev);
+ list_add_tail(&access_node->list_node, &node->access_list);
+ return access_node;
+free_name:
+ kfree_const(dev->kobj.name);
+free:
+ kfree(access_node);
+ return NULL;
+}
+
+#ifdef CONFIG_HMEM_REPORTING
+#define ACCESS_ATTR(name) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sysfs_emit(buf, "%u\n", \
+ to_access_nodes(dev)->hmem_attrs.name); \
+} \
+static DEVICE_ATTR_RO(name)
+
+ACCESS_ATTR(read_bandwidth);
+ACCESS_ATTR(read_latency);
+ACCESS_ATTR(write_bandwidth);
+ACCESS_ATTR(write_latency);
+
+static struct attribute *access_attrs[] = {
+ &dev_attr_read_bandwidth.attr,
+ &dev_attr_read_latency.attr,
+ &dev_attr_write_bandwidth.attr,
+ &dev_attr_write_latency.attr,
+ NULL,
+};
+
+/**
+ * node_set_perf_attrs - Set the performance values for given access class
+ * @nid: Node identifier to be set
+ * @hmem_attrs: Heterogeneous memory performance attributes
+ * @access: The access class for the given attributes
+ */
+void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+ unsigned int access)
+{
+ struct node_access_nodes *c;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ c = node_init_node_access(node, access);
+ if (!c)
+ return;
+
+ c->hmem_attrs = *hmem_attrs;
+ for (i = 0; access_attrs[i] != NULL; i++) {
+ if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
+ "initiators")) {
+ pr_info("failed to add performance attribute to node %d\n",
+ nid);
+ break;
+ }
+ }
+}
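+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * publishing made-up bandwidth/latency numbers for access class 0 of
+ * node 1. Real callers (e.g. ACPI HMAT parsing) derive these values from
+ * firmware tables.
+ */
+static void example_publish_node_perf(void)
+{
+	struct node_hmem_attrs attrs = {
+		.read_bandwidth		= 10000,	/* MiB/s */
+		.write_bandwidth	= 10000,	/* MiB/s */
+		.read_latency		= 100,		/* ns */
+		.write_latency		= 100,		/* ns */
+	};
+
+	node_set_perf_attrs(1, &attrs, 0);
+}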
+
+/**
+ * struct node_cache_info - Internal tracking for memory node caches
+ * @dev:	Device representing the cache level
+ * @node:	List element for tracking in the node
+ * @cache_attrs: Attributes for this cache level
+ */
+struct node_cache_info {
+ struct device dev;
+ struct list_head node;
+ struct node_cache_attrs cache_attrs;
+};
+#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
+
+#define CACHE_ATTR(name, fmt) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sysfs_emit(buf, fmt "\n", \
+ to_cache_info(dev)->cache_attrs.name); \
+} \
+static DEVICE_ATTR_RO(name);
+
+CACHE_ATTR(size, "%llu")
+CACHE_ATTR(line_size, "%u")
+CACHE_ATTR(indexing, "%u")
+CACHE_ATTR(write_policy, "%u")
+
+static struct attribute *cache_attrs[] = {
+ &dev_attr_indexing.attr,
+ &dev_attr_size.attr,
+ &dev_attr_line_size.attr,
+ &dev_attr_write_policy.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cache);
+
+static void node_cache_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static void node_cacheinfo_release(struct device *dev)
+{
+ struct node_cache_info *info = to_cache_info(dev);
+ kfree(info);
+}
+
+static void node_init_cache_dev(struct node *node)
+{
+ struct device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ device_initialize(dev);
+ dev->parent = &node->dev;
+ dev->release = node_cache_release;
+ if (dev_set_name(dev, "memory_side_cache"))
+ goto put_device;
+
+ if (device_add(dev))
+ goto put_device;
+
+ pm_runtime_no_callbacks(dev);
+ node->cache_dev = dev;
+ return;
+put_device:
+ put_device(dev);
+}
+
+/**
+ * node_add_cache() - add cache attribute to a memory node
+ * @nid: Node identifier that has new cache attributes
+ * @cache_attrs: Attributes for the cache being added
+ */
+void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
+{
+ struct node_cache_info *info;
+ struct device *dev;
+ struct node *node;
+
+ if (!node_online(nid) || !node_devices[nid])
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(info, &node->cache_attrs, node) {
+ if (info->cache_attrs.level == cache_attrs->level) {
+ dev_warn(&node->dev,
+ "attempt to add duplicate cache level:%d\n",
+ cache_attrs->level);
+ return;
+ }
+ }
+
+ if (!node->cache_dev)
+ node_init_cache_dev(node);
+ if (!node->cache_dev)
+ return;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ dev = &info->dev;
+ device_initialize(dev);
+ dev->parent = node->cache_dev;
+ dev->release = node_cacheinfo_release;
+ dev->groups = cache_groups;
+ if (dev_set_name(dev, "index%d", cache_attrs->level))
+ goto put_device;
+
+ info->cache_attrs = *cache_attrs;
+ if (device_add(dev)) {
+ dev_warn(&node->dev, "failed to add cache level:%d\n",
+ cache_attrs->level);
+ goto put_device;
+ }
+ pm_runtime_no_callbacks(dev);
+ list_add_tail(&info->node, &node->cache_attrs);
+ return;
+put_device:
+ put_device(dev);
+}
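+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * describing a hypothetical 1 GiB direct-mapped, write-back memory-side
+ * cache with 64-byte lines in front of node 2. Real callers (e.g. ACPI
+ * HMAT) fill these fields from firmware data.
+ */
+static void example_add_memory_side_cache(void)
+{
+	struct node_cache_attrs cache = {
+		.level		= 1,
+		.size		= 1ULL << 30,
+		.line_size	= 64,
+		.indexing	= NODE_CACHE_DIRECT_MAP,
+		.write_policy	= NODE_CACHE_WRITE_BACK,
+	};
+
+	node_add_cache(2, &cache);
+}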
+
+static void node_remove_caches(struct node *node)
+{
+ struct node_cache_info *info, *next;
+
+ if (!node->cache_dev)
+ return;
+
+ list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
+ list_del(&info->node);
+ device_unregister(&info->dev);
+ }
+ device_unregister(node->cache_dev);
+}
+
+static void node_init_caches(unsigned int nid)
+{
+ INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
+}
+#else
+static void node_init_caches(unsigned int nid) { }
+static void node_remove_caches(struct node *node) { }
+#endif
+
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+static ssize_t node_read_meminfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int len = 0;
+ int nid = dev->id;
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct sysinfo i;
+ unsigned long sreclaimable, sunreclaimable;
+ unsigned long swapcached = 0;
+
+ si_meminfo_node(&i, nid);
+ sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
+ sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
+#ifdef CONFIG_SWAP
+ swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
+#endif
+ len = sysfs_emit_at(buf, len,
+ "Node %d MemTotal: %8lu kB\n"
+ "Node %d MemFree: %8lu kB\n"
+ "Node %d MemUsed: %8lu kB\n"
+ "Node %d SwapCached: %8lu kB\n"
+ "Node %d Active: %8lu kB\n"
+ "Node %d Inactive: %8lu kB\n"
+ "Node %d Active(anon): %8lu kB\n"
+ "Node %d Inactive(anon): %8lu kB\n"
+ "Node %d Active(file): %8lu kB\n"
+ "Node %d Inactive(file): %8lu kB\n"
+ "Node %d Unevictable: %8lu kB\n"
+ "Node %d Mlocked: %8lu kB\n",
+ nid, K(i.totalram),
+ nid, K(i.freeram),
+ nid, K(i.totalram - i.freeram),
+ nid, K(swapcached),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
+ node_page_state(pgdat, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
+ node_page_state(pgdat, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
+ nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
+ nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
+ nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
+
+#ifdef CONFIG_HIGHMEM
+ len += sysfs_emit_at(buf, len,
+ "Node %d HighTotal: %8lu kB\n"
+ "Node %d HighFree: %8lu kB\n"
+ "Node %d LowTotal: %8lu kB\n"
+ "Node %d LowFree: %8lu kB\n",
+ nid, K(i.totalhigh),
+ nid, K(i.freehigh),
+ nid, K(i.totalram - i.totalhigh),
+ nid, K(i.freeram - i.freehigh));
+#endif
+ len += sysfs_emit_at(buf, len,
+ "Node %d Dirty: %8lu kB\n"
+ "Node %d Writeback: %8lu kB\n"
+ "Node %d FilePages: %8lu kB\n"
+ "Node %d Mapped: %8lu kB\n"
+ "Node %d AnonPages: %8lu kB\n"
+ "Node %d Shmem: %8lu kB\n"
+ "Node %d KernelStack: %8lu kB\n"
+#ifdef CONFIG_SHADOW_CALL_STACK
+ "Node %d ShadowCallStack:%8lu kB\n"
+#endif
+ "Node %d PageTables: %8lu kB\n"
+ "Node %d SecPageTables: %8lu kB\n"
+ "Node %d NFS_Unstable: %8lu kB\n"
+ "Node %d Bounce: %8lu kB\n"
+ "Node %d WritebackTmp: %8lu kB\n"
+ "Node %d KReclaimable: %8lu kB\n"
+ "Node %d Slab: %8lu kB\n"
+ "Node %d SReclaimable: %8lu kB\n"
+ "Node %d SUnreclaim: %8lu kB\n"
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ "Node %d AnonHugePages: %8lu kB\n"
+ "Node %d ShmemHugePages: %8lu kB\n"
+ "Node %d ShmemPmdMapped: %8lu kB\n"
+ "Node %d FileHugePages: %8lu kB\n"
+ "Node %d FilePmdMapped: %8lu kB\n"
+#endif
+ ,
+ nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK)),
+ nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
+ nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
+ nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
+ nid, K(i.sharedram),
+ nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
+#ifdef CONFIG_SHADOW_CALL_STACK
+ nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
+#endif
+ nid, K(node_page_state(pgdat, NR_PAGETABLE)),
+ nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
+ nid, 0UL,
+ nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
+ nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
+ nid, K(sreclaimable +
+ node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
+ nid, K(sreclaimable + sunreclaimable),
+ nid, K(sreclaimable),
+ nid, K(sunreclaimable)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ ,
+ nid, K(node_page_state(pgdat, NR_ANON_THPS)),
+ nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
+ nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
+ nid, K(node_page_state(pgdat, NR_FILE_THPS)),
+ nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
+#endif
+ );
+ len += hugetlb_report_node_meminfo(buf, len, nid);
+ return len;
+}
+
+#undef K
+static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
+
+static ssize_t node_read_numastat(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ fold_vm_numa_events();
+ return sysfs_emit(buf,
+ "numa_hit %lu\n"
+ "numa_miss %lu\n"
+ "numa_foreign %lu\n"
+ "interleave_hit %lu\n"
+ "local_node %lu\n"
+ "other_node %lu\n",
+ sum_zone_numa_event_state(dev->id, NUMA_HIT),
+ sum_zone_numa_event_state(dev->id, NUMA_MISS),
+ sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
+ sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
+ sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
+ sum_zone_numa_event_state(dev->id, NUMA_OTHER));
+}
+static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
+
+static ssize_t node_read_vmstat(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nid = dev->id;
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ int i;
+ int len = 0;
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ len += sysfs_emit_at(buf, len, "%s %lu\n",
+ zone_stat_name(i),
+ sum_zone_node_page_state(nid, i));
+
+#ifdef CONFIG_NUMA
+ fold_vm_numa_events();
+ for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
+ len += sysfs_emit_at(buf, len, "%s %lu\n",
+ numa_stat_name(i),
+ sum_zone_numa_event_state(nid, i));
+
+#endif
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
+ unsigned long pages = node_page_state_pages(pgdat, i);
+
+ if (vmstat_item_print_in_thp(i))
+ pages /= HPAGE_PMD_NR;
+ len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
+ pages);
+ }
+
+ return len;
+}
+static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);
+
+static ssize_t node_read_distance(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nid = dev->id;
+ int len = 0;
+ int i;
+
+ /*
+ * buf is currently PAGE_SIZE in length and each node needs 4 chars
+ * at the most (distance + space or newline).
+ */
+ BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
+
+ for_each_online_node(i) {
+ len += sysfs_emit_at(buf, len, "%s%d",
+ i ? " " : "", node_distance(nid, i));
+ }
+
+ len += sysfs_emit_at(buf, len, "\n");
+ return len;
+}
+static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
+
+static struct attribute *node_dev_attrs[] = {
+ &dev_attr_meminfo.attr,
+ &dev_attr_numastat.attr,
+ &dev_attr_distance.attr,
+ &dev_attr_vmstat.attr,
+ NULL
+};
+
+static struct bin_attribute *node_dev_bin_attrs[] = {
+ &bin_attr_cpumap,
+ &bin_attr_cpulist,
+ NULL
+};
+
+static const struct attribute_group node_dev_group = {
+ .attrs = node_dev_attrs,
+ .bin_attrs = node_dev_bin_attrs
+};
+
+static const struct attribute_group *node_dev_groups[] = {
+ &node_dev_group,
+#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
+ &arch_node_dev_group,
+#endif
+ NULL
+};
+
+static void node_device_release(struct device *dev)
+{
+ kfree(to_node(dev));
+}
+
+/*
+ * register_node - Setup a sysfs device for a node.
+ * @num - Node number to use when creating the device.
+ *
+ * Initialize and register the node device.
+ */
+static int register_node(struct node *node, int num)
+{
+ int error;
+
+ node->dev.id = num;
+ node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
+ node->dev.groups = node_dev_groups;
+ error = device_register(&node->dev);
+
+ if (error) {
+ put_device(&node->dev);
+ } else {
+ hugetlb_register_node(node);
+ compaction_register_node(node);
+ }
+
+ return error;
+}
+
+/**
+ * unregister_node - unregister a node device
+ * @node: node going away
+ *
+ * Unregisters a node device @node. All the devices on the node must be
+ * unregistered before calling this function.
+ */
+void unregister_node(struct node *node)
+{
+ hugetlb_unregister_node(node);
+ compaction_unregister_node(node);
+ node_remove_accesses(node);
+ node_remove_caches(node);
+ device_unregister(&node->dev);
+}
+
+struct node *node_devices[MAX_NUMNODES];
+
+/*
+ * register cpu under node
+ */
+int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ int ret;
+ struct device *obj;
+
+ if (!node_online(nid))
+ return 0;
+
+ obj = get_cpu_device(cpu);
+ if (!obj)
+ return 0;
+
+ ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
+ &obj->kobj,
+ kobject_name(&obj->kobj));
+ if (ret)
+ return ret;
+
+ return sysfs_create_link(&obj->kobj,
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
+}
+
+/**
+ * register_memory_node_under_compute_node - link memory node to its compute
+ * node for a given access class.
+ * @mem_nid: Memory node number
+ * @cpu_nid: Cpu node number
+ * @access: Access class to register
+ *
+ * Description:
+ * For use with platforms that may have separate memory and compute nodes.
+ * This function will export node relationships linking which memory
+ * initiator nodes can access memory targets at a given ranked access
+ * class.
+ */
+int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+ unsigned int access)
+{
+ struct node *init_node, *targ_node;
+ struct node_access_nodes *initiator, *target;
+ int ret;
+
+ if (!node_online(cpu_nid) || !node_online(mem_nid))
+ return -ENODEV;
+
+ init_node = node_devices[cpu_nid];
+ targ_node = node_devices[mem_nid];
+ initiator = node_init_node_access(init_node, access);
+ target = node_init_node_access(targ_node, access);
+ if (!initiator || !target)
+ return -ENOMEM;
+
+ ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
+ &targ_node->dev.kobj,
+ dev_name(&targ_node->dev));
+ if (ret)
+ return ret;
+
+ ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
+ &init_node->dev.kobj,
+ dev_name(&init_node->dev));
+ if (ret)
+ goto err;
+
+ return 0;
+ err:
+ sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
+ dev_name(&targ_node->dev));
+ return ret;
+}
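+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * linking memory-only node 1 as an access class 0 target of compute
+ * node 0, which creates the node0/access0/targets/node1 and
+ * node1/access0/initiators/node0 symlinks described above.
+ */
+static int example_link_mem_to_cpu_node(void)
+{
+	return register_memory_node_under_compute_node(/* mem_nid */ 1,
+						       /* cpu_nid */ 0,
+						       /* access */  0);
+}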
+
+int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ struct device *obj;
+
+ if (!node_online(nid))
+ return 0;
+
+ obj = get_cpu_device(cpu);
+ if (!obj)
+ return 0;
+
+ sysfs_remove_link(&node_devices[nid]->dev.kobj,
+ kobject_name(&obj->kobj));
+ sysfs_remove_link(&obj->kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
+
+ return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int __ref get_nid_for_pfn(unsigned long pfn)
+{
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ if (system_state < SYSTEM_RUNNING)
+ return early_pfn_to_nid(pfn);
+#endif
+ return pfn_to_nid(pfn);
+}
+
+static void do_register_memory_block_under_node(int nid,
+ struct memory_block *mem_blk,
+ enum meminit_context context)
+{
+ int ret;
+
+ memory_block_add_nid(mem_blk, nid, context);
+
+ ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ if (ret && ret != -EEXIST)
+ dev_err_ratelimited(&node_devices[nid]->dev,
+ "can't create link to %s in sysfs (%d)\n",
+ kobject_name(&mem_blk->dev.kobj), ret);
+
+ ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ &node_devices[nid]->dev.kobj,
+ kobject_name(&node_devices[nid]->dev.kobj));
+ if (ret && ret != -EEXIST)
+ dev_err_ratelimited(&mem_blk->dev,
+ "can't create link to %s in sysfs (%d)\n",
+ kobject_name(&node_devices[nid]->dev.kobj),
+ ret);
+}
+
+/* register memory section under specified node if it spans that node */
+static int register_mem_block_under_node_early(struct memory_block *mem_blk,
+ void *arg)
+{
+ unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
+ unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
+ unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
+ int nid = *(int *)arg;
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
+ int page_nid;
+
+ /*
+		 * A memory block could have several absent sections from its
+		 * start. Skip the pfn range of an absent section.
+ */
+ if (!pfn_in_present_section(pfn)) {
+ pfn = round_down(pfn + PAGES_PER_SECTION,
+ PAGES_PER_SECTION) - 1;
+ continue;
+ }
+
+ /*
+		 * We only need to check whether the page belongs to this nid
+		 * during boot, because a node's ranges can be interleaved.
+ */
+ page_nid = get_nid_for_pfn(pfn);
+ if (page_nid < 0)
+ continue;
+ if (page_nid != nid)
+ continue;
+
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
+ return 0;
+ }
+ /* mem section does not span the specified node */
+ return 0;
+}
+
+/*
+ * During hotplug we know that all pages in the memory block belong to the same
+ * node.
+ */
+static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
+ void *arg)
+{
+ int nid = *(int *)arg;
+
+ do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
+ return 0;
+}
+
+/*
+ * Unregister a memory block device under the node it spans. Memory blocks
+ * with multiple nodes cannot be offlined and therefore also never be removed.
+ */
+void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
+{
+ if (mem_blk->nid == NUMA_NO_NODE)
+ return;
+
+ sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ sysfs_remove_link(&mem_blk->dev.kobj,
+ kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
+}
+
+void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
+ unsigned long end_pfn,
+ enum meminit_context context)
+{
+ walk_memory_blocks_func_t func;
+
+ if (context == MEMINIT_HOTPLUG)
+ func = register_mem_block_under_node_hotplug;
+ else
+ func = register_mem_block_under_node_early;
+
+ walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
+ (void *)&nid, func);
+ return;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+int __register_one_node(int nid)
+{
+ int error;
+ int cpu;
+ struct node *node;
+
+ node = kzalloc(sizeof(struct node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&node->access_list);
+ node_devices[nid] = node;
+
+ error = register_node(node_devices[nid], nid);
+
+ /* link cpu under this node */
+ for_each_present_cpu(cpu) {
+ if (cpu_to_node(cpu) == nid)
+ register_cpu_under_node(cpu, nid);
+ }
+
+ node_init_caches(nid);
+
+ return error;
+}
+
+void unregister_one_node(int nid)
+{
+ if (!node_devices[nid])
+ return;
+
+ unregister_node(node_devices[nid]);
+ node_devices[nid] = NULL;
+}
+
+/*
+ * node states attributes
+ */
+
+struct node_attr {
+ struct device_attribute attr;
+ enum node_states state;
+};
+
+static ssize_t show_node_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct node_attr *na = container_of(attr, struct node_attr, attr);
+
+ return sysfs_emit(buf, "%*pbl\n",
+ nodemask_pr_args(&node_states[na->state]));
+}
+
+#define _NODE_ATTR(name, state) \
+ { __ATTR(name, 0444, show_node_state, NULL), state }
+
+static struct node_attr node_state_attr[] = {
+ [N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
+ [N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
+ [N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
+#ifdef CONFIG_HIGHMEM
+ [N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
+#endif
+ [N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
+ [N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
+ [N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
+ N_GENERIC_INITIATOR),
+};
+
+static struct attribute *node_state_attrs[] = {
+ &node_state_attr[N_POSSIBLE].attr.attr,
+ &node_state_attr[N_ONLINE].attr.attr,
+ &node_state_attr[N_NORMAL_MEMORY].attr.attr,
+#ifdef CONFIG_HIGHMEM
+ &node_state_attr[N_HIGH_MEMORY].attr.attr,
+#endif
+ &node_state_attr[N_MEMORY].attr.attr,
+ &node_state_attr[N_CPU].attr.attr,
+ &node_state_attr[N_GENERIC_INITIATOR].attr.attr,
+ NULL
+};
+
+static const struct attribute_group memory_root_attr_group = {
+ .attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
+void __init node_dev_init(void)
+{
+ int ret, i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
+ BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
+
+ ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
+ if (ret)
+ panic("%s() failed to register subsystem: %d\n", __func__, ret);
+
+ /*
+ * Create all node devices, which will properly link the node
+ * to applicable memory block devices and already created cpu devices.
+ */
+ for_each_online_node(i) {
+ ret = register_one_node(i);
+ if (ret)
+ panic("%s() failed to add node: %d\n", __func__, ret);
+ }
+}
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
new file mode 100644
index 000000000..951819e71
--- /dev/null
+++ b/drivers/base/physical_location.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/sysfs.h>
+
+#include "physical_location.h"
+
+bool dev_add_physical_location(struct device *dev)
+{
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return false;
+
+ status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ dev->physical_location =
+ kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
+ if (!dev->physical_location) {
+ ACPI_FREE(pld);
+ return false;
+ }
+
+ dev->physical_location->panel = pld->panel;
+ dev->physical_location->vertical_position = pld->vertical_position;
+ dev->physical_location->horizontal_position = pld->horizontal_position;
+ dev->physical_location->dock = pld->dock;
+ dev->physical_location->lid = pld->lid;
+
+ ACPI_FREE(pld);
+ return true;
+}
+
+static ssize_t panel_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const char *panel;
+
+ switch (dev->physical_location->panel) {
+ case DEVICE_PANEL_TOP:
+ panel = "top";
+ break;
+ case DEVICE_PANEL_BOTTOM:
+ panel = "bottom";
+ break;
+ case DEVICE_PANEL_LEFT:
+ panel = "left";
+ break;
+ case DEVICE_PANEL_RIGHT:
+ panel = "right";
+ break;
+ case DEVICE_PANEL_FRONT:
+ panel = "front";
+ break;
+ case DEVICE_PANEL_BACK:
+ panel = "back";
+ break;
+ default:
+ panel = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", panel);
+}
+static DEVICE_ATTR_RO(panel);
+
+static ssize_t vertical_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *vertical_position;
+
+ switch (dev->physical_location->vertical_position) {
+ case DEVICE_VERT_POS_UPPER:
+ vertical_position = "upper";
+ break;
+ case DEVICE_VERT_POS_CENTER:
+ vertical_position = "center";
+ break;
+ case DEVICE_VERT_POS_LOWER:
+ vertical_position = "lower";
+ break;
+ default:
+ vertical_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", vertical_position);
+}
+static DEVICE_ATTR_RO(vertical_position);
+
+static ssize_t horizontal_position_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *horizontal_position;
+
+ switch (dev->physical_location->horizontal_position) {
+ case DEVICE_HORI_POS_LEFT:
+ horizontal_position = "left";
+ break;
+ case DEVICE_HORI_POS_CENTER:
+ horizontal_position = "center";
+ break;
+ case DEVICE_HORI_POS_RIGHT:
+ horizontal_position = "right";
+ break;
+ default:
+ horizontal_position = "unknown";
+ }
+ return sysfs_emit(buf, "%s\n", horizontal_position);
+}
+static DEVICE_ATTR_RO(horizontal_position);
+
+static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->dock ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(dock);
+
+static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->physical_location->lid ? "yes" : "no");
+}
+static DEVICE_ATTR_RO(lid);
+
+static struct attribute *dev_attr_physical_location[] = {
+ &dev_attr_panel.attr,
+ &dev_attr_vertical_position.attr,
+ &dev_attr_horizontal_position.attr,
+ &dev_attr_dock.attr,
+ &dev_attr_lid.attr,
+ NULL,
+};
+
+const struct attribute_group dev_attr_physical_location_group = {
+ .name = "physical_location",
+ .attrs = dev_attr_physical_location,
+};
+
diff --git a/drivers/base/physical_location.h b/drivers/base/physical_location.h
new file mode 100644
index 000000000..82cde9f1b
--- /dev/null
+++ b/drivers/base/physical_location.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device physical location support
+ *
+ * Author: Won Chung <wonchung@google.com>
+ */
+
+#include <linux/device.h>
+
+#ifdef CONFIG_ACPI
+extern bool dev_add_physical_location(struct device *dev);
+extern const struct attribute_group dev_attr_physical_location_group;
+#else
+static inline bool dev_add_physical_location(struct device *dev) { return false; };
+static const struct attribute_group dev_attr_physical_location_group = {};
+#endif
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 000000000..c22864458
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+ int ret;
+
+ if (dev->of_node_reused)
+ return 0;
+
+ dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+ if (!dev->pins)
+ return -ENOMEM;
+
+ dev->pins->p = devm_pinctrl_get(dev);
+ if (IS_ERR(dev->pins->p)) {
+ dev_dbg(dev, "no pinctrl handle\n");
+ ret = PTR_ERR(dev->pins->p);
+ goto cleanup_alloc;
+ }
+
+ dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(dev->pins->default_state)) {
+ dev_dbg(dev, "no default pinctrl state\n");
+ ret = 0;
+ goto cleanup_get;
+ }
+
+ dev->pins->init_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_INIT);
+ if (IS_ERR(dev->pins->init_state)) {
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no init pinctrl state\n");
+
+ ret = pinctrl_select_state(dev->pins->p,
+ dev->pins->default_state);
+ } else {
+ ret = pinctrl_select_state(dev->pins->p, dev->pins->init_state);
+ }
+
+ if (ret) {
+ dev_dbg(dev, "failed to activate initial pinctrl state\n");
+ goto cleanup_get;
+ }
+
+#ifdef CONFIG_PM
+ /*
+ * If power management is enabled, we also look for the optional
+ * sleep and idle pin states, with semantics as defined in
+ * <linux/pinctrl/pinctrl-state.h>
+ */
+ dev->pins->sleep_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(dev->pins->sleep_state))
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no sleep pinctrl state\n");
+
+ dev->pins->idle_state = pinctrl_lookup_state(dev->pins->p,
+ PINCTRL_STATE_IDLE);
+ if (IS_ERR(dev->pins->idle_state))
+ /* Not supplying this state is perfectly legal */
+ dev_dbg(dev, "no idle pinctrl state\n");
+#endif
+
+ return 0;
+
+ /*
+ * If no pinctrl handle or default state was found for this device,
+	 * let's explicitly free the pin container in the device; there is
+ * no point in keeping it around.
+ */
+cleanup_get:
+ devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+ devm_kfree(dev, dev->pins);
+ dev->pins = NULL;
+
+ /* Return deferrals */
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ /* Return serious errors */
+ if (ret == -EINVAL)
+ return ret;
+ /* We ignore errors like -ENOENT meaning no pinctrl state */
+
+ return 0;
+}
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
new file mode 100644
index 000000000..12b044151
--- /dev/null
+++ b/drivers/base/platform-msi.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT 21
+#define MAX_DEV_MSIS (1 << (32 - DEV_ID_SHIFT))
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+ struct device *dev;
+ void *host_data;
+ msi_alloc_info_t arg;
+ irq_write_msi_msg_t write_msg;
+ int devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globally unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+ u32 devid = desc->dev->msi.data->platform_data->devid;
+
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
+}
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg)
+{
+ return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ info->chip, info->chip_data);
+}
+
+static void platform_msi_set_proxy_dev(msi_alloc_info_t *arg)
+{
+ arg->flags |= MSI_ALLOC_FLAGS_PROXY_DEVICE;
+}
+#else
+#define platform_msi_set_desc NULL
+#define platform_msi_init NULL
+#define platform_msi_set_proxy_dev(x) do {} while(0)
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ BUG_ON(!ops);
+
+ if (ops->msi_init == NULL)
+ ops->msi_init = platform_msi_init;
+ if (ops->set_desc == NULL)
+ ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+ desc->dev->msi.data->platform_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_mask)
+ chip->irq_mask = irq_chip_mask_parent;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = irq_chip_unmask_parent;
+ if (!chip->irq_eoi)
+ chip->irq_eoi = irq_chip_eoi_parent;
+ if (!chip->irq_set_affinity)
+ chip->irq_set_affinity = msi_domain_set_affinity;
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = platform_msi_write_msg;
+ if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
+ !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
+ info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ platform_msi_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ platform_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (domain)
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);
+
+ return domain;
+}
+EXPORT_SYMBOL_GPL(platform_msi_create_irq_domain);
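+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * a hypothetical irqchip driver stacking a platform MSI domain on top of
+ * its parent domain. With MSI_FLAG_USE_DEF_DOM_OPS/MSI_FLAG_USE_DEF_CHIP_OPS
+ * the missing domain and chip callbacks are filled in above; the minimal
+ * irq_chip and the empty msi_domain_ops are assumptions of this sketch.
+ */
+static struct irq_chip example_pmsi_chip = {
+	.name	= "example-pMSI",
+};
+
+static struct msi_domain_ops example_pmsi_ops;	/* defaults filled in above */
+
+static struct msi_domain_info example_pmsi_domain_info = {
+	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+	.ops	= &example_pmsi_ops,
+	.chip	= &example_pmsi_chip,
+};
+
+static struct irq_domain *example_create_pmsi_domain(struct fwnode_handle *fwnode,
+						      struct irq_domain *parent)
+{
+	return platform_msi_create_irq_domain(fwnode, &example_pmsi_domain_info,
+					      parent);
+}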
+
+static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ struct platform_msi_priv_data *datap;
+ int err;
+
+ /*
+ * Limit the number of interrupts to 2048 per device. Should we
+ * need to bump this up, DEV_ID_SHIFT should be adjusted
+ * accordingly (which would impact the max number of MSI
+ * capable devices).
+ */
+ if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+ return -EINVAL;
+
+ if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ dev_err(dev, "Incompatible msi_domain, giving up\n");
+ return -EINVAL;
+ }
+
+ err = msi_setup_device_data(dev);
+ if (err)
+ return err;
+
+ /* Already initialized? */
+ if (dev->msi.data->platform_data)
+ return -EBUSY;
+
+ datap = kzalloc(sizeof(*datap), GFP_KERNEL);
+ if (!datap)
+ return -ENOMEM;
+
+ datap->devid = ida_simple_get(&platform_msi_devid_ida,
+ 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ if (datap->devid < 0) {
+ err = datap->devid;
+ kfree(datap);
+ return err;
+ }
+
+ datap->write_msg = write_msi_msg;
+ datap->dev = dev;
+ dev->msi.data->platform_data = datap;
+ return 0;
+}
+
+static void platform_msi_free_priv_data(struct device *dev)
+{
+ struct platform_msi_priv_data *data = dev->msi.data->platform_data;
+
+ dev->msi.data->platform_data = NULL;
+ ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ kfree(data);
+}
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev: The device for which to allocate interrupts
+ * @nvec: The number of interrupts to allocate
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
+{
+ int err;
+
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (err)
+ return err;
+
+ err = msi_domain_alloc_irqs(dev->msi.domain, dev, nvec);
+ if (err)
+ platform_msi_free_priv_data(dev);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
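+
+/*
+ * Hedged usage sketch (illustrative only, not part of the upstream file):
+ * a hypothetical platform driver allocating four MSIs. The write_msi_msg
+ * callback would normally program the address/data pair into device
+ * registers; here it only logs them.
+ */
+static void example_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+	pr_debug("MSI %u: addr %#x:%#x data %#x\n", desc->msi_index,
+		 msg->address_hi, msg->address_lo, msg->data);
+}
+
+static int example_setup_msis(struct device *dev)
+{
+	return platform_msi_domain_alloc_irqs(dev, 4, example_write_msi_msg);
+}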
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev: The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+ msi_domain_free_irqs(dev->msi.domain, dev);
+ platform_msi_free_priv_data(dev);
+}
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
+
+/**
+ * platform_msi_get_host_data - Query the private data associated with
+ * a platform-msi domain
+ * @domain: The platform-msi domain
+ *
+ * Return: The private data provided when calling
+ * platform_msi_create_device_domain().
+ */
+void *platform_msi_get_host_data(struct irq_domain *domain)
+{
+ struct platform_msi_priv_data *data = domain->host_data;
+
+ return data->host_data;
+}
+
+static struct lock_class_key platform_device_msi_lock_class;
+
+/**
+ * __platform_msi_create_device_domain - Create a platform-msi device domain
+ *
+ * @dev: The device generating the MSIs
+ * @nvec: The number of MSIs that need to be allocated
+ * @is_tree: flag to indicate tree hierarchy
+ * @write_msi_msg: Callback to write an interrupt message for @dev
+ * @ops: The hierarchy domain operations to use
+ * @host_data: Private data associated to this domain
+ *
+ * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
+ *
+ * This is for interrupt domains which stack on a platform-msi domain
+ * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
+ * that platform-msi domain which is the parent for the new domain.
+ */
+struct irq_domain *
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ struct platform_msi_priv_data *data;
+ struct irq_domain *domain;
+ int err;
+
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (err)
+ return NULL;
+
+ /*
+ * Use a separate lock class for the MSI descriptor mutex on
+ * platform MSI device domains because the descriptor mutex nests
+ * into the domain mutex. See alloc/free below.
+ */
+ lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
+
+ data = dev->msi.data->platform_data;
+ data->host_data = host_data;
+ domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
+ is_tree ? 0 : nvec,
+ dev->fwnode, ops, data);
+ if (!domain)
+ goto free_priv;
+
+ platform_msi_set_proxy_dev(&data->arg);
+ err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
+ if (err)
+ goto free_domain;
+
+ return domain;
+
+free_domain:
+ irq_domain_remove(domain);
+free_priv:
+ platform_msi_free_priv_data(dev);
+ return NULL;
+}
+
+/**
+ * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
+ * device domain
+ *
+ * @domain: The platform-msi device domain
+ * @virq: The base irq from which to perform the free operation
+ * @nr_irqs: How many interrupts to free from @virq
+ */
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct platform_msi_priv_data *data = domain->host_data;
+
+ msi_lock_descs(data->dev);
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ msi_free_msi_descs_range(data->dev, MSI_DESC_ALL, virq, virq + nr_irqs - 1);
+ msi_unlock_descs(data->dev);
+}
+
+/**
+ * platform_msi_device_domain_alloc - Allocate interrupts associated with
+ * a platform-msi device domain
+ *
+ * @domain: The platform-msi device domain
+ * @virq: The base irq from which to perform the allocate operation
+ * @nr_irqs: How many interrupts to allocate from @virq
+ *
+ * Return 0 on success, or an error code on failure. Must be called
+ * with irq_domain_mutex held (which can only be done as part of a
+ * top-level interrupt allocation).
+ */
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct platform_msi_priv_data *data = domain->host_data;
+ struct device *dev = data->dev;
+
+ return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
+}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
new file mode 100644
index 000000000..3a06c214c
--- /dev/null
+++ b/drivers/base/platform.c
@@ -0,0 +1,1529 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * platform.c - platform 'pseudo' bus for legacy devices
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ *
+ * Please see Documentation/driver-api/driver-model/platform.rst for more
+ * information.
+ */
+
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/idr.h>
+#include <linux/acpi.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/limits.h>
+#include <linux/property.h>
+#include <linux/kmemleak.h>
+#include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
+
+#include "base.h"
+#include "power/power.h"
+
+/* For automatically allocated device IDs */
+static DEFINE_IDA(platform_devid_ida);
+
+struct device platform_bus = {
+ .init_name = "platform",
+};
+EXPORT_SYMBOL_GPL(platform_bus);
+
+/**
+ * platform_get_resource - get a resource for a device
+ * @dev: platform device
+ * @type: resource type
+ * @num: resource index
+ *
+ * Return: a pointer to the resource or NULL on failure.
+ */
+struct resource *platform_get_resource(struct platform_device *dev,
+ unsigned int type, unsigned int num)
+{
+ u32 i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (type == resource_type(r) && num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_resource);
+
+struct resource *platform_get_mem_or_io(struct platform_device *dev,
+ unsigned int num)
+{
+ u32 i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0)
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_mem_or_io);
+
+#ifdef CONFIG_HAS_IOMEM
+/**
+ * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a
+ * platform device and get resource
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @index: resource index
+ * @res: optional output parameter to store a pointer to the obtained resource.
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res)
+{
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ if (res)
+ *res = r;
+ return devm_ioremap_resource(&pdev->dev, r);
+}
+EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource);
+
+/**
+ * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform
+ * device
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @index: resource index
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
+ unsigned int index)
+{
+ return devm_platform_get_and_ioremap_resource(pdev, index, NULL);
+}
+EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource);
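+
+/*
+ * A typical probe() of a hypothetical driver (foo_* names are
+ * illustrative only) maps its first memory resource with the helper
+ * above:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		void __iomem *base;
+ *
+ *		base = devm_platform_ioremap_resource(pdev, 0);
+ *		if (IS_ERR(base))
+ *			return PTR_ERR(base);
+ *
+ *		writel(0, base + FOO_CTRL);
+ *		return 0;
+ *	}
+ */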
+
+/**
+ * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for
+ * a platform device, retrieve the
+ * resource by name
+ *
+ * @pdev: platform device to use both for memory resource lookup as well as
+ * resource management
+ * @name: name of the resource
+ *
+ * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
+ * on failure.
+ */
+void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ return devm_ioremap_resource(&pdev->dev, res);
+}
+EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
+#endif /* CONFIG_HAS_IOMEM */
+
+/**
+ * platform_get_irq_optional - get an optional IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device. Device drivers should check the return
+ * value for errors so as to not pass a negative integer value to the
+ * request_irq() APIs. This is the same as platform_get_irq(), except that it
+ * does not print an error message if an IRQ cannot be obtained.
+ *
+ * For example::
+ *
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: non-zero IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+ int ret;
+#ifdef CONFIG_SPARC
+ /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+ if (!dev || num >= dev->archdata.num_irqs)
+ goto out_not_found;
+ ret = dev->archdata.irqs[num];
+ goto out;
+#else
+ struct resource *r;
+
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ ret = of_irq_get(dev->dev.of_node, num);
+ if (ret > 0 || ret == -EPROBE_DEFER)
+ goto out;
+ }
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ if (has_acpi_companion(&dev->dev)) {
+ if (r && r->flags & IORESOURCE_DISABLED) {
+ ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+ if (ret)
+ goto out;
+ }
+ }
+
+ /*
+ * The resources may pass trigger flags to the irqs that need
+ * to be set up. It so happens that the trigger flags for
+ * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
+ * settings.
+ */
+ if (r && r->flags & IORESOURCE_BITS) {
+ struct irq_data *irqd;
+
+ irqd = irq_get_irq_data(r->start);
+ if (!irqd)
+ goto out_not_found;
+ irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS);
+ }
+
+ if (r) {
+ ret = r->start;
+ goto out;
+ }
+
+ /*
+ * For the index 0 interrupt, allow falling back to GpioInt
+ * resources. While a device could have both Interrupt and GpioInt
+ * resources, making this fallback ambiguous, in many common cases
+ * the device will only expose one IRQ, and this fallback
+ * allows a common code path across either kind of resource.
+ */
+ if (num == 0 && has_acpi_companion(&dev->dev)) {
+ ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+ /* Our callers expect -ENXIO for missing IRQs. */
+ if (ret >= 0 || ret == -EPROBE_DEFER)
+ goto out;
+ }
+
+#endif
+out_not_found:
+ ret = -ENXIO;
+out:
+ if (WARN(!ret, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_optional);
+
+/**
+ * platform_get_irq - get an IRQ for a device
+ * @dev: platform device
+ * @num: IRQ number index
+ *
+ * Gets an IRQ for a platform device and prints an error message if finding the
+ * IRQ fails. Device drivers should check the return value for errors so as to
+ * not pass a negative integer value to the request_irq() APIs.
+ *
+ * For example::
+ *
+ * int irq = platform_get_irq(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: non-zero IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq(struct platform_device *dev, unsigned int num)
+{
+ int ret;
+
+ ret = platform_get_irq_optional(dev, num);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret,
+ "IRQ index %u not found\n", num);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(platform_get_irq);
+
+/**
+ * platform_irq_count - Count the number of IRQs a platform device uses
+ * @dev: platform device
+ *
+ * Return: Number of IRQs a platform device uses or -EPROBE_DEFER
+ */
+int platform_irq_count(struct platform_device *dev)
+{
+ int ret, nr = 0;
+
+ while ((ret = platform_get_irq_optional(dev, nr)) >= 0)
+ nr++;
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ return nr;
+}
+EXPORT_SYMBOL_GPL(platform_irq_count);
+
+struct irq_affinity_devres {
+ unsigned int count;
+ unsigned int irq[];
+};
+
+static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
+{
+ struct resource *r;
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, index);
+ if (r)
+ irqresource_disabled(r, 0);
+}
+
+static void devm_platform_get_irqs_affinity_release(struct device *dev,
+ void *res)
+{
+ struct irq_affinity_devres *ptr = res;
+ int i;
+
+ for (i = 0; i < ptr->count; i++) {
+ irq_dispose_mapping(ptr->irq[i]);
+
+ if (has_acpi_companion(dev))
+ platform_disable_acpi_irq(to_platform_device(dev), i);
+ }
+}
+
+/**
+ * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a
+ * device using an interrupt affinity descriptor
+ * @dev: platform device pointer
+ * @affd: affinity descriptor
+ * @minvec: minimum count of interrupt vectors
+ * @maxvec: maximum count of interrupt vectors
+ * @irqs: pointer holder for IRQ numbers
+ *
+ * Gets a set of IRQs for a platform device, and updates IRQ affinity according
+ * to the passed affinity descriptor.
+ *
+ * Return: Number of vectors on success, negative error number on failure.
+ */
+int devm_platform_get_irqs_affinity(struct platform_device *dev,
+ struct irq_affinity *affd,
+ unsigned int minvec,
+ unsigned int maxvec,
+ int **irqs)
+{
+ struct irq_affinity_devres *ptr;
+ struct irq_affinity_desc *desc;
+ size_t size;
+ int i, ret, nvec;
+
+ if (!affd)
+ return -EPERM;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ nvec = platform_irq_count(dev);
+ if (nvec < 0)
+ return nvec;
+
+ if (nvec < minvec)
+ return -ENOSPC;
+
+ nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
+ if (nvec < minvec)
+ return -ENOSPC;
+
+ if (nvec > maxvec)
+ nvec = maxvec;
+
+ size = sizeof(*ptr) + sizeof(unsigned int) * nvec;
+ ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size,
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ptr->count = nvec;
+
+ for (i = 0; i < nvec; i++) {
+ int irq = platform_get_irq(dev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_free_devres;
+ }
+ ptr->irq[i] = irq;
+ }
+
+ desc = irq_create_affinity_masks(nvec, affd);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto err_free_devres;
+ }
+
+ for (i = 0; i < nvec; i++) {
+ ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]);
+ if (ret) {
+ dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n",
+ ptr->irq[i], ret);
+ goto err_free_desc;
+ }
+ }
+
+ devres_add(&dev->dev, ptr);
+
+ kfree(desc);
+
+ *irqs = ptr->irq;
+
+ return nvec;
+
+err_free_desc:
+ kfree(desc);
+err_free_devres:
+ devres_free(ptr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity);
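+
+/*
+ * Sketch of a caller (values and bounds are illustrative): reserve one
+ * pre-vector, let the rest be spread across CPUs, and keep the managed
+ * IRQ numbers returned in @irqs for later request_irq() calls:
+ *
+ *	struct irq_affinity affd = { .pre_vectors = 1 };
+ *	int *irqs, nvec;
+ *
+ *	nvec = devm_platform_get_irqs_affinity(pdev, &affd, 2, 8, &irqs);
+ *	if (nvec < 0)
+ *		return nvec;
+ */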
+
+/**
+ * platform_get_resource_byname - get a resource for a device by name
+ * @dev: platform device
+ * @type: resource type
+ * @name: resource name
+ */
+struct resource *platform_get_resource_byname(struct platform_device *dev,
+ unsigned int type,
+ const char *name)
+{
+ u32 i;
+
+ for (i = 0; i < dev->num_resources; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (unlikely(!r->name))
+ continue;
+
+ if (type == resource_type(r) && !strcmp(r->name, name))
+ return r;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(platform_get_resource_byname);
+
+static int __platform_get_irq_byname(struct platform_device *dev,
+ const char *name)
+{
+ struct resource *r;
+ int ret;
+
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+ ret = of_irq_get_byname(dev->dev.of_node, name);
+ if (ret > 0 || ret == -EPROBE_DEFER)
+ return ret;
+ }
+
+ r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
+ if (r) {
+ if (WARN(!r->start, "0 is an invalid IRQ number\n"))
+ return -EINVAL;
+ return r->start;
+ }
+
+ return -ENXIO;
+}
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an IRQ like platform_get_irq(), but by name rather than by index.
+ *
+ * Return: non-zero IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+ int ret;
+
+ ret = __platform_get_irq_byname(dev, name);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n",
+ name);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname);
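+
+/*
+ * For example (the "wakeup" name is illustrative; it must match an IRQ
+ * name from the device's resources or firmware description):
+ *
+ *	int irq = platform_get_irq_byname(pdev, "wakeup");
+ *	if (irq < 0)
+ *		return irq;
+ */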
+
+/**
+ * platform_get_irq_byname_optional - get an optional IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an optional IRQ by name like platform_get_irq_byname(), except that it
+ * does not print an error message if an IRQ cannot be obtained.
+ *
+ * Return: non-zero IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname_optional(struct platform_device *dev,
+ const char *name)
+{
+ return __platform_get_irq_byname(dev, name);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
+
+/**
+ * platform_add_devices - add a number of platform devices
+ * @devs: array of platform devices to add
+ * @num: number of platform devices in array
+ */
+int platform_add_devices(struct platform_device **devs, int num)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < num; i++) {
+ ret = platform_device_register(devs[i]);
+ if (ret) {
+ while (--i >= 0)
+ platform_device_unregister(devs[i]);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(platform_add_devices);
+
+struct platform_object {
+ struct platform_device pdev;
+ char name[];
+};
+
+/*
+ * Set up default DMA mask for platform devices if they weren't
+ * previously set by the architecture / DT.
+ */
+static void setup_pdev_dma_masks(struct platform_device *pdev)
+{
+ pdev->dev.dma_parms = &pdev->dma_parms;
+
+ if (!pdev->dev.coherent_dma_mask)
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ if (!pdev->dev.dma_mask) {
+ pdev->platform_dma_mask = DMA_BIT_MASK(32);
+ pdev->dev.dma_mask = &pdev->platform_dma_mask;
+ }
+};
+
+/**
+ * platform_device_put - destroy a platform device
+ * @pdev: platform device to free
+ *
+ * Free all memory associated with a platform device. This function must
+ * _only_ be externally called in error cases. All other usage is a bug.
+ */
+void platform_device_put(struct platform_device *pdev)
+{
+ if (!IS_ERR_OR_NULL(pdev))
+ put_device(&pdev->dev);
+}
+EXPORT_SYMBOL_GPL(platform_device_put);
+
+static void platform_device_release(struct device *dev)
+{
+ struct platform_object *pa = container_of(dev, struct platform_object,
+ pdev.dev);
+
+ of_node_put(pa->pdev.dev.of_node);
+ kfree(pa->pdev.dev.platform_data);
+ kfree(pa->pdev.mfd_cell);
+ kfree(pa->pdev.resource);
+ kfree(pa->pdev.driver_override);
+ kfree(pa);
+}
+
+/**
+ * platform_device_alloc - create a platform device
+ * @name: base name of the device we're adding
+ * @id: instance id
+ *
+ * Create a platform device object which can have other objects attached
+ * to it, and which will have attached objects freed when it is released.
+ */
+struct platform_device *platform_device_alloc(const char *name, int id)
+{
+ struct platform_object *pa;
+
+ pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL);
+ if (pa) {
+ strcpy(pa->name, name);
+ pa->pdev.name = pa->name;
+ pa->pdev.id = id;
+ device_initialize(&pa->pdev.dev);
+ pa->pdev.dev.release = platform_device_release;
+ setup_pdev_dma_masks(&pa->pdev);
+ }
+
+ return pa ? &pa->pdev : NULL;
+}
+EXPORT_SYMBOL_GPL(platform_device_alloc);
+
+/**
+ * platform_device_add_resources - add resources to a platform device
+ * @pdev: platform device allocated by platform_device_alloc to add resources to
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ *
+ * Add a copy of the resources to the platform device. The memory
+ * associated with the resources will be freed when the platform device is
+ * released.
+ */
+int platform_device_add_resources(struct platform_device *pdev,
+ const struct resource *res, unsigned int num)
+{
+ struct resource *r = NULL;
+
+ if (res) {
+ r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+ }
+
+ kfree(pdev->resource);
+ pdev->resource = r;
+ pdev->num_resources = num;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(platform_device_add_resources);
+
+/**
+ * platform_device_add_data - add platform-specific data to a platform device
+ * @pdev: platform device allocated by platform_device_alloc to add data to
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * Add a copy of platform specific data to the platform device's
+ * platform_data pointer. The memory associated with the platform data
+ * will be freed when the platform device is released.
+ */
+int platform_device_add_data(struct platform_device *pdev, const void *data,
+ size_t size)
+{
+ void *d = NULL;
+
+ if (data) {
+ d = kmemdup(data, size, GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ }
+
+ kfree(pdev->dev.platform_data);
+ pdev->dev.platform_data = d;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(platform_device_add_data);
+
+/**
+ * platform_device_add - add a platform device to device hierarchy
+ * @pdev: platform device we're adding
+ *
+ * This is part 2 of platform_device_register(), though may be called
+ * separately _iff_ pdev was allocated by platform_device_alloc().
+ */
+int platform_device_add(struct platform_device *pdev)
+{
+ u32 i;
+ int ret;
+
+ if (!pdev)
+ return -EINVAL;
+
+ if (!pdev->dev.parent)
+ pdev->dev.parent = &platform_bus;
+
+ pdev->dev.bus = &platform_bus_type;
+
+ switch (pdev->id) {
+ default:
+ dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+ break;
+ case PLATFORM_DEVID_NONE:
+ dev_set_name(&pdev->dev, "%s", pdev->name);
+ break;
+ case PLATFORM_DEVID_AUTO:
+ /*
+ * Automatically allocated device ID. We mark it as such so
+ * that we remember it must be freed, and we append a suffix
+ * to avoid namespace collision with explicit IDs.
+ */
+ ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_out;
+ pdev->id = ret;
+ pdev->id_auto = true;
+ dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
+ break;
+ }
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ struct resource *p, *r = &pdev->resource[i];
+
+ if (r->name == NULL)
+ r->name = dev_name(&pdev->dev);
+
+ p = r->parent;
+ if (!p) {
+ if (resource_type(r) == IORESOURCE_MEM)
+ p = &iomem_resource;
+ else if (resource_type(r) == IORESOURCE_IO)
+ p = &ioport_resource;
+ }
+
+ if (p) {
+ ret = insert_resource(p, r);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+ goto failed;
+ }
+ }
+ }
+
+ pr_debug("Registering platform device '%s'. Parent at %s\n",
+ dev_name(&pdev->dev), dev_name(pdev->dev.parent));
+
+ ret = device_add(&pdev->dev);
+ if (ret == 0)
+ return ret;
+
+ failed:
+ if (pdev->id_auto) {
+ ida_free(&platform_devid_ida, pdev->id);
+ pdev->id = PLATFORM_DEVID_AUTO;
+ }
+
+ while (i--) {
+ struct resource *r = &pdev->resource[i];
+ if (r->parent)
+ release_resource(r);
+ }
+
+ err_out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(platform_device_add);
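+
+/*
+ * The usual dynamic registration sequence built from the helpers above;
+ * foo_resources and foo_pdata are illustrative, and errors are handed
+ * back after dropping the reference with platform_device_put():
+ *
+ *	struct platform_device *pdev;
+ *	int ret;
+ *
+ *	pdev = platform_device_alloc("foo", PLATFORM_DEVID_AUTO);
+ *	if (!pdev)
+ *		return -ENOMEM;
+ *
+ *	ret = platform_device_add_resources(pdev, foo_resources,
+ *					    ARRAY_SIZE(foo_resources));
+ *	if (!ret)
+ *		ret = platform_device_add_data(pdev, &foo_pdata, sizeof(foo_pdata));
+ *	if (!ret)
+ *		ret = platform_device_add(pdev);
+ *	if (ret) {
+ *		platform_device_put(pdev);
+ *		return ret;
+ *	}
+ */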
+
+/**
+ * platform_device_del - remove a platform-level device
+ * @pdev: platform device we're removing
+ *
+ * Note that this function will also release all memory- and port-based
+ * resources owned by the device (@dev->resource). This function must
+ * _only_ be externally called in error cases. All other usage is a bug.
+ */
+void platform_device_del(struct platform_device *pdev)
+{
+ u32 i;
+
+ if (!IS_ERR_OR_NULL(pdev)) {
+ device_del(&pdev->dev);
+
+ if (pdev->id_auto) {
+ ida_free(&platform_devid_ida, pdev->id);
+ pdev->id = PLATFORM_DEVID_AUTO;
+ }
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ struct resource *r = &pdev->resource[i];
+ if (r->parent)
+ release_resource(r);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(platform_device_del);
+
+/**
+ * platform_device_register - add a platform-level device
+ * @pdev: platform device we're adding
+ *
+ * NOTE: _Never_ directly free @pdev after calling this function, even if it
+ * returned an error! Always use platform_device_put() to give up the
+ * reference initialised in this function instead.
+ */
+int platform_device_register(struct platform_device *pdev)
+{
+ device_initialize(&pdev->dev);
+ setup_pdev_dma_masks(pdev);
+ return platform_device_add(pdev);
+}
+EXPORT_SYMBOL_GPL(platform_device_register);
+
+/**
+ * platform_device_unregister - unregister a platform-level device
+ * @pdev: platform device we're unregistering
+ *
+ * Unregistration is done in 2 steps. First we release all resources
+ * and remove the device from the subsystem, then we drop the reference
+ * count by calling platform_device_put().
+ */
+void platform_device_unregister(struct platform_device *pdev)
+{
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+}
+EXPORT_SYMBOL_GPL(platform_device_unregister);
+
+/**
+ * platform_device_register_full - add a platform-level device with
+ * resources and platform-specific data
+ *
+ * @pdevinfo: data used to create device
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+struct platform_device *platform_device_register_full(
+ const struct platform_device_info *pdevinfo)
+{
+ int ret;
+ struct platform_device *pdev;
+
+ pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
+
+ pdev->dev.parent = pdevinfo->parent;
+ pdev->dev.fwnode = pdevinfo->fwnode;
+ pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode));
+ pdev->dev.of_node_reused = pdevinfo->of_node_reused;
+
+ if (pdevinfo->dma_mask) {
+ pdev->platform_dma_mask = pdevinfo->dma_mask;
+ pdev->dev.dma_mask = &pdev->platform_dma_mask;
+ pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
+ }
+
+ ret = platform_device_add_resources(pdev,
+ pdevinfo->res, pdevinfo->num_res);
+ if (ret)
+ goto err;
+
+ ret = platform_device_add_data(pdev,
+ pdevinfo->data, pdevinfo->size_data);
+ if (ret)
+ goto err;
+
+ if (pdevinfo->properties) {
+ ret = device_create_managed_software_node(&pdev->dev,
+ pdevinfo->properties, NULL);
+ if (ret)
+ goto err;
+ }
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+err:
+ ACPI_COMPANION_SET(&pdev->dev, NULL);
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
+
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(platform_device_register_full);
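+
+/*
+ * Example use of the descriptor-based interface; all field values below
+ * are illustrative:
+ *
+ *	const struct platform_device_info info = {
+ *		.parent = &foo_parent_dev,
+ *		.name = "foo",
+ *		.id = PLATFORM_DEVID_NONE,
+ *		.res = foo_resources,
+ *		.num_res = ARRAY_SIZE(foo_resources),
+ *		.dma_mask = DMA_BIT_MASK(32),
+ *	};
+ *	struct platform_device *pdev;
+ *
+ *	pdev = platform_device_register_full(&info);
+ *	if (IS_ERR(pdev))
+ *		return PTR_ERR(pdev);
+ */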
+
+/**
+ * __platform_driver_register - register a driver for platform-level devices
+ * @drv: platform driver structure
+ * @owner: owning module/driver
+ */
+int __platform_driver_register(struct platform_driver *drv,
+ struct module *owner)
+{
+ drv->driver.owner = owner;
+ drv->driver.bus = &platform_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__platform_driver_register);
+
+/**
+ * platform_driver_unregister - unregister a driver for platform-level devices
+ * @drv: platform driver structure
+ */
+void platform_driver_unregister(struct platform_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(platform_driver_unregister);
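+
+/*
+ * Most drivers only need a static struct platform_driver plus the
+ * module_platform_driver() helper, which generates the module init/exit
+ * boilerplate that calls the registration functions above (foo_* names
+ * are illustrative):
+ *
+ *	static struct platform_driver foo_driver = {
+ *		.probe	= foo_probe,
+ *		.remove	= foo_remove,
+ *		.driver	= {
+ *			.name		= "foo",
+ *			.of_match_table	= foo_of_match,
+ *		},
+ *	};
+ *	module_platform_driver(foo_driver);
+ */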
+
+static int platform_probe_fail(struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
+/**
+ * __platform_driver_probe - register driver for non-hotpluggable device
+ * @drv: platform driver structure
+ * @probe: the driver probe routine, probably from an __init section
+ * @module: module which will be the owner of the driver
+ *
+ * Use this instead of platform_driver_register() when you know the device
+ * is not hotpluggable and has already been registered, and you want to
+ * remove its run-once probe() infrastructure from memory after the driver
+ * has bound to the device.
+ *
+ * One typical use for this would be with drivers for controllers integrated
+ * into system-on-chip processors, where the controller devices have been
+ * configured as part of board setup.
+ *
+ * Note that this is incompatible with deferred probing.
+ *
+ * Returns zero if the driver registered and bound to a device, else returns
+ * a negative error code and the driver is not registered.
+ */
+int __init_or_module __platform_driver_probe(struct platform_driver *drv,
+ int (*probe)(struct platform_device *), struct module *module)
+{
+ int retval, code;
+
+ if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
+ pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
+ drv->driver.name, __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * We have to run our probes synchronously because we check if
+ * we find any devices to bind to and exit with error if there
+ * are any.
+ */
+ drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
+ /*
+ * Prevent driver from requesting probe deferral to avoid further
+ * futile probe attempts.
+ */
+ drv->prevent_deferred_probe = true;
+
+ /* make sure driver won't have bind/unbind attributes */
+ drv->driver.suppress_bind_attrs = true;
+
+ /* temporary section violation during probe() */
+ drv->probe = probe;
+ retval = code = __platform_driver_register(drv, module);
+ if (retval)
+ return retval;
+
+ /*
+ * Fixup that section violation, being paranoid about code scanning
+ * the list of drivers in order to probe new devices. Check to see
+ * if the probe was successful, and make sure any forced probes of
+ * new devices fail.
+ */
+ spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
+ drv->probe = platform_probe_fail;
+ if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
+ retval = -ENODEV;
+ spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
+
+ if (code != retval)
+ platform_driver_unregister(drv);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__platform_driver_probe);
+
+/**
+ * __platform_create_bundle - register driver and create corresponding device
+ * @driver: platform driver structure
+ * @probe: the driver probe routine, probably from an __init section
+ * @res: set of resources that needs to be allocated for the device
+ * @n_res: number of resources
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ * @module: module which will be the owner of the driver
+ *
+ * Use this in legacy-style modules that probe hardware directly and
+ * register a single platform device and corresponding platform driver.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+struct platform_device * __init_or_module __platform_create_bundle(
+ struct platform_driver *driver,
+ int (*probe)(struct platform_device *),
+ struct resource *res, unsigned int n_res,
+ const void *data, size_t size, struct module *module)
+{
+ struct platform_device *pdev;
+ int error;
+
+ pdev = platform_device_alloc(driver->driver.name, -1);
+ if (!pdev) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+
+ error = platform_device_add_resources(pdev, res, n_res);
+ if (error)
+ goto err_pdev_put;
+
+ error = platform_device_add_data(pdev, data, size);
+ if (error)
+ goto err_pdev_put;
+
+ error = platform_device_add(pdev);
+ if (error)
+ goto err_pdev_put;
+
+ error = __platform_driver_probe(driver, probe, module);
+ if (error)
+ goto err_pdev_del;
+
+ return pdev;
+
+err_pdev_del:
+ platform_device_del(pdev);
+err_pdev_put:
+ platform_device_put(pdev);
+err_out:
+ return ERR_PTR(error);
+}
+EXPORT_SYMBOL_GPL(__platform_create_bundle);
+
+/**
+ * __platform_register_drivers - register an array of platform drivers
+ * @drivers: an array of drivers to register
+ * @count: the number of drivers to register
+ * @owner: module owning the drivers
+ *
+ * Registers platform drivers specified by an array. On failure to register a
+ * driver, all previously registered drivers will be unregistered. Callers of
+ * this API should use platform_unregister_drivers() to unregister drivers in
+ * the reverse order.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int __platform_register_drivers(struct platform_driver * const *drivers,
+ unsigned int count, struct module *owner)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < count; i++) {
+ pr_debug("registering platform driver %ps\n", drivers[i]);
+
+ err = __platform_driver_register(drivers[i], owner);
+ if (err < 0) {
+ pr_err("failed to register platform driver %ps: %d\n",
+ drivers[i], err);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ while (i--) {
+ pr_debug("unregistering platform driver %ps\n", drivers[i]);
+ platform_driver_unregister(drivers[i]);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(__platform_register_drivers);
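+
+/*
+ * Typical use goes through the platform_register_drivers() wrapper,
+ * which passes THIS_MODULE as the owner (the drivers listed are
+ * illustrative):
+ *
+ *	static struct platform_driver * const foo_drivers[] = {
+ *		&foo_core_driver,
+ *		&foo_phy_driver,
+ *	};
+ *
+ *	ret = platform_register_drivers(foo_drivers, ARRAY_SIZE(foo_drivers));
+ */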
+
+/**
+ * platform_unregister_drivers - unregister an array of platform drivers
+ * @drivers: an array of drivers to unregister
+ * @count: the number of drivers to unregister
+ *
+ * Unregisters platform drivers specified by an array. This is typically used
+ * to complement an earlier call to platform_register_drivers(). Drivers are
+ * unregistered in the reverse order in which they were registered.
+ */
+void platform_unregister_drivers(struct platform_driver * const *drivers,
+ unsigned int count)
+{
+ while (count--) {
+ pr_debug("unregistering platform driver %ps\n", drivers[count]);
+ platform_driver_unregister(drivers[count]);
+ }
+}
+EXPORT_SYMBOL_GPL(platform_unregister_drivers);
+
+static const struct platform_device_id *platform_match_id(
+ const struct platform_device_id *id,
+ struct platform_device *pdev)
+{
+ while (id->name[0]) {
+ if (strcmp(pdev->name, id->name) == 0) {
+ pdev->id_entry = id;
+ return id;
+ }
+ id++;
+ }
+ return NULL;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+ struct platform_driver *pdrv = to_platform_driver(dev->driver);
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->suspend)
+ ret = pdrv->suspend(pdev, mesg);
+
+ return ret;
+}
+
+static int platform_legacy_resume(struct device *dev)
+{
+ struct platform_driver *pdrv = to_platform_driver(dev->driver);
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret = 0;
+
+ if (dev->driver && pdrv->resume)
+ ret = pdrv->resume(pdev);
+
+ return ret;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int platform_pm_suspend(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->suspend)
+ ret = drv->pm->suspend(dev);
+ } else {
+ ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
+ }
+
+ return ret;
+}
+
+int platform_pm_resume(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->resume)
+ ret = drv->pm->resume(dev);
+ } else {
+ ret = platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+#endif /* CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+int platform_pm_freeze(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->freeze)
+ ret = drv->pm->freeze(dev);
+ } else {
+ ret = platform_legacy_suspend(dev, PMSG_FREEZE);
+ }
+
+ return ret;
+}
+
+int platform_pm_thaw(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->thaw)
+ ret = drv->pm->thaw(dev);
+ } else {
+ ret = platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+int platform_pm_poweroff(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->poweroff)
+ ret = drv->pm->poweroff(dev);
+ } else {
+ ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
+ }
+
+ return ret;
+}
+
+int platform_pm_restore(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (!drv)
+ return 0;
+
+ if (drv->pm) {
+ if (drv->pm->restore)
+ ret = drv->pm->restore(dev);
+ } else {
+ ret = platform_legacy_resume(dev);
+ }
+
+ return ret;
+}
+
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+
+/* modalias support enables more hands-off userspace setup:
+ * (a) environment variable lets new-style hotplug events work once system is
+ * fully running: "modprobe $MODALIAS"
+ * (b) sysfs attribute lets new-style coldplug recover from hotplug events
+ * mishandled before system is fully running: "modprobe $(cat modalias)"
+ */
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int len;
+
+ len = of_device_modalias(dev, buf, PAGE_SIZE);
+ if (len != -ENODEV)
+ return len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ return sysfs_emit(buf, "platform:%s\n", pdev->name);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t numa_node_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
+}
+static DEVICE_ATTR_RO(numa_node);
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ ret = driver_set_override(dev, &pdev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *platform_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ &dev_attr_numa_node.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+
+ if (a == &dev_attr_numa_node.attr &&
+ dev_to_node(dev) == NUMA_NO_NODE)
+ return 0;
+
+ return a->mode;
+}
+
+static const struct attribute_group platform_dev_group = {
+ .attrs = platform_dev_attrs,
+ .is_visible = platform_dev_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(platform_dev);
+
+
+/**
+ * platform_match - bind platform device to platform driver.
+ * @dev: device.
+ * @drv: driver.
+ *
+ * Platform device IDs are assumed to be encoded like this:
+ * "<name><instance>", where <name> is a short description of the type of
+ * device, like "pci" or "floppy", and <instance> is the enumerated
+ * instance of the device, like '0' or '42'. Driver IDs are simply
+ * "<name>". So, extract the <name> from the platform_device structure,
+ * and compare it against the name of the driver. Return whether they match
+ * or not.
+ */
+static int platform_match(struct device *dev, struct device_driver *drv)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct platform_driver *pdrv = to_platform_driver(drv);
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (pdev->driver_override)
+ return !strcmp(pdev->driver_override, drv->name);
+
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try ACPI style match */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try to match against the id table */
+ if (pdrv->id_table)
+ return platform_match_id(pdrv->id_table, pdev) != NULL;
+
+ /* fall-back to driver name match */
+ return (strcmp(pdev->name, drv->name) == 0);
+}
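+
+/*
+ * An id table match as described above looks like this in a driver
+ * (entries are illustrative); the empty entry terminates the table:
+ *
+ *	static const struct platform_device_id foo_id_table[] = {
+ *		{ "foo",    0 },
+ *		{ "foo-v2", 1 },
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(platform, foo_id_table);
+ */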
+
+static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int rc;
+
+ /* Some devices have extra OF data and an OF-style MODALIAS */
+ rc = of_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
+ pdev->name);
+ return 0;
+}
+
+static int platform_probe(struct device *_dev)
+{
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+ int ret;
+
+ /*
+ * A driver registered using platform_driver_probe() cannot be bound
+ * again later because the probe function usually lives in __init code
+ * and so is gone. For these drivers .probe is set to
+ * platform_probe_fail in __platform_driver_probe(). Don't even prepare
+ * clocks and PM domains for these to match the traditional behaviour.
+ */
+ if (unlikely(drv->probe == platform_probe_fail))
+ return -ENXIO;
+
+ ret = of_clk_set_defaults(_dev->of_node, false);
+ if (ret < 0)
+ return ret;
+
+ ret = dev_pm_domain_attach(_dev, true);
+ if (ret)
+ goto out;
+
+ if (drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ }
+
+out:
+ if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
+ dev_warn(_dev, "probe deferral not supported\n");
+ ret = -ENXIO;
+ }
+
+ return ret;
+}
+
+static void platform_remove(struct device *_dev)
+{
+ struct platform_driver *drv = to_platform_driver(_dev->driver);
+ struct platform_device *dev = to_platform_device(_dev);
+
+ if (drv->remove_new) {
+ drv->remove_new(dev);
+ } else if (drv->remove) {
+ int ret = drv->remove(dev);
+
+ if (ret)
+ dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
+ }
+ dev_pm_domain_detach(_dev, true);
+}
+
+static void platform_shutdown(struct device *_dev)
+{
+ struct platform_device *dev = to_platform_device(_dev);
+ struct platform_driver *drv;
+
+ if (!_dev->driver)
+ return;
+
+ drv = to_platform_driver(_dev->driver);
+ if (drv->shutdown)
+ drv->shutdown(dev);
+}
+
+static int platform_dma_configure(struct device *dev)
+{
+ struct platform_driver *drv = to_platform_driver(dev->driver);
+ enum dev_dma_attr attr;
+ int ret = 0;
+
+ if (dev->of_node) {
+ ret = of_dma_configure(dev, dev->of_node, true);
+ } else if (has_acpi_companion(dev)) {
+ attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+ ret = acpi_dma_configure(dev, attr);
+ }
+
+ if (!ret && !drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return ret;
+}
+
+static void platform_dma_cleanup(struct device *dev)
+{
+ struct platform_driver *drv = to_platform_driver(dev->driver);
+
+ if (!drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
+static const struct dev_pm_ops platform_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL)
+ USE_PLATFORM_PM_SLEEP_OPS
+};
+
+struct bus_type platform_bus_type = {
+ .name = "platform",
+ .dev_groups = platform_dev_groups,
+ .match = platform_match,
+ .uevent = platform_uevent,
+ .probe = platform_probe,
+ .remove = platform_remove,
+ .shutdown = platform_shutdown,
+ .dma_configure = platform_dma_configure,
+ .dma_cleanup = platform_dma_cleanup,
+ .pm = &platform_dev_pm_ops,
+};
+EXPORT_SYMBOL_GPL(platform_bus_type);
+
+static inline int __platform_match(struct device *dev, const void *drv)
+{
+ return platform_match(dev, (struct device_driver *)drv);
+}
+
+/**
+ * platform_find_device_by_driver - Find a platform device with a given
+ * driver.
+ * @start: The device to start the search from.
+ * @drv: The device driver to look for.
+ */
+struct device *platform_find_device_by_driver(struct device *start,
+ const struct device_driver *drv)
+{
+ return bus_find_device(&platform_bus_type, start, drv,
+ __platform_match);
+}
+EXPORT_SYMBOL_GPL(platform_find_device_by_driver);
+
+void __weak __init early_platform_cleanup(void) { }
+
+int __init platform_bus_init(void)
+{
+ int error;
+
+ early_platform_cleanup();
+
+ error = device_register(&platform_bus);
+ if (error) {
+ put_device(&platform_bus);
+ return error;
+ }
+ error = bus_register(&platform_bus_type);
+ if (error)
+ device_unregister(&platform_bus);
+ of_platform_register_reconfig_notifier();
+ return error;
+}
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
new file mode 100644
index 000000000..8fdd0073e
--- /dev/null
+++ b/drivers/base/power/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
+obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
+obj-$(CONFIG_PM_TRACE_RTC) += trace.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
+obj-$(CONFIG_HAVE_CLK) += clock_ops.o
+obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
new file mode 100644
index 000000000..4110c19c0
--- /dev/null
+++ b/drivers/base/power/clock_ops.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks
+ *
+ * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <linux/pm_clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/of_clk.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM_CLK
+
+enum pce_status {
+ PCE_STATUS_NONE = 0,
+ PCE_STATUS_ACQUIRED,
+ PCE_STATUS_PREPARED,
+ PCE_STATUS_ENABLED,
+ PCE_STATUS_ERROR,
+};
+
+struct pm_clock_entry {
+ struct list_head node;
+ char *con_id;
+ struct clk *clk;
+ enum pce_status status;
+ bool enabled_when_prepared;
+};
+
+/**
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ * entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against a concurrent clock_op_might_sleep and PM clock
+ * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
+ * happen in atomic context, hence both the mutex and the spinlock must be
+ * taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+ __acquires(&psd->lock)
+{
+ mutex_lock(&psd->clock_mutex);
+ spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+ __releases(&psd->lock)
+{
+ spin_unlock_irq(&psd->lock);
+ mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ * and clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
+ * only the mutex can be locked and those functions can only be used in
+ * non-atomic context. If clock_op_might_sleep == 0 then these functions
+ * may be used in any context and only the spinlock can be locked.
+ * Returns -EPERM if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+ const char *fn)
+ /* sparse annotations don't work here as exit state isn't static */
+{
+ bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+ spin_lock_irqsave(&psd->lock, *flags);
+ if (!psd->clock_op_might_sleep) {
+ /* the __release is there to work around sparse limitations */
+ __release(&psd->lock);
+ return 0;
+ }
+
+ /* bail out if in atomic context */
+ if (atomic_context) {
+ pr_err("%s: atomic context with clock_ops_might_sleep = %d",
+ fn, psd->clock_op_might_sleep);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ might_sleep();
+ return -EPERM;
+ }
+
+ /* we must switch to the mutex */
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ mutex_lock(&psd->clock_mutex);
+
+ /*
+ * There was a possibility for psd->clock_op_might_sleep
+ * to become 0 above. Keep the mutex only if not the case.
+ */
+ if (likely(psd->clock_op_might_sleep))
+ return 0;
+
+ mutex_unlock(&psd->clock_mutex);
+ goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ * pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+ /* sparse annotations don't work here as entry state isn't static */
+{
+ if (psd->clock_op_might_sleep) {
+ mutex_unlock(&psd->clock_mutex);
+ } else {
+ /* the __acquire is there to work around sparse limitations */
+ __acquire(&psd->lock);
+ spin_unlock_irqrestore(&psd->lock, *flags);
+ }
+}
+
+/**
+ * __pm_clk_enable - Enable a clock, reporting any errors
+ * @dev: The device for the given clock
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+{
+ int ret;
+
+ switch (ce->status) {
+ case PCE_STATUS_ACQUIRED:
+ ret = clk_prepare_enable(ce->clk);
+ break;
+ case PCE_STATUS_PREPARED:
+ ret = clk_enable(ce->clk);
+ break;
+ default:
+ return;
+ }
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ else
+ dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ __func__, ce->clk, ret);
+}
+
+/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
+{
+ if (!ce->clk)
+ ce->clk = clk_get(dev, ce->con_id);
+ if (IS_ERR(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ return;
+ } else if (clk_is_enabled_when_prepared(ce->clk)) {
+ /* we defer preparing the clock in that case */
+ ce->status = PCE_STATUS_ACQUIRED;
+ ce->enabled_when_prepared = true;
+ } else if (clk_prepare(ce->clk)) {
+ ce->status = PCE_STATUS_ERROR;
+ dev_err(dev, "clk_prepare() failed\n");
+ return;
+ } else {
+ ce->status = PCE_STATUS_PREPARED;
+ }
+ dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+ ce->clk, ce->con_id);
+}
+
+static int __pm_clk_add(struct device *dev, const char *con_id,
+ struct clk *clk)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd)
+ return -EINVAL;
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce)
+ return -ENOMEM;
+
+ if (con_id) {
+ ce->con_id = kstrdup(con_id, GFP_KERNEL);
+ if (!ce->con_id) {
+ kfree(ce);
+ return -ENOMEM;
+ }
+ } else {
+ if (IS_ERR(clk)) {
+ kfree(ce);
+ return -ENOENT;
+ }
+ ce->clk = clk;
+ }
+
+ pm_clk_acquire(dev, ce);
+
+ pm_clk_list_lock(psd);
+ list_add_tail(&ce->node, &psd->clock_list);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep++;
+ pm_clk_list_unlock(psd);
+ return 0;
+}
+
+/**
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @con_id: Connection ID of the clock.
+ *
+ * Add the clock represented by @con_id to the list of clocks used for
+ * the power management of @dev.
+ */
+int pm_clk_add(struct device *dev, const char *con_id)
+{
+ return __pm_clk_add(dev, con_id, NULL);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add);
+
+/**
+ * pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @clk: Clock pointer
+ *
+ * Add the clock to the list of clocks used for the power management of @dev.
+ * The power-management code will take control of the clock reference, so
+ * callers should not call clk_put() on @clk after this function has
+ * successfully returned.
+ */
+int pm_clk_add_clk(struct device *dev, struct clk *clk)
+{
+ return __pm_clk_add(dev, NULL, clk);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add_clk);
+
+
+/**
+ * of_pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @name: Name of clock that is going to be used for power management.
+ *
+ * Add the clock described in the 'clocks' device-tree node that matches
+ * the given 'name' to the list of clocks used for the power management
+ * of @dev. On success, returns 0. Returns a negative error code if the
+ * clock is not found or cannot be added.
+ */
+int of_pm_clk_add_clk(struct device *dev, const char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ if (!dev || !dev->of_node || !name)
+ return -EINVAL;
+
+ clk = of_clk_get_by_name(dev->of_node, name);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = pm_clk_add_clk(dev, clk);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
+
+/**
+ * of_pm_clk_add_clks - Start using device clock(s) for power management.
+ * @dev: Device whose clock(s) are going to be used for power management.
+ *
+ * Add a series of clocks described in the 'clocks' device-tree node for
+ * a device to the list of clocks used for the power management of @dev.
+ * On success, returns the number of clocks added. Returns a negative
+ * error code if there are no clocks in the device node for the device
+ * or if adding a clock fails.
+ */
+int of_pm_clk_add_clks(struct device *dev)
+{
+ struct clk **clks;
+ int i, count;
+ int ret;
+
+ if (!dev || !dev->of_node)
+ return -EINVAL;
+
+ count = of_clk_get_parent_count(dev->of_node);
+ if (count <= 0)
+ return -ENODEV;
+
+ clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ clks[i] = of_clk_get(dev->of_node, i);
+ if (IS_ERR(clks[i])) {
+ ret = PTR_ERR(clks[i]);
+ goto error;
+ }
+
+ ret = pm_clk_add_clk(dev, clks[i]);
+ if (ret) {
+ clk_put(clks[i]);
+ goto error;
+ }
+ }
+
+ kfree(clks);
+
+ return i;
+
+error:
+ while (i--)
+ pm_clk_remove_clk(dev, clks[i]);
+
+ kfree(clks);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
+
+/**
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
+ */
+static void __pm_clk_remove(struct pm_clock_entry *ce)
+{
+ if (!ce)
+ return;
+
+ switch (ce->status) {
+ case PCE_STATUS_ENABLED:
+ clk_disable(ce->clk);
+ fallthrough;
+ case PCE_STATUS_PREPARED:
+ clk_unprepare(ce->clk);
+ fallthrough;
+ case PCE_STATUS_ACQUIRED:
+ case PCE_STATUS_ERROR:
+ if (!IS_ERR(ce->clk))
+ clk_put(ce->clk);
+ break;
+ default:
+ break;
+ }
+
+ kfree(ce->con_id);
+ kfree(ce);
+}
+
+/**
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @con_id: Connection ID of the clock.
+ *
+ * Remove the clock represented by @con_id from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove(struct device *dev, const char *con_id)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd)
+ return;
+
+ pm_clk_list_lock(psd);
+
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (!con_id && !ce->con_id)
+ goto remove;
+ else if (!con_id || !ce->con_id)
+ continue;
+ else if (!strcmp(con_id, ce->con_id))
+ goto remove;
+ }
+
+ pm_clk_list_unlock(psd);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
+
+ __pm_clk_remove(ce);
+}
+EXPORT_SYMBOL_GPL(pm_clk_remove);
+
+/**
+ * pm_clk_remove_clk - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
+ * @clk: Clock pointer
+ *
+ * Remove the clock pointed to by @clk from the list of clocks used for
+ * the power management of @dev.
+ */
+void pm_clk_remove_clk(struct device *dev, struct clk *clk)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+
+ if (!psd || !clk)
+ return;
+
+ pm_clk_list_lock(psd);
+
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (clk == ce->clk)
+ goto remove;
+ }
+
+ pm_clk_list_unlock(psd);
+ return;
+
+ remove:
+ list_del(&ce->node);
+ if (ce->enabled_when_prepared)
+ psd->clock_op_might_sleep--;
+ pm_clk_list_unlock(psd);
+
+ __pm_clk_remove(ce);
+}
+EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
+
+/**
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
+ *
+ * Initialize the clock_list and clock_mutex members of the device's
+ * pm_subsys_data object and set the clock_op_might_sleep count to 0.
+ */
+void pm_clk_init(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ if (psd) {
+ INIT_LIST_HEAD(&psd->clock_list);
+ mutex_init(&psd->clock_mutex);
+ psd->clock_op_might_sleep = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(pm_clk_init);
+
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+ return dev_pm_get_subsys_data(dev);
+}
+EXPORT_SYMBOL_GPL(pm_clk_create);
+
+/**
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
+ *
+ * Clear @dev's power.subsys_data field, remove the list of clock entries
+ * from the struct pm_subsys_data object it pointed to, and free that
+ * object.
+ */
+void pm_clk_destroy(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce, *c;
+ struct list_head list;
+
+ if (!psd)
+ return;
+
+ INIT_LIST_HEAD(&list);
+
+ pm_clk_list_lock(psd);
+
+ list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+ list_move(&ce->node, &list);
+ psd->clock_op_might_sleep = 0;
+
+ pm_clk_list_unlock(psd);
+
+ dev_pm_put_subsys_data(dev);
+
+ list_for_each_entry_safe_reverse(ce, c, &list, node) {
+ list_del(&ce->node);
+ __pm_clk_remove(ce);
+ }
+}
+EXPORT_SYMBOL_GPL(pm_clk_destroy);
+
+static void pm_clk_destroy_action(void *data)
+{
+ pm_clk_destroy(data);
+}
+
+int devm_pm_clk_create(struct device *dev)
+{
+ int ret;
+
+ ret = pm_clk_create(dev);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, pm_clk_destroy_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_clk_create);
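+
+/*
+ * A probe() routine can combine the managed variant with pm_clk_add()
+ * so the clock list is torn down automatically on driver detach
+ * (sketch only, foo_probe() is illustrative):
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		int ret;
+ *
+ *		ret = devm_pm_clk_create(&pdev->dev);
+ *		if (ret)
+ *			return ret;
+ *
+ *		ret = pm_clk_add(&pdev->dev, NULL);
+ *		if (ret)
+ *			return ret;
+ *
+ *		pm_runtime_enable(&pdev->dev);
+ *		return 0;
+ *	}
+ */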
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (!psd)
+ return 0;
+
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
+
+ list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+ if (ce->status == PCE_STATUS_ENABLED) {
+ if (ce->enabled_when_prepared) {
+ clk_disable_unprepare(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ } else {
+ clk_disable(ce->clk);
+ ce->status = PCE_STATUS_PREPARED;
+ }
+ }
+ }
+
+ pm_clk_op_unlock(psd, &flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_suspend);
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+ struct pm_subsys_data *psd = dev_to_psd(dev);
+ struct pm_clock_entry *ce;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (!psd)
+ return 0;
+
+ ret = pm_clk_op_lock(psd, &flags, __func__);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(ce, &psd->clock_list, node)
+ __pm_clk_enable(dev, ce);
+
+ pm_clk_op_unlock(psd, &flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_resume);
+
+/**
+ * pm_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
+ *
+ * If the device's pm_domain field is already populated with a value different
+ * from the one stored in the struct pm_clk_notifier_block object, the function
+ * does nothing.
+ */
+static int pm_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pm_clk_notifier_block *clknb;
+ struct device *dev = data;
+ char **con_id;
+ int error;
+
+ dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+ clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ if (dev->pm_domain)
+ break;
+
+ error = pm_clk_create(dev);
+ if (error)
+ break;
+
+ dev_pm_domain_set(dev, clknb->pm_domain);
+ if (clknb->con_ids[0]) {
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ pm_clk_add(dev, *con_id);
+ } else {
+ pm_clk_add(dev, NULL);
+ }
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (dev->pm_domain != clknb->pm_domain)
+ break;
+
+ dev_pm_domain_set(dev, NULL);
+ pm_clk_destroy(dev);
+ break;
+ }
+
+ return 0;
+}
+
+int pm_clk_runtime_suspend(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret) {
+ dev_err(dev, "failed to suspend device\n");
+ return ret;
+ }
+
+ ret = pm_clk_suspend(dev);
+ if (ret) {
+ dev_err(dev, "failed to suspend clock\n");
+ pm_generic_runtime_resume(dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
+
+int pm_clk_runtime_resume(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_clk_resume(dev);
+ if (ret) {
+ dev_err(dev, "failed to resume clock\n");
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
+
+#else /* !CONFIG_PM_CLK */
+
+/**
+ * enable_clock - Enable a device clock.
+ * @dev: Device whose clock is to be enabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void enable_clock(struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ dev_info(dev, "Runtime PM disabled, clock forced on.\n");
+ }
+}
+
+/**
+ * disable_clock - Disable a device clock.
+ * @dev: Device whose clock is to be disabled.
+ * @con_id: Connection ID of the clock.
+ */
+static void disable_clock(struct device *dev, const char *con_id)
+{
+ struct clk *clk;
+
+ clk = clk_get(dev, con_id);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ dev_info(dev, "Runtime PM disabled, clock forced off.\n");
+ }
+}
+
+/**
+ * pm_clk_notify - Notify routine for device addition and removal.
+ * @nb: Notifier block object this function is a member of.
+ * @action: Operation being carried out by the caller.
+ * @data: Device the routine is being run for.
+ *
+ * For this function to work, @nb must be a member of an object of type
+ * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the con_ids member of that object is used to enable or disable
+ * the device's clocks, depending on @action.
+ */
+static int pm_clk_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pm_clk_notifier_block *clknb;
+ struct device *dev = data;
+ char **con_id;
+
+ dev_dbg(dev, "%s() %ld\n", __func__, action);
+
+ clknb = container_of(nb, struct pm_clk_notifier_block, nb);
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ if (clknb->con_ids[0]) {
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ enable_clock(dev, *con_id);
+ } else {
+ enable_clock(dev, NULL);
+ }
+ break;
+ case BUS_NOTIFY_DRIVER_NOT_BOUND:
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ if (clknb->con_ids[0]) {
+ for (con_id = clknb->con_ids; *con_id; con_id++)
+ disable_clock(dev, *con_id);
+ } else {
+ disable_clock(dev, NULL);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* !CONFIG_PM_CLK */
+
+/**
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
+ * @bus: Bus type to add the notifier to.
+ * @clknb: Notifier to be added to the given bus type.
+ *
+ * The nb member of @clknb is not expected to be initialized and its
+ * notifier_call member will be replaced with pm_clk_notify(). However,
+ * the remaining members of @clknb should be populated prior to calling this
+ * routine.
+ */
+void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb)
+{
+ if (!bus || !clknb)
+ return;
+
+ clknb->nb.notifier_call = pm_clk_notify;
+ bus_register_notifier(bus, &clknb->nb);
+}
+EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
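+
+/*
+ * Usage sketch (illustrative only; the foo_* names and the "fck" con_id are
+ * hypothetical): SoC code can register a notifier so that every device added
+ * to a bus gets the given PM domain assigned and its clocks managed through
+ * the pm_clk helpers.
+ *
+ *	static struct dev_pm_domain foo_pm_domain = {
+ *		.ops = {
+ *			.runtime_suspend = pm_clk_runtime_suspend,
+ *			.runtime_resume = pm_clk_runtime_resume,
+ *		},
+ *	};
+ *
+ *	static struct pm_clk_notifier_block foo_clk_notifier = {
+ *		.pm_domain = &foo_pm_domain,
+ *		.con_ids = { "fck", NULL },
+ *	};
+ *
+ *	pm_clk_add_notifier(&platform_bus_type, &foo_clk_notifier);
+ */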
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 000000000..72115917e
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+#include <linux/acpi.h>
+#include <linux/pm_domain.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter. Return 0 if a new object has been created or the
+ * refcount has been increased, otherwise a negative error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+
+ psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+ if (!psd)
+ return -ENOMEM;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data) {
+ dev->power.subsys_data->refcount++;
+ } else {
+ spin_lock_init(&psd->lock);
+ psd->refcount = 1;
+ dev->power.subsys_data = psd;
+ pm_clk_init(dev);
+ psd = NULL;
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ /* kfree() itself checks for a NULL argument. */
+ kfree(psd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.
+ */
+void dev_pm_put_subsys_data(struct device *dev)
+{
+ struct pm_subsys_data *psd;
+
+ spin_lock_irq(&dev->power.lock);
+
+ psd = dev_to_psd(dev);
+ if (!psd)
+ goto out;
+
+ if (--psd->refcount == 0)
+ dev->power.subsys_data = NULL;
+ else
+ psd = NULL;
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+ kfree(psd);
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+
+/**
+ * dev_pm_domain_attach - Attach a device to its PM domain.
+ * @dev: Device to attach.
+ * @power_on: Used to indicate whether we should power on the device.
+ *
+ * The @dev may only be attached to a single PM domain. By iterating through
+ * the available alternatives we try to find a valid PM domain for the device.
+ * If attachment succeeds, the ->detach() callback in the struct dev_pm_domain
+ * should be assigned by the corresponding attach function.
+ *
+ * This function should typically be invoked from subsystem level code during
+ * the probe phase, especially for subsystems that hold devices which require
+ * power management through PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns 0 on successfully attached PM domain, or when it is found that the
+ * device doesn't need a PM domain, else a negative error code.
+ */
+int dev_pm_domain_attach(struct device *dev, bool power_on)
+{
+ int ret;
+
+ if (dev->pm_domain)
+ return 0;
+
+ ret = acpi_dev_pm_attach(dev, power_on);
+ if (!ret)
+ ret = genpd_dev_pm_attach(dev);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
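+
+/*
+ * Usage sketch (illustrative only): subsystem code typically attaches before
+ * binding a driver and detaches again if binding fails or on remove. The
+ * foo_bus_* names are hypothetical.
+ *
+ *	static int foo_bus_probe(struct device *dev)
+ *	{
+ *		int ret;
+ *
+ *		ret = dev_pm_domain_attach(dev, true);
+ *		if (ret)
+ *			return ret;
+ *
+ *		ret = foo_bus_bind_driver(dev);
+ *		if (ret)
+ *			dev_pm_domain_detach(dev, true);
+ *
+ *		return ret;
+ *	}
+ */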
+
+/**
+ * dev_pm_domain_attach_by_id - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @index: The index of the PM domain.
+ *
+ * As @dev may only be attached to a single PM domain, the backend PM domain
+ * provider creates a virtual device to attach instead. If attachment succeeds,
+ * the ->detach() callback in the struct dev_pm_domain is assigned by the
+ * corresponding backend attach function, so as to handle detaching of the
+ * created virtual device.
+ *
+ * This function should typically be invoked by a driver during the probe phase,
+ * in case its device requires power management through multiple PM domains. The
+ * driver may benefit from using the received device, to configure device-links
+ * towards its original device. Depending on the use-case and if needed, the
+ * links may be dynamically changed by the driver, which allows it to control
+ * the power to the PM domains independently from each other.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns the newly created virtual device when successfully attached to its
+ * PM domain, NULL in case @dev doesn't need a PM domain, else an ERR_PTR().
+ * Note that to detach the returned virtual device, the driver shall call
+ * dev_pm_domain_detach() on it, typically during the remove phase.
+ */
+struct device *dev_pm_domain_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id);
+
+/**
+ * dev_pm_domain_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * For a detailed function description, see dev_pm_domain_attach_by_id().
+ */
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ const char *name)
+{
+ if (dev->pm_domain)
+ return ERR_PTR(-EEXIST);
+
+ return genpd_dev_pm_attach_by_name(dev, name);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_name);
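+
+/*
+ * Usage sketch (illustrative only; the "perf" domain name is hypothetical): a
+ * driver with multiple PM domains can attach one by name and tie its runtime
+ * PM state to the consumer with a device link, as suggested above.
+ *
+ *	struct device *pd_dev;
+ *
+ *	pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
+ *	if (IS_ERR(pd_dev))
+ *		return PTR_ERR(pd_dev);
+ *	if (pd_dev && !device_link_add(dev, pd_dev, DL_FLAG_STATELESS |
+ *				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE))
+ *		return -ENODEV;
+ */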
+
+/**
+ * dev_pm_domain_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Used to indicate whether we should power off the device.
+ *
+ * This function reverses the actions of dev_pm_domain_attach(),
+ * dev_pm_domain_attach_by_id() and dev_pm_domain_attach_by_name(), thus
+ * detaching @dev from its PM domain. Typically it should be invoked during the
+ * remove phase, either from subsystem level code or from drivers.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach(struct device *dev, bool power_off)
+{
+ if (dev->pm_domain && dev->pm_domain->detach)
+ dev->pm_domain->detach(dev, power_off);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
+
+/**
+ * dev_pm_domain_start - Start the device through its PM domain.
+ * @dev: Device to start.
+ *
+ * This function should typically be called during probe by a subsystem/driver,
+ * when it needs to start its device from the PM domain's perspective. Note
+ * that it's assumed that the PM domain is already powered on when this
+ * function is called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_domain_start(struct device *dev)
+{
+ if (dev->pm_domain && dev->pm_domain->start)
+ return dev->pm_domain->start(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_start);
+
+/**
+ * dev_pm_domain_set - Set PM domain of a device.
+ * @dev: Device whose PM domain is to be set.
+ * @pd: PM domain to be set, or NULL.
+ *
+ * Sets the PM domain the device belongs to. The PM domain of a device needs
+ * to be set before its probe finishes (it's bound to a driver).
+ *
+ * This function must be called with the device lock held.
+ */
+void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
+{
+ if (dev->pm_domain == pd)
+ return;
+
+ WARN(pd && device_is_bound(dev),
+ "PM domains can only be changed for unbound devices\n");
+ dev->pm_domain = pd;
+ device_pm_check_callbacks(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_set);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000..56ceba469
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,3407 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_clock.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/export.h>
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+
+#include "power.h"
+
+#define GENPD_RETRY_MAX_MS 250 /* Approximate */
+
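+/*
+ * Invoke an optional per-device callback from genpd->dev_ops (e.g. ->stop()
+ * or ->start()). Evaluates to the callback's return value, or to (type)0 when
+ * no such callback is set.
+ */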
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
+({ \
+ type (*__routine)(struct device *__d); \
+ type __ret = (type)0; \
+ \
+ __routine = genpd->dev_ops.callback; \
+ if (__routine) { \
+ __ret = __routine(dev); \
+ } \
+ __ret; \
+})
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+struct genpd_lock_ops {
+ void (*lock)(struct generic_pm_domain *genpd);
+ void (*lock_nested)(struct generic_pm_domain *genpd, int depth);
+ int (*lock_interruptible)(struct generic_pm_domain *genpd);
+ void (*unlock)(struct generic_pm_domain *genpd);
+};
+
+static void genpd_lock_mtx(struct generic_pm_domain *genpd)
+{
+ mutex_lock(&genpd->mlock);
+}
+
+static void genpd_lock_nested_mtx(struct generic_pm_domain *genpd,
+ int depth)
+{
+ mutex_lock_nested(&genpd->mlock, depth);
+}
+
+static int genpd_lock_interruptible_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_lock_interruptible(&genpd->mlock);
+}
+
+static void genpd_unlock_mtx(struct generic_pm_domain *genpd)
+{
+ return mutex_unlock(&genpd->mlock);
+}
+
+static const struct genpd_lock_ops genpd_mtx_ops = {
+ .lock = genpd_lock_mtx,
+ .lock_nested = genpd_lock_nested_mtx,
+ .lock_interruptible = genpd_lock_interruptible_mtx,
+ .unlock = genpd_unlock_mtx,
+};
+
+static void genpd_lock_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+}
+
+static void genpd_lock_nested_spin(struct generic_pm_domain *genpd,
+ int depth)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave_nested(&genpd->slock, flags, depth);
+ genpd->lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_spin(struct generic_pm_domain *genpd)
+ __acquires(&genpd->slock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&genpd->slock, flags);
+ genpd->lock_flags = flags;
+ return 0;
+}
+
+static void genpd_unlock_spin(struct generic_pm_domain *genpd)
+ __releases(&genpd->slock)
+{
+ spin_unlock_irqrestore(&genpd->slock, genpd->lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_spin_ops = {
+ .lock = genpd_lock_spin,
+ .lock_nested = genpd_lock_nested_spin,
+ .lock_interruptible = genpd_lock_interruptible_spin,
+ .unlock = genpd_unlock_spin,
+};
+
+#define genpd_lock(p) p->lock_ops->lock(p)
+#define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
+#define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
+#define genpd_unlock(p) p->lock_ops->unlock(p)
+
+#define genpd_status_on(genpd) (genpd->status == GENPD_STATE_ON)
+#define genpd_is_irq_safe(genpd) (genpd->flags & GENPD_FLAG_IRQ_SAFE)
+#define genpd_is_always_on(genpd) (genpd->flags & GENPD_FLAG_ALWAYS_ON)
+#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
+#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
+#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
+
+static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
+ const struct generic_pm_domain *genpd)
+{
+ bool ret;
+
+ ret = pm_runtime_is_irq_safe(dev) && !genpd_is_irq_safe(genpd);
+
+ /*
+ * Warn once if an IRQ safe device is attached to a domain whose
+ * callbacks are allowed to sleep. This indicates a suboptimal
+ * configuration for PM, but it doesn't matter for an always on domain.
+ */
+ if (genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd))
+ return ret;
+
+ if (ret)
+ dev_warn_once(dev, "PM domain %s will not be powered off\n",
+ genpd->name);
+
+ return ret;
+}
+
+static int genpd_runtime_suspend(struct device *dev);
+
+/*
+ * Get the generic PM domain for a particular struct device.
+ * This validates the struct device pointer, the PM domain pointer,
+ * and checks that the PM domain pointer is a real generic PM domain.
+ * Any failure results in NULL being returned.
+ */
+static struct generic_pm_domain *dev_to_genpd_safe(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
+ return NULL;
+
+ /* A genpd always has its ->runtime_suspend() callback assigned. */
+ if (dev->pm_domain->ops.runtime_suspend == genpd_runtime_suspend)
+ return pd_to_genpd(dev->pm_domain);
+
+ return NULL;
+}
+
+/*
+ * This should only be used where we are certain that the pm_domain
+ * attached to the device is a genpd domain.
+ */
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev->pm_domain))
+ return ERR_PTR(-EINVAL);
+
+ return pd_to_genpd(dev->pm_domain);
+}
+
+static int genpd_stop_dev(const struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
+}
+
+static int genpd_start_dev(const struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+ bool ret = false;
+
+ if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+ ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+ return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+ atomic_inc(&genpd->sd_count);
+ smp_mb__after_atomic();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *genpd_debugfs_dir;
+
+static void genpd_debug_add(struct generic_pm_domain *genpd);
+
+static void genpd_debug_remove(struct generic_pm_domain *genpd)
+{
+ if (!genpd_debugfs_dir)
+ return;
+
+ debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
+}
+
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+ u64 delta, now;
+
+ now = ktime_get_mono_fast_ns();
+ if (now <= genpd->accounting_time)
+ return;
+
+ delta = now - genpd->accounting_time;
+
+ /*
+ * If genpd->status is active, the domain has just come out of its
+ * off state, so account the elapsed time as idle time for that
+ * state; otherwise account it as on time.
+ */
+ if (genpd->status == GENPD_STATE_ON)
+ genpd->states[genpd->state_idx].idle_time += delta;
+ else
+ genpd->on_time += delta;
+
+ genpd->accounting_time = now;
+}
+#else
+static inline void genpd_debug_add(struct generic_pm_domain *genpd) {}
+static inline void genpd_debug_remove(struct generic_pm_domain *genpd) {}
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct generic_pm_domain_data *pd_data;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ /* New requested state is same as Max requested state */
+ if (state == genpd->performance_state)
+ return state;
+
+ /* New requested state is higher than Max requested state */
+ if (state > genpd->performance_state)
+ return state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ /*
+ * Traverse all sub-domains within the domain. This can be
+ * done without any additional locking as the link->performance_state
+ * field is protected by the parent genpd->lock, which is already taken.
+ *
+ * Also note that link->performance_state (subdomain's performance state
+ * requirement to parent domain) is different from
+ * link->child->performance_state (current performance state requirement
+ * of the devices/sub-domains of the subdomain) and so can have a
+ * different value.
+ *
+ * Note that we also take votes from powered-off sub-domains into account,
+ * as the same is done for devices right now.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (link->performance_state > state)
+ state = link->performance_state;
+ }
+
+ return state;
+}
+
+static int genpd_xlate_performance_state(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *parent,
+ unsigned int pstate)
+{
+ if (!parent->set_performance_state)
+ return pstate;
+
+ return dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ parent->opp_table,
+ pstate);
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *parent;
+ struct gpd_link *link;
+ int parent_state, ret;
+
+ if (state == genpd->performance_state)
+ return 0;
+
+ /* Propagate to parents of genpd */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ parent = link->parent;
+
+ /* Find parent's performance state */
+ ret = genpd_xlate_performance_state(genpd, parent, state);
+ if (unlikely(ret < 0))
+ goto err;
+
+ parent_state = ret;
+
+ genpd_lock_nested(parent, depth + 1);
+
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = parent_state;
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ ret = _genpd_set_performance_state(parent, parent_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;
+
+ genpd_unlock(parent);
+
+ if (ret)
+ goto err;
+ }
+
+ if (genpd->set_performance_state) {
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+ }
+
+ genpd->performance_state = state;
+ return 0;
+
+err:
+ /* Encountered an error, let's roll back */
+ list_for_each_entry_continue_reverse(link, &genpd->child_links,
+ child_node) {
+ parent = link->parent;
+
+ genpd_lock_nested(parent, depth + 1);
+
+ parent_state = link->prev_performance_state;
+ link->performance_state = parent_state;
+
+ parent_state = _genpd_reeval_performance_state(parent,
+ parent_state);
+ if (_genpd_set_performance_state(parent, parent_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ parent->name, parent_state);
+ }
+
+ genpd_unlock(parent);
+ }
+
+ return ret;
+}
+
+static int genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+ struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
+ unsigned int prev_state;
+ int ret;
+
+ prev_state = gpd_data->performance_state;
+ if (prev_state == state)
+ return 0;
+
+ gpd_data->performance_state = state;
+ state = _genpd_reeval_performance_state(genpd, state);
+
+ ret = _genpd_set_performance_state(genpd, state, 0);
+ if (ret)
+ gpd_data->performance_state = prev_state;
+
+ return ret;
+}
+
+static int genpd_drop_performance_state(struct device *dev)
+{
+ unsigned int prev_state = dev_gpd_data(dev)->performance_state;
+
+ if (!genpd_set_performance_state(dev, 0))
+ return prev_state;
+
+ return 0;
+}
+
+static void genpd_restore_performance_state(struct device *dev,
+ unsigned int state)
+{
+ if (state)
+ genpd_set_performance_state(dev, state);
+}
+
+/**
+ * dev_pm_genpd_set_performance_state- Set performance state of device's power
+ * domain.
+ *
+ * @dev: Device for which the performance-state needs to be set.
+ * @state: Target performance state of the device. This can be set as 0 when the
+ * device doesn't have any performance state constraints left (and so
+ * the device no longer participates in determining the target
+ * performance state of the genpd).
+ *
+ * It is assumed that the caller guarantees that the genpd won't be detached
+ * while this routine is running.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ genpd_lock(genpd);
+ if (pm_runtime_suspended(dev)) {
+ dev_gpd_data(dev)->rpm_pstate = state;
+ } else {
+ ret = genpd_set_performance_state(dev, state);
+ if (!ret)
+ dev_gpd_data(dev)->rpm_pstate = 0;
+ }
+ genpd_unlock(genpd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
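+
+/*
+ * Usage sketch (illustrative only; the state value 3 is hypothetical and
+ * would normally be derived from an OPP table): a consumer raises its
+ * performance requirement while active and drops it back to 0 when done.
+ *
+ *	ret = dev_pm_genpd_set_performance_state(dev, 3);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	dev_pm_genpd_set_performance_state(dev, 0);
+ */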
+
+/**
+ * dev_pm_genpd_set_next_wakeup - Notify PM framework of an impending wakeup.
+ *
+ * @dev: Device to handle
+ * @next: impending interrupt/wakeup for the device
+ *
+ * Allow devices to inform of the next wakeup. It's assumed that the caller
+ * guarantees that the genpd won't be detached while this routine is running.
+ * Additionally, it's also assumed that @dev isn't runtime suspended
+ * (RPM_SUSPENDED).
+ * Although devices are expected to update the next_wakeup after the end of
+ * their usecase as well, it is possible the devices themselves may not know
+ * about that, so stale @next will be ignored when powering off the domain.
+ */
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+{
+ struct generic_pm_domain *genpd;
+ struct gpd_timing_data *td;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ td = to_gpd_data(dev->power.subsys_data->domain_data)->td;
+ if (td)
+ td->next_wakeup = next;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);
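+
+/*
+ * Usage sketch (illustrative only; the 2 ms figure is hypothetical): a driver
+ * expecting an interrupt shortly can hint this to the domain governor.
+ *
+ *	dev_pm_genpd_set_next_wakeup(dev, ktime_add_ms(ktime_get(), 2));
+ */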
+
+static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
+{
+ unsigned int state_idx = genpd->state_idx;
+ ktime_t time_start;
+ s64 elapsed_ns;
+ int ret;
+
+ /* Notify consumers that we are about to power on. */
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
+ GENPD_NOTIFY_PRE_ON,
+ GENPD_NOTIFY_OFF, NULL);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ return ret;
+
+ if (!genpd->power_on)
+ goto out;
+
+ timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
+ if (!timed) {
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto err;
+
+ goto out;
+ }
+
+ time_start = ktime_get();
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto err;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
+ goto out;
+
+ genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
+ genpd->gd->max_off_time_changed = true;
+ pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+ genpd->name, "on", elapsed_ns);
+
+out:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ return 0;
+err:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
+ NULL);
+ return ret;
+}
+
+static int _genpd_power_off(struct generic_pm_domain *genpd, bool timed)
+{
+ unsigned int state_idx = genpd->state_idx;
+ ktime_t time_start;
+ s64 elapsed_ns;
+ int ret;
+
+ /* Notify consumers that we are about to power off. */
+ ret = raw_notifier_call_chain_robust(&genpd->power_notifiers,
+ GENPD_NOTIFY_PRE_OFF,
+ GENPD_NOTIFY_ON, NULL);
+ ret = notifier_to_errno(ret);
+ if (ret)
+ return ret;
+
+ if (!genpd->power_off)
+ goto out;
+
+ timed = timed && genpd->gd && !genpd->states[state_idx].fwnode;
+ if (!timed) {
+ ret = genpd->power_off(genpd);
+ if (ret)
+ goto busy;
+
+ goto out;
+ }
+
+ time_start = ktime_get();
+ ret = genpd->power_off(genpd);
+ if (ret)
+ goto busy;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
+ goto out;
+
+ genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
+ genpd->gd->max_off_time_changed = true;
+ pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
+ genpd->name, "off", elapsed_ns);
+
+out:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_OFF,
+ NULL);
+ return 0;
+busy:
+ raw_notifier_call_chain(&genpd->power_notifiers, GENPD_NOTIFY_ON, NULL);
+ return ret;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of genpd_power_off().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of genpd_power_off() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the related device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ * @depth: nesting count for lockdep.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+ unsigned int depth)
+{
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+ unsigned int not_suspended = 0;
+ int ret;
+
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ */
+ if (!genpd_status_on(genpd) || genpd->prepared_count > 0)
+ return 0;
+
+ /*
+ * Abort power off for the PM domain in the following situations:
+ * (1) The domain is configured as always on.
+ * (2) When the domain has a subdomain being powered on.
+ */
+ if (genpd_is_always_on(genpd) ||
+ genpd_is_rpm_always_on(genpd) ||
+ atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
+
+ /*
+ * The children must be in their deepest (powered-off) states to allow
+ * the parent to be powered off. Note that there's no need for
+ * additional locking, as powering on a child requires the parent's
+ * lock to be acquired first.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return -EBUSY;
+ }
+
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ /*
+ * Do not allow PM domain to be powered off, when an IRQ safe
+ * device is part of a non-IRQ safe domain.
+ */
+ if (!pm_runtime_suspended(pdd->dev) ||
+ irq_safe_dev_in_sleep_domain(pdd->dev, genpd))
+ not_suspended++;
+ }
+
+ if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+ return -EBUSY;
+
+ if (genpd->gov && genpd->gov->power_down_ok) {
+ if (!genpd->gov->power_down_ok(&genpd->domain))
+ return -EAGAIN;
+ }
+
+ /* Default to shallowest state. */
+ if (!genpd->gov)
+ genpd->state_idx = 0;
+
+ /* Don't power off, if a child domain is waiting to power on. */
+ if (atomic_read(&genpd->sd_count) > 0)
+ return -EBUSY;
+
+ ret = _genpd_power_off(genpd, true);
+ if (ret) {
+ genpd->states[genpd->state_idx].rejected++;
+ return ret;
+ }
+
+ genpd->status = GENPD_STATE_OFF;
+ genpd_update_accounting(genpd);
+ genpd->states[genpd->state_idx].usage++;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
+ }
+
+ return 0;
+}
+
+/**
+ * genpd_power_on - Restore power to a given PM domain and its parents.
+ * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
+ *
+ * Restore power to @genpd and all of its parents so that it is possible to
+ * resume a device belonging to it.
+ */
+static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
+{
+ struct gpd_link *link;
+ int ret = 0;
+
+ if (genpd_status_on(genpd))
+ return 0;
+
+ /*
+ * The list is guaranteed not to change while the loop below is being
+ * executed, unless one of the parents' .power_on() callbacks fiddles
+ * with it.
+ */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
+
+ genpd_sd_counter_inc(parent);
+
+ genpd_lock_nested(parent, depth + 1);
+ ret = genpd_power_on(parent, depth + 1);
+ genpd_unlock(parent);
+
+ if (ret) {
+ genpd_sd_counter_dec(parent);
+ goto err;
+ }
+ }
+
+ ret = _genpd_power_on(genpd, true);
+ if (ret)
+ goto err;
+
+ genpd->status = GENPD_STATE_ON;
+ genpd_update_accounting(genpd);
+
+ return 0;
+
+ err:
+ list_for_each_entry_continue_reverse(link,
+ &genpd->child_links,
+ child_node) {
+ genpd_sd_counter_dec(link->parent);
+ genpd_lock_nested(link->parent, depth + 1);
+ genpd_power_off(link->parent, false, depth + 1);
+ genpd_unlock(link->parent);
+ }
+
+ return ret;
+}
+
+static int genpd_dev_pm_start(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+ return genpd_start_dev(genpd, dev);
+}
+
+static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
+ unsigned long val, void *ptr)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct device *dev;
+
+ gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
+ dev = gpd_data->base.dev;
+
+ for (;;) {
+ struct generic_pm_domain *genpd = ERR_PTR(-ENODATA);
+ struct pm_domain_data *pdd;
+ struct gpd_timing_data *td;
+
+ spin_lock_irq(&dev->power.lock);
+
+ pdd = dev->power.subsys_data ?
+ dev->power.subsys_data->domain_data : NULL;
+ if (pdd) {
+ td = to_gpd_data(pdd)->td;
+ if (td) {
+ td->constraint_changed = true;
+ genpd = dev_to_genpd(dev);
+ }
+ }
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!IS_ERR(genpd)) {
+ genpd_lock(genpd);
+ genpd->gd->max_off_time_changed = true;
+ genpd_unlock(genpd);
+ }
+
+ dev = dev->parent;
+ if (!dev || dev->power.ignore_children)
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+ genpd_lock(genpd);
+ genpd_power_off(genpd, false, 0);
+ genpd_unlock(genpd);
+}
+
+/**
+ * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int genpd_runtime_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ bool (*suspend_ok)(struct device *__dev);
+ struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
+ struct gpd_timing_data *td = gpd_data->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
+ ktime_t time_start = 0;
+ s64 elapsed_ns;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * A runtime PM centric subsystem/driver may re-use the runtime PM
+ * callbacks for purposes other than runtime PM. In those scenarios
+ * runtime PM is disabled. Under these circumstances, we shall skip
+ * validating/measuring the PM QoS latency.
+ */
+ suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
+ if (runtime_pm && suspend_ok && !suspend_ok(dev))
+ return -EBUSY;
+
+ /* Measure suspend latency. */
+ if (td && runtime_pm)
+ time_start = ktime_get();
+
+ ret = __genpd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ __genpd_runtime_resume(dev);
+ return ret;
+ }
+
+ /* Update suspend latency value if the measured time exceeds it. */
+ if (td && runtime_pm) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->suspend_latency_ns) {
+ td->suspend_latency_ns = elapsed_ns;
+ dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->gd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+ }
+
+ /*
+ * If power.irq_safe is set, this routine may be run with
+ * IRQs disabled, so suspend only if the PM domain also is irq_safe.
+ */
+ if (irq_safe_dev_in_sleep_domain(dev, genpd))
+ return 0;
+
+ genpd_lock(genpd);
+ gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
+ genpd_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
+
+ return 0;
+}
+
+/**
+ * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int genpd_runtime_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data = dev_gpd_data(dev);
+ struct gpd_timing_data *td = gpd_data->td;
+ bool timed = td && pm_runtime_enabled(dev);
+ ktime_t time_start = 0;
+ s64 elapsed_ns;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * As we don't power off a non IRQ safe domain, which holds
+ * an IRQ safe device, we don't need to restore power to it.
+ */
+ if (irq_safe_dev_in_sleep_domain(dev, genpd))
+ goto out;
+
+ genpd_lock(genpd);
+ ret = genpd_power_on(genpd, 0);
+ if (!ret)
+ genpd_restore_performance_state(dev, gpd_data->rpm_pstate);
+ genpd_unlock(genpd);
+
+ if (ret)
+ return ret;
+
+ out:
+ /* Measure resume latency. */
+ if (timed)
+ time_start = ktime_get();
+
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ goto err_poweroff;
+
+ ret = __genpd_runtime_resume(dev);
+ if (ret)
+ goto err_stop;
+
+ /* Update resume latency value if the measured time exceeds it. */
+ if (timed) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->resume_latency_ns) {
+ td->resume_latency_ns = elapsed_ns;
+ dev_dbg(dev, "resume latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->gd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+ }
+
+ return 0;
+
+err_stop:
+ genpd_stop_dev(genpd, dev);
+err_poweroff:
+ if (!pm_runtime_is_irq_safe(dev) || genpd_is_irq_safe(genpd)) {
+ genpd_lock(genpd);
+ gpd_data->rpm_pstate = genpd_drop_performance_state(dev);
+ genpd_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
+ }
+
+ return ret;
+}
+
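+/*
+ * Booting with "pd_ignore_unused" on the kernel command line keeps otherwise
+ * unused PM domains powered on, see genpd_power_off_unused() below.
+ */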
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+ pd_ignore_unused = true;
+ return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
+/**
+ * genpd_power_off_unused - Power off all PM domains with no devices in use.
+ */
+static int __init genpd_power_off_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ if (pd_ignore_unused) {
+ pr_warn("genpd: Not disabling unused power domains\n");
+ return 0;
+ }
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+
+ return 0;
+}
+late_initcall(genpd_power_off_unused);
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * genpd_sync_power_off - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so. Also, in that case propagate to its parents.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
+ */
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+ unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_status_on(genpd) || genpd_is_always_on(genpd))
+ return;
+
+ if (genpd->suspended_count != genpd->device_count
+ || atomic_read(&genpd->sd_count) > 0)
+ return;
+
+ /* Check that the children are in their deepest (powered-off) state. */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct generic_pm_domain *child = link->child;
+ if (child->state_idx < child->state_count - 1)
+ return;
+ }
+
+ /* Choose the deepest state when suspending */
+ genpd->state_idx = genpd->state_count - 1;
+ if (_genpd_power_off(genpd, false))
+ return;
+
+ genpd->status = GENPD_STATE_OFF;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_dec(link->parent);
+
+ if (use_lock)
+ genpd_lock_nested(link->parent, depth + 1);
+
+ genpd_sync_power_off(link->parent, use_lock, depth + 1);
+
+ if (use_lock)
+ genpd_unlock(link->parent);
+ }
+}
+
+/**
+ * genpd_sync_power_on - Synchronously power on a PM domain and its parents.
+ * @genpd: PM domain to power on.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
+ */
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+ unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (genpd_status_on(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ genpd_sd_counter_inc(link->parent);
+
+ if (use_lock)
+ genpd_lock_nested(link->parent, depth + 1);
+
+ genpd_sync_power_on(link->parent, use_lock, depth + 1);
+
+ if (use_lock)
+ genpd_unlock(link->parent);
+ }
+
+ _genpd_power_on(genpd, false);
+ genpd->status = GENPD_STATE_ON;
+}
+
+/**
+ * genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int genpd_prepare(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ genpd_lock(genpd);
+
+ if (genpd->prepared_count++ == 0)
+ genpd->suspended_count = 0;
+
+ genpd_unlock(genpd);
+
+ ret = pm_generic_prepare(dev);
+ if (ret < 0) {
+ genpd_lock(genpd);
+
+ genpd->prepared_count--;
+
+ genpd_unlock(genpd);
+ }
+
+ /* Never return 1, as genpd doesn't cope with the direct_complete path. */
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
+ * genpd_finish_suspend - Completion of suspend or hibernation of device in an
+ * I/O pm domain.
+ * @dev: Device to suspend.
+ * @poweroff: Specifies if this is a poweroff_noirq or suspend_noirq callback.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_finish_suspend(struct device *dev, bool poweroff)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (poweroff)
+ ret = pm_generic_poweroff_noirq(dev);
+ else
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
+ return 0;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret) {
+ if (poweroff)
+ pm_generic_restore_noirq(dev);
+ else
+ pm_generic_resume_noirq(dev);
+ return ret;
+ }
+ }
+
+ genpd_lock(genpd);
+ genpd->suspended_count++;
+ genpd_sync_power_off(genpd, true, 0);
+ genpd_unlock(genpd);
+
+ return 0;
+}
+
+/**
+ * genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_suspend_noirq(struct device *dev)
+{
+ dev_dbg(dev, "%s()\n", __func__);
+
+ return genpd_finish_suspend(dev, false);
+}
+
+/**
+ * genpd_resume_noirq - Start of resume of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Restore power to the device's PM domain, if necessary, and start the device.
+ */
+static int genpd_resume_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (device_wakeup_path(dev) && genpd_is_active_wakeup(genpd))
+ return pm_generic_resume_noirq(dev);
+
+ genpd_lock(genpd);
+ genpd_sync_power_on(genpd, true, 0);
+ genpd->suspended_count--;
+ genpd_unlock(genpd);
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int genpd_freeze_noirq(struct device *dev)
+{
+ const struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ ret = pm_generic_freeze_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev))
+ ret = genpd_stop_dev(genpd, dev);
+
+ return ret;
+}
+
+/**
+ * genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has been removed from the domain already
+ * before the system transition.
+ */
+static int genpd_thaw_noirq(struct device *dev)
+{
+ const struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * genpd_poweroff_noirq - Completion of hibernation of device in an
+ * I/O PM domain.
+ * @dev: Device to poweroff.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int genpd_poweroff_noirq(struct device *dev)
+{
+ dev_dbg(dev, "%s()\n", __func__);
+
+ return genpd_finish_suspend(dev, true);
+}
+
+/**
+ * genpd_restore_noirq - Start of restore of device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Make sure the domain will be in the same power state as before the
+ * hibernation the system is resuming from and start the device if necessary.
+ */
+static int genpd_restore_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * At this point suspended_count == 0 means we are being run for the
+ * first time for the given domain in the present cycle.
+ */
+ genpd_lock(genpd);
+ if (genpd->suspended_count++ == 0) {
+ /*
+ * The boot kernel might put the domain into arbitrary state,
+ * so make it appear as powered off to genpd_sync_power_on(),
+ * so that it tries to power it on in case it was really off.
+ */
+ genpd->status = GENPD_STATE_OFF;
+ }
+
+ genpd_sync_power_on(genpd, true, 0);
+ genpd_unlock(genpd);
+
+ if (genpd->dev_ops.stop && genpd->dev_ops.start &&
+ !pm_runtime_status_suspended(dev)) {
+ ret = genpd_start_dev(genpd, dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void genpd_complete(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return;
+
+ pm_generic_complete(dev);
+
+ genpd_lock(genpd);
+
+ genpd->prepared_count--;
+ if (!genpd->prepared_count)
+ genpd_queue_power_off_work(genpd);
+
+ genpd_unlock(genpd);
+}
+
+static void genpd_switch_state(struct device *dev, bool suspend)
+{
+ struct generic_pm_domain *genpd;
+ bool use_lock;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return;
+
+ use_lock = genpd_is_irq_safe(genpd);
+
+ if (use_lock)
+ genpd_lock(genpd);
+
+ if (suspend) {
+ genpd->suspended_count++;
+ genpd_sync_power_off(genpd, use_lock, 0);
+ } else {
+ genpd_sync_power_on(genpd, use_lock, 0);
+ genpd->suspended_count--;
+ }
+
+ if (use_lock)
+ genpd_unlock(genpd);
+}
+
+/**
+ * dev_pm_genpd_suspend - Synchronously try to suspend the genpd for @dev
+ * @dev: The device that is attached to the genpd, that can be suspended.
+ *
+ * This routine should typically be called for a device that needs to be
+ * suspended during the syscore suspend phase. It may also be called during
+ * suspend-to-idle to suspend a corresponding CPU device that is attached to a
+ * genpd.
+ */
+void dev_pm_genpd_suspend(struct device *dev)
+{
+ genpd_switch_state(dev, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_suspend);
+
+/**
+ * dev_pm_genpd_resume - Synchronously try to resume the genpd for @dev
+ * @dev: The device that is attached to the genpd, which needs to be resumed.
+ *
+ * This routine should typically be called for a device that needs to be resumed
+ * during the syscore resume phase. It may also be called during suspend-to-idle
+ * to resume a corresponding CPU device that is attached to a genpd.
+ */
+void dev_pm_genpd_resume(struct device *dev)
+{
+ genpd_switch_state(dev, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_resume);
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define genpd_prepare NULL
+#define genpd_suspend_noirq NULL
+#define genpd_resume_noirq NULL
+#define genpd_freeze_noirq NULL
+#define genpd_thaw_noirq NULL
+#define genpd_poweroff_noirq NULL
+#define genpd_restore_noirq NULL
+#define genpd_complete NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
+ bool has_governor)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct gpd_timing_data *td;
+ int ret;
+
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ gpd_data->base.dev = dev;
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+
+ /* Allocate data used by a governor. */
+ if (has_governor) {
+ td = kzalloc(sizeof(*td), GFP_KERNEL);
+ if (!td) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ td->constraint_changed = true;
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ td->next_wakeup = KTIME_MAX;
+ gpd_data->td = td;
+ }
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data)
+ ret = -EINVAL;
+ else
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ if (ret)
+ goto err_free;
+
+ return gpd_data;
+
+ err_free:
+ kfree(gpd_data->td);
+ kfree(gpd_data);
+ err_put:
+ dev_pm_put_subsys_data(dev);
+ return ERR_PTR(ret);
+}
+
+static void genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.subsys_data->domain_data = NULL;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(gpd_data->td);
+ kfree(gpd_data);
+ dev_pm_put_subsys_data(dev);
+}
+
+static void genpd_update_cpumask(struct generic_pm_domain *genpd,
+ int cpu, bool set, unsigned int depth)
+{
+ struct gpd_link *link;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return;
+
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct generic_pm_domain *parent = link->parent;
+
+ genpd_lock_nested(parent, depth + 1);
+ genpd_update_cpumask(parent, cpu, set, depth + 1);
+ genpd_unlock(parent);
+ }
+
+ if (set)
+ cpumask_set_cpu(cpu, genpd->cpus);
+ else
+ cpumask_clear_cpu(cpu, genpd->cpus);
+}
+
+static void genpd_set_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, true, 0);
+}
+
+static void genpd_clear_cpumask(struct generic_pm_domain *genpd, int cpu)
+{
+ if (cpu >= 0)
+ genpd_update_cpumask(genpd, cpu, false, 0);
+}
+
+static int genpd_get_cpu(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int cpu;
+
+ if (!genpd_is_cpu_domain(genpd))
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (get_cpu_device(cpu) == dev)
+ return cpu;
+ }
+
+ return -1;
+}
+
+static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ struct device *base_dev)
+{
+ struct genpd_governor_data *gd = genpd->gd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ gpd_data = genpd_alloc_dev_data(dev, gd);
+ if (IS_ERR(gpd_data))
+ return PTR_ERR(gpd_data);
+
+ gpd_data->cpu = genpd_get_cpu(genpd, base_dev);
+
+ ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
+ if (ret)
+ goto out;
+
+ genpd_lock(genpd);
+
+ genpd_set_cpumask(genpd, gpd_data->cpu);
+ dev_pm_domain_set(dev, &genpd->domain);
+
+ genpd->device_count++;
+ if (gd)
+ gd->max_off_time_changed = true;
+
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+
+ genpd_unlock(genpd);
+ out:
+ if (ret)
+ genpd_free_dev_data(dev, gpd_data);
+ else
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+ int ret;
+
+ if (!genpd || !dev)
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_add_device(genpd, dev, dev);
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_device);
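+
+/*
+ * Illustrative usage sketch (editor's addition, not part of the upstream
+ * source): a platform driver that owns a PM domain could attach one of its
+ * client devices like this. "example_pd" and "client_dev" are hypothetical.
+ */
+static int example_attach_client(struct generic_pm_domain *example_pd,
+				 struct device *client_dev)
+{
+	int err = pm_genpd_add_device(example_pd, client_dev);
+
+	if (err)
+		dev_err(client_dev, "failed to join PM domain: %d\n", err);
+
+	return err;
+}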
+
+static int genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ pdd = dev->power.subsys_data->domain_data;
+ gpd_data = to_gpd_data(pdd);
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb,
+ DEV_PM_QOS_RESUME_LATENCY);
+
+ genpd_lock(genpd);
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ genpd->device_count--;
+ if (genpd->gd)
+ genpd->gd->max_off_time_changed = true;
+
+ genpd_clear_cpumask(genpd, gpd_data->cpu);
+ dev_pm_domain_set(dev, NULL);
+
+ list_del_init(&pdd->list_node);
+
+ genpd_unlock(genpd);
+
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
+ genpd_free_dev_data(dev, gpd_data);
+
+ return 0;
+
+ out:
+ genpd_unlock(genpd);
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb, DEV_PM_QOS_RESUME_LATENCY);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd_safe(dev);
+
+ if (!genpd)
+ return -EINVAL;
+
+ return genpd_remove_device(genpd, dev);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_device);
+
+/**
+ * dev_pm_genpd_add_notifier - Add a genpd power on/off notifier for @dev
+ *
+ * @dev: Device that should be associated with the notifier
+ * @nb: The notifier block to register
+ *
+ * Users may call this function to add a genpd power on/off notifier for an
+ * attached @dev. Only one notifier per device is allowed. The notifier is
+ * sent when genpd is powering on/off the PM domain.
+ *
+ * It is assumed that the user guarantees that the genpd will not be detached
+ * while this routine is being called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (gpd_data->power_nb)
+ return -EEXIST;
+
+ genpd_lock(genpd);
+ ret = raw_notifier_chain_register(&genpd->power_notifiers, nb);
+ genpd_unlock(genpd);
+
+ if (ret) {
+ dev_warn(dev, "failed to add notifier for PM domain %s\n",
+ genpd->name);
+ return ret;
+ }
+
+ gpd_data->power_nb = nb;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_add_notifier);
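+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * registering a power on/off notifier for a device that is already attached
+ * to its genpd. The callback receives GENPD_NOTIFY_PRE_OFF/OFF/PRE_ON/ON
+ * action codes. All "example_*" names are hypothetical.
+ */
+static int example_genpd_notify(struct notifier_block *nb,
+				unsigned long action, void *data)
+{
+	if (action == GENPD_NOTIFY_PRE_OFF)
+		pr_debug("PM domain is about to be powered off\n");
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block example_genpd_nb = {
+	.notifier_call = example_genpd_notify,
+};
+
+/* In probe, after the device has been attached to its PM domain: */
+/*	ret = dev_pm_genpd_add_notifier(dev, &example_genpd_nb); */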
+
+/**
+ * dev_pm_genpd_remove_notifier - Remove a genpd power on/off notifier for @dev
+ *
+ * @dev: Device that is associated with the notifier
+ *
+ * Users may call this function to remove a genpd power on/off notifier for an
+ * attached @dev.
+ *
+ * It is assumed that the user guarantees that the genpd will not be detached
+ * while this routine is being called.
+ *
+ * Returns 0 on success and negative error values on failures.
+ */
+int dev_pm_genpd_remove_notifier(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
+ int ret;
+
+ genpd = dev_to_genpd_safe(dev);
+ if (!genpd)
+ return -ENODEV;
+
+ if (WARN_ON(!dev->power.subsys_data ||
+ !dev->power.subsys_data->domain_data))
+ return -EINVAL;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (!gpd_data->power_nb)
+ return -ENODEV;
+
+ genpd_lock(genpd);
+ ret = raw_notifier_chain_unregister(&genpd->power_notifiers,
+ gpd_data->power_nb);
+ genpd_unlock(genpd);
+
+ if (ret) {
+ dev_warn(dev, "failed to remove notifier for PM domain %s\n",
+ genpd->name);
+ return ret;
+ }
+
+ gpd_data->power_nb = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);
+
+static int genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+{
+ struct gpd_link *link, *itr;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+ || genpd == subdomain)
+ return -EINVAL;
+
+ /*
+	 * If the subdomain can be powered on/off in an IRQ safe
+	 * context, the parent domain must also be capable of being
+	 * powered on/off in that context.
+ */
+ if (!genpd_is_irq_safe(genpd) && genpd_is_irq_safe(subdomain)) {
+ WARN(1, "Parent %s of subdomain %s must be IRQ safe\n",
+ genpd->name, subdomain->name);
+ return -EINVAL;
+ }
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
+
+ if (!genpd_status_on(genpd) && genpd_status_on(subdomain)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(itr, &genpd->parent_links, parent_node) {
+ if (itr->child == subdomain && itr->parent == genpd) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ link->parent = genpd;
+ list_add_tail(&link->parent_node, &genpd->parent_links);
+ link->child = subdomain;
+ list_add_tail(&link->child_node, &subdomain->child_links);
+ if (genpd_status_on(subdomain))
+ genpd_sd_counter_inc(genpd);
+
+ out:
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
+ if (ret)
+ kfree(link);
+ return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Leader PM domain to add the subdomain to.
+ * @subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+{
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_add_subdomain(genpd, subdomain);
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);
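+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * nesting two hypothetical domains so that "child_pd" can only be powered on
+ * while "parent_pd" is on.
+ */
+static int example_link_domains(struct generic_pm_domain *parent_pd,
+				struct generic_pm_domain *child_pd)
+{
+	/* Both domains must already have been set up with pm_genpd_init(). */
+	return pm_genpd_add_subdomain(parent_pd, child_pd);
+}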
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Leader PM domain to remove the subdomain from.
+ * @subdomain: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *subdomain)
+{
+ struct gpd_link *l, *link;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+ return -EINVAL;
+
+ genpd_lock(subdomain);
+ genpd_lock_nested(genpd, SINGLE_DEPTH_NESTING);
+
+ if (!list_empty(&subdomain->parent_links) || subdomain->device_count) {
+ pr_warn("%s: unable to remove subdomain %s\n",
+ genpd->name, subdomain->name);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ list_for_each_entry_safe(link, l, &genpd->parent_links, parent_node) {
+ if (link->child != subdomain)
+ continue;
+
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
+ kfree(link);
+ if (genpd_status_on(subdomain))
+ genpd_sd_counter_dec(genpd);
+
+ ret = 0;
+ break;
+ }
+
+out:
+ genpd_unlock(genpd);
+ genpd_unlock(subdomain);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
+
+static void genpd_free_default_power_state(struct genpd_power_state *states,
+ unsigned int state_count)
+{
+ kfree(states);
+}
+
+static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+{
+ struct genpd_power_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ genpd->states = state;
+ genpd->state_count = 1;
+ genpd->free_states = genpd_free_default_power_state;
+
+ return 0;
+}
+
+static int genpd_alloc_data(struct generic_pm_domain *genpd)
+{
+ struct genpd_governor_data *gd = NULL;
+ int ret;
+
+ if (genpd_is_cpu_domain(genpd) &&
+ !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (genpd->gov) {
+ gd = kzalloc(sizeof(*gd), GFP_KERNEL);
+ if (!gd) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ gd->max_off_time_ns = -1;
+ gd->max_off_time_changed = true;
+ gd->next_wakeup = KTIME_MAX;
+ }
+
+ /* Use only one "off" state if there were no states declared */
+ if (genpd->state_count == 0) {
+ ret = genpd_set_default_power_state(genpd);
+ if (ret)
+ goto free;
+ }
+
+ genpd->gd = gd;
+ return 0;
+
+free:
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ kfree(gd);
+ return ret;
+}
+
+static void genpd_free_data(struct generic_pm_domain *genpd)
+{
+ if (genpd_is_cpu_domain(genpd))
+ free_cpumask_var(genpd->cpus);
+ if (genpd->free_states)
+ genpd->free_states(genpd->states, genpd->state_count);
+ kfree(genpd->gd);
+}
+
+static void genpd_lock_init(struct generic_pm_domain *genpd)
+{
+ if (genpd->flags & GENPD_FLAG_IRQ_SAFE) {
+ spin_lock_init(&genpd->slock);
+ genpd->lock_ops = &genpd_spin_ops;
+ } else {
+ mutex_init(&genpd->mlock);
+ genpd->lock_ops = &genpd_mtx_ops;
+ }
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Whether the domain should be initialized as powered off.
+ *
+ * Returns 0 on successful initialization, else a negative error code.
+ */
+int pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&genpd->parent_links);
+ INIT_LIST_HEAD(&genpd->child_links);
+ INIT_LIST_HEAD(&genpd->dev_list);
+ RAW_INIT_NOTIFIER_HEAD(&genpd->power_notifiers);
+ genpd_lock_init(genpd);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ atomic_set(&genpd->sd_count, 0);
+ genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
+ genpd->device_count = 0;
+ genpd->provider = NULL;
+ genpd->has_provider = false;
+ genpd->accounting_time = ktime_get_mono_fast_ns();
+ genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = genpd_runtime_resume;
+ genpd->domain.ops.prepare = genpd_prepare;
+ genpd->domain.ops.suspend_noirq = genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = genpd_resume_noirq;
+ genpd->domain.ops.freeze_noirq = genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = genpd_thaw_noirq;
+ genpd->domain.ops.poweroff_noirq = genpd_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = genpd_restore_noirq;
+ genpd->domain.ops.complete = genpd_complete;
+ genpd->domain.start = genpd_dev_pm_start;
+
+ if (genpd->flags & GENPD_FLAG_PM_CLK) {
+ genpd->dev_ops.stop = pm_clk_suspend;
+ genpd->dev_ops.start = pm_clk_resume;
+ }
+
+ /* The always-on governor works better with the corresponding flag. */
+ if (gov == &pm_domain_always_on_gov)
+ genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
+
+ /* Always-on domains must be powered on at initialization. */
+ if ((genpd_is_always_on(genpd) || genpd_is_rpm_always_on(genpd)) &&
+ !genpd_status_on(genpd)) {
+ pr_err("always-on PM domain %s is not on\n", genpd->name);
+ return -EINVAL;
+ }
+
+ /* Multiple states but no governor doesn't make sense. */
+ if (!gov && genpd->state_count > 1)
+ pr_warn("%s: no governor for states\n", genpd->name);
+
+ ret = genpd_alloc_data(genpd);
+ if (ret)
+ return ret;
+
+ device_initialize(&genpd->dev);
+ dev_set_name(&genpd->dev, "%s", genpd->name);
+
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+ genpd_debug_add(genpd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_init);
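+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * a minimal domain with platform-specific power callbacks, registered in the
+ * initially-off state with the simple QoS governor. All names here are
+ * hypothetical.
+ */
+static int example_pd_power_on(struct generic_pm_domain *pd)
+{
+	/* Enable the clocks/regulators of the hardware island here. */
+	return 0;
+}
+
+static int example_pd_power_off(struct generic_pm_domain *pd)
+{
+	/* Disable the clocks/regulators of the hardware island here. */
+	return 0;
+}
+
+static struct generic_pm_domain example_pd = {
+	.name		= "example-pd",
+	.power_on	= example_pd_power_on,
+	.power_off	= example_pd_power_off,
+};
+
+/* For instance, in a platform driver probe: */
+/*	ret = pm_genpd_init(&example_pd, &simple_qos_governor, true); */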
+
+static int genpd_remove(struct generic_pm_domain *genpd)
+{
+ struct gpd_link *l, *link;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ genpd_lock(genpd);
+
+ if (genpd->has_provider) {
+ genpd_unlock(genpd);
+ pr_err("Provider present, unable to remove %s\n", genpd->name);
+ return -EBUSY;
+ }
+
+ if (!list_empty(&genpd->parent_links) || genpd->device_count) {
+ genpd_unlock(genpd);
+ pr_err("%s: unable to remove %s\n", __func__, genpd->name);
+ return -EBUSY;
+ }
+
+ list_for_each_entry_safe(link, l, &genpd->child_links, child_node) {
+ list_del(&link->parent_node);
+ list_del(&link->child_node);
+ kfree(link);
+ }
+
+ list_del(&genpd->gpd_list_node);
+ genpd_unlock(genpd);
+ genpd_debug_remove(genpd);
+ cancel_work_sync(&genpd->power_off_work);
+ genpd_free_data(genpd);
+
+ pr_debug("%s: removed %s\n", __func__, genpd->name);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_remove - Remove a generic I/O PM domain
+ * @genpd: Pointer to PM domain that is to be removed.
+ *
+ * To remove the PM domain, this function:
+ * - Removes the PM domain as a subdomain to any parent domains,
+ * if it was added.
+ * - Removes the PM domain from the list of registered PM domains.
+ *
+ * The PM domain will only be removed if the associated provider has
+ * been removed, it is not a parent to any other PM domain, and it has
+ * no devices associated with it.
+ */
+int pm_genpd_remove(struct generic_pm_domain *genpd)
+{
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+ ret = genpd_remove(genpd);
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_remove);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+
+/*
+ * Device Tree based PM domain providers.
+ *
+ * The code below implements generic device tree based PM domain providers that
+ * bind device tree nodes with generic PM domains registered in the system.
+ *
+ * Any driver that registers generic PM domains and needs to support binding of
+ * devices to these domains is supposed to register a PM domain provider, which
+ * maps a PM domain specifier retrieved from the device tree to a PM domain.
+ *
+ * Two simple mapping functions have been provided for convenience:
+ * - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
+ * - genpd_xlate_onecell() for mapping of multiple PM domains per node by
+ * index.
+ */
+
+/**
+ * struct of_genpd_provider - PM domain provider registration structure
+ * @link: Entry in global list of PM domain providers
+ * @node: Pointer to device tree node of PM domain provider
+ * @xlate: Provider-specific xlate callback mapping a set of specifier cells
+ * into a PM domain.
+ * @data: context pointer to be passed into @xlate callback
+ */
+struct of_genpd_provider {
+ struct list_head link;
+ struct device_node *node;
+ genpd_xlate_t xlate;
+ void *data;
+};
+
+/* List of registered PM domain providers. */
+static LIST_HEAD(of_genpd_providers);
+/* Mutex to protect the list above. */
+static DEFINE_MUTEX(of_genpd_mutex);
+
+/**
+ * genpd_xlate_simple() - Xlate function for direct node-domain mapping
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct generic_pm_domain
+ *
+ * This is a generic xlate function that can be used to model PM domains that
+ * have their own device tree nodes. The private data of xlate function needs
+ * to be a valid pointer to struct generic_pm_domain.
+ */
+static struct generic_pm_domain *genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ return data;
+}
+
+/**
+ * genpd_xlate_onecell() - Xlate function using a single index.
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct genpd_onecell_data
+ *
+ * This is a generic xlate function that can be used to model simple PM domain
+ * controllers that have one device tree node and provide multiple PM domains.
+ * A single cell is used as an index into an array of PM domains specified in
+ * the genpd_onecell_data struct when registering the provider.
+ */
+static struct generic_pm_domain *genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[idx];
+}
+
+/**
+ * genpd_add_provider() - Register a PM domain provider for a node
+ * @np: Device node pointer associated with the PM domain provider.
+ * @xlate: Callback for decoding PM domain from phandle arguments.
+ * @data: Context pointer for @xlate callback.
+ */
+static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data)
+{
+ struct of_genpd_provider *cp;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->node = of_node_get(np);
+ cp->data = data;
+ cp->xlate = xlate;
+ fwnode_dev_initialized(&np->fwnode, true);
+
+ mutex_lock(&of_genpd_mutex);
+ list_add(&cp->link, &of_genpd_providers);
+ mutex_unlock(&of_genpd_mutex);
+ pr_debug("Added domain provider from %pOF\n", np);
+
+ return 0;
+}
+
+static bool genpd_present(const struct generic_pm_domain *genpd)
+{
+ bool ret = false;
+ const struct generic_pm_domain *gpd;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (gpd == genpd) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+
+/**
+ * of_genpd_add_provider_simple() - Register a simple PM domain provider
+ * @np: Device node pointer associated with the PM domain provider.
+ * @genpd: Pointer to PM domain associated with the PM domain provider.
+ */
+int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
+{
+ int ret;
+
+ if (!np || !genpd)
+ return -EINVAL;
+
+ if (!genpd_present(genpd))
+ return -EINVAL;
+
+ genpd->dev.of_node = np;
+
+ /* Parse genpd OPP table */
+ if (genpd->set_performance_state) {
+ ret = dev_pm_opp_of_add_table(&genpd->dev);
+ if (ret)
+ return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
+
+ /*
+ * Save table for faster processing while setting performance
+ * state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+ WARN_ON(IS_ERR(genpd->opp_table));
+ }
+
+ ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
+ if (ret) {
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
+ dev_pm_opp_of_remove_table(&genpd->dev);
+ }
+
+ return ret;
+ }
+
+ genpd->provider = &np->fwnode;
+ genpd->has_provider = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
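+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * registering the hypothetical "example_pd" from the earlier sketch as a DT
+ * provider, assuming pdev->dev.of_node carries "#power-domain-cells = <0>".
+ */
+static int example_provider_probe(struct platform_device *pdev)
+{
+	int ret = pm_genpd_init(&example_pd, NULL, true);
+
+	if (ret)
+		return ret;
+
+	return of_genpd_add_provider_simple(pdev->dev.of_node, &example_pd);
+}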
+
+/**
+ * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
+ * @np: Device node pointer associated with the PM domain provider.
+ * @data: Pointer to the data associated with the PM domain provider.
+ */
+int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
+{
+ struct generic_pm_domain *genpd;
+ unsigned int i;
+ int ret = -EINVAL;
+
+ if (!np || !data)
+ return -EINVAL;
+
+ if (!data->xlate)
+ data->xlate = genpd_xlate_onecell;
+
+ for (i = 0; i < data->num_domains; i++) {
+ genpd = data->domains[i];
+
+ if (!genpd)
+ continue;
+ if (!genpd_present(genpd))
+ goto error;
+
+ genpd->dev.of_node = np;
+
+ /* Parse genpd OPP table */
+ if (genpd->set_performance_state) {
+ ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
+ if (ret) {
+ dev_err_probe(&genpd->dev, ret,
+ "Failed to add OPP table for index %d\n", i);
+ goto error;
+ }
+
+ /*
+ * Save table for faster processing while setting
+ * performance state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+ WARN_ON(IS_ERR(genpd->opp_table));
+ }
+
+ genpd->provider = &np->fwnode;
+ genpd->has_provider = true;
+ }
+
+ ret = genpd_add_provider(np, data->xlate, data);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ while (i--) {
+ genpd = data->domains[i];
+
+ if (!genpd)
+ continue;
+
+ genpd->provider = NULL;
+ genpd->has_provider = false;
+
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
+ dev_pm_opp_of_remove_table(&genpd->dev);
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);
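+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * a provider exposing two hypothetical domains that consumers select with a
+ * single DT cell.
+ */
+static struct generic_pm_domain *example_domains[2];
+
+static struct genpd_onecell_data example_onecell_data = {
+	.domains	= example_domains,
+	.num_domains	= ARRAY_SIZE(example_domains),
+};
+
+/* After pm_genpd_init() has been called for each entry of example_domains: */
+/*	ret = of_genpd_add_provider_onecell(np, &example_onecell_data); */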
+
+/**
+ * of_genpd_del_provider() - Remove a previously registered PM domain provider
+ * @np: Device node pointer associated with the PM domain provider
+ */
+void of_genpd_del_provider(struct device_node *np)
+{
+ struct of_genpd_provider *cp, *tmp;
+ struct generic_pm_domain *gpd;
+
+ mutex_lock(&gpd_list_lock);
+ mutex_lock(&of_genpd_mutex);
+ list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
+ if (cp->node == np) {
+ /*
+ * For each PM domain associated with the
+ * provider, set the 'has_provider' to false
+ * so that the PM domain can be safely removed.
+ */
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (gpd->provider == &np->fwnode) {
+ gpd->has_provider = false;
+
+ if (!gpd->set_performance_state)
+ continue;
+
+ dev_pm_opp_put_opp_table(gpd->opp_table);
+ dev_pm_opp_of_remove_table(&gpd->dev);
+ }
+ }
+
+ fwnode_dev_initialized(&cp->node->fwnode, false);
+ list_del(&cp->link);
+ of_node_put(cp->node);
+ kfree(cp);
+ break;
+ }
+ }
+ mutex_unlock(&of_genpd_mutex);
+ mutex_unlock(&gpd_list_lock);
+}
+EXPORT_SYMBOL_GPL(of_genpd_del_provider);
+
+/**
+ * genpd_get_from_provider() - Look-up PM domain
+ * @genpdspec: OF phandle args to use for look-up
+ *
+ * Looks for a PM domain provider under the node specified by @genpdspec and if
+ * found, uses xlate function of the provider to map phandle args to a PM
+ * domain.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
+ * on failure.
+ */
+static struct generic_pm_domain *genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
+ struct of_genpd_provider *provider;
+
+ if (!genpdspec)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&of_genpd_mutex);
+
+ /* Check if we have such a provider in our array */
+ list_for_each_entry(provider, &of_genpd_providers, link) {
+ if (provider->node == genpdspec->np)
+ genpd = provider->xlate(genpdspec, provider->data);
+ if (!IS_ERR(genpd))
+ break;
+ }
+
+ mutex_unlock(&of_genpd_mutex);
+
+ return genpd;
+}
+
+/**
+ * of_genpd_add_device() - Add a device to an I/O PM domain
+ * @genpdspec: OF phandle args to use for look-up PM domain
+ * @dev: Device to be added.
+ *
+ * Looks up an I/O PM domain based upon the phandle args provided and adds
+ * the device to the PM domain. Returns a negative error code on failure.
+ */
+int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+
+ genpd = genpd_get_from_provider(genpdspec);
+ if (IS_ERR(genpd)) {
+ ret = PTR_ERR(genpd);
+ goto out;
+ }
+
+ ret = genpd_add_device(genpd, dev, dev);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_device);
+
+/**
+ * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks up a parent PM domain and a subdomain based upon the phandle args
+ * provided and adds the subdomain to the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ struct generic_pm_domain *parent, *subdomain;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ parent = genpd_get_from_provider(parent_spec);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto out;
+ }
+
+ subdomain = genpd_get_from_provider(subdomain_spec);
+ if (IS_ERR(subdomain)) {
+ ret = PTR_ERR(subdomain);
+ goto out;
+ }
+
+ ret = genpd_add_subdomain(parent, subdomain);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret == -ENOENT ? -EPROBE_DEFER : ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);
+
+/**
+ * of_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @parent_spec: OF phandle args to use for parent PM domain look-up
+ * @subdomain_spec: OF phandle args to use for subdomain look-up
+ *
+ * Looks up a parent PM domain and a subdomain based upon the phandle args
+ * provided and removes the subdomain from the parent PM domain. Returns a
+ * negative error code on failure.
+ */
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ struct generic_pm_domain *parent, *subdomain;
+ int ret;
+
+ mutex_lock(&gpd_list_lock);
+
+ parent = genpd_get_from_provider(parent_spec);
+ if (IS_ERR(parent)) {
+ ret = PTR_ERR(parent);
+ goto out;
+ }
+
+ subdomain = genpd_get_from_provider(subdomain_spec);
+ if (IS_ERR(subdomain)) {
+ ret = PTR_ERR(subdomain);
+ goto out;
+ }
+
+ ret = pm_genpd_remove_subdomain(parent, subdomain);
+
+out:
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_subdomain);
+
+/**
+ * of_genpd_remove_last - Remove the last PM domain registered for a provider
+ * @np: Pointer to device node associated with provider
+ *
+ * Find the last PM domain that was added by a particular provider and
+ * remove this PM domain from the list of PM domains. The provider is
+ * identified by the device node that is passed. The PM domain will only
+ * be removed if the provider associated with the domain has been
+ * removed.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or
+ * ERR_PTR() on failure.
+ */
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
+{
+ struct generic_pm_domain *gpd, *tmp, *genpd = ERR_PTR(-ENOENT);
+ int ret;
+
+ if (IS_ERR_OR_NULL(np))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry_safe(gpd, tmp, &gpd_list, gpd_list_node) {
+ if (gpd->provider == &np->fwnode) {
+ ret = genpd_remove(gpd);
+ genpd = ret ? ERR_PTR(ret) : gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return genpd;
+}
+EXPORT_SYMBOL_GPL(of_genpd_remove_last);
+
+static void genpd_release_dev(struct device *dev)
+{
+ of_node_put(dev->of_node);
+ kfree(dev);
+}
+
+static struct bus_type genpd_bus_type = {
+ .name = "genpd",
+};
+
+/**
+ * genpd_dev_pm_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Currently not used
+ *
+ * Try to locate a corresponding generic PM domain, which the device was
+ * attached to previously. If such is found, the device is detached from it.
+ */
+static void genpd_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct generic_pm_domain *pd;
+ unsigned int i;
+ int ret = 0;
+
+ pd = dev_to_genpd(dev);
+ if (IS_ERR(pd))
+ return;
+
+ dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+
+ /* Drop the default performance state */
+ if (dev_gpd_data(dev)->default_pstate) {
+ dev_pm_genpd_set_performance_state(dev, 0);
+ dev_gpd_data(dev)->default_pstate = 0;
+ }
+
+ for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
+ ret = genpd_remove_device(pd, dev);
+ if (ret != -EAGAIN)
+ break;
+
+ mdelay(i);
+ cond_resched();
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "failed to remove from PM domain %s: %d",
+ pd->name, ret);
+ return;
+ }
+
+ /* Check if PM domain can be powered off after removing this device. */
+ genpd_queue_power_off_work(pd);
+
+ /* Unregister the device if it was created by genpd. */
+ if (dev->bus == &genpd_bus_type)
+ device_unregister(dev);
+}
+
+static void genpd_dev_pm_sync(struct device *dev)
+{
+ struct generic_pm_domain *pd;
+
+ pd = dev_to_genpd(dev);
+ if (IS_ERR(pd))
+ return;
+
+ genpd_queue_power_off_work(pd);
+}
+
+static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
+ unsigned int index, bool power_on)
+{
+ struct of_phandle_args pd_args;
+ struct generic_pm_domain *pd;
+ int pstate;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells", index, &pd_args);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&gpd_list_lock);
+ pd = genpd_get_from_provider(&pd_args);
+ of_node_put(pd_args.np);
+ if (IS_ERR(pd)) {
+ mutex_unlock(&gpd_list_lock);
+ dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
+ __func__, PTR_ERR(pd));
+ return driver_deferred_probe_check_state(base_dev);
+ }
+
+ dev_dbg(dev, "adding to PM domain %s\n", pd->name);
+
+ ret = genpd_add_device(pd, dev, base_dev);
+ mutex_unlock(&gpd_list_lock);
+
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add to PM domain %s\n", pd->name);
+
+ dev->pm_domain->detach = genpd_dev_pm_detach;
+ dev->pm_domain->sync = genpd_dev_pm_sync;
+
+ if (power_on) {
+ genpd_lock(pd);
+ ret = genpd_power_on(pd, 0);
+ genpd_unlock(pd);
+ }
+
+ if (ret) {
+ genpd_remove_device(pd, dev);
+ return -EPROBE_DEFER;
+ }
+
+ /* Set the default performance state */
+ pstate = of_get_required_opp_performance_state(dev->of_node, index);
+ if (pstate < 0 && pstate != -ENODEV && pstate != -EOPNOTSUPP) {
+ ret = pstate;
+ goto err;
+ } else if (pstate > 0) {
+ ret = dev_pm_genpd_set_performance_state(dev, pstate);
+ if (ret)
+ goto err;
+ dev_gpd_data(dev)->default_pstate = pstate;
+ }
+ return 1;
+
+err:
+ dev_err(dev, "failed to set required performance state for power-domain %s: %d\n",
+ pd->name, ret);
+ genpd_remove_device(pd, dev);
+ return ret;
+}
+
+/**
+ * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
+ * @dev: Device to attach.
+ *
+ * Parse device's OF node to find a PM domain specifier. If such is found,
+ * attaches the device to the retrieved pm_domain ops.
+ *
+ * Returns 1 when a PM domain has been successfully attached, 0 when the device
+ * doesn't need a PM domain or when multiple power-domains exist for it, else a
+ * negative error code. Note that if a power-domain exists for the device, but
+ * it cannot be found or turned on, then -EPROBE_DEFER is returned to ensure
+ * that the device is not probed and to try again later.
+ */
+int genpd_dev_pm_attach(struct device *dev)
+{
+ if (!dev->of_node)
+ return 0;
+
+ /*
+ * Devices with multiple PM domains must be attached separately, as we
+ * can only attach one PM domain per device.
+ */
+ if (of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells") != 1)
+ return 0;
+
+ return __genpd_dev_pm_attach(dev, dev, 0, true);
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+
+/**
+ * genpd_dev_pm_attach_by_id - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @index: The index of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier at the provided @index.
+ * If such is found, creates a virtual device and attaches it to the retrieved
+ * pm_domain ops. To deal with detaching of the virtual device, the ->detach()
+ * callback in the struct dev_pm_domain is assigned to genpd_dev_pm_detach().
+ *
+ * Returns the created virtual device if the PM domain has been successfully
+ * attached, NULL when the device doesn't need a PM domain, else an ERR_PTR()
+ * in case of failures. If a power-domain exists for the device, but cannot be
+ * found or turned on, then ERR_PTR(-EPROBE_DEFER) is returned to ensure that
+ * the device is not probed and to try again later.
+ */
+struct device *genpd_dev_pm_attach_by_id(struct device *dev,
+ unsigned int index)
+{
+ struct device *virt_dev;
+ int num_domains;
+ int ret;
+
+ if (!dev->of_node)
+ return NULL;
+
+ /* Verify that the index is within a valid range. */
+ num_domains = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ if (index >= num_domains)
+ return NULL;
+
+ /* Allocate and register device on the genpd bus. */
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+ if (!virt_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
+ virt_dev->bus = &genpd_bus_type;
+ virt_dev->release = genpd_release_dev;
+ virt_dev->of_node = of_node_get(dev->of_node);
+
+ ret = device_register(virt_dev);
+ if (ret) {
+ put_device(virt_dev);
+ return ERR_PTR(ret);
+ }
+
+ /* Try to attach the device to the PM domain at the specified index. */
+ ret = __genpd_dev_pm_attach(virt_dev, dev, index, false);
+ if (ret < 1) {
+ device_unregister(virt_dev);
+ return ret ? ERR_PTR(ret) : NULL;
+ }
+
+ pm_runtime_enable(virt_dev);
+ genpd_queue_power_off_work(dev_to_genpd(virt_dev));
+
+ return virt_dev;
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
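+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * a consumer described with "power-domains = <&pd0>, <&pd1>" attaching its
+ * second domain. Consumers normally reach this code through the
+ * dev_pm_domain_attach_by_id() wrapper and manage the returned virtual
+ * device with a device link.
+ */
+static int example_attach_second_domain(struct device *dev)
+{
+	struct device *pd_dev = dev_pm_domain_attach_by_id(dev, 1);
+
+	if (IS_ERR(pd_dev))
+		return PTR_ERR(pd_dev);
+	if (!pd_dev)
+		return 0;	/* The device does not need a PM domain. */
+
+	if (!device_link_add(dev, pd_dev, DL_FLAG_STATELESS |
+			     DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE))
+		return -EINVAL;
+
+	return 0;
+}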
+
+/**
+ * genpd_dev_pm_attach_by_name - Associate a device with one of its PM domains.
+ * @dev: The device used to lookup the PM domain.
+ * @name: The name of the PM domain.
+ *
+ * Parse device's OF node to find a PM domain specifier using the
+ * power-domain-names DT property. For further description see
+ * genpd_dev_pm_attach_by_id().
+ */
+struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name)
+{
+ int index;
+
+ if (!dev->of_node)
+ return NULL;
+
+ index = of_property_match_string(dev->of_node, "power-domain-names",
+ name);
+ if (index < 0)
+ return NULL;
+
+ return genpd_dev_pm_attach_by_id(dev, index);
+}
+
+static const struct of_device_id idle_state_match[] = {
+ { .compatible = "domain-idle-state", },
+ { }
+};
+
+static int genpd_parse_state(struct genpd_power_state *genpd_state,
+ struct device_node *state_node)
+{
+ int err;
+ u32 residency;
+ u32 entry_latency, exit_latency;
+
+ err = of_property_read_u32(state_node, "entry-latency-us",
+ &entry_latency);
+ if (err) {
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "exit-latency-us",
+ &exit_latency);
+ if (err) {
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
+ return -EINVAL;
+ }
+
+ err = of_property_read_u32(state_node, "min-residency-us", &residency);
+ if (!err)
+ genpd_state->residency_ns = 1000LL * residency;
+
+ genpd_state->power_on_latency_ns = 1000LL * exit_latency;
+ genpd_state->power_off_latency_ns = 1000LL * entry_latency;
+ genpd_state->fwnode = &state_node->fwnode;
+
+ return 0;
+}
+
+static int genpd_iterate_idle_states(struct device_node *dn,
+ struct genpd_power_state *states)
+{
+ int ret;
+ struct of_phandle_iterator it;
+ struct device_node *np;
+ int i = 0;
+
+ ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+ if (ret <= 0)
+ return ret == -ENOENT ? 0 : ret;
+
+	/* Loop over the phandles until all the requested entries are found */
+ of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
+ np = it.node;
+ if (!of_match_node(idle_state_match, np))
+ continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ if (states) {
+ ret = genpd_parse_state(&states[i], np);
+ if (ret) {
+ pr_err("Parsing idle state node %pOF failed with err %d\n",
+ np, ret);
+ of_node_put(np);
+ return ret;
+ }
+ }
+ i++;
+ }
+
+ return i;
+}
+
+/**
+ * of_genpd_parse_idle_states: Return array of idle states for the genpd.
+ *
+ * @dn: The genpd device node
+ * @states: The pointer to which the state array will be saved.
+ * @n: The count of elements in the array returned from this function.
+ *
+ * Returns the domain idle states parsed from the OF node. The memory for the
+ * states is allocated by this function, and it is the caller's responsibility
+ * to free it after use. Returns 0 if any number of compatible domain idle
+ * states is found, including zero; a negative error code is returned on
+ * errors.
+ */
+int of_genpd_parse_idle_states(struct device_node *dn,
+ struct genpd_power_state **states, int *n)
+{
+ struct genpd_power_state *st;
+ int ret;
+
+ ret = genpd_iterate_idle_states(dn, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (!ret) {
+ *states = NULL;
+ *n = 0;
+ return 0;
+ }
+
+ st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ ret = genpd_iterate_idle_states(dn, st);
+ if (ret <= 0) {
+ kfree(st);
+ return ret < 0 ? ret : -EINVAL;
+ }
+
+ *states = st;
+ *n = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
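+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * filling a hypothetical domain's state table from its "domain-idle-states"
+ * property before pm_genpd_init() is called.
+ */
+static int example_init_idle_states(struct generic_pm_domain *pd,
+				    struct device_node *np)
+{
+	struct genpd_power_state *states;
+	int nr_states, ret;
+
+	ret = of_genpd_parse_idle_states(np, &states, &nr_states);
+	if (ret)
+		return ret;
+
+	if (nr_states) {
+		pd->states = states;
+		pd->state_count = nr_states;
+	}
+
+	return 0;
+}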
+
+/**
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
+ *
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
+ * state.
+ *
+ * Returns the performance state encoded in the OPP of the genpd. This calls
+ * the platform-specific genpd->opp_to_performance_state() callback to
+ * translate a power domain OPP to a performance state.
+ *
+ * Returns the performance state on success and 0 on failure.
+ */
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
+{
+ struct generic_pm_domain *genpd = NULL;
+ int state;
+
+ genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
+
+ if (unlikely(!genpd->opp_to_performance_state))
+ return 0;
+
+ genpd_lock(genpd);
+ state = genpd->opp_to_performance_state(genpd, opp);
+ genpd_unlock(genpd);
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
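+
+/*
+ * Illustrative sketch (editor's addition, not part of the upstream source):
+ * a provider could translate a genpd OPP into a performance state by using
+ * the OPP's level, assigning this hypothetical helper to
+ * genpd->opp_to_performance_state before registering the provider.
+ */
+static unsigned int example_opp_to_performance_state(
+					struct generic_pm_domain *genpd,
+					struct dev_pm_opp *opp)
+{
+	return dev_pm_opp_get_level(opp);
+}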
+
+static int __init genpd_bus_init(void)
+{
+ return bus_register(&genpd_bus_type);
+}
+core_initcall(genpd_bus_init);
+
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
+
+
+/*** debugfs support ***/
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * TODO: This function is a slightly modified version of rtpm_status_show
+ * from sysfs.c, so generalize it.
+ */
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+ static const char * const status_lookup[] = {
+ [RPM_ACTIVE] = "active",
+ [RPM_RESUMING] = "resuming",
+ [RPM_SUSPENDED] = "suspended",
+ [RPM_SUSPENDING] = "suspending"
+ };
+ const char *p = "";
+
+ if (dev->power.runtime_error)
+ p = "error";
+ else if (dev->power.disable_depth)
+ p = "unsupported";
+ else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
+ p = status_lookup[dev->power.runtime_status];
+ else
+ WARN_ON(1);
+
+ seq_printf(s, "%-25s ", p);
+}
+
+static void perf_status_str(struct seq_file *s, struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ seq_put_decimal_ull(s, "", gpd_data->performance_state);
+}
+
+static int genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *genpd)
+{
+ static const char * const status_lookup[] = {
+ [GENPD_STATE_ON] = "on",
+ [GENPD_STATE_OFF] = "off"
+ };
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ struct gpd_link *link;
+ char state[16];
+ int ret;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+ if (!genpd_status_on(genpd))
+ snprintf(state, sizeof(state), "%s-%u",
+ status_lookup[genpd->status], genpd->state_idx);
+ else
+ snprintf(state, sizeof(state), "%s",
+ status_lookup[genpd->status]);
+ seq_printf(s, "%-30s %-50s %u", genpd->name, state, genpd->performance_state);
+
+ /*
+ * Modifications on the list require holding locks on both
+ * parent and child, so we are safe.
+ * Also genpd->name is immutable.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ if (list_is_first(&link->parent_node, &genpd->parent_links))
+ seq_printf(s, "\n%48s", " ");
+ seq_printf(s, "%s", link->child->name);
+ if (!list_is_last(&link->parent_node, &genpd->parent_links))
+ seq_puts(s, ", ");
+ }
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "\n %-50s ", kobj_path);
+ rtpm_status_str(s, pm_data->dev);
+ perf_status_str(s, pm_data->dev);
+ kfree(kobj_path);
+ }
+
+ seq_puts(s, "\n");
+exit:
+ genpd_unlock(genpd);
+
+ return 0;
+}
+
+static int summary_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd;
+ int ret = 0;
+
+ seq_puts(s, "domain status children performance\n");
+ seq_puts(s, " /device runtime status\n");
+ seq_puts(s, "----------------------------------------------------------------------------------------------\n");
+
+ ret = mutex_lock_interruptible(&gpd_list_lock);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ ret = genpd_summary_one(s, genpd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+
+static int status_show(struct seq_file *s, void *data)
+{
+ static const char * const status_lookup[] = {
+ [GENPD_STATE_ON] = "on",
+ [GENPD_STATE_OFF] = "off"
+ };
+
+ struct generic_pm_domain *genpd = s->private;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+
+ if (genpd->status == GENPD_STATE_OFF)
+ seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+ genpd->state_idx);
+ else
+ seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int sub_domains_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct gpd_link *link;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node)
+ seq_printf(s, "%s\n", link->child->name);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int idle_states_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ u64 now, delta, idle_time = 0;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ seq_puts(s, "State Time Spent(ms) Usage Rejected\n");
+
+ for (i = 0; i < genpd->state_count; i++) {
+ idle_time += genpd->states[i].idle_time;
+
+ if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
+ now = ktime_get_mono_fast_ns();
+ if (now > genpd->accounting_time) {
+ delta = now - genpd->accounting_time;
+ idle_time += delta;
+ }
+ }
+
+ do_div(idle_time, NSEC_PER_MSEC);
+ seq_printf(s, "S%-13i %-14llu %-14llu %llu\n", i, idle_time,
+ genpd->states[i].usage, genpd->states[i].rejected);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int active_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ u64 now, on_time, delta = 0;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (genpd->status == GENPD_STATE_ON) {
+ now = ktime_get_mono_fast_ns();
+ if (now > genpd->accounting_time)
+ delta = now - genpd->accounting_time;
+ }
+
+ on_time = genpd->on_time + delta;
+ do_div(on_time, NSEC_PER_MSEC);
+ seq_printf(s, "%llu ms\n", on_time);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int total_idle_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ u64 now, delta, total = 0;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (i = 0; i < genpd->state_count; i++) {
+ total += genpd->states[i].idle_time;
+
+ if (genpd->status == GENPD_STATE_OFF && genpd->state_idx == i) {
+ now = ktime_get_mono_fast_ns();
+ if (now > genpd->accounting_time) {
+ delta = now - genpd->accounting_time;
+ total += delta;
+ }
+ }
+ }
+
+ do_div(total, NSEC_PER_MSEC);
+ seq_printf(s, "%llu ms\n", total);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+
+static int devices_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "%s\n", kobj_path);
+ kfree(kobj_path);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int perf_state_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+
+ if (genpd_lock_interruptible(genpd))
+ return -ERESTARTSYS;
+
+ seq_printf(s, "%u\n", genpd->performance_state);
+
+ genpd_unlock(genpd);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(summary);
+DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(sub_domains);
+DEFINE_SHOW_ATTRIBUTE(idle_states);
+DEFINE_SHOW_ATTRIBUTE(active_time);
+DEFINE_SHOW_ATTRIBUTE(total_idle_time);
+DEFINE_SHOW_ATTRIBUTE(devices);
+DEFINE_SHOW_ATTRIBUTE(perf_state);
+
+static void genpd_debug_add(struct generic_pm_domain *genpd)
+{
+ struct dentry *d;
+
+ if (!genpd_debugfs_dir)
+ return;
+
+ d = debugfs_create_dir(genpd->name, genpd_debugfs_dir);
+
+ debugfs_create_file("current_state", 0444,
+ d, genpd, &status_fops);
+ debugfs_create_file("sub_domains", 0444,
+ d, genpd, &sub_domains_fops);
+ debugfs_create_file("idle_states", 0444,
+ d, genpd, &idle_states_fops);
+ debugfs_create_file("active_time", 0444,
+ d, genpd, &active_time_fops);
+ debugfs_create_file("total_idle_time", 0444,
+ d, genpd, &total_idle_time_fops);
+ debugfs_create_file("devices", 0444,
+ d, genpd, &devices_fops);
+ if (genpd->set_performance_state)
+ debugfs_create_file("perf_state", 0444,
+ d, genpd, &perf_state_fops);
+}
+
+static int __init genpd_debug_init(void)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+
+ debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir,
+ NULL, &summary_fops);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_debug_add(genpd);
+
+ return 0;
+}
+late_initcall(genpd_debug_init);
+
+static void __exit genpd_debug_exit(void)
+{
+ debugfs_remove_recursive(genpd_debugfs_dir);
+}
+__exitcall(genpd_debug_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 000000000..282a3a135
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ */
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/ktime.h>
+
+static int dev_update_qos_constraint(struct device *dev, void *data)
+{
+ s64 *constraint_ns_p = data;
+ s64 constraint_ns;
+
+ if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
+ struct gpd_timing_data *td = dev_gpd_data(dev)->td;
+
+ /*
+ * Only take suspend-time QoS constraints of devices into
+ * account, because constraints updated after the device has
+ * been suspended are not guaranteed to be taken into account
+ * anyway. In order for them to take effect, the device has to
+ * be resumed and suspended again.
+ */
+ constraint_ns = td ? td->effective_constraint_ns :
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ } else {
+ /*
+ * The child is not in a domain and there's no info on its
+ * suspend/resume latencies, so assume them to be negligible and
+ * take its current PM QoS constraint (that's the only thing
+ * known at this point anyway).
+ */
+ constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
+ constraint_ns *= NSEC_PER_USEC;
+ }
+
+ if (constraint_ns < *constraint_ns_p)
+ *constraint_ns_p = constraint_ns;
+
+ return 0;
+}
+
+/**
+ * default_suspend_ok - Default PM domain governor routine to suspend devices.
+ * @dev: Device to check.
+ */
+static bool default_suspend_ok(struct device *dev)
+{
+ struct gpd_timing_data *td = dev_gpd_data(dev)->td;
+ unsigned long flags;
+ s64 constraint_ns;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!td->constraint_changed) {
+ bool ret = td->cached_suspend_ok;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return ret;
+ }
+ td->constraint_changed = false;
+ td->cached_suspend_ok = false;
+ td->effective_constraint_ns = 0;
+ constraint_ns = __dev_pm_qos_resume_latency(dev);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (constraint_ns == 0)
+ return false;
+
+ constraint_ns *= NSEC_PER_USEC;
+ /*
+ * We can walk the children without any additional locking, because
+ * they all have been suspended at this point and their
+ * effective_constraint_ns fields won't be modified in parallel with us.
+ */
+ if (!dev->power.ignore_children)
+ device_for_each_child(dev, &constraint_ns,
+ dev_update_qos_constraint);
+
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
+ /* "No restriction", so the device is allowed to suspend. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
+ td->cached_suspend_ok = true;
+ } else if (constraint_ns == 0) {
+ /*
+		 * This triggers if one of the children that doesn't belong to
+		 * a domain has a zero PM QoS constraint, and it's better not
+		 * to suspend in that case. effective_constraint_ns is zero
+		 * already and cached_suspend_ok is false, so bail out.
+ */
+ return false;
+ } else {
+ constraint_ns -= td->suspend_latency_ns +
+ td->resume_latency_ns;
+ /*
+ * effective_constraint_ns is zero already and cached_suspend_ok
+ * is false, so if the computed value is not positive, return
+ * right away.
+ */
+ if (constraint_ns <= 0)
+ return false;
+
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
+ }
+
+ /*
+ * The children have been suspended already, so we don't need to take
+ * their suspend latencies into account here.
+ */
+ return td->cached_suspend_ok;
+}
+
+static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
+{
+ ktime_t domain_wakeup = KTIME_MAX;
+ ktime_t next_wakeup;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
+ return;
+
+ /*
+	 * Devices that have a predictable wakeup pattern may specify
+	 * their next wakeup. Let's find the next wakeup from all the
+	 * devices attached to this domain and from all the sub-domains.
+	 * It is possible that a component's next wakeup has become stale
+	 * by the time we read it here; we ignore such stale values to
+	 * ensure the domain is able to enter its optimal idle state.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ next_wakeup = to_gpd_data(pdd)->td->next_wakeup;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct genpd_governor_data *cgd = link->child->gd;
+
+ next_wakeup = cgd ? cgd->next_wakeup : KTIME_MAX;
+ if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
+ if (ktime_before(next_wakeup, domain_wakeup))
+ domain_wakeup = next_wakeup;
+ }
+
+ genpd->gd->next_wakeup = domain_wakeup;
+}
+
+static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
+ unsigned int state, ktime_t now)
+{
+ ktime_t domain_wakeup = genpd->gd->next_wakeup;
+ s64 idle_time_ns, min_sleep_ns;
+
+ min_sleep_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].residency_ns;
+
+ idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+
+ return idle_time_ns >= min_sleep_ns;
+}
+
+static bool __default_power_down_ok(struct dev_pm_domain *pd,
+ unsigned int state)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+ struct pm_domain_data *pdd;
+ s64 min_off_time_ns;
+ s64 off_on_time_ns;
+
+ off_on_time_ns = genpd->states[state].power_off_latency_ns +
+ genpd->states[state].power_on_latency_ns;
+
+ min_off_time_ns = -1;
+ /*
+ * Check if subdomains can be off for enough time.
+ *
+ * All subdomains have been powered off already at this point.
+ */
+ list_for_each_entry(link, &genpd->parent_links, parent_node) {
+ struct genpd_governor_data *cgd = link->child->gd;
+
+ s64 sd_max_off_ns = cgd ? cgd->max_off_time_ns : -1;
+
+ if (sd_max_off_ns < 0)
+ continue;
+
+ /*
+ * Check if the subdomain is allowed to be off long enough for
+ * the current domain to turn off and on (that's how much time
+ * it will have to wait worst case).
+ */
+ if (sd_max_off_ns <= off_on_time_ns)
+ return false;
+
+ if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
+ min_off_time_ns = sd_max_off_ns;
+ }
+
+ /*
+ * Check if the devices in the domain can be off enough time.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td;
+ s64 constraint_ns;
+
+ /*
+ * Check if the device is allowed to be off long enough for the
+ * domain to turn off and on (that's how much time it will
+ * have to wait worst case).
+ */
+ td = to_gpd_data(pdd)->td;
+ constraint_ns = td->effective_constraint_ns;
+ /*
+ * Zero means "no suspend at all" and this runs only when all
+ * devices in the domain are suspended, so it must be positive.
+ */
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
+ continue;
+
+ if (constraint_ns <= off_on_time_ns)
+ return false;
+
+ if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
+ min_off_time_ns = constraint_ns;
+ }
+
+ /*
+ * If the computed minimum device off time is negative, there are no
+ * latency constraints, so the domain can spend arbitrary time in the
+ * "off" state.
+ */
+ if (min_off_time_ns < 0)
+ return true;
+
+ /*
+ * The difference between the computed minimum subdomain or device off
+ * time and the time needed to turn the domain on is the maximum
+ * theoretical time this domain can spend in the "off" state.
+ */
+ genpd->gd->max_off_time_ns = min_off_time_ns -
+ genpd->states[state].power_on_latency_ns;
+ return true;
+}
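+
+/*
+ * Worked example (editor's addition, hypothetical numbers): for a state whose
+ * power_off_latency_ns is 0.5 ms and power_on_latency_ns is 0.8 ms,
+ * off_on_time_ns is 1.3 ms. A single device whose effective resume-latency
+ * constraint is 2 ms therefore allows powering down, and the domain's
+ * max_off_time_ns becomes 2 ms - 0.8 ms = 1.2 ms.
+ */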
+
+/**
+ * _default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ * @now: current ktime.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct genpd_governor_data *gd = genpd->gd;
+ int state_idx = genpd->state_count - 1;
+ struct gpd_link *link;
+
+ /*
+	 * Find the next wakeup from the devices that can determine their own
+	 * wakeup, to work out when the domain would wake up; do this for
+	 * every device down the hierarchy. It is not worthwhile to sleep if
+	 * the state's residency requirement cannot be met.
+ */
+ update_domain_next_wakeup(genpd, now);
+ if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (gd->next_wakeup != KTIME_MAX)) {
+		/* Let's find the deepest domain idle state that the devices prefer */
+ while (state_idx >= 0) {
+ if (next_wakeup_allows_state(genpd, state_idx, now)) {
+ gd->max_off_time_changed = true;
+ break;
+ }
+ state_idx--;
+ }
+
+ if (state_idx < 0) {
+ state_idx = 0;
+ gd->cached_power_down_ok = false;
+ goto done;
+ }
+ }
+
+ if (!gd->max_off_time_changed) {
+ genpd->state_idx = gd->cached_power_down_state_idx;
+ return gd->cached_power_down_ok;
+ }
+
+ /*
+ * We have to invalidate the cached results for the parents, so
+ * use the observation that default_power_down_ok() is not
+ * going to be called for any parent until this instance
+ * returns.
+ */
+ list_for_each_entry(link, &genpd->child_links, child_node) {
+ struct genpd_governor_data *pgd = link->parent->gd;
+
+ if (pgd)
+ pgd->max_off_time_changed = true;
+ }
+
+ gd->max_off_time_ns = -1;
+ gd->max_off_time_changed = false;
+ gd->cached_power_down_ok = true;
+
+ /*
+ * Find a state to power down to, starting from the state
+ * determined by the next wakeup.
+ */
+ while (!__default_power_down_ok(pd, state_idx)) {
+ if (state_idx == 0) {
+ gd->cached_power_down_ok = false;
+ break;
+ }
+ state_idx--;
+ }
+
+done:
+ genpd->state_idx = state_idx;
+ gd->cached_power_down_state_idx = genpd->state_idx;
+ return gd->cached_power_down_ok;
+}
+
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ return _default_power_down_ok(pd, ktime_get());
+}
+
+#ifdef CONFIG_CPU_IDLE
+static bool cpu_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct cpuidle_device *dev;
+ ktime_t domain_wakeup, next_hrtimer;
+ ktime_t now = ktime_get();
+ s64 idle_duration_ns;
+ int cpu, i;
+
+ /* Validate dev PM QoS constraints. */
+ if (!_default_power_down_ok(pd, now))
+ return false;
+
+ if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
+ return true;
+
+ /*
+ * Find the next wakeup for any of the online CPUs within the PM domain
+ * and its subdomains. Note, we only need the genpd->cpus, as it already
+ * contains a mask of all CPUs from subdomains.
+ */
+ domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
+ for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
+ dev = per_cpu(cpuidle_devices, cpu);
+ if (dev) {
+ next_hrtimer = READ_ONCE(dev->next_hrtimer);
+ if (ktime_before(next_hrtimer, domain_wakeup))
+ domain_wakeup = next_hrtimer;
+ }
+ }
+
+ /* The minimum idle duration is from now - until the next wakeup. */
+ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
+ if (idle_duration_ns <= 0)
+ return false;
+
+ /*
+ * Find the deepest idle state that has its residency value satisfied
+ * and by also taking into account the power off latency for the state.
+ * Start at the state picked by the dev PM QoS constraint validation.
+ */
+ i = genpd->state_idx;
+ do {
+ if (idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) {
+ genpd->state_idx = i;
+ return true;
+ }
+ } while (--i >= 0);
+
+ return false;
+}
+
+struct dev_power_governor pm_domain_cpu_gov = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = cpu_power_down_ok,
+};
+#endif
+
+struct dev_power_governor simple_qos_governor = {
+ .suspend_ok = default_suspend_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+/**
+ * pm_genpd_gov_always_on - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .suspend_ok = default_suspend_ok,
+};
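An illustrative sketch of how a platform genpd provider might attach one of the governors above; the foo_* names are hypothetical and error handling is omitted:

#include <linux/pm_domain.h>

static int foo_pd_power_off(struct generic_pm_domain *pd)
{
	/* Platform-specific power-off sequence would go here. */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.name      = "foo-pd",
	.power_off = foo_pd_power_off,
};

static int foo_pd_setup(void)
{
	/*
	 * Register the domain with the QoS-aware governor, so that
	 * default_power_down_ok() decides whether powering off is allowed.
	 * A CPU PM domain would set GENPD_FLAG_CPU_DOMAIN in foo_pd.flags
	 * and pass &pm_domain_cpu_gov instead.
	 */
	return pm_genpd_init(&foo_pd, &simple_qos_governor, false);
}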
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 000000000..4fa525668
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/export.h>
+
+#ifdef CONFIG_PM
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code. Otherwise,
+ * return 0.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM */
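The two runtime PM helpers above are meant to be plugged into a subsystem's dev_pm_ops so that runtime PM requests are simply forwarded to the bound driver. A minimal sketch, assuming a hypothetical foo bus:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops foo_bus_pm_ops = {
	/* Forward runtime suspend/resume to the bound driver, if it has any. */
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
			   pm_generic_runtime_resume, NULL)
};

static struct bus_type foo_bus_type = {
	.name = "foo",
	.pm   = &foo_bus_pm_ops,
};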
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * pm_generic_prepare - Generic routine preparing a device for power transition.
+ * @dev: Device to prepare.
+ *
+ * Prepare a device for a system-wide power transition.
+ */
+int pm_generic_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ ret = drv->pm->prepare(dev);
+
+ return ret;
+}
+
+/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
+
+/**
+ * pm_generic_suspend - Generic suspend callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend);
+
+/**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
+ * pm_generic_freeze - Generic freeze callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze);
+
+/**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
+ * pm_generic_poweroff - Generic poweroff callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+
+/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume ? pm->resume(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore ? pm->restore(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+
+/**
+ * pm_generic_complete - Generic routine completing a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition.
+ */
+void pm_generic_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
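The system-sleep helpers follow the same pattern: each one simply dispatches to the corresponding callback of the bound driver, so a subsystem can use them as safe defaults. A sketch with a hypothetical foo_class:

static const struct dev_pm_ops foo_class_pm_ops = {
	.prepare  = pm_generic_prepare,
	.complete = pm_generic_complete,
	.suspend  = pm_generic_suspend,
	.resume   = pm_generic_resume,
	.freeze   = pm_generic_freeze,
	.thaw     = pm_generic_thaw,
	.poweroff = pm_generic_poweroff,
	.restore  = pm_generic_restore,
};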
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
new file mode 100644
index 000000000..9c5a5f4db
--- /dev/null
+++ b/drivers/base/power/main.c
@@ -0,0 +1,2027 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/main.c - Where the driver meets power management.
+ *
+ * Copyright (c) 2003 Patrick Mochel
+ * Copyright (c) 2003 Open Source Development Lab
+ *
+ * The driver model core calls device_pm_add() when a device is registered.
+ * This will initialize the embedded device_pm_info object in the device
+ * and add it to the list of power-controlled devices. sysfs entries for
+ * controlling device power management will also be added.
+ *
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
+ */
+
+#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm-trace.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/async.h>
+#include <linux/suspend.h>
+#include <trace/events/power.h>
+#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
+#include <linux/timer.h>
+
+#include "../base.h"
+#include "power.h"
+
+typedef int (*pm_callback_t)(struct device *);
+
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ device_links_read_lock_held())
+
+/*
+ * The entries in the dpm_list list are in a depth first order, simply
+ * because children are guaranteed to be discovered after parents, and
+ * are inserted at the back of the list on discovery.
+ *
+ * Since device_pm_add() may be called with a device lock held,
+ * we must never try to acquire a device lock while holding
+ * dpm_list_mutex.
+ */
+
+LIST_HEAD(dpm_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
+
+struct suspend_stats suspend_stats;
+static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
+
+static int async_error;
+
+static const char *pm_verb(int event)
+{
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ return "suspend";
+ case PM_EVENT_RESUME:
+ return "resume";
+ case PM_EVENT_FREEZE:
+ return "freeze";
+ case PM_EVENT_QUIESCE:
+ return "quiesce";
+ case PM_EVENT_HIBERNATE:
+ return "hibernate";
+ case PM_EVENT_THAW:
+ return "thaw";
+ case PM_EVENT_RESTORE:
+ return "restore";
+ case PM_EVENT_RECOVER:
+ return "recover";
+ default:
+ return "(unknown PM event)";
+ }
+}
+
+/**
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
+ * @dev: Device object being initialized.
+ */
+void device_pm_sleep_init(struct device *dev)
+{
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
+ init_completion(&dev->power.completion);
+ complete_all(&dev->power.completion);
+ dev->power.wakeup = NULL;
+ INIT_LIST_HEAD(&dev->power.entry);
+}
+
+/**
+ * device_pm_lock - Lock the list of active devices used by the PM core.
+ */
+void device_pm_lock(void)
+{
+ mutex_lock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_unlock - Unlock the list of active devices used by the PM core.
+ */
+void device_pm_unlock(void)
+{
+ mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_add - Add a device to the PM core's list of active devices.
+ * @dev: Device to add to the list.
+ */
+void device_pm_add(struct device *dev)
+{
+ /* Skip PM setup/initialization. */
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Adding info for %s:%s\n",
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ device_pm_check_callbacks(dev);
+ mutex_lock(&dpm_list_mtx);
+ if (dev->parent && dev->parent->power.is_prepared)
+ dev_warn(dev, "parent %s should not be sleeping\n",
+ dev_name(dev->parent));
+ list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
+ mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_pm_remove - Remove a device from the PM core's list of active devices.
+ * @dev: Device to be removed from the list.
+ */
+void device_pm_remove(struct device *dev)
+{
+ if (device_pm_not_required(dev))
+ return;
+
+ pr_debug("Removing info for %s:%s\n",
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ complete_all(&dev->power.completion);
+ mutex_lock(&dpm_list_mtx);
+ list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
+ mutex_unlock(&dpm_list_mtx);
+ device_wakeup_disable(dev);
+ pm_runtime_remove(dev);
+ device_pm_check_callbacks(dev);
+}
+
+/**
+ * device_pm_move_before - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come before.
+ */
+void device_pm_move_before(struct device *deva, struct device *devb)
+{
+ pr_debug("Moving %s:%s before %s:%s\n",
+ deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+ devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+ /* Delete deva from dpm_list and reinsert before devb. */
+ list_move_tail(&deva->power.entry, &devb->power.entry);
+}
+
+/**
+ * device_pm_move_after - Move device in the PM core's list of active devices.
+ * @deva: Device to move in dpm_list.
+ * @devb: Device @deva should come after.
+ */
+void device_pm_move_after(struct device *deva, struct device *devb)
+{
+ pr_debug("Moving %s:%s after %s:%s\n",
+ deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
+ devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
+ /* Delete deva from dpm_list and reinsert after devb. */
+ list_move(&deva->power.entry, &devb->power.entry);
+}
+
+/**
+ * device_pm_move_last - Move device to end of the PM core's list of devices.
+ * @dev: Device to move in dpm_list.
+ */
+void device_pm_move_last(struct device *dev)
+{
+ pr_debug("Moving %s:%s to end of list\n",
+ dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
+ list_move_tail(&dev->power.entry, &dpm_list);
+}
+
+static ktime_t initcall_debug_start(struct device *dev, void *cb)
+{
+ if (!pm_print_times_enabled)
+ return 0;
+
+ dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
+ return ktime_get();
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+ void *cb, int error)
+{
+ ktime_t rettime;
+
+ if (!pm_print_times_enabled)
+ return;
+
+ rettime = ktime_get();
+ dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ (unsigned long long)ktime_us_delta(rettime, calltime));
+}
+
+/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+ if (!dev)
+ return;
+
+ if (async || (pm_async_enabled && dev->power.async_suspend))
+ wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+ dpm_wait(dev, *((bool *)async_ptr));
+ return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+ device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static bool dpm_wait_for_superior(struct device *dev, bool async)
+{
+ struct device *parent;
+
+ /*
+ * If the device is resumed asynchronously and the parent's callback
+ * deletes both the device and the parent itself, the parent object may
+ * be freed while this function is running, so avoid that by reference
+ * counting the parent once more unless the device has been deleted
+ * already (in which case return right away).
+ */
+ mutex_lock(&dpm_list_mtx);
+
+ if (!device_pm_initialized(dev)) {
+ mutex_unlock(&dpm_list_mtx);
+ return false;
+ }
+
+ parent = get_device(dev->parent);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ dpm_wait(parent, async);
+ put_device(parent);
+
+ dpm_wait_for_suppliers(dev, async);
+
+ /*
+ * If the parent's callback has deleted the device, attempting to resume
+ * it would be invalid, so avoid doing that then.
+ */
+ return device_pm_initialized(dev);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
+	 * reasonable to wait for the target device anyway (e.g. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
+/**
+ * pm_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ */
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend;
+ case PM_EVENT_RESUME:
+ return ops->resume;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw;
+ case PM_EVENT_RESTORE:
+ return ops->restore;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+/**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for the device while the returned callback is
+ * being executed.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+ pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend_late;
+ case PM_EVENT_RESUME:
+ return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze_late;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff_late;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw_early;
+ case PM_EVENT_RESTORE:
+ return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+/**
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * The driver of the device will not receive interrupts while the returned
+ * callback is being executed.
+ */
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend_noirq;
+ case PM_EVENT_RESUME:
+ return ops->resume_noirq;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze_noirq;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff_noirq;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw_noirq;
+ case PM_EVENT_RESTORE:
+ return ops->restore_noirq;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
+{
+ dev_dbg(dev, "%s%s%s driver flags: %x\n", info, pm_verb(state.event),
+ ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
+ ", may wakeup" : "", dev->power.driver_flags);
+}
+
+static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
+ int error)
+{
+ dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
+ error);
+}
+
+static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
+ const char *info)
+{
+ ktime_t calltime;
+ u64 usecs64;
+ int usecs;
+
+ calltime = ktime_get();
+ usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs = usecs64;
+ if (usecs == 0)
+ usecs = 1;
+
+ pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ error ? "aborted" : "complete",
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+ pm_message_t state, const char *info)
+{
+ ktime_t calltime;
+ int error;
+
+ if (!cb)
+ return 0;
+
+ calltime = initcall_debug_start(dev, cb);
+
+ pm_dev_dbg(dev, state, info);
+ trace_device_pm_callback_start(dev, info, state.event);
+ error = cb(dev);
+ trace_device_pm_callback_end(dev, error);
+ suspend_report_result(dev, cb, error);
+
+ initcall_debug_report(dev, calltime, cb, error);
+
+ return error;
+}
+
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+ struct device *dev;
+ struct task_struct *tsk;
+ struct timer_list timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+ struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @t: The timer that PM watchdog depends on.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover so panic() to
+ * capture a crash-dump in pstore.
+ */
+static void dpm_watchdog_handler(struct timer_list *t)
+{
+ struct dpm_watchdog *wd = from_timer(wd, t, timer);
+
+ dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+ show_stack(wd->tsk, NULL, KERN_EMERG);
+ panic("%s %s: unrecoverable failure\n",
+ dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+ struct timer_list *timer = &wd->timer;
+
+ wd->dev = dev;
+ wd->tsk = current;
+
+ timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
+ /* use same timeout value for both suspend and resume */
+ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+ add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+ struct timer_list *timer = &wd->timer;
+
+ del_timer_sync(timer);
+ destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
+/*------------------------- Resume routines -------------------------*/
+
+/**
+ * dev_pm_skip_resume - System-wide device resume optimization check.
+ * @dev: Target device.
+ *
+ * Return:
+ * - %false if the transition under way is RESTORE.
+ * - Return value of dev_pm_skip_suspend() if the transition under way is THAW.
+ * - The logical negation of %power.must_resume otherwise (that is, when the
+ * transition under way is RESUME).
+ */
+bool dev_pm_skip_resume(struct device *dev)
+{
+ if (pm_transition.event == PM_EVENT_RESTORE)
+ return false;
+
+ if (pm_transition.event == PM_EVENT_THAW)
+ return dev_pm_skip_suspend(dev);
+
+ return !dev->power.must_resume;
+}
+
+/**
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ bool skip_resume;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Out;
+
+ if (!dev->power.is_noirq_suspended)
+ goto Out;
+
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
+
+ skip_resume = dev_pm_skip_resume(dev);
+ /*
+ * If the driver callback is skipped below or by the middle layer
+ * callback and device_resume_early() also skips the driver callback for
+ * this device later, it needs to appear as "suspended" to PM-runtime,
+ * so change its status accordingly.
+ *
+ * Otherwise, the device is going to be resumed, so set its PM-runtime
+ * status to "active", but do that only if DPM_FLAG_SMART_SUSPEND is set
+ * to avoid confusing drivers that don't use it.
+ */
+ if (skip_resume)
+ pm_runtime_set_suspended(dev);
+ else if (dev_pm_skip_suspend(dev))
+ pm_runtime_set_active(dev);
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (skip_resume)
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "noirq driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
+ dev->power.is_noirq_suspended = false;
+
+Out:
+ complete_all(&dev->power.completion);
+ TRACE_RESUME(error);
+
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+ }
+}
+
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
+static bool dpm_async_fn(struct device *dev, async_func_t func)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (!is_async(dev))
+ return false;
+
+ get_device(dev);
+
+ if (async_schedule_dev_nocall(func, dev))
+ return true;
+
+ put_device(dev);
+
+ return false;
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = data;
+
+ __device_resume_noirq(dev, pm_transition, true);
+ put_device(dev);
+}
+
+static void device_resume_noirq(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_noirq))
+ return;
+
+ __device_resume_noirq(dev, pm_transition, false);
+}
+
+static void dpm_noirq_resume_devices(pm_message_t state)
+{
+ struct device *dev;
+ ktime_t starttime = ktime_get();
+
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
+ get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_late_early_list);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume_noirq(dev);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, 0, "noirq");
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+/**
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
+ * allow device drivers' interrupt handlers to be called.
+ */
+void dpm_resume_noirq(pm_message_t state)
+{
+ dpm_noirq_resume_devices(state);
+
+ resume_device_irqs();
+ device_wakeup_disarm_wake_irqs();
+}
+
+/**
+ * __device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Out;
+
+ if (!dev->power.is_late_suspended)
+ goto Out;
+
+ if (!dpm_wait_for_superior(dev, async))
+ goto Out;
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_resume(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "early driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+
+Skip:
+ dev->power.is_late_suspended = false;
+
+Out:
+ TRACE_RESUME(error);
+
+ pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
+
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async early" : " early", error);
+ }
+}
+
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = data;
+
+ __device_resume_early(dev, pm_transition, true);
+ put_device(dev);
+}
+
+static void device_resume_early(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume_early))
+ return;
+
+ __device_resume_early(dev, pm_transition, false);
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_early(pm_message_t state)
+{
+ struct device *dev;
+ ktime_t starttime = ktime_get();
+
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
+ get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_suspended_list);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume_early(dev);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, 0, "early");
+ trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
+}
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+ dpm_resume_noirq(state);
+ dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
+
+/**
+ * __device_resume - Execute "resume" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
+ */
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->power.syscore)
+ goto Complete;
+
+ if (dev->power.direct_complete) {
+ /* Match the pm_runtime_disable() in __device_suspend(). */
+ pm_runtime_enable(dev);
+ goto Complete;
+ }
+
+ if (!dpm_wait_for_superior(dev, async))
+ goto Complete;
+
+ dpm_watchdog_set(&wd, dev);
+ device_lock(dev);
+
+ /*
+	 * This is a small fib: we allow new children to be added below a
+	 * resumed device even though the device hasn't gone through
+	 * ->complete() yet.
+ */
+ dev->power.is_prepared = false;
+
+ if (!dev->power.is_suspended)
+ goto Unlock;
+
+ if (dev->pm_domain) {
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Driver;
+ }
+
+ if (dev->type && dev->type->pm) {
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Driver;
+ }
+
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
+ }
+
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
+ } else if (dev->bus->resume) {
+ info = "legacy bus ";
+ callback = dev->bus->resume;
+ goto End;
+ }
+ }
+
+ Driver:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ End:
+ error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_suspended = false;
+
+ Unlock:
+ device_unlock(dev);
+ dpm_watchdog_clear(&wd);
+
+ Complete:
+ complete_all(&dev->power.completion);
+
+ TRACE_RESUME(error);
+
+ if (error) {
+ suspend_stats.failed_resume++;
+ dpm_save_failed_step(SUSPEND_RESUME);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, async ? " async" : "", error);
+ }
+}
+
+static void async_resume(void *data, async_cookie_t cookie)
+{
+ struct device *dev = data;
+
+ __device_resume(dev, pm_transition, true);
+ put_device(dev);
+}
+
+static void device_resume(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_resume))
+ return;
+
+ __device_resume(dev, pm_transition, false);
+}
+
+/**
+ * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the appropriate "resume" callback for all devices whose status
+ * indicates that they are suspended.
+ */
+void dpm_resume(pm_message_t state)
+{
+ struct device *dev;
+ ktime_t starttime = ktime_get();
+
+ trace_suspend_resume(TPS("dpm_resume"), state.event, true);
+ might_sleep();
+
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
+ while (!list_empty(&dpm_suspended_list)) {
+ dev = to_device(dpm_suspended_list.next);
+
+ get_device(dev);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ device_resume(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ dpm_show_time(starttime, state, 0, NULL);
+
+ cpufreq_resume();
+ devfreq_resume();
+ trace_suspend_resume(TPS("dpm_resume"), state.event, false);
+}
+
+/**
+ * device_complete - Complete a PM transition for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ */
+static void device_complete(struct device *dev, pm_message_t state)
+{
+ void (*callback)(struct device *) = NULL;
+ const char *info = NULL;
+
+ if (dev->power.syscore)
+ goto out;
+
+ device_lock(dev);
+
+ if (dev->pm_domain) {
+ info = "completing power domain ";
+ callback = dev->pm_domain->ops.complete;
+ } else if (dev->type && dev->type->pm) {
+ info = "completing type ";
+ callback = dev->type->pm->complete;
+ } else if (dev->class && dev->class->pm) {
+ info = "completing class ";
+ callback = dev->class->pm->complete;
+ } else if (dev->bus && dev->bus->pm) {
+ info = "completing bus ";
+ callback = dev->bus->pm->complete;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "completing driver ";
+ callback = dev->driver->pm->complete;
+ }
+
+ if (callback) {
+ pm_dev_dbg(dev, state, info);
+ callback(dev);
+ }
+
+ device_unlock(dev);
+
+out:
+ pm_runtime_put(dev);
+}
+
+/**
+ * dpm_complete - Complete a PM transition for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->complete() callbacks for all devices whose PM status is not
+ * DPM_ON (this allows new devices to be registered).
+ */
+void dpm_complete(pm_message_t state)
+{
+ struct list_head list;
+
+ trace_suspend_resume(TPS("dpm_complete"), state.event, true);
+ might_sleep();
+
+ INIT_LIST_HEAD(&list);
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
+
+ get_device(dev);
+ dev->power.is_prepared = false;
+ list_move(&dev->power.entry, &list);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ trace_device_pm_callback_start(dev, "", state.event);
+ device_complete(dev, state);
+ trace_device_pm_callback_end(dev, 0);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ list_splice(&list, &dpm_list);
+ mutex_unlock(&dpm_list_mtx);
+
+ /* Allow device probing and trigger re-probing of deferred devices */
+ device_unblock_probing();
+ trace_suspend_resume(TPS("dpm_complete"), state.event, false);
+}
+
+/**
+ * dpm_resume_end - Execute "resume" callbacks and complete system transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute "resume" callbacks for all devices and complete the PM transition of
+ * the system.
+ */
+void dpm_resume_end(pm_message_t state)
+{
+ dpm_resume(state);
+ dpm_complete(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_end);
+
+
+/*------------------------- Suspend routines -------------------------*/
+
+/**
+ * resume_event - Return a "resume" message for given "suspend" sleep state.
+ * @sleep_state: PM message representing a sleep state.
+ *
+ * Return a PM message representing the resume event corresponding to given
+ * sleep state.
+ */
+static pm_message_t resume_event(pm_message_t sleep_state)
+{
+ switch (sleep_state.event) {
+ case PM_EVENT_SUSPEND:
+ return PMSG_RESUME;
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return PMSG_RECOVER;
+ case PM_EVENT_HIBERNATE:
+ return PMSG_RESTORE;
+ }
+ return PMSG_ON;
+}
+
+static void dpm_superior_set_must_resume(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent)
+ dev->parent->power.must_resume = true;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ link->supplier->power.must_resume = true;
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ *
+ * The driver of @dev will not receive interrupts while this function is being
+ * executed.
+ */
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
+ dpm_wait_for_subordinate(dev, async);
+
+ if (async_error)
+ goto Complete;
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+ if (dev->pm_domain) {
+ info = "noirq power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "noirq type ";
+ callback = pm_noirq_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "noirq class ";
+ callback = pm_noirq_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "noirq bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_suspend(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "noirq driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+ if (error) {
+ async_error = error;
+ goto Complete;
+ }
+
+Skip:
+ dev->power.is_noirq_suspended = true;
+
+ /*
+ * Skipping the resume of devices that were in use right before the
+ * system suspend (as indicated by their PM-runtime usage counters)
+	 * would be suboptimal. Also resume devices whose resume is not
+	 * allowed to be skipped.
+ */
+ if (atomic_read(&dev->power.usage_count) > 1 ||
+ !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) &&
+ dev->power.may_skip_resume))
+ dev->power.must_resume = true;
+
+ if (dev->power.must_resume)
+ dpm_superior_set_must_resume(dev);
+
+Complete:
+ complete_all(&dev->power.completion);
+ TRACE_SUSPEND(error);
+ return error;
+}
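The must_resume/may_skip_resume logic above is controlled by driver flags. A driver opts in roughly like this (illustrative sketch with a hypothetical platform driver probe):

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	/*
	 * Allow the PM core to reuse the runtime-suspended state during
	 * system suspend and to skip the resume callbacks if nothing used
	 * the device across the transition.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
					    DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}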
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_suspend_noirq))
+ return 0;
+
+ return __device_suspend_noirq(dev, pm_transition, false);
+}
+
+static int dpm_noirq_suspend_devices(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
+ while (!list_empty(&dpm_late_early_list)) {
+ struct device *dev = to_device(dpm_late_early_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend_noirq(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (error) {
+ pm_dev_err(dev, state, " noirq", error);
+ dpm_save_failed_dev(dev_name(dev));
+ } else if (!list_empty(&dev->power.entry)) {
+ list_move(&dev->power.entry, &dpm_noirq_list);
+ }
+
+ mutex_unlock(&dpm_list_mtx);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (error || async_error)
+ break;
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
+ }
+ dpm_show_time(starttime, state, error, "noirq");
+ trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
+ return error;
+}
+
+/**
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prevent device drivers' interrupt handlers from being called and invoke
+ * "noirq" suspend callbacks for all non-sysdev devices.
+ */
+int dpm_suspend_noirq(pm_message_t state)
+{
+ int ret;
+
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+
+ ret = dpm_noirq_suspend_devices(state);
+ if (ret)
+ dpm_resume_noirq(resume_event(state));
+
+ return ret;
+}
+
+static void dpm_propagate_wakeup_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent)
+ return;
+
+ spin_lock_irq(&parent->power.lock);
+
+ if (device_wakeup_path(dev) && !parent->power.ignore_children)
+ parent->power.wakeup_path = true;
+
+ spin_unlock_irq(&parent->power.lock);
+}
+
+/**
+ * __device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
+ __pm_runtime_disable(dev, false);
+
+ dpm_wait_for_subordinate(dev, async);
+
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
+ if (dev->power.syscore || dev->power.direct_complete)
+ goto Complete;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+ if (callback)
+ goto Run;
+
+ if (dev_pm_skip_suspend(dev))
+ goto Skip;
+
+ if (dev->driver && dev->driver->pm) {
+ info = "late driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+Run:
+ error = dpm_run_callback(callback, dev, state, info);
+ if (error) {
+ async_error = error;
+ goto Complete;
+ }
+ dpm_propagate_wakeup_to_parent(dev);
+
+Skip:
+ dev->power.is_late_suspended = true;
+
+Complete:
+ TRACE_SUSPEND(error);
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_suspend_late))
+ return 0;
+
+ return __device_suspend_late(dev, pm_transition, false);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_late(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
+ wake_up_all_idle_cpus();
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
+
+ get_device(dev);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend_late(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_late_early_list);
+
+ if (error) {
+ pm_dev_err(dev, state, " late", error);
+ dpm_save_failed_dev(dev_name(dev));
+ }
+
+ mutex_unlock(&dpm_list_mtx);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (error || async_error)
+ break;
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+ dpm_resume_early(resume_event(state));
+ }
+ dpm_show_time(starttime, state, error, "late");
+ trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
+ return error;
+}
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_suspend_late(state);
+ if (error)
+ goto out;
+
+ error = dpm_suspend_noirq(state);
+ if (error)
+ dpm_resume_early(resume_event(state));
+
+out:
+ dpm_show_time(starttime, state, error, "end");
+ return error;
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
+
+/**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
+ * @info: string description of caller.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+ int (*cb)(struct device *dev, pm_message_t state),
+ const char *info)
+{
+ int error;
+ ktime_t calltime;
+
+ calltime = initcall_debug_start(dev, cb);
+
+ trace_device_pm_callback_start(dev, info, state.event);
+ error = cb(dev, state);
+ trace_device_pm_callback_end(dev, error);
+ suspend_report_result(dev, cb, error);
+
+ initcall_debug_report(dev, calltime, cb, error);
+
+ return error;
+}
+
+static void dpm_clear_superiors_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ if (dev->parent) {
+ spin_lock_irq(&dev->parent->power.lock);
+ dev->parent->power.direct_complete = false;
+ spin_unlock_irq(&dev->parent->power.lock);
+ }
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * __device_suspend - Execute "suspend" callbacks for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
+ */
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
+{
+ pm_callback_t callback = NULL;
+ const char *info = NULL;
+ int error = 0;
+ DECLARE_DPM_WATCHDOG_ON_STACK(wd);
+
+ TRACE_DEVICE(dev);
+ TRACE_SUSPEND(0);
+
+ dpm_wait_for_subordinate(dev, async);
+
+ if (async_error) {
+ dev->power.direct_complete = false;
+ goto Complete;
+ }
+
+ /*
+ * Wait for possible runtime PM transitions of the device in progress
+ * to complete and if there's a runtime resume request pending for it,
+ * resume it before proceeding with invoking the system-wide suspend
+ * callbacks for it.
+ *
+ * If the system-wide suspend callbacks below change the configuration
+ * of the device, they must disable runtime PM for it or otherwise
+ * ensure that its runtime-resume callbacks will not be confused by that
+ * change in case they are invoked going forward.
+ */
+ pm_runtime_barrier(dev);
+
+ if (pm_wakeup_pending()) {
+ dev->power.direct_complete = false;
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
+ if (dev->power.syscore)
+ goto Complete;
+
+ /* Avoid direct_complete to let wakeup_path propagate. */
+ if (device_may_wakeup(dev) || device_wakeup_path(dev))
+ dev->power.direct_complete = false;
+
+ if (dev->power.direct_complete) {
+ if (pm_runtime_status_suspended(dev)) {
+ pm_runtime_disable(dev);
+ if (pm_runtime_status_suspended(dev)) {
+ pm_dev_dbg(dev, state, "direct-complete ");
+ goto Complete;
+ }
+
+ pm_runtime_enable(dev);
+ }
+ dev->power.direct_complete = false;
+ }
+
+ dev->power.may_skip_resume = true;
+ dev->power.must_resume = !dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME);
+
+ dpm_watchdog_set(&wd, dev);
+ device_lock(dev);
+
+ if (dev->pm_domain) {
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Run;
+ }
+
+ if (dev->type && dev->type->pm) {
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Run;
+ }
+
+ if (dev->class && dev->class->pm) {
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
+ }
+
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
+ } else if (dev->bus->suspend) {
+ pm_dev_dbg(dev, state, "legacy bus ");
+ error = legacy_suspend(dev, state, dev->bus->suspend,
+ "legacy bus ");
+ goto End;
+ }
+ }
+
+ Run:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
+ End:
+ if (!error) {
+ dev->power.is_suspended = true;
+ if (device_may_wakeup(dev))
+ dev->power.wakeup_path = true;
+
+ dpm_propagate_wakeup_to_parent(dev);
+ dpm_clear_superiors_direct_complete(dev);
+ }
+
+ device_unlock(dev);
+ dpm_watchdog_clear(&wd);
+
+ Complete:
+ if (error)
+ async_error = error;
+
+ complete_all(&dev->power.completion);
+ TRACE_SUSPEND(error);
+ return error;
+}
+
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+ struct device *dev = data;
+ int error;
+
+ error = __device_suspend(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+ if (dpm_async_fn(dev, async_suspend))
+ return 0;
+
+ return __device_suspend(dev, pm_transition, false);
+}
+
+/**
+ * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
+ might_sleep();
+
+ devfreq_suspend();
+ cpufreq_suspend();
+
+ mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+ while (!list_empty(&dpm_prepared_list)) {
+ struct device *dev = to_device(dpm_prepared_list.prev);
+
+ get_device(dev);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (error) {
+ pm_dev_err(dev, state, "", error);
+ dpm_save_failed_dev(dev_name(dev));
+ } else if (!list_empty(&dev->power.entry)) {
+ list_move(&dev->power.entry, &dpm_suspended_list);
+ }
+
+ mutex_unlock(&dpm_list_mtx);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (error || async_error)
+ break;
+ }
+ mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+ if (error) {
+ suspend_stats.failed_suspend++;
+ dpm_save_failed_step(SUSPEND_SUSPEND);
+ }
+ dpm_show_time(starttime, state, error, NULL);
+ trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
+ return error;
+}
+
+/**
+ * device_prepare - Prepare a device for system power transition.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for given device. No new children of the
+ * device may be registered after this function has returned.
+ */
+static int device_prepare(struct device *dev, pm_message_t state)
+{
+ int (*callback)(struct device *) = NULL;
+ int ret = 0;
+
+ /*
+ * If a device's parent goes into runtime suspend at the wrong time,
+ * it won't be possible to resume the device. To prevent this we
+ * block runtime suspend here, during the prepare phase, and allow
+ * it again during the complete phase.
+ */
+ pm_runtime_get_noresume(dev);
+
+ if (dev->power.syscore)
+ return 0;
+
+ device_lock(dev);
+
+ dev->power.wakeup_path = false;
+
+ if (dev->power.no_pm_callbacks)
+ goto unlock;
+
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.prepare;
+ else if (dev->type && dev->type->pm)
+ callback = dev->type->pm->prepare;
+ else if (dev->class && dev->class->pm)
+ callback = dev->class->pm->prepare;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->prepare;
+
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->prepare;
+
+ if (callback)
+ ret = callback(dev);
+
+unlock:
+ device_unlock(dev);
+
+ if (ret < 0) {
+ suspend_report_result(dev, callback, ret);
+ pm_runtime_put(dev);
+ return ret;
+ }
+ /*
+ * A positive return value from ->prepare() means "this device appears
+ * to be runtime-suspended and its state is fine, so if it really is
+ * runtime-suspended, you can leave it in that state provided that you
+ * will do the same thing with all of its descendants". This only
+ * applies to suspend transitions, however.
+ */
+ spin_lock_irq(&dev->power.lock);
+ dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
+ (ret > 0 || dev->power.no_pm_callbacks) &&
+ !dev_pm_test_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
+}
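+
+/*
+ * Editor's illustration (not part of the upstream file): a driver that must
+ * always have its suspend callbacks invoked, even when the device looks
+ * runtime-suspended, can opt out of the direct-complete optimization at
+ * probe time with:
+ *
+ *	dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ *
+ * which makes the dev_pm_test_driver_flags() check above clear
+ * power.direct_complete for this device.
+ */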
+
+/**
+ * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
+ * @state: PM transition of the system being carried out.
+ *
+ * Execute the ->prepare() callback(s) for all devices.
+ */
+int dpm_prepare(pm_message_t state)
+{
+ int error = 0;
+
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
+ might_sleep();
+
+ /*
+ * Give the known devices a chance to complete their probes before
+ * probing is disabled. This synchronization point is important at
+ * least at boot time and during hibernation restore.
+ */
+ wait_for_device_probe();
+ /*
+ * Probing devices during suspend or hibernation is unsafe and would
+ * make system behavior unpredictable, so prohibit device probing here
+ * and defer the probes instead. The normal behavior is restored in
+ * dpm_complete().
+ */
+ device_block_probing();
+
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_list) && !error) {
+ struct device *dev = to_device(dpm_list.next);
+
+ get_device(dev);
+
+ mutex_unlock(&dpm_list_mtx);
+
+ trace_device_pm_callback_start(dev, "", state.event);
+ error = device_prepare(dev, state);
+ trace_device_pm_callback_end(dev, error);
+
+ mutex_lock(&dpm_list_mtx);
+
+ if (!error) {
+ dev->power.is_prepared = true;
+ if (!list_empty(&dev->power.entry))
+ list_move_tail(&dev->power.entry, &dpm_prepared_list);
+ } else if (error == -EAGAIN) {
+ error = 0;
+ } else {
+ dev_info(dev, "not prepared for power transition: code %d\n",
+ error);
+ }
+
+ mutex_unlock(&dpm_list_mtx);
+
+ put_device(dev);
+
+ mutex_lock(&dpm_list_mtx);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
+ return error;
+}
+
+/**
+ * dpm_suspend_start - Prepare devices for PM transition and suspend them.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prepare all non-sysdev devices for system PM transition and execute "suspend"
+ * callbacks for them.
+ */
+int dpm_suspend_start(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error;
+
+ error = dpm_prepare(state);
+ if (error) {
+ suspend_stats.failed_prepare++;
+ dpm_save_failed_step(SUSPEND_PREPARE);
+ } else
+ error = dpm_suspend(state);
+ dpm_show_time(starttime, state, error, "start");
+ return error;
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_start);
+
+void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
+{
+ if (ret)
+ dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
+}
+EXPORT_SYMBOL_GPL(__suspend_report_result);
+
+/**
+ * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
+ * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
+ */
+int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
+{
+ dpm_wait(dev, subordinate->power.async_suspend);
+ return async_error;
+}
+EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
+
+/**
+ * dpm_for_each_dev - device iterator.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over devices in dpm_list, and call @fn for each device,
+ * passing it @data.
+ */
+void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
+{
+ struct device *dev;
+
+ if (!fn)
+ return;
+
+ device_pm_lock();
+ list_for_each_entry(dev, &dpm_list, power.entry)
+ fn(dev, data);
+ device_pm_unlock();
+}
+EXPORT_SYMBOL_GPL(dpm_for_each_dev);
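+
+/*
+ * Editor's illustration (not part of the upstream file): a hypothetical
+ * caller could walk dpm_list like this, with @data used as an accumulator:
+ *
+ *	static void count_one(struct device *dev, void *data)
+ *	{
+ *		(*(unsigned int *)data)++;
+ *	}
+ *
+ *	unsigned int n = 0;
+ *
+ *	dpm_for_each_dev(&n, count_one);
+ *	pr_info("%u devices on dpm_list\n", n);
+ */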
+
+static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
+{
+ if (!ops)
+ return true;
+
+ return !ops->prepare &&
+ !ops->suspend &&
+ !ops->suspend_late &&
+ !ops->suspend_noirq &&
+ !ops->resume_noirq &&
+ !ops->resume_early &&
+ !ops->resume &&
+ !ops->complete;
+}
+
+void device_pm_check_callbacks(struct device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev->power.no_pm_callbacks =
+ (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+ !dev->bus->suspend && !dev->bus->resume)) &&
+ (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
+ (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
+ (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+ !dev->driver->suspend && !dev->driver->resume));
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+
+bool dev_pm_skip_suspend(struct device *dev)
+{
+ return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
+ pm_runtime_status_suspended(dev);
+}
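+
+/*
+ * Editor's illustration (not part of the upstream file): dev_pm_skip_suspend()
+ * only returns true for devices whose drivers have opted in, typically from
+ * probe:
+ *
+ *	dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
+ *
+ * so that bus code can skip the late/noirq system suspend callbacks for a
+ * device that is already runtime-suspended.
+ */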
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
new file mode 100644
index 000000000..922ed457d
--- /dev/null
+++ b/drivers/base/power/power.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/pm_qos.h>
+
+static inline void device_pm_init_common(struct device *dev)
+{
+ if (!dev->power.early_init) {
+ spin_lock_init(&dev->power.lock);
+ dev->power.qos = NULL;
+ dev->power.early_init = true;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ dev->power.disable_depth = 1;
+ device_pm_init_common(dev);
+}
+
+extern void pm_runtime_init(struct device *dev);
+extern void pm_runtime_reinit(struct device *dev);
+extern void pm_runtime_remove(struct device *dev);
+extern u64 pm_runtime_active_time(struct device *dev);
+
+#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
+#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
+ WAKE_IRQ_DEDICATED_MANAGED | \
+ WAKE_IRQ_DEDICATED_REVERSE)
+#define WAKE_IRQ_DEDICATED_ENABLED BIT(3)
+
+struct wake_irq {
+ struct device *dev;
+ unsigned int status;
+ int irq;
+ const char *name;
+};
+
+extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+
+extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq);
+extern void device_wakeup_detach_irq(struct device *dev);
+extern void device_wakeup_arm_wake_irqs(void);
+extern void device_wakeup_disarm_wake_irqs(void);
+
+#else
+
+static inline void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq) {}
+
+static inline void device_wakeup_detach_irq(struct device *dev)
+{
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+/*
+ * sysfs.c
+ */
+
+extern int dpm_sysfs_add(struct device *dev);
+extern void dpm_sysfs_remove(struct device *dev);
+extern void rpm_sysfs_remove(struct device *dev);
+extern int wakeup_sysfs_add(struct device *dev);
+extern void wakeup_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
+extern int pm_qos_sysfs_add_flags(struct device *dev);
+extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
+extern int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
+
+#else /* CONFIG_PM */
+
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+}
+
+static inline void pm_runtime_init(struct device *dev) {}
+static inline void pm_runtime_reinit(struct device *dev) {}
+static inline void pm_runtime_remove(struct device *dev) {}
+
+static inline int dpm_sysfs_add(struct device *dev) { return 0; }
+static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid) { return 0; }
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+
+/* drivers/base/power/main.c */
+extern struct list_head dpm_list; /* The active device list */
+
+static inline struct device *to_device(struct list_head *entry)
+{
+ return container_of(entry, struct device, power.entry);
+}
+
+extern void device_pm_sleep_init(struct device *dev);
+extern void device_pm_add(struct device *);
+extern void device_pm_remove(struct device *);
+extern void device_pm_move_before(struct device *, struct device *);
+extern void device_pm_move_after(struct device *, struct device *);
+extern void device_pm_move_last(struct device *);
+extern void device_pm_check_callbacks(struct device *dev);
+
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
+/* drivers/base/power/wakeup_stats.c */
+extern int wakeup_source_sysfs_add(struct device *parent,
+ struct wakeup_source *ws);
+extern void wakeup_source_sysfs_remove(struct wakeup_source *ws);
+
+extern int pm_wakeup_source_sysfs_add(struct device *parent);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_sleep_init(struct device *dev) {}
+
+static inline void device_pm_add(struct device *dev) {}
+
+static inline void device_pm_remove(struct device *dev)
+{
+ pm_runtime_remove(dev);
+}
+
+static inline void device_pm_move_before(struct device *deva,
+ struct device *devb) {}
+static inline void device_pm_move_after(struct device *deva,
+ struct device *devb) {}
+static inline void device_pm_move_last(struct device *dev) {}
+
+static inline void device_pm_check_callbacks(struct device *dev) {}
+
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
+static inline int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static inline void device_pm_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+ device_pm_sleep_init(dev);
+ pm_runtime_init(dev);
+}
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
new file mode 100644
index 000000000..79fc6c441
--- /dev/null
+++ b/drivers/base/power/qos-test.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+#include <kunit/test.h>
+#include <linux/pm_qos.h>
+
+/* Basic test for aggregating two "min" requests */
+static void freq_qos_test_min(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+
+ ret = freq_qos_remove_request(&req2);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+}
+
+/* Test that requests for MAX_DEFAULT_VALUE have no effect */
+static void freq_qos_test_maxdef(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req1, req2;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req1, 0, sizeof(req1));
+ memset(&req2, 0, sizeof(req2));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+
+ ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ /* Add max 1000 */
+ ret = freq_qos_update_request(&req1, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Add max 2000, no impact */
+ ret = freq_qos_update_request(&req2, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+ /* Remove max 1000, new max 2000 */
+ ret = freq_qos_remove_request(&req1);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
+}
+
+/*
+ * Test that a freq_qos_request can be added again after removal
+ *
+ * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
+ * QoS requests after removal")
+ */
+static void freq_qos_test_readd(struct kunit *test)
+{
+ struct freq_constraints qos;
+ struct freq_qos_request req;
+ int ret;
+
+ freq_constraints_init(&qos);
+ memset(&req, 0, sizeof(req));
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+ /* Remove */
+ ret = freq_qos_remove_request(&req);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+ FREQ_QOS_MIN_DEFAULT_VALUE);
+
+ /* Add again */
+ ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
+ KUNIT_EXPECT_EQ(test, ret, 1);
+ KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+}
+
+static struct kunit_case pm_qos_test_cases[] = {
+ KUNIT_CASE(freq_qos_test_min),
+ KUNIT_CASE(freq_qos_test_maxdef),
+ KUNIT_CASE(freq_qos_test_readd),
+ {},
+};
+
+static struct kunit_suite pm_qos_test_module = {
+ .name = "qos-kunit-test",
+ .test_cases = pm_qos_test_cases,
+};
+kunit_test_suites(&pm_qos_test_module);
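+
+/*
+ * Editor's note (not part of the upstream file): this suite is built when
+ * the corresponding KUnit test option in drivers/base/Kconfig is enabled and
+ * can then be run with the usual KUnit tooling, for example:
+ *
+ *	./tools/testing/kunit/kunit.py run 'qos-kunit-test'
+ */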
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
new file mode 100644
index 000000000..8e93167f1
--- /dev/null
+++ b/drivers/base/power/qos.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Devices PM QoS constraints management
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * This module exposes the interface to kernel space for specifying
+ * per-device PM QoS dependencies. It provides infrastructure for registration
+ * of:
+ *
+ * Dependents on a QoS value : register requests
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
+ *
+ * Note about the per-device constraint data struct allocation:
+ * . The per-device constraints data struct ptr is stored into the device
+ * dev_pm_info.
+ * . To minimize the data usage by the per-device constraints, the data struct
+ * is only allocated at the first call to dev_pm_qos_add_request.
+ * . The data is later freed when the device is removed from the system.
+ * . A global mutex protects the constraints users from the data being
+ * allocated and freed.
+ */
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <trace/events/power.h>
+
+#include "power.h"
+
+static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
+
+/**
+ * __dev_pm_qos_flags - Check PM QoS flags for a given device.
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ struct pm_qos_flags *pqf;
+ s32 val;
+
+ lockdep_assert_held(&dev->power.lock);
+
+ if (IS_ERR_OR_NULL(qos))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ pqf = &qos->flags;
+ if (list_empty(&pqf->list))
+ return PM_QOS_FLAGS_UNDEFINED;
+
+ val = pqf->effective_flags & mask;
+ if (val)
+ return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
+
+ return PM_QOS_FLAGS_NONE;
+}
+
+/**
+ * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
+ * @dev: Device to check the PM QoS flags for.
+ * @mask: Flags to check against.
+ */
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
+{
+ unsigned long irqflags;
+ enum pm_qos_flags_status ret;
+
+ spin_lock_irqsave(&dev->power.lock, irqflags);
+ ret = __dev_pm_qos_flags(dev, mask);
+ spin_unlock_irqrestore(&dev->power.lock, irqflags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
+
+/**
+ * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_resume_latency(struct device *dev)
+{
+ lockdep_assert_held(&dev->power.lock);
+
+ return dev_pm_qos_raw_resume_latency(dev);
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
+ * @dev: Device to get the PM QoS constraint value for.
+ * @type: QoS request type.
+ */
+s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
+{
+ struct dev_pm_qos *qos = dev->power.qos;
+ unsigned long flags;
+ s32 ret;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
+ : pm_qos_read_value(&qos->resume_latency);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+ : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
+ break;
+ default:
+ WARN_ON(1);
+ ret = 0;
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
+
+/**
+ * apply_constraint - Add/modify/remove device PM QoS request.
+ * @req: Constraint request to apply
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ *
+ * Internal function to update the constraints list using the PM QoS core
+ * code and if needed call the per-device callbacks.
+ */
+static int apply_constraint(struct dev_pm_qos_request *req,
+ enum pm_qos_req_action action, s32 value)
+{
+ struct dev_pm_qos *qos = req->dev->power.qos;
+ int ret;
+
+ switch (req->type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
+ value = 0;
+
+ ret = pm_qos_update_target(&qos->resume_latency,
+ &req->data.pnode, action, value);
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ ret = pm_qos_update_target(&qos->latency_tolerance,
+ &req->data.pnode, action, value);
+ if (ret) {
+ value = pm_qos_read_value(&qos->latency_tolerance);
+ req->dev->power.set_latency_tolerance(req->dev, value);
+ }
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_apply(&req->data.freq, action, value);
+ break;
+ case DEV_PM_QOS_FLAGS:
+ ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
+ action, value);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * dev_pm_qos_constraints_allocate
+ * @dev: device to allocate data for
+ *
+ * Called at the first call to add_request to allocate the constraint data.
+ * Must be called with the dev_pm_qos_mtx mutex held.
+ */
+static int dev_pm_qos_constraints_allocate(struct device *dev)
+{
+ struct dev_pm_qos *qos;
+ struct pm_qos_constraints *c;
+ struct blocking_notifier_head *n;
+
+ qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+ if (!qos)
+ return -ENOMEM;
+
+ n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ kfree(qos);
+ return -ENOMEM;
+ }
+
+ c = &qos->resume_latency;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+ c->notifiers = n;
+ BLOCKING_INIT_NOTIFIER_HEAD(n);
+
+ c = &qos->latency_tolerance;
+ plist_head_init(&c->list);
+ c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ c->type = PM_QOS_MIN;
+
+ freq_constraints_init(&qos->freq);
+
+ INIT_LIST_HEAD(&qos->flags.list);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.qos = qos;
+ spin_unlock_irq(&dev->power.lock);
+
+ return 0;
+}
+
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
+
+/**
+ * dev_pm_qos_constraints_destroy
+ * @dev: target device
+ *
+ * Called from the device PM subsystem on device removal under device_pm_lock().
+ */
+void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ struct dev_pm_qos *qos;
+ struct dev_pm_qos_request *req, *tmp;
+ struct pm_qos_constraints *c;
+ struct pm_qos_flags *f;
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ /*
+ * If the device's PM QoS resume latency limit or PM QoS flags have been
+ * exposed to user space, they have to be hidden at this point.
+ */
+ pm_qos_sysfs_remove_resume_latency(dev);
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ __dev_pm_qos_hide_latency_limit(dev);
+ __dev_pm_qos_hide_flags(dev);
+
+ qos = dev->power.qos;
+ if (!qos)
+ goto out;
+
+ /* Flush the constraints lists for the device. */
+ c = &qos->resume_latency;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ /*
+ * Update constraints list and call the notification
+ * callbacks if needed
+ */
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->latency_tolerance;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.min_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ c = &qos->freq.max_freq;
+ plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ f = &qos->flags;
+ list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
+ apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.qos = ERR_PTR(-ENODEV);
+ spin_unlock_irq(&dev->power.lock);
+
+ kfree(qos->resume_latency.notifiers);
+ kfree(qos);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+}
+
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+ !dev->power.set_latency_tolerance;
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret = 0;
+
+ if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
+ return -EINVAL;
+
+ if (WARN(dev_pm_qos_request_active(req),
+ "%s() called for already added request\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+ if (ret)
+ return ret;
+
+ req->dev = dev;
+ req->type = type;
+ if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MIN, value);
+ else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
+ ret = freq_qos_add_request(&dev->power.qos->freq,
+ &req->data.freq,
+ FREQ_QOS_MAX, value);
+ else
+ ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+
+ return ret;
+}
+
+/**
+ * dev_pm_qos_add_request - inserts new qos request into the list
+ * @dev: target device for the constraint
+ * @req: pointer to a preallocated handle
+ * @type: type of the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the device constraints list of
+ * requested qos performance characteristics. It recomputes the aggregate
+ * QoS expectations of parameters and initializes the dev_pm_qos_request
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
+ * to allocate for data structures, -ENODEV if the device has just been removed
+ * from the system.
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_add_request(dev, req, type, value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
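+
+/*
+ * Editor's illustration (not part of the upstream file): a typical caller
+ * keeps the request handle alive for the lifetime of the constraint, e.g.:
+ *
+ *	struct dev_pm_qos_request req;
+ *	int ret;
+ *
+ *	ret = dev_pm_qos_add_request(dev, &req,
+ *				     DEV_PM_QOS_RESUME_LATENCY, 100);
+ *	if (ret < 0)
+ *		return ret;
+ *	...
+ *	dev_pm_qos_update_request(&req, 200);
+ *	...
+ *	dev_pm_qos_remove_request(&req);
+ */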
+
+/**
+ * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
+ * @req : PM QoS request to modify.
+ * @new_value: New value to request.
+ */
+static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+{
+ s32 curr_value;
+ int ret = 0;
+
+ if (!req) /* guard against callers passing in null */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+
+ switch (req->type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ curr_value = req->data.pnode.prio;
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ curr_value = req->data.freq.pnode.prio;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ curr_value = req->data.flr.flags;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
+ new_value);
+ if (curr_value != new_value)
+ ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
+
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a dev_pm_qos request to use
+ * @new_value: defines the qos request
+ *
+ * Updates an existing dev PM qos request along with updating the
+ * target value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_update_request(req, new_value);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
+
+ if (!req) /* guard against callers passing in null */
+ return -EINVAL;
+
+ if (WARN(!dev_pm_qos_request_active(req),
+ "%s() called for unknown object\n", __func__))
+ return -EINVAL;
+
+ if (IS_ERR_OR_NULL(req->dev->power.qos))
+ return -ENODEV;
+
+ trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
+ PM_QOS_DEFAULT_VALUE);
+ ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+ memset(req, 0, sizeof(*req));
+ return ret;
+}
+
+/**
+ * dev_pm_qos_remove_request - removes an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value. Call this on slow code paths.
+ *
+ * Returns 1 if the aggregated constraint value has changed,
+ * 0 if the aggregated constraint value has not changed,
+ * -EINVAL in case of wrong parameters, -ENODEV if the device has been
+ * removed from the system
+ *
+ * Callers should ensure that the target device is not RPM_SUSPENDED before
+ * using this function for requests of type DEV_PM_QOS_FLAGS.
+ */
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = __dev_pm_qos_remove_request(req);
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
+
+/**
+ * dev_pm_qos_add_notifier - sets notification entry for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block managed by caller.
+ * @type: request type.
+ *
+ * Will register the notifier into a notification chain that gets called
+ * upon changes to the target value for the device.
+ *
+ * If the device's constraints object doesn't exist when this routine is called,
+ * it will be created (or an error code will be returned if that fails).
+ */
+int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR(dev->power.qos))
+ ret = -ENODEV;
+ else if (!dev->power.qos)
+ ret = dev_pm_qos_constraints_allocate(dev);
+
+ if (ret)
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_add_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
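+
+/*
+ * Editor's illustration (not part of the upstream file): a hypothetical
+ * watcher of the aggregated resume-latency value could look like this
+ * (the notifier is invoked with the new aggregated value):
+ *
+ *	static int my_qos_notify(struct notifier_block *nb,
+ *				 unsigned long value, void *unused)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_qos_nb = {
+ *		.notifier_call = my_qos_notify,
+ *	};
+ *
+ *	dev_pm_qos_add_notifier(dev, &my_qos_nb, DEV_PM_QOS_RESUME_LATENCY);
+ */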
+
+/**
+ * dev_pm_qos_remove_notifier - deletes notification for changes to target value
+ * of per-device PM QoS constraints
+ *
+ * @dev: target device for the constraint
+ * @notifier: notifier block to be removed.
+ * @type: request type.
+ *
+ * Will remove the notifier from the notification chain that gets called
+ * upon changes to the target value.
+ */
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier,
+ enum dev_pm_qos_req_type type)
+{
+ int ret = 0;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ /* Silently return if the constraints object is not present. */
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ goto unlock;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+ notifier);
+ break;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MIN, notifier);
+ break;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+ FREQ_QOS_MAX, notifier);
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value)
+{
+ struct device *ancestor = dev->parent;
+ int ret = -ENODEV;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ while (ancestor && !ancestor->power.set_latency_tolerance)
+ ancestor = ancestor->parent;
+
+ break;
+ default:
+ ancestor = NULL;
+ }
+ if (ancestor)
+ ret = dev_pm_qos_add_request(ancestor, req, type, value);
+
+ if (ret < 0)
+ req->dev = NULL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+
+static void __dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ struct dev_pm_qos_request *req = NULL;
+
+ switch (type) {
+ case DEV_PM_QOS_RESUME_LATENCY:
+ req = dev->power.qos->resume_latency_req;
+ dev->power.qos->resume_latency_req = NULL;
+ break;
+ case DEV_PM_QOS_LATENCY_TOLERANCE:
+ req = dev->power.qos->latency_tolerance_req;
+ dev->power.qos->latency_tolerance_req = NULL;
+ break;
+ case DEV_PM_QOS_FLAGS:
+ req = dev->power.qos->flags_req;
+ dev->power.qos->flags_req = NULL;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+}
+
+static void dev_pm_qos_drop_user_request(struct device *dev,
+ enum dev_pm_qos_req_type type)
+{
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_drop_user_request(dev, type);
+ mutex_unlock(&dev_pm_qos_mtx);
+}
+
+/**
+ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
+ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
+ * @value: Initial value of the latency limit.
+ */
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev) || value < 0)
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->resume_latency_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
+ goto out;
+ }
+ dev->power.qos->resume_latency_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ ret = pm_qos_sysfs_add_resume_latency(dev);
+ if (ret)
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
+
+ out:
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
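+
+/*
+ * Editor's note (not part of the upstream file): once exposed, the limit
+ * shows up as a writable attribute under the device's power/ directory in
+ * sysfs (pm_qos_resume_latency_us in current kernels), so user space can
+ * adjust it with something like:
+ *
+ *	echo 100 > /sys/devices/.../power/pm_qos_resume_latency_us
+ */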
+
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
+}
+
+/**
+ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
+ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
+ */
+void dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_resume_latency(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_latency_limit(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+
+/**
+ * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
+ * @dev: Device whose PM QoS flags are to be exposed to user space.
+ * @val: Initial values of the flags.
+ */
+int dev_pm_qos_expose_flags(struct device *dev, s32 val)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev))
+ return -EINVAL;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
+ if (ret < 0) {
+ kfree(req);
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos))
+ ret = -ENODEV;
+ else if (dev->power.qos->flags_req)
+ ret = -EEXIST;
+
+ if (ret < 0) {
+ __dev_pm_qos_remove_request(req);
+ kfree(req);
+ mutex_unlock(&dev_pm_qos_mtx);
+ goto out;
+ }
+ dev->power.qos->flags_req = req;
+
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ ret = pm_qos_sysfs_add_flags(dev);
+ if (ret)
+ dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+
+ out:
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ pm_runtime_put(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
+
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+}
+
+/**
+ * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
+ * @dev: Device whose PM QoS flags are to be hidden from user space.
+ */
+void dev_pm_qos_hide_flags(struct device *dev)
+{
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+ pm_qos_sysfs_remove_flags(dev);
+
+ mutex_lock(&dev_pm_qos_mtx);
+ __dev_pm_qos_hide_flags(dev);
+ mutex_unlock(&dev_pm_qos_mtx);
+
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
+
+/**
+ * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
+ * @dev: Device to update the PM QoS flags request for.
+ * @mask: Flags to set/clear.
+ * @set: Whether to set or clear the flags (true means set).
+ */
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
+{
+ s32 value;
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ value = dev_pm_qos_requested_flags(dev);
+ if (set)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ pm_runtime_put(dev);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+ s32 ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+ ret = IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req ?
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+ dev->power.qos->latency_tolerance_req->data.pnode.prio;
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+ int ret;
+
+ mutex_lock(&dev_pm_qos_mtx);
+
+ if (IS_ERR_OR_NULL(dev->power.qos)
+ || !dev->power.qos->latency_tolerance_req) {
+ struct dev_pm_qos_request *req;
+
+ if (val < 0) {
+ if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
+ ret = 0;
+ else
+ ret = -EINVAL;
+ goto out;
+ }
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+ if (ret < 0) {
+ kfree(req);
+ goto out;
+ }
+ dev->power.qos->latency_tolerance_req = req;
+ } else {
+ if (val < 0) {
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+ ret = 0;
+ } else {
+ ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+ }
+ }
+
+ out:
+ mutex_unlock(&dev_pm_qos_mtx);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+ int ret;
+
+ if (!dev->power.set_latency_tolerance)
+ return -EINVAL;
+
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ ret = pm_qos_sysfs_add_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+ mutex_lock(&dev_pm_qos_sysfs_mtx);
+ pm_qos_sysfs_remove_latency_tolerance(dev);
+ mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+ /* Remove the request from user space now */
+ pm_runtime_get_sync(dev);
+ dev_pm_qos_update_user_latency_tolerance(dev,
+ PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+ pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
new file mode 100644
index 000000000..14088b5ad
--- /dev/null
+++ b/drivers/base/power/runtime.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
+ *
+ * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
+ */
+#include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/export.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <trace/events/rpm.h>
+
+#include "../base.h"
+#include "power.h"
+
+typedef int (*pm_callback_t)(struct device *);
+
+static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
+{
+ pm_callback_t cb;
+ const struct dev_pm_ops *ops;
+
+ if (dev->pm_domain)
+ ops = &dev->pm_domain->ops;
+ else if (dev->type && dev->type->pm)
+ ops = dev->type->pm;
+ else if (dev->class && dev->class->pm)
+ ops = dev->class->pm;
+ else if (dev->bus && dev->bus->pm)
+ ops = dev->bus->pm;
+ else
+ ops = NULL;
+
+ if (ops)
+ cb = *(pm_callback_t *)((void *)ops + cb_offset);
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+
+ return cb;
+}
+
+#define RPM_GET_CALLBACK(dev, callback) \
+ __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
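+
+/*
+ * Editor's note (not part of the upstream file): the macro simply turns a
+ * dev_pm_ops member name into a byte offset, so for example
+ *
+ *	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ *
+ * expands to
+ *
+ *	callback = __rpm_get_callback(dev,
+ *			offsetof(struct dev_pm_ops, runtime_suspend));
+ *
+ * which looks the callback up in the pm_domain/type/class/bus/driver
+ * precedence order implemented above.
+ */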
+
+static int rpm_resume(struct device *dev, int rpmflags);
+static int rpm_suspend(struct device *dev, int rpmflags);
+
+/**
+ * update_pm_runtime_accounting - Update the time accounting of power states
+ * @dev: Device to update the accounting for
+ *
+ * In order to be able to have time accounting of the various power states
+ * (as used by programs such as PowerTOP to show the effectiveness of runtime
+ * PM), we need to track the time spent in each state.
+ * update_pm_runtime_accounting must be called each time before the
+ * runtime_status field is updated, to account the time in the old state
+ * correctly.
+ */
+static void update_pm_runtime_accounting(struct device *dev)
+{
+ u64 now, last, delta;
+
+ if (dev->power.disable_depth > 0)
+ return;
+
+ last = dev->power.accounting_timestamp;
+
+ now = ktime_get_mono_fast_ns();
+ dev->power.accounting_timestamp = now;
+
+ /*
+ * Because ktime_get_mono_fast_ns() is not monotonic during
+ * timekeeping updates, ensure that 'now' is after the last saved
+ * timestamp.
+ */
+ if (now < last)
+ return;
+
+ delta = now - last;
+
+ if (dev->power.runtime_status == RPM_SUSPENDED)
+ dev->power.suspended_time += delta;
+ else
+ dev->power.active_time += delta;
+}
+
+static void __update_runtime_status(struct device *dev, enum rpm_status status)
+{
+ update_pm_runtime_accounting(dev);
+ dev->power.runtime_status = status;
+}
+
+static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
+{
+ u64 time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ update_pm_runtime_accounting(dev);
+ time = suspended ? dev->power.suspended_time : dev->power.active_time;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return time;
+}
+
+u64 pm_runtime_active_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, false);
+}
+
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+ return rpm_get_accounted_time(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
+/**
+ * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_deactivate_timer(struct device *dev)
+{
+ if (dev->power.timer_expires > 0) {
+ hrtimer_try_to_cancel(&dev->power.suspend_timer);
+ dev->power.timer_expires = 0;
+ }
+}
+
+/**
+ * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
+ * @dev: Device to handle.
+ */
+static void pm_runtime_cancel_pending(struct device *dev)
+{
+ pm_runtime_deactivate_timer(dev);
+ /*
+ * In case there's a request pending, make sure its work function will
+ * return without doing anything.
+ */
+ dev->power.request = RPM_REQ_NONE;
+}
+
+/*
+ * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
+ * @dev: Device to handle.
+ *
+ * Compute the autosuspend-delay expiration time based on the device's
+ * power.last_busy time. If the delay has already expired or is disabled
+ * (negative) or the power.use_autosuspend flag isn't set, return 0.
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
+ *
+ * This function may be called either with or without dev->power.lock held.
+ * Either way it can be racy, since power.last_busy may be updated at any time.
+ */
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
+{
+ int autosuspend_delay;
+ u64 expires;
+
+ if (!dev->power.use_autosuspend)
+ return 0;
+
+ autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
+ if (autosuspend_delay < 0)
+ return 0;
+
+ expires = READ_ONCE(dev->power.last_busy);
+ expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+ if (expires > ktime_get_mono_fast_ns())
+ return expires; /* Expires in the future */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
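+
+/*
+ * Editor's illustration (not part of the upstream file): the expiration time
+ * computed here only matters for drivers using the autosuspend helpers, in
+ * the usual pattern:
+ *
+ *	pm_runtime_set_autosuspend_delay(dev, 2000);	(delay in ms)
+ *	pm_runtime_use_autosuspend(dev);
+ *	...
+ *	pm_runtime_mark_last_busy(dev);
+ *	pm_runtime_put_autosuspend(dev);
+ */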
+
+static int dev_memalloc_noio(struct device *dev, void *data)
+{
+ return dev->power.memalloc_noio;
+}
+
+/*
+ * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
+ * @dev: Device to handle.
+ * @enable: True for setting the flag and False for clearing the flag.
+ *
+ * Set the flag for all devices in the path from the device to the
+ * root device in the device tree if @enable is true, otherwise clear
+ * the flag for devices in the path whose siblings don't set the flag.
+ *
+ * The function should only be called by block device or network
+ * device drivers to avoid a deadlock problem during runtime
+ * resume/suspend:
+ *
+ * If a memory allocation with GFP_KERNEL is made inside the runtime
+ * resume/suspend callback of the block device itself or of any of
+ * its ancestors, the allocation may deadlock because it might not
+ * complete until the block device becomes active and the involved
+ * page I/O finishes. This situation was first pointed out by
+ * Alan Stern. Network devices are involved in the iSCSI kind of
+ * situation.
+ *
+ * The lock of dev_hotplug_mutex is held in the function for handling
+ * hotplug race because pm_runtime_set_memalloc_noio() may be called
+ * in async probe().
+ *
+ * The function should be called between device_add() and device_del()
+ * on the affected device(block/network device).
+ */
+void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
+{
+ static DEFINE_MUTEX(dev_hotplug_mutex);
+
+ mutex_lock(&dev_hotplug_mutex);
+ for (;;) {
+ bool enabled;
+
+ /* hold power lock since bitfield is not SMP-safe. */
+ spin_lock_irq(&dev->power.lock);
+ enabled = dev->power.memalloc_noio;
+ dev->power.memalloc_noio = enable;
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+ * No need to update the ancestors any further if the flag
+ * was already set on this device.
+ */
+ if (enabled && enable)
+ break;
+
+ dev = dev->parent;
+
+ /*
+ * Clear the parent's flag only if none of its children
+ * have the flag set, because an ancestor's flag may have
+ * been set by any one of its descendants.
+ */
+ if (!dev || (!enable &&
+ device_for_each_child(dev, NULL,
+ dev_memalloc_noio)))
+ break;
+ }
+ mutex_unlock(&dev_hotplug_mutex);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
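+
+/*
+ * Editor's illustration (not part of the upstream file): a block or network
+ * device driver would typically bracket the device's registered lifetime:
+ *
+ *	device_add(dev);
+ *	pm_runtime_set_memalloc_noio(dev, true);
+ *	...
+ *	pm_runtime_set_memalloc_noio(dev, false);
+ *	device_del(dev);
+ */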
+
+/**
+ * rpm_check_suspend_allowed - Test whether a device may be suspended.
+ * @dev: Device to test.
+ */
+static int rpm_check_suspend_allowed(struct device *dev)
+{
+ int retval = 0;
+
+ if (dev->power.runtime_error)
+ retval = -EINVAL;
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ else if (atomic_read(&dev->power.usage_count))
+ retval = -EAGAIN;
+ else if (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count))
+ retval = -EBUSY;
+
+ /* Pending resume requests take precedence over suspends. */
+ else if ((dev->power.deferred_resume
+ && dev->power.runtime_status == RPM_SUSPENDING)
+ || (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME))
+ retval = -EAGAIN;
+ else if (__dev_pm_qos_resume_latency(dev) == 0)
+ retval = -EPERM;
+ else if (dev->power.runtime_status == RPM_SUSPENDED)
+ retval = 1;
+
+ return retval;
+}
+
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ /* Ignore suppliers with disabled runtime PM. */
+ if (retval < 0 && retval != -EACCES) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ refcount_inc(&link->rpm_active);
+ }
+ return 0;
+}
+
+/**
+ * pm_runtime_release_supplier - Drop references to device link's supplier.
+ * @link: Target device link.
+ *
+ * Drop all runtime PM references associated with @link to its supplier device.
+ */
+void pm_runtime_release_supplier(struct device_link *link)
+{
+ struct device *supplier = link->supplier;
+
+ /*
+ * The additional power.usage_count check is a safety net in case
+ * the rpm_active refcount becomes saturated, in which case
+ * refcount_dec_not_one() would return true forever, but it is not
+ * strictly necessary.
+ */
+ while (refcount_dec_not_one(&link->rpm_active) &&
+ atomic_read(&supplier->power.usage_count) > 0)
+ pm_runtime_put_noidle(supplier);
+}
+
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held()) {
+ pm_runtime_release_supplier(link);
+ if (try_to_suspend)
+ pm_request_idle(link->supplier);
+ }
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ pm_request_idle(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int retval = 0, idx;
+ bool use_links = dev->power.links_count > 0;
+
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+ } else {
+ spin_unlock_irq(&dev->power.lock);
+
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval) {
+ rpm_put_suppliers(dev);
+ goto fail;
+ }
+
+ device_links_read_unlock(idx);
+ }
+ }
+
+ if (cb)
+ retval = cb(dev);
+
+ if (dev->power.irq_safe) {
+ spin_lock(&dev->power.lock);
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ __rpm_put_suppliers(dev, false);
+
+fail:
+ device_links_read_unlock(idx);
+ }
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ return retval;
+}
+
+/**
+ * rpm_idle - Notify device bus type if the device can be suspended.
+ * @dev: Device to notify the bus type about.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be suspended. If
+ * another idle notification has been started earlier, return immediately. If
+ * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_idle(struct device *dev, int rpmflags)
+{
+ int (*callback)(struct device *);
+ int retval;
+
+ trace_rpm_idle_rcuidle(dev, rpmflags);
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval < 0)
+ ; /* Conditions are wrong. */
+
+ /* Idle notifications are allowed only in the RPM_ACTIVE state. */
+ else if (dev->power.runtime_status != RPM_ACTIVE)
+ retval = -EAGAIN;
+
+ /*
+ * Any pending request other than an idle notification takes
+ * precedence over us, except that the timer may be running.
+ */
+ else if (dev->power.request_pending &&
+ dev->power.request > RPM_REQ_IDLE)
+ retval = -EAGAIN;
+
+ /* Act as though RPM_NOWAIT is always set. */
+ else if (dev->power.idle_notification)
+ retval = -EINPROGRESS;
+ if (retval)
+ goto out;
+
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+ callback = RPM_GET_CALLBACK(dev, runtime_idle);
+
+ /* If no callback assume success. */
+ if (!callback || dev->power.no_callbacks)
+ goto out;
+
+ /* Carry out an asynchronous or a synchronous idle notification. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = RPM_REQ_IDLE;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ return 0;
+ }
+
+ dev->power.idle_notification = true;
+
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = callback(dev);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ dev->power.idle_notification = false;
+ wake_up_all(&dev->power.wait_queue);
+
+ out:
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
+}
+
+/**
+ * rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int rpm_callback(int (*cb)(struct device *), struct device *dev)
+{
+ int retval;
+
+ if (dev->power.memalloc_noio) {
+ unsigned int noio_flag;
+
+ /*
+ * A deadlock might occur if a memory allocation with
+ * GFP_KERNEL happens inside the runtime_suspend or
+ * runtime_resume callback of a block device or of one
+ * of its ancestors. A network device might be thought
+ * of as part of an iSCSI block device, so the network
+ * device and its ancestors should be marked as
+ * memalloc_noio too.
+ */
+ noio_flag = memalloc_noio_save();
+ retval = __rpm_callback(cb, dev);
+ memalloc_noio_restore(noio_flag);
+ } else {
+ retval = __rpm_callback(cb, dev);
+ }
+
+ dev->power.runtime_error = retval;
+ return retval != -EACCES ? retval : -EIO;
+}
+
+/**
+ * rpm_suspend - Carry out runtime suspend of given device.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly. If
+ * ->runtime_suspend() succeeds, carry out a deferred resume if one was
+ * requested while the callback was running; otherwise send an idle
+ * notification for the device's parent (provided that both
+ * ignore_children of parent->power and irq_safe of dev->power are unset).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_suspend(struct device *dev, int rpmflags)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int (*callback)(struct device *);
+ struct device *parent = NULL;
+ int retval;
+
+ trace_rpm_suspend_rcuidle(dev, rpmflags);
+
+ repeat:
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval < 0)
+ goto out; /* Conditions are wrong. */
+
+ /* Synchronous suspends are not allowed in the RPM_RESUMING state. */
+ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC))
+ retval = -EAGAIN;
+ if (retval)
+ goto out;
+
+ /* If the autosuspend_delay time hasn't expired yet, reschedule. */
+ if ((rpmflags & RPM_AUTO)
+ && dev->power.runtime_status != RPM_SUSPENDING) {
+ u64 expires = pm_runtime_autosuspend_expiration(dev);
+
+ if (expires != 0) {
+ /* Pending requests need to be canceled. */
+ dev->power.request = RPM_REQ_NONE;
+
+ /*
+ * Optimization: If the timer is already running and is
+ * set to expire at or before the autosuspend delay,
+ * avoid the overhead of resetting it. Just let it
+ * expire; pm_suspend_timer_fn() will take care of the
+ * rest.
+ */
+ if (!(dev->power.timer_expires &&
+ dev->power.timer_expires <= expires)) {
+ /*
+ * We add a slack of 25% to gather wakeups
+ * without sacrificing the granularity.
+ */
+ u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
+ (NSEC_PER_MSEC >> 2);
+
+ dev->power.timer_expires = expires;
+ hrtimer_start_range_ns(&dev->power.suspend_timer,
+ ns_to_ktime(expires),
+ slack,
+ HRTIMER_MODE_ABS);
+ }
+ dev->power.timer_autosuspends = 1;
+ goto out;
+ }
+ }
+
+ /* Other scheduled or pending requests need to be canceled. */
+ pm_runtime_cancel_pending(dev);
+
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
+ DEFINE_WAIT(wait);
+
+ if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
+ /* Wait for the other suspend running in parallel with us. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_SUSPENDING)
+ break;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ goto repeat;
+ }
+
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+ /* Carry out an asynchronous or a synchronous suspend. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = (rpmflags & RPM_AUTO) ?
+ RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ goto out;
+ }
+
+ __update_runtime_status(dev, RPM_SUSPENDING);
+
+ callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+
+ dev_pm_enable_wake_irq_check(dev, true);
+ retval = rpm_callback(callback, dev);
+ if (retval)
+ goto fail;
+
+ dev_pm_enable_wake_irq_complete(dev);
+
+ no_callback:
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->parent) {
+ parent = dev->parent;
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+ if (dev->power.deferred_resume) {
+ dev->power.deferred_resume = false;
+ rpm_resume(dev, 0);
+ retval = -EAGAIN;
+ goto out;
+ }
+
+ if (dev->power.irq_safe)
+ goto out;
+
+ /* Maybe the parent is now able to suspend. */
+ if (parent && !parent->power.ignore_children) {
+ spin_unlock(&dev->power.lock);
+
+ spin_lock(&parent->power.lock);
+ rpm_idle(parent, RPM_ASYNC);
+ spin_unlock(&parent->power.lock);
+
+ spin_lock(&dev->power.lock);
+ }
+ /* Maybe the suppliers are now able to suspend. */
+ if (dev->power.links_count > 0) {
+ spin_unlock_irq(&dev->power.lock);
+
+ rpm_suspend_suppliers(dev);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ out:
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+
+ return retval;
+
+ fail:
+ dev_pm_disable_wake_irq_check(dev, true);
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.deferred_resume = false;
+ wake_up_all(&dev->power.wait_queue);
+
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ goto out;
+}
+
+/**
+ * rpm_resume - Carry out runtime resume of given device.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * Check if the device's runtime PM status allows it to be resumed. Cancel
+ * any scheduled or pending requests. If another resume has been started
+ * earlier, either return immediately or wait for it to finish, depending on the
+ * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
+ * parallel with this function, either tell the other process to resume after
+ * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC
+ * flag is set then queue a resume request; otherwise run the
+ * ->runtime_resume() callback directly. Queue an idle notification for the
+ * device if the resume succeeded.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static int rpm_resume(struct device *dev, int rpmflags)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int (*callback)(struct device *);
+ struct device *parent = NULL;
+ int retval = 0;
+
+ trace_rpm_resume_rcuidle(dev, rpmflags);
+
+ repeat:
+ if (dev->power.runtime_error) {
+ retval = -EINVAL;
+ } else if (dev->power.disable_depth > 0) {
+ if (dev->power.runtime_status == RPM_ACTIVE &&
+ dev->power.last_status == RPM_ACTIVE)
+ retval = 1;
+ else
+ retval = -EACCES;
+ }
+ if (retval)
+ goto out;
+
+ /*
+ * Other scheduled or pending requests need to be canceled. Small
+ * optimization: If an autosuspend timer is running, leave it running
+ * rather than cancelling it now only to restart it again in the near
+ * future.
+ */
+ dev->power.request = RPM_REQ_NONE;
+ if (!dev->power.timer_autosuspends)
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.runtime_status == RPM_ACTIVE) {
+ retval = 1;
+ goto out;
+ }
+
+ if (dev->power.runtime_status == RPM_RESUMING
+ || dev->power.runtime_status == RPM_SUSPENDING) {
+ DEFINE_WAIT(wait);
+
+ if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
+ if (dev->power.runtime_status == RPM_SUSPENDING) {
+ dev->power.deferred_resume = true;
+ if (rpmflags & RPM_NOWAIT)
+ retval = -EINPROGRESS;
+ } else {
+ retval = -EINPROGRESS;
+ }
+ goto out;
+ }
+
+ if (dev->power.irq_safe) {
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
+ /* Wait for the operation carried out in parallel with us. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_RESUMING
+ && dev->power.runtime_status != RPM_SUSPENDING)
+ break;
+
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ goto repeat;
+ }
+
+ /*
+ * See if we can skip waking up the parent. This is safe only if
+ * power.no_callbacks is set, because otherwise we don't know whether
+ * the resume will actually succeed.
+ */
+ if (dev->power.no_callbacks && !parent && dev->parent) {
+ spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
+ if (dev->parent->power.disable_depth > 0
+ || dev->parent->power.ignore_children
+ || dev->parent->power.runtime_status == RPM_ACTIVE) {
+ atomic_inc(&dev->parent->power.child_count);
+ spin_unlock(&dev->parent->power.lock);
+ retval = 1;
+ goto no_callback; /* Assume success. */
+ }
+ spin_unlock(&dev->parent->power.lock);
+ }
+
+ /* Carry out an asynchronous or a synchronous resume. */
+ if (rpmflags & RPM_ASYNC) {
+ dev->power.request = RPM_REQ_RESUME;
+ if (!dev->power.request_pending) {
+ dev->power.request_pending = true;
+ queue_work(pm_wq, &dev->power.work);
+ }
+ retval = 0;
+ goto out;
+ }
+
+ if (!parent && dev->parent) {
+ /*
+ * Increment the parent's usage counter and resume it if
+ * necessary. Not needed if dev is irq-safe; then the
+ * parent is permanently resumed.
+ */
+ parent = dev->parent;
+ if (dev->power.irq_safe)
+ goto skip_parent;
+ spin_unlock(&dev->power.lock);
+
+ pm_runtime_get_noresume(parent);
+
+ spin_lock(&parent->power.lock);
+ /*
+ * Resume the parent if it has runtime PM enabled and not been
+ * set to ignore its children.
+ */
+ if (!parent->power.disable_depth
+ && !parent->power.ignore_children) {
+ rpm_resume(parent, 0);
+ if (parent->power.runtime_status != RPM_ACTIVE)
+ retval = -EBUSY;
+ }
+ spin_unlock(&parent->power.lock);
+
+ spin_lock(&dev->power.lock);
+ if (retval)
+ goto out;
+ goto repeat;
+ }
+ skip_parent:
+
+ if (dev->power.no_callbacks)
+ goto no_callback; /* Assume success. */
+
+ __update_runtime_status(dev, RPM_RESUMING);
+
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+ dev_pm_disable_wake_irq_check(dev, false);
+ retval = rpm_callback(callback, dev);
+ if (retval) {
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ pm_runtime_cancel_pending(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
+ } else {
+ no_callback:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ pm_runtime_mark_last_busy(dev);
+ if (parent)
+ atomic_inc(&parent->power.child_count);
+ }
+ wake_up_all(&dev->power.wait_queue);
+
+ if (retval >= 0)
+ rpm_idle(dev, RPM_ASYNC);
+
+ out:
+ if (parent && !dev->power.irq_safe) {
+ spin_unlock_irq(&dev->power.lock);
+
+ pm_runtime_put(parent);
+
+ spin_lock_irq(&dev->power.lock);
+ }
+
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+
+ return retval;
+}
+
+/**
+ * pm_runtime_work - Universal runtime PM work function.
+ * @work: Work structure used for scheduling the execution of this function.
+ *
+ * Use @work to get the device object the work is to be done for, determine what
+ * is to be done and execute the appropriate runtime PM function.
+ */
+static void pm_runtime_work(struct work_struct *work)
+{
+ struct device *dev = container_of(work, struct device, power.work);
+ enum rpm_request req;
+
+ spin_lock_irq(&dev->power.lock);
+
+ if (!dev->power.request_pending)
+ goto out;
+
+ req = dev->power.request;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.request_pending = false;
+
+ switch (req) {
+ case RPM_REQ_NONE:
+ break;
+ case RPM_REQ_IDLE:
+ rpm_idle(dev, RPM_NOWAIT);
+ break;
+ case RPM_REQ_SUSPEND:
+ rpm_suspend(dev, RPM_NOWAIT);
+ break;
+ case RPM_REQ_AUTOSUSPEND:
+ rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
+ break;
+ case RPM_REQ_RESUME:
+ rpm_resume(dev, RPM_NOWAIT);
+ break;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
+ * @timer: hrtimer used by pm_schedule_suspend().
+ *
+ * Check if the time is right and queue a suspend request.
+ */
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
+{
+ struct device *dev = container_of(timer, struct device, power.suspend_timer);
+ unsigned long flags;
+ u64 expires;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ expires = dev->power.timer_expires;
+ /*
+ * If 'expires' is after the current time, we've been called too
+ * early and there is nothing to do; otherwise the timer has expired
+ * for real and a suspend request can be queued.
+ */
+ if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
+ dev->power.timer_expires = 0;
+ rpm_suspend(dev, dev->power.timer_autosuspends ?
+ (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
+ * @dev: Device to suspend.
+ * @delay: Time to wait before submitting a suspend request, in milliseconds.
+ */
+int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+ unsigned long flags;
+ u64 expires;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!delay) {
+ retval = rpm_suspend(dev, RPM_ASYNC);
+ goto out;
+ }
+
+ retval = rpm_check_suspend_allowed(dev);
+ if (retval)
+ goto out;
+
+ /* Other scheduled or pending requests need to be canceled. */
+ pm_runtime_cancel_pending(dev);
+
+ expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
+ dev->power.timer_expires = expires;
+ dev->power.timer_autosuspends = 0;
+ hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_schedule_suspend);
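+
+/*
+ * Illustrative sketch (not part of this file): a driver that has finished a
+ * burst of I/O can ask for a suspend attempt a little later instead of
+ * suspending immediately. The foo_* names are hypothetical.
+ *
+ *	static void foo_io_done(struct foo_device *foo)
+ *	{
+ *		// Try to suspend the device 100 ms from now; a nonzero return
+ *		// only means the request could not be scheduled.
+ *		if (pm_schedule_suspend(foo->dev, 100))
+ *			dev_dbg(foo->dev, "suspend not scheduled\n");
+ *	}
+ */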
+
+static int rpm_drop_usage_count(struct device *dev)
+{
+ int ret;
+
+ ret = atomic_sub_return(1, &dev->power.usage_count);
+ if (ret >= 0)
+ return ret;
+
+ /*
+ * Because rpm_resume() does not check the usage counter, it will resume
+ * the device even if the usage counter is 0 or negative, so it is
+ * sufficient to increment the usage counter here to reverse the change
+ * made above.
+ */
+ atomic_inc(&dev->power.usage_count);
+ dev_warn(dev, "Runtime PM usage count underflow!\n");
+ return -EINVAL;
+}
+
+/**
+ * __pm_runtime_idle - Entry point for runtime idle operations.
+ * @dev: Device to send idle notification for.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out an idle
+ * notification, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+ unsigned long flags;
+ int retval;
+
+ if (rpmflags & RPM_GET_PUT) {
+ retval = rpm_drop_usage_count(dev);
+ if (retval < 0) {
+ return retval;
+ } else if (retval > 0) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
+ return 0;
+ }
+ }
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_idle(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_idle);
+
+/**
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
+ * @dev: Device to suspend.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, decrement the device's usage count and
+ * return immediately if it is larger than zero (if it becomes negative, log a
+ * warning, increment it, and return an error). Then carry out a suspend,
+ * either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+ unsigned long flags;
+ int retval;
+
+ if (rpmflags & RPM_GET_PUT) {
+ retval = rpm_drop_usage_count(dev);
+ if (retval < 0) {
+ return retval;
+ } else if (retval > 0) {
+ trace_rpm_usage_rcuidle(dev, rpmflags);
+ return 0;
+ }
+ }
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_suspend(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
+
+/**
+ * __pm_runtime_resume - Entry point for runtime resume operations.
+ * @dev: Device to resume.
+ * @rpmflags: Flag bits.
+ *
+ * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
+ * carry out a resume, either synchronous or asynchronous.
+ *
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
+ */
+int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+ unsigned long flags;
+ int retval;
+
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+ dev->power.runtime_status != RPM_ACTIVE);
+
+ if (rpmflags & RPM_GET_PUT)
+ atomic_inc(&dev->power.usage_count);
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ retval = rpm_resume(dev, rpmflags);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_resume);
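+
+/*
+ * Illustrative sketch (not part of this file): the pm_runtime_get_sync() and
+ * pm_runtime_put() helpers from <linux/pm_runtime.h> funnel into the entry
+ * points above. A typical driver I/O path pairs them around hardware access;
+ * the foo_* names are hypothetical.
+ *
+ *	static int foo_do_transfer(struct device *dev)
+ *	{
+ *		int ret;
+ *
+ *		ret = pm_runtime_get_sync(dev);	// resume + usage count++
+ *		if (ret < 0) {
+ *			pm_runtime_put_noidle(dev);	// keep counters balanced
+ *			return ret;
+ *		}
+ *
+ *		ret = foo_hw_transfer(dev);	// hypothetical hardware access
+ *
+ *		pm_runtime_put(dev);	// usage count--, async idle notification
+ *		return ret;
+ *	}
+ */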
+
+/**
+ * pm_runtime_get_if_active - Conditionally bump up device usage counter.
+ * @dev: Device to handle.
+ * @ign_usage_count: Whether or not to look at the current usage counter value.
+ *
+ * Return -EINVAL if runtime PM is disabled for @dev.
+ *
+ * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
+ * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
+ * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
+ * without changing the usage counter.
+ *
+ * If @ign_usage_count is %true, this function can be used to prevent suspending
+ * the device when its runtime PM status is %RPM_ACTIVE.
+ *
+ * If @ign_usage_count is %false, this function can be used to prevent
+ * suspending the device when both its runtime PM status is %RPM_ACTIVE and its
+ * runtime PM usage counter is not zero.
+ *
+ * The caller is responsible for decrementing the runtime PM usage counter of
+ * @dev after this function has returned a positive value for it.
+ */
+int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (dev->power.disable_depth > 0) {
+ retval = -EINVAL;
+ } else if (dev->power.runtime_status != RPM_ACTIVE) {
+ retval = 0;
+ } else if (ign_usage_count) {
+ retval = 1;
+ atomic_inc(&dev->power.usage_count);
+ } else {
+ retval = atomic_inc_not_zero(&dev->power.usage_count);
+ }
+ trace_rpm_usage_rcuidle(dev, 0);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
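+
+/*
+ * Illustrative sketch (not part of this file): touch the hardware only if it
+ * is already powered up, without forcing a resume. The foo_* names are
+ * hypothetical.
+ *
+ *	static void foo_flush_stats(struct device *dev)
+ *	{
+ *		// Returns 1 and bumps the usage counter only if the device is
+ *		// RPM_ACTIVE and already in use; 0 or -EINVAL otherwise.
+ *		if (pm_runtime_get_if_active(dev, false) <= 0)
+ *			return;
+ *
+ *		foo_read_hw_counters(dev);	// hypothetical register reads
+ *
+ *		pm_runtime_put(dev);	// balance the conditional get
+ *	}
+ */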
+
+/**
+ * __pm_runtime_set_status - Set runtime PM status of a device.
+ * @dev: Device to handle.
+ * @status: New runtime PM status of the device.
+ *
+ * If runtime PM of the device is disabled or its power.runtime_error field is
+ * different from zero, the status may be changed either to RPM_ACTIVE, or to
+ * RPM_SUSPENDED, as long as that reflects the actual state of the device.
+ * However, if the device has a parent and the parent is not active, and the
+ * parent's power.ignore_children flag is unset, the device's status cannot be
+ * set to RPM_ACTIVE, so -EBUSY is returned in that case.
+ *
+ * If successful, __pm_runtime_set_status() clears the power.runtime_error field
+ * and the device parent's counter of unsuspended children is modified to
+ * reflect the new status. If the new status is RPM_SUSPENDED, an idle
+ * notification request for the parent is submitted.
+ *
+ * If @dev has any suppliers (as reflected by device links to them), and @status
+ * is RPM_ACTIVE, they will be activated upfront and if the activation of one
+ * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead
+ * of the @status value) and the suppliers will be deactivated on exit. The
+ * error returned by the failing supplier activation will be returned in that
+ * case.
+ */
+int __pm_runtime_set_status(struct device *dev, unsigned int status)
+{
+ struct device *parent = dev->parent;
+ bool notify_parent = false;
+ unsigned long flags;
+ int error = 0;
+
+ if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ /*
+ * Prevent PM-runtime from being enabled for the device or return an
+ * error if it is enabled already and working.
+ */
+ if (dev->power.runtime_error || dev->power.disable_depth)
+ dev->power.disable_depth++;
+ else
+ error = -EAGAIN;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (error)
+ return error;
+
+ /*
+ * If the new status is RPM_ACTIVE, the suppliers can be activated
+ * upfront regardless of the current status, because next time
+ * rpm_put_suppliers() runs, the rpm_active refcounts of the links
+ * involved will be dropped down to one anyway.
+ */
+ if (status == RPM_ACTIVE) {
+ int idx = device_links_read_lock();
+
+ error = rpm_get_suppliers(dev);
+ if (error)
+ status = RPM_SUSPENDED;
+
+ device_links_read_unlock(idx);
+ }
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.runtime_status == status || !parent)
+ goto out_set;
+
+ if (status == RPM_SUSPENDED) {
+ atomic_add_unless(&parent->power.child_count, -1, 0);
+ notify_parent = !parent->power.ignore_children;
+ } else {
+ spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
+
+ /*
+ * It is invalid to put an active child under a parent that is
+ * not active, has runtime PM enabled, and has the
+ * 'power.ignore_children' flag unset.
+ */
+ if (!parent->power.disable_depth
+ && !parent->power.ignore_children
+ && parent->power.runtime_status != RPM_ACTIVE) {
+ dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
+ dev_name(dev),
+ dev_name(parent));
+ error = -EBUSY;
+ } else if (dev->power.runtime_status == RPM_SUSPENDED) {
+ atomic_inc(&parent->power.child_count);
+ }
+
+ spin_unlock(&parent->power.lock);
+
+ if (error) {
+ status = RPM_SUSPENDED;
+ goto out;
+ }
+ }
+
+ out_set:
+ __update_runtime_status(dev, status);
+ if (!error)
+ dev->power.runtime_error = 0;
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (notify_parent)
+ pm_request_idle(parent);
+
+ if (status == RPM_SUSPENDED) {
+ int idx = device_links_read_lock();
+
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
+ pm_runtime_enable(dev);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
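+
+/*
+ * Illustrative sketch (not part of this file): pm_runtime_set_active() and
+ * pm_runtime_set_suspended() from <linux/pm_runtime.h> wrap this function.
+ * A probe path that finds the hardware already powered up typically records
+ * that fact before enabling runtime PM; the foo_* names are hypothetical.
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct device *dev = &pdev->dev;
+ *		int ret;
+ *
+ *		ret = foo_power_up(dev);	// hypothetical power-on
+ *		if (ret)
+ *			return ret;
+ *
+ *		// Tell the PM core the device is already active ...
+ *		ret = pm_runtime_set_active(dev);
+ *		if (ret)
+ *			return ret;
+ *
+ *		// ... and only then let runtime PM manage it.
+ *		pm_runtime_enable(dev);
+ *		return 0;
+ *	}
+ */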
+
+/**
+ * __pm_runtime_barrier - Cancel pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Flush all pending requests for the device from pm_wq and wait for all
+ * runtime PM operations involving the device in progress to complete.
+ *
+ * Should be called under dev->power.lock with interrupts disabled.
+ */
+static void __pm_runtime_barrier(struct device *dev)
+{
+ pm_runtime_deactivate_timer(dev);
+
+ if (dev->power.request_pending) {
+ dev->power.request = RPM_REQ_NONE;
+ spin_unlock_irq(&dev->power.lock);
+
+ cancel_work_sync(&dev->power.work);
+
+ spin_lock_irq(&dev->power.lock);
+ dev->power.request_pending = false;
+ }
+
+ if (dev->power.runtime_status == RPM_SUSPENDING
+ || dev->power.runtime_status == RPM_RESUMING
+ || dev->power.idle_notification) {
+ DEFINE_WAIT(wait);
+
+ /* Suspend, wake-up or idle notification in progress. */
+ for (;;) {
+ prepare_to_wait(&dev->power.wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (dev->power.runtime_status != RPM_SUSPENDING
+ && dev->power.runtime_status != RPM_RESUMING
+ && !dev->power.idle_notification)
+ break;
+ spin_unlock_irq(&dev->power.lock);
+
+ schedule();
+
+ spin_lock_irq(&dev->power.lock);
+ }
+ finish_wait(&dev->power.wait_queue, &wait);
+ }
+}
+
+/**
+ * pm_runtime_barrier - Flush pending requests and wait for completions.
+ * @dev: Device to handle.
+ *
+ * Prevent the device from being suspended by incrementing its usage counter
+ * and, if there's a pending resume request for the device, wake the device up.
+ * Next, make sure that all pending requests for the device have been flushed
+ * from pm_wq and wait for all runtime PM operations involving the device in
+ * progress to complete.
+ *
+ * Return value:
+ * 1, if there was a resume request pending and the device had to be woken up,
+ * 0, otherwise
+ */
+int pm_runtime_barrier(struct device *dev)
+{
+ int retval = 0;
+
+ pm_runtime_get_noresume(dev);
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ rpm_resume(dev, 0);
+ retval = 1;
+ }
+
+ __pm_runtime_barrier(dev);
+
+ spin_unlock_irq(&dev->power.lock);
+ pm_runtime_put_noidle(dev);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_barrier);
+
+/**
+ * __pm_runtime_disable - Disable runtime PM of a device.
+ * @dev: Device to handle.
+ * @check_resume: If set, check if there's a resume request for the device.
+ *
+ * Increment power.disable_depth for the device and if it was zero previously,
+ * cancel all pending runtime PM requests for the device and wait for all
+ * operations in progress to complete. The device can be either active or
+ * suspended after its runtime PM has been disabled.
+ *
+ * If @check_resume is set and there's a resume request pending when
+ * __pm_runtime_disable() is called and power.disable_depth is zero, the
+ * function will wake up the device before disabling its runtime PM.
+ */
+void __pm_runtime_disable(struct device *dev, bool check_resume)
+{
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.disable_depth > 0) {
+ dev->power.disable_depth++;
+ goto out;
+ }
+
+ /*
+ * Wake up the device if there's a resume request pending, because that
+ * means there probably is some I/O to process and disabling runtime PM
+ * shouldn't prevent the device from processing the I/O.
+ */
+ if (check_resume && dev->power.request_pending
+ && dev->power.request == RPM_REQ_RESUME) {
+ /*
+ * Prevent suspends and idle notifications from being carried
+ * out after we have woken up the device.
+ */
+ pm_runtime_get_noresume(dev);
+
+ rpm_resume(dev, 0);
+
+ pm_runtime_put_noidle(dev);
+ }
+
+ /* Update time accounting before disabling PM-runtime. */
+ update_pm_runtime_accounting(dev);
+
+ if (!dev->power.disable_depth++) {
+ __pm_runtime_barrier(dev);
+ dev->power.last_status = dev->power.runtime_status;
+ }
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_disable);
+
+/**
+ * pm_runtime_enable - Enable runtime PM of a device.
+ * @dev: Device to handle.
+ */
+void pm_runtime_enable(struct device *dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (!dev->power.disable_depth) {
+ dev_warn(dev, "Unbalanced %s!\n", __func__);
+ goto out;
+ }
+
+ if (--dev->power.disable_depth > 0)
+ goto out;
+
+ dev->power.last_status = RPM_INVALID;
+ dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+
+ if (dev->power.runtime_status == RPM_SUSPENDED &&
+ !dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)
+ dev_warn(dev, "Enabling runtime PM for inactive device with active children\n");
+
+out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_enable);
+
+static void pm_runtime_disable_action(void *data)
+{
+ pm_runtime_dont_use_autosuspend(data);
+ pm_runtime_disable(data);
+}
+
+/**
+ * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable.
+ * @dev: Device to handle.
+ *
+ * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for
+ * you at driver exit time if needed.
+ */
+int devm_pm_runtime_enable(struct device *dev)
+{
+ pm_runtime_enable(dev);
+
+ return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev);
+}
+EXPORT_SYMBOL_GPL(devm_pm_runtime_enable);
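+
+/*
+ * Illustrative sketch (not part of this file): with the devres variant the
+ * matching pm_runtime_disable() (and pm_runtime_dont_use_autosuspend(), if
+ * autosuspend was used) runs automatically on driver detach, so no explicit
+ * cleanup is needed in remove(). foo_probe() is hypothetical.
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		int ret;
+ *
+ *		ret = devm_pm_runtime_enable(&pdev->dev);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return 0;	// no pm_runtime_disable() needed in remove()
+ *	}
+ */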
+
+/**
+ * pm_runtime_forbid - Block runtime PM of a device.
+ * @dev: Device to handle.
+ *
+ * Increase the device's usage count and clear its power.runtime_auto flag,
+ * so that it cannot be suspended at run time until pm_runtime_allow() is called
+ * for it.
+ */
+void pm_runtime_forbid(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (!dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = false;
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_forbid);
+
+/**
+ * pm_runtime_allow - Unblock runtime PM of a device.
+ * @dev: Device to handle.
+ *
+ * Decrease the device's usage count and set its power.runtime_auto flag.
+ */
+void pm_runtime_allow(struct device *dev)
+{
+ int ret;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.runtime_auto)
+ goto out;
+
+ dev->power.runtime_auto = true;
+ ret = rpm_drop_usage_count(dev);
+ if (ret == 0)
+ rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
+ else if (ret > 0)
+ trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+
+ out:
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_allow);
+
+/**
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
+ * @dev: Device to handle.
+ *
+ * Set the power.no_callbacks flag, which tells the PM core that this
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own. The runtime sysfs attributes will be removed.
+ */
+void pm_runtime_no_callbacks(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.no_callbacks = 1;
+ spin_unlock_irq(&dev->power.lock);
+ if (device_is_registered(dev))
+ rpm_sysfs_remove(dev);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
+
+/**
+ * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
+ * @dev: Device to handle
+ *
+ * Set the power.irq_safe flag, which tells the PM core that the
+ * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
+ * always be invoked with the spinlock held and interrupts disabled. It also
+ * causes the parent's usage counter to be permanently incremented, preventing
+ * the parent from runtime suspending -- otherwise an irq-safe child might have
+ * to wait for a non-irq-safe parent.
+ */
+void pm_runtime_irq_safe(struct device *dev)
+{
+ if (dev->parent)
+ pm_runtime_get_sync(dev->parent);
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 1;
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
+
+/**
+ * update_autosuspend - Handle a change to a device's autosuspend settings.
+ * @dev: Device to handle.
+ * @old_delay: The former autosuspend_delay value.
+ * @old_use: The former use_autosuspend value.
+ *
+ * Prevent runtime suspend if the new delay is negative and use_autosuspend is
+ * set; otherwise allow it. Send an idle notification if suspends are allowed.
+ *
+ * This function must be called under dev->power.lock with interrupts disabled.
+ */
+static void update_autosuspend(struct device *dev, int old_delay, int old_use)
+{
+ int delay = dev->power.autosuspend_delay;
+
+ /* Should runtime suspend be prevented now? */
+ if (dev->power.use_autosuspend && delay < 0) {
+
+ /* If it used to be allowed then prevent it. */
+ if (!old_use || old_delay >= 0) {
+ atomic_inc(&dev->power.usage_count);
+ rpm_resume(dev, 0);
+ } else {
+ trace_rpm_usage_rcuidle(dev, 0);
+ }
+ }
+
+ /* Runtime suspend should be allowed now. */
+ else {
+
+ /* If it used to be prevented then allow it. */
+ if (old_use && old_delay < 0)
+ atomic_dec(&dev->power.usage_count);
+
+ /* Maybe we can autosuspend now. */
+ rpm_idle(dev, RPM_AUTO);
+ }
+}
+
+/**
+ * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
+ * @dev: Device to handle.
+ * @delay: Value of the new delay in milliseconds.
+ *
+ * Set the device's power.autosuspend_delay value. If it changes to negative
+ * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
+ * changes the other way, allow runtime suspends.
+ */
+void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
+{
+ int old_delay, old_use;
+
+ spin_lock_irq(&dev->power.lock);
+ old_delay = dev->power.autosuspend_delay;
+ old_use = dev->power.use_autosuspend;
+ dev->power.autosuspend_delay = delay;
+ update_autosuspend(dev, old_delay, old_use);
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
+
+/**
+ * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
+ * @dev: Device to handle.
+ * @use: New value for use_autosuspend.
+ *
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
+ * suspends as needed.
+ */
+void __pm_runtime_use_autosuspend(struct device *dev, bool use)
+{
+ int old_delay, old_use;
+
+ spin_lock_irq(&dev->power.lock);
+ old_delay = dev->power.autosuspend_delay;
+ old_use = dev->power.use_autosuspend;
+ dev->power.use_autosuspend = use;
+ update_autosuspend(dev, old_delay, old_use);
+ spin_unlock_irq(&dev->power.lock);
+}
+EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
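+
+/*
+ * Illustrative sketch (not part of this file): pm_runtime_use_autosuspend()
+ * wraps the helper above. A driver usually picks a delay once at probe time
+ * and then drops its references with the autosuspend variants; the foo_*
+ * names are hypothetical.
+ *
+ *	static void foo_pm_setup(struct device *dev)
+ *	{
+ *		pm_runtime_set_autosuspend_delay(dev, 50);	// 50 ms idle window
+ *		pm_runtime_use_autosuspend(dev);
+ *		pm_runtime_enable(dev);
+ *	}
+ *
+ *	static void foo_io_complete(struct device *dev)
+ *	{
+ *		pm_runtime_mark_last_busy(dev);		// restart the idle window
+ *		pm_runtime_put_autosuspend(dev);	// drop ref, RPM_AUTO suspend
+ *	}
+ */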
+
+/**
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
+ * @dev: Device object to initialize.
+ */
+void pm_runtime_init(struct device *dev)
+{
+ dev->power.runtime_status = RPM_SUSPENDED;
+ dev->power.last_status = RPM_INVALID;
+ dev->power.idle_notification = false;
+
+ dev->power.disable_depth = 1;
+ atomic_set(&dev->power.usage_count, 0);
+
+ dev->power.runtime_error = 0;
+
+ atomic_set(&dev->power.child_count, 0);
+ pm_suspend_ignore_children(dev, false);
+ dev->power.runtime_auto = true;
+
+ dev->power.request_pending = false;
+ dev->power.request = RPM_REQ_NONE;
+ dev->power.deferred_resume = false;
+ dev->power.needs_force_resume = 0;
+ INIT_WORK(&dev->power.work, pm_runtime_work);
+
+ dev->power.timer_expires = 0;
+ hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dev->power.suspend_timer.function = pm_suspend_timer_fn;
+
+ init_waitqueue_head(&dev->power.wait_queue);
+}
+
+/**
+ * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
+ * @dev: Device object to re-initialize.
+ */
+void pm_runtime_reinit(struct device *dev)
+{
+ if (!pm_runtime_enabled(dev)) {
+ if (dev->power.runtime_status == RPM_ACTIVE)
+ pm_runtime_set_suspended(dev);
+ if (dev->power.irq_safe) {
+ spin_lock_irq(&dev->power.lock);
+ dev->power.irq_safe = 0;
+ spin_unlock_irq(&dev->power.lock);
+ if (dev->parent)
+ pm_runtime_put(dev->parent);
+ }
+ }
+}
+
+/**
+ * pm_runtime_remove - Prepare for removing a device from device hierarchy.
+ * @dev: Device object being removed from device hierarchy.
+ */
+void pm_runtime_remove(struct device *dev)
+{
+ __pm_runtime_disable(dev, false);
+ pm_runtime_reinit(dev);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->flags & DL_FLAG_PM_RUNTIME) {
+ link->supplier_preactivated = true;
+ pm_runtime_get_sync(link->supplier);
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+ device_links_read_lock_held())
+ if (link->supplier_preactivated) {
+ link->supplier_preactivated = false;
+ pm_runtime_put(link->supplier);
+ }
+
+ device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+static void pm_runtime_drop_link_count(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * runtime PM references to it held on behalf of the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ return;
+
+ pm_runtime_drop_link_count(link->consumer);
+ pm_runtime_release_supplier(link);
+ pm_request_idle(link->supplier);
+}
+
+static bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status and
+ * if it is active, invoke its ->runtime_suspend callback to suspend it and
+ * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's
+ * usage and children counters don't indicate that the device was in use before
+ * the system-wide transition under way, decrement its parent's children counter
+ * (if there is a parent). Keep runtime PM disabled to preserve the state
+ * unless we encounter errors.
+ *
+ * Typically this function is invoked from a system suspend callback to make
+ * sure the device is put into a low power state. It should only be used during
+ * system-wide PM transitions to sleep states. It assumes that the analogous
+ * pm_runtime_force_resume() will be used to resume the device.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret;
+
+ pm_runtime_disable(dev);
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+
+ dev_pm_enable_wake_irq_check(dev, true);
+ ret = callback ? callback(dev) : 0;
+ if (ret)
+ goto err;
+
+ dev_pm_enable_wake_irq_complete(dev);
+
+ /*
+ * If the device can stay in suspend after the system-wide transition
+ * to the working state that will follow, drop the children counter of
+ * its parent, but set its status to RPM_SUSPENDED anyway in case this
+ * function will be called again for it in the meantime.
+ */
+ if (pm_runtime_need_not_resume(dev)) {
+ pm_runtime_set_suspended(dev);
+ } else {
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ dev->power.needs_force_resume = 1;
+ }
+
+ return 0;
+
+err:
+ dev_pm_disable_wake_irq_check(dev, true);
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state if needed.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function, we expect the user to have brought the
+ * device into a low power state by a call to pm_runtime_force_suspend(). Here
+ * we reverse those actions and bring the device back to full power, if it is
+ * expected to be used on system resume. Otherwise, the resume is deferred to
+ * be managed via runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
+ goto out;
+
+ /*
+ * The value of the parent's children counter is correct already, so
+ * just update the status of the device.
+ */
+ __update_runtime_status(dev, RPM_ACTIVE);
+
+ callback = RPM_GET_CALLBACK(dev, runtime_resume);
+
+ dev_pm_disable_wake_irq_check(dev, false);
+ ret = callback ? callback(dev) : 0;
+ if (ret) {
+ pm_runtime_set_suspended(dev);
+ dev_pm_enable_wake_irq_check(dev, false);
+ goto out;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+out:
+ dev->power.needs_force_resume = 0;
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
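+
+/*
+ * Illustrative sketch (not part of this file): drivers whose runtime PM
+ * callbacks already do everything needed for system sleep can reuse them
+ * directly via the pair above; the foo_* names are hypothetical.
+ *
+ *	static const struct dev_pm_ops foo_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ *					pm_runtime_force_resume)
+ *		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
+ *	};
+ */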
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
new file mode 100644
index 000000000..a1474fb67
--- /dev/null
+++ b/drivers/base/power/sysfs.c
@@ -0,0 +1,838 @@
+// SPDX-License-Identifier: GPL-2.0
+/* sysfs entries for device PM */
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/export.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeup.h>
+#include <linux/atomic.h>
+#include <linux/jiffies.h>
+#include "power.h"
+
+/*
+ * control - Report/change current runtime PM setting of the device
+ *
+ * Runtime power management of a device can be blocked with the help of
+ * this attribute. All devices have one of the following two values for
+ * the power/control file:
+ *
+ * + "auto\n" to allow the device to be power managed at run time;
+ * + "on\n" to prevent the device from being power managed at run time;
+ *
+ * The default for all devices is "auto", which means that devices may be
+ * subject to automatic power management, depending on their drivers.
+ * Changing this attribute to "on" prevents the driver from power managing
+ * the device at run time. Doing that while the device is suspended causes
+ * it to be woken up.
+ *
+ * wakeup - Report/change current wakeup option for device
+ *
+ * Some devices support "wakeup" events, which are hardware signals
+ * used to activate devices from suspended or low power states. Such
+ * devices have one of three values for the sysfs power/wakeup file:
+ *
+ * + "enabled\n" to issue the events;
+ * + "disabled\n" not to do so; or
+ * + "\n" for temporary or permanent inability to issue wakeup.
+ *
+ * (For example, unconfigured USB devices can't issue wakeups.)
+ *
+ * Familiar examples of devices that can issue wakeup events include
+ * keyboards and mice (both PS2 and USB styles), power buttons, modems,
+ * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
+ * will wake the entire system from a suspend state; others may just
+ * wake up the device (if the system as a whole is already active).
+ * Some wakeup events use normal IRQ lines; others use special
+ * out-of-band signaling.
+ *
+ * It is the responsibility of device drivers to enable (or disable)
+ * wakeup signaling as part of changing device power states, respecting
+ * the policy choices provided through the driver model.
+ *
+ * Devices may not be able to generate wakeup events from all power
+ * states. Also, the events may be ignored in some configurations;
+ * for example, they might need help from other devices that aren't
+ * active, or which may have wakeup disabled. Some drivers rely on
+ * wakeup events internally (unless they are disabled), keeping
+ * their hardware in low power modes whenever they're unused. This
+ * saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * Asynchronous suspend and resume of the device during system-wide power
+ * state transitions can be enabled by writing "enabled" to this file.
+ * Analogously, if "disabled" is written to this file, the device will be
+ * suspended and resumed synchronously.
+ *
+ * All devices have one of the following two values for power/async:
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device;
+ * + "disabled\n" to forbid it;
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers and in
+ * those cases it should be safe to leave the default value.
+ *
+ * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
+ *
+ * Some drivers don't want to carry out a runtime suspend as soon as a
+ * device becomes idle; they want it always to remain idle for some period
+ * of time before suspending it. This period is the autosuspend_delay
+ * value (expressed in milliseconds) and it can be controlled by the user.
+ * If the value is negative then the device will never be runtime
+ * suspended.
+ *
+ * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
+ * value are used only if the driver calls pm_runtime_use_autosuspend().
+ *
+ * wakeup_count - Report the number of wakeup events related to the device
+ */
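+
+/*
+ * Example (paths are illustrative): user space can switch a device to
+ * automatic runtime PM and give it a 200 ms autosuspend delay with:
+ *
+ *	# echo auto > /sys/devices/.../power/control
+ *	# echo 200 > /sys/devices/.../power/autosuspend_delay_ms
+ *
+ * Writing "on" to power/control resumes the device and keeps it powered.
+ */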
+
+const char power_group_name[] = "power";
+EXPORT_SYMBOL_GPL(power_group_name);
+
+static const char ctrl_auto[] = "auto";
+static const char ctrl_on[] = "on";
+
+static ssize_t control_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ dev->power.runtime_auto ? ctrl_auto : ctrl_on);
+}
+
+static ssize_t control_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ device_lock(dev);
+ if (sysfs_streq(buf, ctrl_auto))
+ pm_runtime_allow(dev);
+ else if (sysfs_streq(buf, ctrl_on))
+ pm_runtime_forbid(dev);
+ else
+ n = -EINVAL;
+ device_unlock(dev);
+ return n;
+}
+
+static DEVICE_ATTR_RW(control);
+
+static ssize_t runtime_active_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 tmp = pm_runtime_active_time(dev);
+
+ do_div(tmp, NSEC_PER_MSEC);
+
+ return sysfs_emit(buf, "%llu\n", tmp);
+}
+
+static DEVICE_ATTR_RO(runtime_active_time);
+
+static ssize_t runtime_suspended_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 tmp = pm_runtime_suspended_time(dev);
+
+ do_div(tmp, NSEC_PER_MSEC);
+
+ return sysfs_emit(buf, "%llu\n", tmp);
+}
+
+static DEVICE_ATTR_RO(runtime_suspended_time);
+
+static ssize_t runtime_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (dev->power.runtime_error) {
+ output = "error";
+ } else if (dev->power.disable_depth) {
+ output = "unsupported";
+ } else {
+ switch (dev->power.runtime_status) {
+ case RPM_SUSPENDED:
+ output = "suspended";
+ break;
+ case RPM_SUSPENDING:
+ output = "suspending";
+ break;
+ case RPM_RESUMING:
+ output = "resuming";
+ break;
+ case RPM_ACTIVE:
+ output = "active";
+ break;
+ default:
+ return -EIO;
+ }
+ }
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static DEVICE_ATTR_RO(runtime_status);
+
+static ssize_t autosuspend_delay_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (!dev->power.use_autosuspend)
+ return -EIO;
+
+ return sysfs_emit(buf, "%d\n", dev->power.autosuspend_delay);
+}
+
+static ssize_t autosuspend_delay_ms_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t n)
+{
+ long delay;
+
+ if (!dev->power.use_autosuspend)
+ return -EIO;
+
+ if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
+ return -EINVAL;
+
+ device_lock(dev);
+ pm_runtime_set_autosuspend_delay(dev, delay);
+ device_unlock(dev);
+ return n;
+}
+
+static DEVICE_ATTR_RW(autosuspend_delay_ms);
+
+static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+ if (value == 0)
+ return sysfs_emit(buf, "n/a\n");
+ if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ value = 0;
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (!kstrtos32(buf, 0, &value)) {
+ /*
+ * Prevent users from writing negative or "no constraint" values
+ * directly.
+ */
+ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return -EINVAL;
+
+ if (value == 0)
+ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ } else if (sysfs_streq(buf, "n/a")) {
+ value = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+ value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_resume_latency_us);
+
+static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+ if (value < 0)
+ return sysfs_emit(buf, "%s\n", "auto");
+ if (value == PM_QOS_LATENCY_ANY)
+ return sysfs_emit(buf, "%s\n", "any");
+
+ return sysfs_emit(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value) == 0) {
+ /* Users can't write negative values directly */
+ if (value < 0)
+ return -EINVAL;
+ } else {
+ if (sysfs_streq(buf, "auto"))
+ value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ else if (sysfs_streq(buf, "any"))
+ value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
+ }
+ ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);
+
+static ssize_t pm_qos_no_power_off_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
+ & PM_QOS_FLAG_NO_POWER_OFF));
+}
+
+static ssize_t pm_qos_no_power_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ int ret;
+
+ if (kstrtoint(buf, 0, &ret))
+ return -EINVAL;
+
+ if (ret != 0 && ret != 1)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR_RW(pm_qos_no_power_off);
+
+#ifdef CONFIG_PM_SLEEP
+static const char _enabled[] = "enabled";
+static const char _disabled[] = "disabled";
+
+static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", device_can_wakeup(dev)
+ ? (device_may_wakeup(dev) ? _enabled : _disabled)
+ : "");
+}
+
+static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ if (!device_can_wakeup(dev))
+ return -EINVAL;
+
+ if (sysfs_streq(buf, _enabled))
+ device_set_wakeup_enable(dev, 1);
+ else if (sysfs_streq(buf, _disabled))
+ device_set_wakeup_enable(dev, 0);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR_RW(wakeup);
+
+static ssize_t wakeup_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->wakeup_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_count);
+
+static ssize_t wakeup_active_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->active_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_active_count);
+
+static ssize_t wakeup_abort_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->wakeup_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_abort_count);
+
+static ssize_t wakeup_expire_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long count;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ count = dev->power.wakeup->expire_count;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lu\n", count);
+}
+
+static DEVICE_ATTR_RO(wakeup_expire_count);
+
+static ssize_t wakeup_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int active;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ active = dev->power.wakeup->active;
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%u\n", active);
+}
+
+static DEVICE_ATTR_RO(wakeup_active);
+
+static ssize_t wakeup_total_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->total_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static DEVICE_ATTR_RO(wakeup_total_time_ms);
+
+static ssize_t wakeup_max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->max_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static DEVICE_ATTR_RO(wakeup_max_time_ms);
+
+static ssize_t wakeup_last_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->last_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ if (dev->power.wakeup && dev->power.wakeup->dev)
+ return device_change_owner(dev->power.wakeup->dev, kuid, kgid);
+ return 0;
+}
+
+static DEVICE_ATTR_RO(wakeup_last_time_ms);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ s64 msec;
+ bool enabled = false;
+
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
+ enabled = true;
+ }
+ spin_unlock_irq(&dev->power.lock);
+
+ if (!enabled)
+ return sysfs_emit(buf, "\n");
+ return sysfs_emit(buf, "%lld\n", msec);
+}
+
+static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
+#endif /* CONFIG_PM_AUTOSLEEP */
+#else /* CONFIG_PM_SLEEP */
+static inline int dpm_sysfs_wakeup_change_owner(struct device *dev, kuid_t kuid,
+ kgid_t kgid)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+static ssize_t runtime_usage_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->power.usage_count));
+}
+static DEVICE_ATTR_RO(runtime_usage);
+
+static ssize_t runtime_active_kids_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", dev->power.ignore_children ?
+ 0 : atomic_read(&dev->power.child_count));
+}
+static DEVICE_ATTR_RO(runtime_active_kids);
+
+static ssize_t runtime_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *output;
+
+ if (dev->power.disable_depth && !dev->power.runtime_auto)
+ output = "disabled & forbidden";
+ else if (dev->power.disable_depth)
+ output = "disabled";
+ else if (!dev->power.runtime_auto)
+ output = "forbidden";
+ else
+ output = "enabled";
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+static DEVICE_ATTR_RO(runtime_enabled);
+
+#ifdef CONFIG_PM_SLEEP
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ device_async_suspend_enabled(dev) ?
+ _enabled : _disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ if (sysfs_streq(buf, _enabled))
+ device_enable_async_suspend(dev);
+ else if (sysfs_streq(buf, _disabled))
+ device_disable_async_suspend(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR_RW(async);
+
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
+
+static struct attribute *power_attrs[] = {
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_async.attr,
+#endif
+ &dev_attr_runtime_status.attr,
+ &dev_attr_runtime_usage.attr,
+ &dev_attr_runtime_active_kids.attr,
+ &dev_attr_runtime_enabled.attr,
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
+ NULL,
+};
+static const struct attribute_group pm_attr_group = {
+ .name = power_group_name,
+ .attrs = power_attrs,
+};
+
+static struct attribute *wakeup_attrs[] = {
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_wakeup.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_wakeup_active_count.attr,
+ &dev_attr_wakeup_abort_count.attr,
+ &dev_attr_wakeup_expire_count.attr,
+ &dev_attr_wakeup_active.attr,
+ &dev_attr_wakeup_total_time_ms.attr,
+ &dev_attr_wakeup_max_time_ms.attr,
+ &dev_attr_wakeup_last_time_ms.attr,
+#ifdef CONFIG_PM_AUTOSLEEP
+ &dev_attr_wakeup_prevent_sleep_time_ms.attr,
+#endif
+#endif
+ NULL,
+};
+static const struct attribute_group pm_wakeup_attr_group = {
+ .name = power_group_name,
+ .attrs = wakeup_attrs,
+};
+
+static struct attribute *runtime_attrs[] = {
+#ifndef CONFIG_PM_ADVANCED_DEBUG
+ &dev_attr_runtime_status.attr,
+#endif
+ &dev_attr_control.attr,
+ &dev_attr_runtime_suspended_time.attr,
+ &dev_attr_runtime_active_time.attr,
+ &dev_attr_autosuspend_delay_ms.attr,
+ NULL,
+};
+static const struct attribute_group pm_runtime_attr_group = {
+ .name = power_group_name,
+ .attrs = runtime_attrs,
+};
+
+static struct attribute *pm_qos_resume_latency_attrs[] = {
+ &dev_attr_pm_qos_resume_latency_us.attr,
+ NULL,
+};
+static const struct attribute_group pm_qos_resume_latency_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+ &dev_attr_pm_qos_latency_tolerance_us.attr,
+ NULL,
+};
+static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_latency_tolerance_attrs,
+};
+
+static struct attribute *pm_qos_flags_attrs[] = {
+ &dev_attr_pm_qos_no_power_off.attr,
+ NULL,
+};
+static const struct attribute_group pm_qos_flags_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_flags_attrs,
+};
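+
+/*
+ * All of the attribute groups above share power_group_name, so their
+ * attributes are merged into a single per-device "power" directory in
+ * sysfs, for example (paths illustrative only):
+ *
+ *	/sys/devices/.../power/wakeup_count
+ *	/sys/devices/.../power/autosuspend_delay_ms
+ *	/sys/devices/.../power/pm_qos_resume_latency_us
+ */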
+
+int dpm_sysfs_add(struct device *dev)
+{
+ int rc;
+
+ /* No need to create PM sysfs if explicitly disabled. */
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
+ if (rc)
+ return rc;
+
+ if (!pm_runtime_has_no_callbacks(dev)) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
+ if (rc)
+ goto err_out;
+ }
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ if (rc)
+ goto err_runtime;
+ }
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+ if (rc)
+ goto err_wakeup;
+ }
+ rc = pm_wakeup_source_sysfs_add(dev);
+ if (rc)
+ goto err_latency;
+ return 0;
+
+ err_latency:
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+ err_wakeup:
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+ err_out:
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ return rc;
+}
+
+int dpm_sysfs_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid)
+{
+ int rc;
+
+ if (device_pm_not_required(dev))
+ return 0;
+
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+
+ if (!pm_runtime_has_no_callbacks(dev)) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_runtime_attr_group, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_group_change_owner(&dev->kobj, &pm_wakeup_attr_group,
+ kuid, kgid);
+ if (rc)
+ return rc;
+
+ rc = dpm_sysfs_wakeup_change_owner(dev, kuid, kgid);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.set_latency_tolerance) {
+ rc = sysfs_group_change_owner(
+ &dev->kobj, &pm_qos_latency_tolerance_attr_group, kuid,
+ kgid);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+int wakeup_sysfs_add(struct device *dev)
+{
+ int ret = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+
+ if (!ret)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+
+ return ret;
+}
+
+void wakeup_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+}
+
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
+}
+
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
+}
+
+int pm_qos_sysfs_add_flags(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
+}
+
+void pm_qos_sysfs_remove_flags(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
+}
+
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj,
+ &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
+void rpm_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+}
+
+void dpm_sysfs_remove(struct device *dev)
+{
+ if (device_pm_not_required(dev))
+ return;
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+ dev_pm_qos_constraints_destroy(dev);
+ rpm_sysfs_remove(dev);
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
+}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
new file mode 100644
index 000000000..cd6e55964
--- /dev/null
+++ b/drivers/base/power/trace.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/trace.c
+ *
+ * Copyright (C) 2006 Linus Torvalds
+ *
+ * Trace facility for suspend/resume problems, when none of the
+ * devices may be working.
+ */
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/pm-trace.h>
+#include <linux/export.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+#include <linux/init.h>
+
+#include <linux/mc146818rtc.h>
+
+#include "power.h"
+
+/*
+ * Horrid, horrid, horrid.
+ *
+ * It turns out that the _only_ piece of hardware that actually
+ * keeps its value across a hard boot (and, more importantly, the
+ * POST init sequence) is literally the realtime clock.
+ *
+ * Never mind that an RTC chip has 114 bytes (and often a whole
+ * other bank of an additional 128 bytes) of nice SRAM that is
+ * _designed_ to keep data - the POST will clear it. So we literally
+ * can just use the few bytes of actual time data, which means that
+ * we're really limited.
+ *
+ * It means, for example, that we can't use the seconds at all
+ * (since the time between the hang and the boot might be more
+ * than a minute), and we'd better not depend on the low bits of
+ * the minutes either.
+ *
+ * There are the wday fields etc, but I wouldn't guarantee those
+ * are dependable either. And if the date isn't valid, either the
+ * hw or POST will do strange things.
+ *
+ * So we're left with:
+ * - year: 0-99
+ * - month: 0-11
+ * - day-of-month: 1-28
+ * - hour: 0-23
+ * - min: (0-19)*3 (20 three-minute steps)
+ *
+ * Giving us a total range of 0-16128000 (0xf61800), ie less
+ * than 24 bits of actual data we can save across reboots.
+ *
+ * And if your box can't boot in less than three minutes,
+ * you're screwed.
+ *
+ * Now, almost 24 bits of data is pitifully small, so we need
+ * to be pretty dense if we want to use it for anything nice.
+ * What we do is that instead of saving off nice readable info,
+ * we save off _hashes_ of information that we can hopefully
+ * regenerate after the reboot.
+ *
+ * In particular, this means that we might be unlucky, and hit
+ * a case where we have a hash collision, and we end up not
+ * being able to tell for certain exactly which case happened.
+ * But that's hopefully unlikely.
+ *
+ * What we do is to take the bits we can fit, and split them
+ * into three parts (16*997*1009 = 16095568), and use the values
+ * for:
+ * - 0-15: user-settable
+ * - 0-996: file + line number
+ * - 0-1008: device
+ * - 0-1008: device
+ */
+#define USERHASH (16)
+#define FILEHASH (997)
+#define DEVHASH (1009)
+
+#define DEVSEED (7919)
+
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
+static unsigned int dev_hash_value;
+
+static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
+{
+ unsigned int n = user + USERHASH*(file + FILEHASH*device);
+
+ // June 7th, 2006
+ static struct rtc_time time = {
+ .tm_sec = 0,
+ .tm_min = 0,
+ .tm_hour = 0,
+ .tm_mday = 7,
+ .tm_mon = 5, // June - counting from zero
+ .tm_year = 106,
+ .tm_wday = 3,
+ .tm_yday = 160,
+ .tm_isdst = 1
+ };
+
+ time.tm_year = (n % 100);
+ n /= 100;
+ time.tm_mon = (n % 12);
+ n /= 12;
+ time.tm_mday = (n % 28) + 1;
+ n /= 28;
+ time.tm_hour = (n % 24);
+ n /= 24;
+ time.tm_min = (n % 20) * 3;
+ n /= 20;
+ mc146818_set_time(&time);
+ pm_trace_rtc_abused = true;
+ return n ? -1 : 0;
+}
+
+static unsigned int read_magic_time(void)
+{
+ struct rtc_time time;
+ unsigned int val;
+
+ if (mc146818_get_time(&time, 1000) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+
+ pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
+ val = time.tm_year; /* 100 years */
+ if (val > 100)
+ val -= 100;
+ val += time.tm_mon * 100; /* 12 months */
+ val += (time.tm_mday-1) * 100 * 12; /* 28 month-days */
+ val += time.tm_hour * 100 * 12 * 28; /* 24 hours */
+ val += (time.tm_min / 3) * 100 * 12 * 28 * 24; /* 20 3-minute intervals */
+ return val;
+}
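+
+/*
+ * A worked example of the encoding above, with values chosen purely for
+ * illustration: for user = 5, file = 123 and device = 456, set_magic_time()
+ * computes
+ *
+ *	n = 5 + 16 * (123 + 997 * 456) = 7276085
+ *
+ * which is stored as year 85, month 4, mday 16, hour 0 and min 27.  Ignoring
+ * century handling, read_magic_time() then recovers
+ *
+ *	85 + 4*100 + 15*1200 + 0*33600 + 9*806400 = 7276085
+ *
+ * and late_resume_init() splits that back into 5 (user), 123 (file hash)
+ * and 456 (device hash).
+ */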
+
+/*
+ * This is just the sdbm hash function with a user-supplied
+ * seed and final size parameter.
+ */
+static unsigned int hash_string(unsigned int seed, const char *data, unsigned int mod)
+{
+ unsigned char c;
+ while ((c = *data++) != 0) {
+ seed = (seed << 16) + (seed << 6) - seed + c;
+ }
+ return seed % mod;
+}
+
+void set_trace_device(struct device *dev)
+{
+ dev_hash_value = hash_string(DEVSEED, dev_name(dev), DEVHASH);
+}
+EXPORT_SYMBOL(set_trace_device);
+
+/*
+ * We could just take the "tracedata" index into the .tracedata
+ * section instead. Generating a hash of the data gives us a
+ * chance to work across kernel versions, and perhaps more
+ * importantly it also gives us valid/invalid check (ie we will
+ * likely not give totally bogus reports - if the hash matches,
+ * it's not any guarantee, but it's a high _likelihood_ that
+ * the match is valid).
+ */
+void generate_pm_trace(const void *tracedata, unsigned int user)
+{
+ unsigned short lineno = *(unsigned short *)tracedata;
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int user_hash_value, file_hash_value;
+
+ if (!x86_platform.legacy.rtc)
+ return;
+
+ user_hash_value = user % USERHASH;
+ file_hash_value = hash_string(lineno, file, FILEHASH);
+ set_magic_time(user_hash_value, file_hash_value, dev_hash_value);
+}
+EXPORT_SYMBOL(generate_pm_trace);
+
+extern char __tracedata_start[], __tracedata_end[];
+static int show_file_hash(unsigned int value)
+{
+ int match;
+ char *tracedata;
+
+ match = 0;
+ for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
+ tracedata += 2 + sizeof(unsigned long)) {
+ unsigned short lineno = *(unsigned short *)tracedata;
+ const char *file = *(const char **)(tracedata + 2);
+ unsigned int hash = hash_string(lineno, file, FILEHASH);
+ if (hash != value)
+ continue;
+ pr_info(" hash matches %s:%u\n", file, lineno);
+ match++;
+ }
+ return match;
+}
+
+static int show_dev_hash(unsigned int value)
+{
+ int match = 0;
+ struct list_head *entry;
+
+ device_pm_lock();
+ entry = dpm_list.prev;
+ while (entry != &dpm_list) {
+ struct device * dev = to_device(entry);
+ unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH);
+ if (hash == value) {
+ dev_info(dev, "hash matches\n");
+ match++;
+ }
+ entry = entry->prev;
+ }
+ device_pm_unlock();
+ return match;
+}
+
+static unsigned int hash_value_early_read;
+
+int show_trace_dev_match(char *buf, size_t size)
+{
+ unsigned int value = hash_value_early_read / (USERHASH * FILEHASH);
+ int ret = 0;
+ struct list_head *entry;
+
+ /*
+ * It's possible that multiple devices will match the hash and we can't
+ * tell which is the culprit, so it's best to output them all.
+ */
+ device_pm_lock();
+ entry = dpm_list.prev;
+ while (size && entry != &dpm_list) {
+ struct device *dev = to_device(entry);
+ unsigned int hash = hash_string(DEVSEED, dev_name(dev),
+ DEVHASH);
+ if (hash == value) {
+ int len = snprintf(buf, size, "%s\n",
+ dev_driver_string(dev));
+ if (len > size)
+ len = size;
+ buf += len;
+ ret += len;
+ size -= len;
+ }
+ entry = entry->prev;
+ }
+ device_pm_unlock();
+ return ret;
+}
+
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ if (pm_trace_rtc_abused) {
+ pm_trace_rtc_abused = false;
+ pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+ .notifier_call = pm_trace_notify,
+};
+
+static int __init early_resume_init(void)
+{
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
+ hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
+ return 0;
+}
+
+static int __init late_resume_init(void)
+{
+ unsigned int val = hash_value_early_read;
+ unsigned int user, file, dev;
+
+ if (!x86_platform.legacy.rtc)
+ return 0;
+
+ user = val % USERHASH;
+ val = val / USERHASH;
+ file = val % FILEHASH;
+ val = val / FILEHASH;
+ dev = val /* % DEVHASH */;
+
+ pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
+ show_file_hash(file);
+ show_dev_hash(dev);
+ return 0;
+}
+
+core_initcall(early_resume_init);
+late_initcall(late_resume_init);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
new file mode 100644
index 000000000..afd094dec
--- /dev/null
+++ b/drivers/base/power/wakeirq.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Device wakeirq helper functions */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+
+#include "power.h"
+
+/**
+ * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
+ * @dev: Device entry
+ * @wirq: Wake irq specific data
+ *
+ * Internal function to attach a dedicated wake-up interrupt as a wake IRQ.
+ */
+static int dev_pm_attach_wake_irq(struct device *dev, struct wake_irq *wirq)
+{
+ unsigned long flags;
+
+ if (!dev || !wirq)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ if (dev_WARN_ONCE(dev, dev->power.wakeirq,
+ "wake irq already initialized\n")) {
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return -EEXIST;
+ }
+
+ dev->power.wakeirq = wirq;
+ device_wakeup_attach_irq(dev, wirq);
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+ return 0;
+}
+
+/**
+ * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
+ * @dev: Device entry
+ * @irq: Device IO interrupt
+ *
+ * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
+ * automatically configured for wake-up from suspend based
+ * on the device specific sysfs wakeup entry. Typically called
+ * during driver probe after calling device_init_wakeup().
+ */
+int dev_pm_set_wake_irq(struct device *dev, int irq)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+
+ err = dev_pm_attach_wake_irq(dev, wirq);
+ if (err)
+ kfree(wirq);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
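+
+/*
+ * A minimal usage sketch; the driver, foo_probe() and the IRQ index are
+ * hypothetical and only illustrate the call order described above:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		int irq = platform_get_irq(pdev, 0);
+ *
+ *		if (irq < 0)
+ *			return irq;
+ *
+ *		device_init_wakeup(&pdev->dev, true);
+ *		return dev_pm_set_wake_irq(&pdev->dev, irq);
+ *	}
+ *
+ * The remove path would call dev_pm_clear_wake_irq() and
+ * device_init_wakeup(&pdev->dev, false) in reverse order.
+ */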
+
+/**
+ * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
+ * @dev: Device entry
+ *
+ * Detach a device wake IRQ and free resources.
+ *
+ * Note that it's OK for drivers to call this without calling
+ * dev_pm_set_wake_irq(), as not all driver instances may have
+ * a wake IRQ configured. This avoids adding wake-IRQ-specific
+ * checks into the drivers.
+ */
+void dev_pm_clear_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+ unsigned long flags;
+
+ if (!wirq)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ device_wakeup_detach_irq(dev);
+ dev->power.wakeirq = NULL;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
+ free_irq(wirq->irq, wirq);
+ wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+ }
+ kfree(wirq->name);
+ kfree(wirq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
+
+/**
+ * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
+ * @irq: Device specific dedicated wake-up interrupt
+ * @_wirq: Wake IRQ data
+ *
+ * Some devices have a separate wake-up interrupt in addition to the
+ * device IO interrupt. The wake-up interrupt signals that a device
+ * should be woken up from its idle state. This handler uses device
+ * specific pm_runtime functions to wake the device, and then it's
+ * up to the device to do whatever it needs to. Note that as the
+ * device may need to restore context and start up regulators, we
+ * use a threaded IRQ.
+ *
+ * Also note that we are not resending the lost device interrupts.
+ * We assume that the wake-up interrupt just needs to wake up the
+ * device, and then the device's pm_runtime_resume() can deal with the
+ * situation.
+ */
+static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
+{
+ struct wake_irq *wirq = _wirq;
+ int res;
+
+ /* Maybe abort suspend? */
+ if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+ pm_wakeup_event(wirq->dev, 0);
+
+ return IRQ_HANDLED;
+ }
+
+ /* We don't want RPM_ASYNC or RPM_NOWAIT here */
+ res = pm_runtime_resume(wirq->dev);
+ if (res < 0)
+ dev_warn(wirq->dev,
+ "wake IRQ with no resume: %i\n", res);
+
+ return IRQ_HANDLED;
+}
+
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
+{
+ struct wake_irq *wirq;
+ int err;
+
+ if (irq < 0)
+ return -EINVAL;
+
+ wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
+ if (!wirq)
+ return -ENOMEM;
+
+ wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
+ if (!wirq->name) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ wirq->dev = dev;
+ wirq->irq = irq;
+
+ /* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ /*
+ * Consumer device may need to power up and restore state
+ * so we use a threaded irq.
+ */
+ err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ wirq->name, wirq);
+ if (err)
+ goto err_free_name;
+
+ err = dev_pm_attach_wake_irq(dev, wirq);
+ if (err)
+ goto err_free_irq;
+
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
+
+ return err;
+
+err_free_irq:
+ free_irq(irq, wirq);
+err_free_name:
+ kfree(wirq->name);
+err_free:
+ kfree(wirq);
+
+ return err;
+}
+
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
+
+/**
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ * with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets
+ * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend()
+ * to enable dedicated wake-up interrupt after running the runtime suspend
+ * callback for @dev.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
+
+/**
+ * dev_pm_enable_wake_irq - Enable device wake-up interrupt
+ * @dev: Device
+ *
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
+ *
+ * Note that for runtime_suspend() the wake-up interrupts
+ * should be unconditionally enabled, unlike for suspend(),
+ * where enabling is conditional.
+ */
+void dev_pm_enable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
+ enable_irq(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
+
+/**
+ * dev_pm_disable_wake_irq - Disable device wake-up interrupt
+ * @dev: Device
+ *
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
+ */
+void dev_pm_disable_wake_irq(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
+ disable_irq_nosync(wirq->irq);
+}
+EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
+
+/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables wakeirq conditionally. We need to enable wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable an already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+ bool can_change_status)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+ goto enable;
+ } else if (can_change_status) {
+ wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+ goto enable;
+ }
+
+ return;
+
+enable:
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) {
+ enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) {
+ wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED;
+ disable_irq_nosync(wirq->irq);
+ }
+}
+
+/**
+ * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before
+ * @dev: Device using the wake IRQ
+ *
+ * Enable the wake IRQ conditionally based on its status; mainly used when the
+ * wake IRQ should be enabled after running ->runtime_suspend(), which depends
+ * on WAKE_IRQ_DEDICATED_REVERSE.
+ *
+ * Should be only called from rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_arm_wake_irq - Arm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Sets up the wake-up event conditionally based on the
+ * device_may_wakeup().
+ */
+void dev_pm_arm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev)) {
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ enable_irq(wirq->irq);
+
+ enable_irq_wake(wirq->irq);
+ }
+}
+
+/**
+ * dev_pm_disarm_wake_irq - Disarm device wake-up
+ * @wirq: Device wake-up interrupt
+ *
+ * Clears up the wake-up event conditionally based on the
+ * device_may_wakeup().
+ */
+void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
+{
+ if (!wirq)
+ return;
+
+ if (device_may_wakeup(wirq->dev)) {
+ disable_irq_wake(wirq->irq);
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED))
+ disable_irq_nosync(wirq->irq);
+ }
+}
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
new file mode 100644
index 000000000..7cc0c0cf8
--- /dev/null
+++ b/drivers/base/power/wakeup.c
@@ -0,0 +1,1188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/base/power/wakeup.c - System wakeup events framework
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+#define pr_fmt(fmt) "PM: " fmt
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/sched/signal.h>
+#include <linux/capability.h>
+#include <linux/export.h>
+#include <linux/suspend.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pm_wakeirq.h>
+#include <trace/events/power.h>
+
+#include "power.h"
+
+#ifndef CONFIG_SUSPEND
+suspend_state_t pm_suspend_target_state;
+#define pm_suspend_target_state (PM_SUSPEND_ON)
+#endif
+
+#define list_for_each_entry_rcu_locked(pos, head, member) \
+ list_for_each_entry_rcu(pos, head, member, \
+ srcu_read_lock_held(&wakeup_srcu))
+/*
+ * If set, the suspend/hibernate code will abort transitions to a sleep state
+ * if wakeup events are registered during or immediately before the transition.
+ */
+bool events_check_enabled __read_mostly;
+
+/* First wakeup IRQ seen by the kernel in the last cycle. */
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
+
+/* If greater than 0 and the system is suspending, terminate the suspend. */
+static atomic_t pm_abort_suspend __read_mostly;
+
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS (sizeof(int) * 4)
+#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+ unsigned int comb = atomic_read(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+}
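+
+/*
+ * Example of the packing, assuming a 32-bit int (IN_PROGRESS_BITS == 16):
+ * a combined value of 0x00050002 splits into cnt == 5 registered events and
+ * inpr == 2 events in progress.  Adding MAX_IN_PROGRESS, as
+ * wakeup_source_deactivate() does, yields 0x00060001, i.e. cnt is
+ * incremented and inpr decremented in one atomic operation.
+ */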
+
+/* A preserved old value of the events counter. */
+static unsigned int saved_count;
+
+static DEFINE_RAW_SPINLOCK(events_lock);
+
+static void pm_wakeup_timer_fn(struct timer_list *t);
+
+static LIST_HEAD(wakeup_sources);
+
+static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
+static struct wakeup_source deleted_ws = {
+ .name = "deleted",
+ .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
+};
+
+static DEFINE_IDA(wakeup_ida);
+
+/**
+ * wakeup_source_create - Create a struct wakeup_source object.
+ * @name: Name of the new wakeup source.
+ */
+struct wakeup_source *wakeup_source_create(const char *name)
+{
+ struct wakeup_source *ws;
+ const char *ws_name;
+ int id;
+
+ ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ if (!ws)
+ goto err_ws;
+
+ ws_name = kstrdup_const(name, GFP_KERNEL);
+ if (!ws_name)
+ goto err_name;
+ ws->name = ws_name;
+
+ id = ida_alloc(&wakeup_ida, GFP_KERNEL);
+ if (id < 0)
+ goto err_id;
+ ws->id = id;
+
+ return ws;
+
+err_id:
+ kfree_const(ws->name);
+err_name:
+ kfree(ws);
+err_ws:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_create);
+
+/*
+ * Record the statistics of a wakeup source being deleted into the dummy
+ * "deleted" wakeup source.
+ */
+static void wakeup_source_record(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&deleted_ws.lock, flags);
+
+ if (ws->event_count) {
+ deleted_ws.total_time =
+ ktime_add(deleted_ws.total_time, ws->total_time);
+ deleted_ws.prevent_sleep_time =
+ ktime_add(deleted_ws.prevent_sleep_time,
+ ws->prevent_sleep_time);
+ deleted_ws.max_time =
+ ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
+ deleted_ws.max_time : ws->max_time;
+ deleted_ws.event_count += ws->event_count;
+ deleted_ws.active_count += ws->active_count;
+ deleted_ws.relax_count += ws->relax_count;
+ deleted_ws.expire_count += ws->expire_count;
+ deleted_ws.wakeup_count += ws->wakeup_count;
+ }
+
+ spin_unlock_irqrestore(&deleted_ws.lock, flags);
+}
+
+static void wakeup_source_free(struct wakeup_source *ws)
+{
+ ida_free(&wakeup_ida, ws->id);
+ kfree_const(ws->name);
+ kfree(ws);
+}
+
+/**
+ * wakeup_source_destroy - Destroy a struct wakeup_source object.
+ * @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
+ */
+void wakeup_source_destroy(struct wakeup_source *ws)
+{
+ if (!ws)
+ return;
+
+ __pm_relax(ws);
+ wakeup_source_record(ws);
+ wakeup_source_free(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_destroy);
+
+/**
+ * wakeup_source_add - Add given object to the list of wakeup sources.
+ * @ws: Wakeup source object to add to the list.
+ */
+void wakeup_source_add(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!ws))
+ return;
+
+ spin_lock_init(&ws->lock);
+ timer_setup(&ws->timer, pm_wakeup_timer_fn, 0);
+ ws->active = false;
+
+ raw_spin_lock_irqsave(&events_lock, flags);
+ list_add_rcu(&ws->entry, &wakeup_sources);
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_add);
+
+/**
+ * wakeup_source_remove - Remove given object from the wakeup sources list.
+ * @ws: Wakeup source object to remove from the list.
+ */
+void wakeup_source_remove(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (WARN_ON(!ws))
+ return;
+
+ raw_spin_lock_irqsave(&events_lock, flags);
+ list_del_rcu(&ws->entry);
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+ synchronize_srcu(&wakeup_srcu);
+
+ del_timer_sync(&ws->timer);
+ /*
+ * Clear timer.function to make wakeup_source_not_registered() treat
+ * this wakeup source as not registered.
+ */
+ ws->timer.function = NULL;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_remove);
+
+/**
+ * wakeup_source_register - Create wakeup source and add it to the list.
+ * @dev: Device this wakeup source is associated with (or NULL if virtual).
+ * @name: Name of the wakeup source to register.
+ */
+struct wakeup_source *wakeup_source_register(struct device *dev,
+ const char *name)
+{
+ struct wakeup_source *ws;
+ int ret;
+
+ ws = wakeup_source_create(name);
+ if (ws) {
+ if (!dev || device_is_registered(dev)) {
+ ret = wakeup_source_sysfs_add(dev, ws);
+ if (ret) {
+ wakeup_source_free(ws);
+ return NULL;
+ }
+ }
+ wakeup_source_add(ws);
+ }
+ return ws;
+}
+EXPORT_SYMBOL_GPL(wakeup_source_register);
+
+/**
+ * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
+ * @ws: Wakeup source object to unregister.
+ */
+void wakeup_source_unregister(struct wakeup_source *ws)
+{
+ if (ws) {
+ wakeup_source_remove(ws);
+ if (ws->dev)
+ wakeup_source_sysfs_remove(ws);
+
+ wakeup_source_destroy(ws);
+ }
+}
+EXPORT_SYMBOL_GPL(wakeup_source_unregister);
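+
+/*
+ * A minimal usage sketch for a virtual (device-less) wakeup source; the
+ * name "foo_events" is illustrative only:
+ *
+ *	struct wakeup_source *ws = wakeup_source_register(NULL, "foo_events");
+ *
+ *	if (ws) {
+ *		__pm_stay_awake(ws);	// open a "no suspend" period
+ *		...			// process the wakeup event
+ *		__pm_relax(ws);		// close the period again
+ *		wakeup_source_unregister(ws);
+ *	}
+ */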
+
+/**
+ * wakeup_sources_read_lock - Lock wakeup source list for read.
+ *
+ * Returns an SRCU read-side lock index for the wakeup_srcu domain.
+ * This index must be passed to the matching wakeup_sources_read_unlock().
+ */
+int wakeup_sources_read_lock(void)
+{
+ return srcu_read_lock(&wakeup_srcu);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_lock);
+
+/**
+ * wakeup_sources_read_unlock - Unlock wakeup source list.
+ * @idx: return value from corresponding wakeup_sources_read_lock()
+ */
+void wakeup_sources_read_unlock(int idx)
+{
+ srcu_read_unlock(&wakeup_srcu, idx);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_read_unlock);
+
+/**
+ * wakeup_sources_walk_start - Begin a walk on wakeup source list
+ *
+ * Returns first object of the list of wakeup sources.
+ *
+ * Note that to be safe, the wakeup sources list needs to be locked by calling
+ * wakeup_sources_read_lock() for this.
+ */
+struct wakeup_source *wakeup_sources_walk_start(void)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_entry_rcu(ws_head->next, struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_start);
+
+/**
+ * wakeup_sources_walk_next - Get next wakeup source from the list
+ * @ws: Previous wakeup source object
+ *
+ * Note that to be safe, the wakeup sources list needs to be locked by calling
+ * wakeup_sources_read_lock() for this.
+ */
+struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws)
+{
+ struct list_head *ws_head = &wakeup_sources;
+
+ return list_next_or_null_rcu(ws_head, &ws->entry,
+ struct wakeup_source, entry);
+}
+EXPORT_SYMBOL_GPL(wakeup_sources_walk_next);
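+
+/*
+ * A sketch of a read-locked walk over the list using the helpers above;
+ * illustrative only and assumes at least one wakeup source is registered:
+ *
+ *	struct wakeup_source *ws;
+ *	int idx = wakeup_sources_read_lock();
+ *
+ *	for (ws = wakeup_sources_walk_start(); ws;
+ *	     ws = wakeup_sources_walk_next(ws))
+ *		pr_info("wakeup source: %s\n", ws->name);
+ *
+ *	wakeup_sources_read_unlock(idx);
+ */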
+
+/**
+ * device_wakeup_attach - Attach a wakeup source object to a device object.
+ * @dev: Device to handle.
+ * @ws: Wakeup source object to attach to @dev.
+ *
+ * This causes @dev to be treated as a wakeup device.
+ */
+static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
+{
+ spin_lock_irq(&dev->power.lock);
+ if (dev->power.wakeup) {
+ spin_unlock_irq(&dev->power.lock);
+ return -EEXIST;
+ }
+ dev->power.wakeup = ws;
+ if (dev->power.wakeirq)
+ device_wakeup_attach_irq(dev, dev->power.wakeirq);
+ spin_unlock_irq(&dev->power.lock);
+ return 0;
+}
+
+/**
+ * device_wakeup_enable - Enable given device to be a wakeup source.
+ * @dev: Device to handle.
+ *
+ * Create a wakeup source object, register it and attach it to @dev.
+ */
+int device_wakeup_enable(struct device *dev)
+{
+ struct wakeup_source *ws;
+ int ret;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ if (pm_suspend_target_state != PM_SUSPEND_ON)
+ dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__);
+
+ ws = wakeup_source_register(dev, dev_name(dev));
+ if (!ws)
+ return -ENOMEM;
+
+ ret = device_wakeup_attach(dev, ws);
+ if (ret)
+ wakeup_source_unregister(ws);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_enable);
+
+/**
+ * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
+ * @dev: Device to handle
+ * @wakeirq: Device specific wakeirq entry
+ *
+ * Attach a device wakeirq to the wakeup source so the device
+ * wake IRQ can be configured automatically for suspend and
+ * resume.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_attach_irq(struct device *dev,
+ struct wake_irq *wakeirq)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (!ws)
+ return;
+
+ if (ws->wakeirq)
+ dev_err(dev, "Leftover wakeup IRQ found, overriding\n");
+
+ ws->wakeirq = wakeirq;
+}
+
+/**
+ * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
+ * @dev: Device to handle
+ *
+ * Removes a device wakeirq from the wakeup source.
+ *
+ * Call under the device's power.lock lock.
+ */
+void device_wakeup_detach_irq(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ ws = dev->power.wakeup;
+ if (ws)
+ ws->wakeirq = NULL;
+}
+
+/**
+ * device_wakeup_arm_wake_irqs - Arm the wakeirqs of all registered wakeup sources.
+ *
+ * Iterates over the list of device wakeirqs to arm them.
+ */
+void device_wakeup_arm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+ int srcuidx;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
+ dev_pm_arm_wake_irq(ws->wakeirq);
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+
+/**
+ * device_wakeup_disarm_wake_irqs - Disarm the wakeirqs of all registered wakeup sources.
+ *
+ * Iterates over the list of device wakeirqs to disarm them.
+ */
+void device_wakeup_disarm_wake_irqs(void)
+{
+ struct wakeup_source *ws;
+ int srcuidx;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry)
+ dev_pm_disarm_wake_irq(ws->wakeirq);
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+
+/**
+ * device_wakeup_detach - Detach a device's wakeup source object from it.
+ * @dev: Device to detach the wakeup source object from.
+ *
+ * After it returns, @dev will not be treated as a wakeup device any more.
+ */
+static struct wakeup_source *device_wakeup_detach(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ spin_lock_irq(&dev->power.lock);
+ ws = dev->power.wakeup;
+ dev->power.wakeup = NULL;
+ spin_unlock_irq(&dev->power.lock);
+ return ws;
+}
+
+/**
+ * device_wakeup_disable - Do not regard a device as a wakeup source any more.
+ * @dev: Device to handle.
+ *
+ * Detach the @dev's wakeup source object from it, unregister this wakeup source
+ * object and destroy it.
+ */
+int device_wakeup_disable(struct device *dev)
+{
+ struct wakeup_source *ws;
+
+ if (!dev || !dev->power.can_wakeup)
+ return -EINVAL;
+
+ ws = device_wakeup_detach(dev);
+ wakeup_source_unregister(ws);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_wakeup_disable);
+
+/**
+ * device_set_wakeup_capable - Set/reset device wakeup capability flag.
+ * @dev: Device to handle.
+ * @capable: Whether or not @dev is capable of waking up the system from sleep.
+ *
+ * If @capable is set, set the @dev's power.can_wakeup flag and add its
+ * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
+ * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
+ *
+ * This function may sleep and it can't be called from any context where
+ * sleeping is not allowed.
+ */
+void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+ if (!!dev->power.can_wakeup == !!capable)
+ return;
+
+ dev->power.can_wakeup = capable;
+ if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
+ if (capable) {
+ int ret = wakeup_sysfs_add(dev);
+
+ if (ret)
+ dev_info(dev, "Wakeup sysfs attributes not added\n");
+ } else {
+ wakeup_sysfs_remove(dev);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+
+/**
+ * device_set_wakeup_enable - Enable or disable a device to wake up the system.
+ * @dev: Device to handle.
+ * @enable: enable/disable flag
+ */
+int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+ return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
+
+/**
+ * wakeup_source_not_registered - validate the given wakeup source.
+ * @ws: Wakeup source to be validated.
+ */
+static bool wakeup_source_not_registered(struct wakeup_source *ws)
+{
+ /*
+ * Use timer struct to check if the given source is initialized
+ * by wakeup_source_add.
+ */
+ return ws->timer.function != pm_wakeup_timer_fn;
+}
+
+/*
+ * The functions below use the observation that each wakeup event starts a
+ * period in which the system should not be suspended. The moment this period
+ * will end depends on how the wakeup event is going to be processed after being
+ * detected and all of the possible cases can be divided into two distinct
+ * groups.
+ *
+ * First, a wakeup event may be detected by the same functional unit that will
+ * carry out the entire processing of it and possibly will pass it to user space
+ * for further processing. In that case the functional unit that has detected
+ * the event may later "close" the "no suspend" period associated with it
+ * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
+ * pm_relax(), balanced with each other, is supposed to be used in such
+ * situations.
+ *
+ * Second, a wakeup event may be detected by one functional unit and processed
+ * by another one. In that case the unit that has detected it cannot really
+ * "close" the "no suspend" period associated with it, unless it knows in
+ * advance what's going to happen to the event during processing. This
+ * knowledge, however, may not be available to it, so it can simply specify time
+ * to wait before the system can be suspended and pass it as the second
+ * argument of pm_wakeup_event().
+ *
+ * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
+ * "no suspend" period will be ended either by the pm_relax(), or by the timer
+ * function executed when the timer expires, whichever comes first.
+ */
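+
+/*
+ * A sketch of the first pattern; the interrupt handler and the foo_* names
+ * are hypothetical:
+ *
+ *	static irqreturn_t foo_irq_handler(int irq, void *data)
+ *	{
+ *		struct foo_device *foo = data;
+ *
+ *		pm_stay_awake(foo->dev);
+ *		foo_process_event(foo);		// hypothetical processing
+ *		pm_relax(foo->dev);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ * For the second pattern the handler would instead call
+ * pm_wakeup_event(foo->dev, 100) and let the 100 ms timeout (or a later
+ * pm_relax()) end the "no suspend" period.
+ */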
+
+/**
+ * wakeup_source_activate - Mark given wakeup source as active.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and, if @ws has just been activated, notify the PM
+ * core of the event by incrementing the counter of the wakeup events being
+ * processed.
+ */
+static void wakeup_source_activate(struct wakeup_source *ws)
+{
+ unsigned int cec;
+
+ if (WARN_ONCE(wakeup_source_not_registered(ws),
+ "unregistered wakeup source\n"))
+ return;
+
+ ws->active = true;
+ ws->active_count++;
+ ws->last_time = ktime_get();
+ if (ws->autosleep_enabled)
+ ws->start_prevent_time = ws->last_time;
+
+ /* Increment the counter of events in progress. */
+ cec = atomic_inc_return(&combined_event_count);
+
+ trace_wakeup_source_activate(ws->name, cec);
+}
+
+/**
+ * wakeup_source_report_event - Report wakeup event using the given source.
+ * @ws: Wakeup source to report the event for.
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ */
+static void wakeup_source_report_event(struct wakeup_source *ws, bool hard)
+{
+ ws->event_count++;
+ /* This is racy, but the counter is approximate anyway. */
+ if (events_check_enabled)
+ ws->wakeup_count++;
+
+ if (!ws->active)
+ wakeup_source_activate(ws);
+
+ if (hard)
+ pm_system_wakeup();
+}
+
+/**
+ * __pm_stay_awake - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void __pm_stay_awake(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ wakeup_source_report_event(ws, false);
+ del_timer(&ws->timer);
+ ws->timer_expires = 0;
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_stay_awake);
+
+/**
+ * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
+ * @dev: Device the wakeup event is related to.
+ *
+ * Notify the PM core of a wakeup event (signaled by @dev) by calling
+ * __pm_stay_awake for the @dev's wakeup source object.
+ *
+ * Call this function after detecting of a wakeup event if pm_relax() is going
+ * to be called directly after processing the event (and possibly passing it to
+ * user space for further processing).
+ */
+void pm_stay_awake(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_stay_awake(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_stay_awake);
+
+#ifdef CONFIG_PM_AUTOSLEEP
+static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
+{
+ ktime_t delta = ktime_sub(now, ws->start_prevent_time);
+ ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
+}
+#else
+static inline void update_prevent_sleep_time(struct wakeup_source *ws,
+ ktime_t now) {}
+#endif
+
+/**
+ * wakeup_source_deactivate - Mark given wakeup source as inactive.
+ * @ws: Wakeup source to handle.
+ *
+ * Update the @ws' statistics and notify the PM core that the wakeup source has
+ * become inactive by decrementing the counter of wakeup events being processed
+ * and incrementing the counter of registered wakeup events.
+ */
+static void wakeup_source_deactivate(struct wakeup_source *ws)
+{
+ unsigned int cnt, inpr, cec;
+ ktime_t duration;
+ ktime_t now;
+
+ ws->relax_count++;
+ /*
+ * __pm_relax() may be called directly or from a timer function.
+ * If it is called directly right after the timer function has been
+ * started, but before the timer function calls __pm_relax(), it is
+ * possible that __pm_stay_awake() will be called in the meantime and
+ * will set ws->active. Then, ws->active may be cleared immediately
+ * by the __pm_relax() called from the timer function, but in such a
+ * case ws->relax_count will be different from ws->active_count.
+ */
+ if (ws->relax_count != ws->active_count) {
+ ws->relax_count--;
+ return;
+ }
+
+ ws->active = false;
+
+ now = ktime_get();
+ duration = ktime_sub(now, ws->last_time);
+ ws->total_time = ktime_add(ws->total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
+ ws->max_time = duration;
+
+ ws->last_time = now;
+ del_timer(&ws->timer);
+ ws->timer_expires = 0;
+
+ if (ws->autosleep_enabled)
+ update_prevent_sleep_time(ws, now);
+
+ /*
+ * Increment the counter of registered wakeup events and decrement the
+ * counter of wakeup events in progress simultaneously.
+ */
+ cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
+ trace_wakeup_source_deactivate(ws->name, cec);
+
+ split_counters(&cnt, &inpr);
+ if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
+ wake_up(&wakeup_count_wait_queue);
+}
+
+/**
+ * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @ws: Wakeup source object associated with the source of the event.
+ *
+ * Call this function for wakeup events whose processing started with calling
+ * __pm_stay_awake().
+ *
+ * It is safe to call it from interrupt context.
+ */
+void __pm_relax(struct wakeup_source *ws)
+{
+ unsigned long flags;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+ if (ws->active)
+ wakeup_source_deactivate(ws);
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__pm_relax);
+
+/**
+ * pm_relax - Notify the PM core that processing of a wakeup event has ended.
+ * @dev: Device that signaled the event.
+ *
+ * Execute __pm_relax() for the @dev's wakeup source object.
+ */
+void pm_relax(struct device *dev)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ __pm_relax(dev->power.wakeup);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_relax);
+
+/**
+ * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
+ * @t: Timer of the wakeup source whose timeout has expired.
+ *
+ * Call wakeup_source_deactivate() for the wakeup source associated with
+ * timer @t if it is currently active, its timer has not been canceled and
+ * the expiration time of the timer is not in the future.
+ */
+static void pm_wakeup_timer_fn(struct timer_list *t)
+{
+ struct wakeup_source *ws = from_timer(ws, t, timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ if (ws->active && ws->timer_expires
+ && time_after_eq(jiffies, ws->timer_expires)) {
+ wakeup_source_deactivate(ws);
+ ws->expire_count++;
+ }
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+
+/**
+ * pm_wakeup_ws_event - Notify the PM core of a wakeup event.
+ * @ws: Wakeup source object associated with the event source.
+ * @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ *
+ * Notify the PM core of a wakeup event whose source is @ws that will take
+ * approximately @msec milliseconds to be processed by the kernel. If @ws is
+ * not active, activate it. If @msec is nonzero, set up the @ws' timer to
+ * execute pm_wakeup_timer_fn() in future.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard)
+{
+ unsigned long flags;
+ unsigned long expires;
+
+ if (!ws)
+ return;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ wakeup_source_report_event(ws, hard);
+
+ if (!msec) {
+ wakeup_source_deactivate(ws);
+ goto unlock;
+ }
+
+ expires = jiffies + msecs_to_jiffies(msec);
+ if (!expires)
+ expires = 1;
+
+ if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
+ mod_timer(&ws->timer, expires);
+ ws->timer_expires = expires;
+ }
+
+ unlock:
+ spin_unlock_irqrestore(&ws->lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_ws_event);
+
+/**
+ * pm_wakeup_dev_event - Notify the PM core of a wakeup event.
+ * @dev: Device the wakeup event is related to.
+ * @msec: Anticipated event processing time (in milliseconds).
+ * @hard: If set, abort suspends in progress and wake up from suspend-to-idle.
+ *
+ * Call pm_wakeup_ws_event() for the @dev's wakeup source object.
+ */
+void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
+{
+ unsigned long flags;
+
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ pm_wakeup_ws_event(dev->power.wakeup, msec, hard);
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
+
+void pm_print_active_wakeup_sources(void)
+{
+ struct wakeup_source *ws;
+ int srcuidx, active = 0;
+ struct wakeup_source *last_activity_ws = NULL;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
+ if (ws->active) {
+ pm_pr_dbg("active wakeup source: %s\n", ws->name);
+ active = 1;
+ } else if (!active &&
+ (!last_activity_ws ||
+ ktime_to_ns(ws->last_time) >
+ ktime_to_ns(last_activity_ws->last_time))) {
+ last_activity_ws = ws;
+ }
+ }
+
+ if (!active && last_activity_ws)
+ pm_pr_dbg("last active wakeup source: %s\n",
+ last_activity_ws->name);
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
+
+/**
+ * pm_wakeup_pending - Check if power transition in progress should be aborted.
+ *
+ * Compare the current number of registered wakeup events with its preserved
+ * value from the past and return true if new wakeup events have been registered
+ * since the old value was stored. Also return true if the current number of
+ * wakeup events being processed is different from zero.
+ */
+bool pm_wakeup_pending(void)
+{
+ unsigned long flags;
+ bool ret = false;
+
+ raw_spin_lock_irqsave(&events_lock, flags);
+ if (events_check_enabled) {
+ unsigned int cnt, inpr;
+
+ split_counters(&cnt, &inpr);
+ ret = (cnt != saved_count || inpr > 0);
+ events_check_enabled = !ret;
+ }
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+
+ if (ret) {
+ pm_pr_dbg("Wakeup pending, aborting suspend\n");
+ pm_print_active_wakeup_sources();
+ }
+
+ return ret || atomic_read(&pm_abort_suspend) > 0;
+}
+EXPORT_SYMBOL_GPL(pm_wakeup_pending);
+
+void pm_system_wakeup(void)
+{
+ atomic_inc(&pm_abort_suspend);
+ s2idle_wake();
+}
+EXPORT_SYMBOL_GPL(pm_system_wakeup);
+
+void pm_system_cancel_wakeup(void)
+{
+ atomic_dec_if_positive(&pm_abort_suspend);
+}
+
+void pm_wakeup_clear(unsigned int irq_number)
+{
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
+ atomic_set(&pm_abort_suspend, 0);
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ pm_pr_dbg("Triggering wakeup from IRQ %d\n", irq_number);
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
+ pm_system_wakeup();
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
+}
+
+/**
+ * pm_get_wakeup_count - Read the number of registered wakeup events.
+ * @count: Address to store the value at.
+ * @block: Whether or not to block.
+ *
+ * Store the number of registered wakeup events at the address in @count. If
+ * @block is set, block until the current number of wakeup events being
+ * processed is zero.
+ *
+ * Return 'false' if the current number of wakeup events being processed is
+ * nonzero. Otherwise return 'true'.
+ */
+bool pm_get_wakeup_count(unsigned int *count, bool block)
+{
+ unsigned int cnt, inpr;
+
+ if (block) {
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(&wakeup_count_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+ split_counters(&cnt, &inpr);
+ if (inpr == 0 || signal_pending(current))
+ break;
+ pm_print_active_wakeup_sources();
+ schedule();
+ }
+ finish_wait(&wakeup_count_wait_queue, &wait);
+ }
+
+ split_counters(&cnt, &inpr);
+ *count = cnt;
+ return !inpr;
+}
+
+/**
+ * pm_save_wakeup_count - Save the current number of registered wakeup events.
+ * @count: Value to compare with the current number of registered wakeup events.
+ *
+ * If @count is equal to the current number of registered wakeup events and the
+ * current number of wakeup events being processed is zero, store @count as the
+ * old number of registered wakeup events for pm_wakeup_pending(), enable
+ * wakeup event detection and return 'true'. Otherwise disable wakeup event
+ * detection and return 'false'.
+ */
+bool pm_save_wakeup_count(unsigned int count)
+{
+ unsigned int cnt, inpr;
+ unsigned long flags;
+
+ events_check_enabled = false;
+ raw_spin_lock_irqsave(&events_lock, flags);
+ split_counters(&cnt, &inpr);
+ if (cnt == count && inpr == 0) {
+ saved_count = count;
+ events_check_enabled = true;
+ }
+ raw_spin_unlock_irqrestore(&events_lock, flags);
+ return events_check_enabled;
+}
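+
+/*
+ * The two helpers above back /sys/power/wakeup_count.  A user-space sketch
+ * of the race-free suspend protocol (illustrative, error handling omitted):
+ *
+ *	int fd = open("/sys/power/wakeup_count", O_RDWR);
+ *	char buf[32];
+ *	ssize_t n = read(fd, buf, sizeof(buf) - 1);
+ *
+ *	// The write fails if new wakeup events were registered meanwhile.
+ *	if (n > 0 && write(fd, buf, n) == n)
+ *		;	// now safe to write "mem" to /sys/power/state
+ */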
+
+#ifdef CONFIG_PM_AUTOSLEEP
+/**
+ * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
+ * @set: Whether to set or to clear the autosleep_enabled flags.
+ */
+void pm_wakep_autosleep_enabled(bool set)
+{
+ struct wakeup_source *ws;
+ ktime_t now = ktime_get();
+ int srcuidx;
+
+ srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
+ spin_lock_irq(&ws->lock);
+ if (ws->autosleep_enabled != set) {
+ ws->autosleep_enabled = set;
+ if (ws->active) {
+ if (set)
+ ws->start_prevent_time = now;
+ else
+ update_prevent_sleep_time(ws, now);
+ }
+ }
+ spin_unlock_irq(&ws->lock);
+ }
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
+}
+#endif /* CONFIG_PM_AUTOSLEEP */
+
+/**
+ * print_wakeup_source_stats - Print wakeup source statistics information.
+ * @m: seq_file to print the statistics into.
+ * @ws: Wakeup source object to print the statistics for.
+ */
+static int print_wakeup_source_stats(struct seq_file *m,
+ struct wakeup_source *ws)
+{
+ unsigned long flags;
+ ktime_t total_time;
+ ktime_t max_time;
+ unsigned long active_count;
+ ktime_t active_time;
+ ktime_t prevent_sleep_time;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ total_time = ws->total_time;
+ max_time = ws->max_time;
+ prevent_sleep_time = ws->prevent_sleep_time;
+ active_count = ws->active_count;
+ if (ws->active) {
+ ktime_t now = ktime_get();
+
+ active_time = ktime_sub(now, ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ if (active_time > max_time)
+ max_time = active_time;
+
+ if (ws->autosleep_enabled)
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(now, ws->start_prevent_time));
+ } else {
+ active_time = 0;
+ }
+
+ seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
+ ws->name, active_count, ws->event_count,
+ ws->wakeup_count, ws->expire_count,
+ ktime_to_ms(active_time), ktime_to_ms(total_time),
+ ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
+ ktime_to_ms(prevent_sleep_time));
+
+ spin_unlock_irqrestore(&ws->lock, flags);
+
+ return 0;
+}
+
+static void *wakeup_sources_stats_seq_start(struct seq_file *m,
+ loff_t *pos)
+{
+ struct wakeup_source *ws;
+ loff_t n = *pos;
+ int *srcuidx = m->private;
+
+ if (n == 0) {
+ seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
+ "expire_count\tactive_since\ttotal_time\tmax_time\t"
+ "last_change\tprevent_suspend_time\n");
+ }
+
+ *srcuidx = srcu_read_lock(&wakeup_srcu);
+ list_for_each_entry_rcu_locked(ws, &wakeup_sources, entry) {
+ if (n-- <= 0)
+ return ws;
+ }
+
+ return NULL;
+}
+
+static void *wakeup_sources_stats_seq_next(struct seq_file *m,
+ void *v, loff_t *pos)
+{
+ struct wakeup_source *ws = v;
+ struct wakeup_source *next_ws = NULL;
+
+ ++(*pos);
+
+ list_for_each_entry_continue_rcu(ws, &wakeup_sources, entry) {
+ next_ws = ws;
+ break;
+ }
+
+ if (!next_ws)
+ print_wakeup_source_stats(m, &deleted_ws);
+
+ return next_ws;
+}
+
+static void wakeup_sources_stats_seq_stop(struct seq_file *m, void *v)
+{
+ int *srcuidx = m->private;
+
+ srcu_read_unlock(&wakeup_srcu, *srcuidx);
+}
+
+/**
+ * wakeup_sources_stats_seq_show - Print wakeup sources statistics information.
+ * @m: seq_file to print the statistics into.
+ * @v: wakeup_source of each iteration
+ */
+static int wakeup_sources_stats_seq_show(struct seq_file *m, void *v)
+{
+ struct wakeup_source *ws = v;
+
+ print_wakeup_source_stats(m, ws);
+
+ return 0;
+}
+
+static const struct seq_operations wakeup_sources_stats_seq_ops = {
+ .start = wakeup_sources_stats_seq_start,
+ .next = wakeup_sources_stats_seq_next,
+ .stop = wakeup_sources_stats_seq_stop,
+ .show = wakeup_sources_stats_seq_show,
+};
+
+static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
+{
+ return seq_open_private(file, &wakeup_sources_stats_seq_ops, sizeof(int));
+}
+
+static const struct file_operations wakeup_sources_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = wakeup_sources_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static int __init wakeup_sources_debugfs_init(void)
+{
+ debugfs_create_file("wakeup_sources", 0444, NULL, NULL,
+ &wakeup_sources_stats_fops);
+ return 0;
+}
+
+postcore_initcall(wakeup_sources_debugfs_init);
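As a hedged aside (not part of the diff): the statistics printed above are fed by drivers through the wakeup-source API. A minimal sketch, with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source *demo_ws;

static int demo_wakeup_init(struct device *dev)
{
        /* Registered sources show up in /sys/kernel/debug/wakeup_sources. */
        demo_ws = wakeup_source_register(dev, "demo_event");
        return demo_ws ? 0 : -ENOMEM;
}

static void demo_report_wakeup(void)
{
        /* Marks the source active, then idle; this feeds the counters above. */
        __pm_stay_awake(demo_ws);
        /* ... handle the wakeup condition ... */
        __pm_relax(demo_ws);
}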
diff --git a/drivers/base/power/wakeup_stats.c b/drivers/base/power/wakeup_stats.c
new file mode 100644
index 000000000..924fac493
--- /dev/null
+++ b/drivers/base/power/wakeup_stats.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wakeup statistics in sysfs
+ *
+ * Copyright (c) 2019 Linux Foundation
+ * Copyright (c) 2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2019 Google Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+
+#include "power.h"
+
+static struct class *wakeup_class;
+
+#define wakeup_attr(_name) \
+static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wakeup_source *ws = dev_get_drvdata(dev); \
+ \
+ return sysfs_emit(buf, "%lu\n", ws->_name); \
+} \
+static DEVICE_ATTR_RO(_name)
+
+wakeup_attr(active_count);
+wakeup_attr(event_count);
+wakeup_attr(wakeup_count);
+wakeup_attr(expire_count);
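/*
 * Editor's note (illustrative, not part of the diff): each wakeup_attr()
 * invocation above expands to a read-only show routine plus a
 * DEVICE_ATTR_RO() declaration. For example, wakeup_attr(active_count)
 * expands roughly to:
 *
 *        static ssize_t active_count_show(struct device *dev,
 *                        struct device_attribute *attr, char *buf)
 *        {
 *                struct wakeup_source *ws = dev_get_drvdata(dev);
 *
 *                return sysfs_emit(buf, "%lu\n", ws->active_count);
 *        }
 *        static DEVICE_ATTR_RO(active_count);
 */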
+
+static ssize_t active_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time =
+ ws->active ? ktime_sub(ktime_get(), ws->last_time) : 0;
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(active_time));
+}
+static DEVICE_ATTR_RO(active_time_ms);
+
+static ssize_t total_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t total_time = ws->total_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ total_time = ktime_add(total_time, active_time);
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(total_time));
+}
+static DEVICE_ATTR_RO(total_time_ms);
+
+static ssize_t max_time_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t active_time;
+ ktime_t max_time = ws->max_time;
+
+ if (ws->active) {
+ active_time = ktime_sub(ktime_get(), ws->last_time);
+ if (active_time > max_time)
+ max_time = active_time;
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(max_time));
+}
+static DEVICE_ATTR_RO(max_time_ms);
+
+static ssize_t last_change_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(ws->last_time));
+}
+static DEVICE_ATTR_RO(last_change_ms);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%s\n", ws->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t prevent_suspend_time_ms_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wakeup_source *ws = dev_get_drvdata(dev);
+ ktime_t prevent_sleep_time = ws->prevent_sleep_time;
+
+ if (ws->active && ws->autosleep_enabled) {
+ prevent_sleep_time = ktime_add(prevent_sleep_time,
+ ktime_sub(ktime_get(), ws->start_prevent_time));
+ }
+
+ return sysfs_emit(buf, "%lld\n", ktime_to_ms(prevent_sleep_time));
+}
+static DEVICE_ATTR_RO(prevent_suspend_time_ms);
+
+static struct attribute *wakeup_source_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_active_count.attr,
+ &dev_attr_event_count.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_expire_count.attr,
+ &dev_attr_active_time_ms.attr,
+ &dev_attr_total_time_ms.attr,
+ &dev_attr_max_time_ms.attr,
+ &dev_attr_last_change_ms.attr,
+ &dev_attr_prevent_suspend_time_ms.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wakeup_source);
+
+static void device_create_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static struct device *wakeup_source_device_create(struct device *parent,
+ struct wakeup_source *ws)
+{
+ struct device *dev = NULL;
+ int retval;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ device_initialize(dev);
+ dev->devt = MKDEV(0, 0);
+ dev->class = wakeup_class;
+ dev->parent = parent;
+ dev->groups = wakeup_source_groups;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, ws);
+ device_set_pm_not_required(dev);
+
+ retval = dev_set_name(dev, "wakeup%d", ws->id);
+ if (retval)
+ goto error;
+
+ retval = device_add(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+
+/**
+ * wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs.
+ * @parent: Device the given wakeup source is associated with (or NULL if virtual).

+ * @ws: Wakeup source to be added in sysfs.
+ */
+int wakeup_source_sysfs_add(struct device *parent, struct wakeup_source *ws)
+{
+ struct device *dev;
+
+ dev = wakeup_source_device_create(parent, ws);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+ ws->dev = dev;
+
+ return 0;
+}
+
+/**
+ * pm_wakeup_source_sysfs_add - Add wakeup_source attributes to sysfs
+ * for a device if they're missing.
+ * @parent: Device the given wakeup source is associated with.
+ */
+int pm_wakeup_source_sysfs_add(struct device *parent)
+{
+ if (!parent->power.wakeup || parent->power.wakeup->dev)
+ return 0;
+
+ return wakeup_source_sysfs_add(parent, parent->power.wakeup);
+}
+
+/**
+ * wakeup_source_sysfs_remove - Remove wakeup_source attributes from sysfs.
+ * @ws: Wakeup source to be removed from sysfs.
+ */
+void wakeup_source_sysfs_remove(struct wakeup_source *ws)
+{
+ device_unregister(ws->dev);
+}
+
+static int __init wakeup_sources_sysfs_init(void)
+{
+ wakeup_class = class_create(THIS_MODULE, "wakeup");
+
+ return PTR_ERR_OR_ZERO(wakeup_class);
+}
+postcore_initcall(wakeup_sources_sysfs_init);
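For completeness, a small user-space sketch (hedged; the wakeup0 instance number is hypothetical) of reading the attributes this file registers under /sys/class/wakeup/:

#include <stdio.h>

int main(void)
{
        char name[64];
        unsigned long active_count;
        FILE *f;

        f = fopen("/sys/class/wakeup/wakeup0/name", "r");
        if (!f || fscanf(f, "%63s", name) != 1)
                return 1;
        fclose(f);

        f = fopen("/sys/class/wakeup/wakeup0/active_count", "r");
        if (!f || fscanf(f, "%lu", &active_count) != 1)
                return 1;
        fclose(f);

        printf("%s: active %lu times\n", name, active_count);
        return 0;
}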
diff --git a/drivers/base/property.c b/drivers/base/property.c
new file mode 100644
index 000000000..eb9b01c2f
--- /dev/null
+++ b/drivers/base/property.c
@@ -0,0 +1,1421 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * property.c - Unified device property interface.
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/property.h>
+#include <linux/phy.h>
+
+struct fwnode_handle *__dev_fwnode(struct device *dev)
+{
+ return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+ of_fwnode_handle(dev->of_node) : dev->fwnode;
+}
+EXPORT_SYMBOL_GPL(__dev_fwnode);
+
+const struct fwnode_handle *__dev_fwnode_const(const struct device *dev)
+{
+ return IS_ENABLED(CONFIG_OF) && dev->of_node ?
+ of_fwnode_handle(dev->of_node) : dev->fwnode;
+}
+EXPORT_SYMBOL_GPL(__dev_fwnode_const);
+
+/**
+ * device_property_present - check if a property of a device is present
+ * @dev: Device whose property is being checked
+ * @propname: Name of the property
+ *
+ * Check if property @propname is present in the device firmware description.
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool device_property_present(struct device *dev, const char *propname)
+{
+ return fwnode_property_present(dev_fwnode(dev), propname);
+}
+EXPORT_SYMBOL_GPL(device_property_present);
+
+/**
+ * fwnode_property_present - check if a property of a firmware node is present
+ * @fwnode: Firmware node whose property to check
+ * @propname: Name of the property
+ *
+ * Return: true if property @propname is present. Otherwise, returns false.
+ */
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ bool ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ ret = fwnode_call_bool_op(fwnode, property_present, propname);
+ if (ret)
+ return ret;
+
+ return fwnode_call_bool_op(fwnode->secondary, property_present, propname);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_present);
+
+/**
+ * device_property_read_u8_array - return a u8 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u8 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * It's recommended to call device_property_count_u8() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u8_array(struct device *dev, const char *propname,
+ u8 *val, size_t nval)
+{
+ return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u8_array);
+
+/**
+ * device_property_read_u16_array - return a u16 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u16 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * It's recommended to call device_property_count_u16() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u16_array(struct device *dev, const char *propname,
+ u16 *val, size_t nval)
+{
+ return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u16_array);
+
+/**
+ * device_property_read_u32_array - return a u32 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u32 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * It's recommended to call device_property_count_u32() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u32_array(struct device *dev, const char *propname,
+ u32 *val, size_t nval)
+{
+ return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u32_array);
+
+/**
+ * device_property_read_u64_array - return a u64 array property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of u64 properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * It's recommended to call device_property_count_u64() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_u64_array(struct device *dev, const char *propname,
+ u64 *val, size_t nval)
+{
+ return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_u64_array);
+
+/**
+ * device_property_read_string_array - return a string array property of device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Function reads an array of string properties with @propname from the device
+ * firmware description and stores them to @val if found.
+ *
+ * It's recommended to call device_property_string_array_count() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values read on success if @val is non-NULL,
+ * number of values available on success if @val is NULL,
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO or %-EILSEQ if the property is not an array of strings,
+ * %-EOVERFLOW if the size of the property is not as expected.
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_string_array(struct device *dev, const char *propname,
+ const char **val, size_t nval)
+{
+ return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval);
+}
+EXPORT_SYMBOL_GPL(device_property_read_string_array);
+
+/**
+ * device_property_read_string - return a string property of a device
+ * @dev: Device to get the property of
+ * @propname: Name of the property
+ * @val: The value is stored here
+ *
+ * Function reads property @propname from the device firmware description and
+ * stores the value into @val if found. The value is checked to be a string.
+ *
+ * Return: %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO or %-EILSEQ if the property type is not a string.
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_read_string(struct device *dev, const char *propname,
+ const char **val)
+{
+ return fwnode_property_read_string(dev_fwnode(dev), propname, val);
+}
+EXPORT_SYMBOL_GPL(device_property_read_string);
+
+/**
+ * device_property_match_string - find a string in an array and return index
+ * @dev: Device to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and if it is found return the
+ * index back.
+ *
+ * Return: index, starting from %0, if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of strings,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int device_property_match_string(struct device *dev, const char *propname,
+ const char *string)
+{
+ return fwnode_property_match_string(dev_fwnode(dev), propname, string);
+}
+EXPORT_SYMBOL_GPL(device_property_match_string);
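/*
 * Editor's sketch (not part of this file): typical driver-side use of the
 * device_property_*() helpers above. The property names, the default value
 * and the demo_probe() function are hypothetical.
 */
static int demo_probe(struct device *dev)
{
        u32 clock_hz;
        const char *label;

        /* Optional numeric property: fall back to a default when absent. */
        if (device_property_read_u32(dev, "clock-frequency", &clock_hz))
                clock_hz = 100000;

        /* Required string property: fail the probe when it is missing. */
        if (device_property_read_string(dev, "label", &label))
                return -EINVAL;

        dev_info(dev, "%s runs at %u Hz\n", label, clock_hz);
        return 0;
}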
+
+static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ ret = fwnode_call_int_op(fwnode, property_read_int_array, propname,
+ elem_size, val, nval);
+ if (ret != -EINVAL)
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname,
+ elem_size, val, nval);
+}
+
+/**
+ * fwnode_property_read_u8_array - return a u8 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u8 properties with @propname from @fwnode and store them to
+ * @val if found.
+ *
+ * It's recommended to call fwnode_property_count_u8() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
+ const char *propname, u8 *val, size_t nval)
+{
+ return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
+ val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
+
+/**
+ * fwnode_property_read_u16_array - return a u16 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u16 properties with @propname from @fwnode and store them to
+ * @val if found.
+ *
+ * It's recommended to call fwnode_property_count_u16() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
+ const char *propname, u16 *val, size_t nval)
+{
+ return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
+ val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
+
+/**
+ * fwnode_property_read_u32_array - return a u32 array property of firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u32 properties with @propname from @fwnode and store them to
+ * @val if found.
+ *
+ * It's recommended to call fwnode_property_count_u32() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
+ const char *propname, u32 *val, size_t nval)
+{
+ return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
+ val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
+
+/**
+ * fwnode_property_read_u64_array - return a u64 array property of a firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read an array of u64 properties with @propname from @fwnode and store them to
+ * @val if found.
+ *
+ * It's recommended to call fwnode_property_count_u64() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values if @val was %NULL,
+ * %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of numbers,
+ * %-EOVERFLOW if the size of the property is not as expected,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
+ const char *propname, u64 *val, size_t nval)
+{
+ return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
+ val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
+
+/**
+ * fwnode_property_read_string_array - return string array property of a node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The values are stored here or %NULL to return the number of values
+ * @nval: Size of the @val array
+ *
+ * Read a string list property @propname from the given firmware node and store
+ * them to @val if found.
+ *
+ * It's recommended to call fwnode_property_string_array_count() instead of calling
+ * this function with @val equals %NULL and @nval equals 0.
+ *
+ * Return: number of values read on success if @val is non-NULL,
+ * number of values available on success if @val is NULL,
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO or %-EILSEQ if the property is not an array of strings,
+ * %-EOVERFLOW if the size of the property is not as expected,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -EINVAL;
+
+ ret = fwnode_call_int_op(fwnode, property_read_string_array, propname,
+ val, nval);
+ if (ret != -EINVAL)
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname,
+ val, nval);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
+
+/**
+ * fwnode_property_read_string - return a string property of a firmware node
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property
+ * @val: The value is stored here
+ *
+ * Read property @propname from the given firmware node and store the value into
+ * @val if found. The value is checked to be a string.
+ *
+ * Return: %0 if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO or %-EILSEQ if the property is not a string,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
+ const char *propname, const char **val)
+{
+ int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
+
+ return ret < 0 ? ret : 0;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_read_string);
+
+/**
+ * fwnode_property_match_string - find a string in an array and return index
+ * @fwnode: Firmware node to get the property of
+ * @propname: Name of the property holding the array
+ * @string: String to look for
+ *
+ * Find a given string in a string array and if it is found return the
+ * index back.
+ *
+ * Return: index, starting from %0, if the property was found (success),
+ * %-EINVAL if given arguments are not valid,
+ * %-ENODATA if the property does not have a value,
+ * %-EPROTO if the property is not an array of strings,
+ * %-ENXIO if no suitable firmware interface is present.
+ */
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
+ const char *propname, const char *string)
+{
+ const char **values;
+ int nval, ret;
+
+ nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0);
+ if (nval < 0)
+ return nval;
+
+ if (nval == 0)
+ return -ENODATA;
+
+ values = kcalloc(nval, sizeof(*values), GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_string_array(fwnode, propname, values, nval);
+ if (ret < 0)
+ goto out;
+
+ ret = match_string(values, nval, string);
+ if (ret < 0)
+ ret = -ENODATA;
+out:
+ kfree(values);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+
+/**
+ * fwnode_property_get_reference_args() - Find a reference with arguments
+ * @fwnode: Firmware node where to look for the reference
+ * @prop: The name of the property
+ * @nargs_prop: The name of the property telling the number of
+ * arguments in the referred node. NULL if @nargs is known,
+ * otherwise @nargs is ignored. Only relevant on OF.
+ * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
+ * @index: Index of the reference, from zero onwards.
+ * @args: Result structure with reference and integer arguments.
+ *
+ * Obtain a reference based on a named property in an fwnode, with
+ * integer arguments.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * @args->fwnode pointer.
+ *
+ * Return: %0 on success
+ * %-ENOENT when the index is out of bounds, the index has an empty
+ * reference or the property was not found
+ * %-EINVAL on parse error
+ */
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return -ENOENT;
+
+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+ if (ret == 0)
+ return ret;
+
+ if (IS_ERR_OR_NULL(fwnode->secondary))
+ return ret;
+
+ return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
+
+/**
+ * fwnode_find_reference - Find named reference to a fwnode_handle
+ * @fwnode: Firmware node where to look for the reference
+ * @name: The name of the reference
+ * @index: Index of the reference
+ *
+ * @index can be used when the named reference holds a table of references.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: a pointer to the reference fwnode, when found. Otherwise,
+ * returns an error pointer.
+ */
+struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
+ const char *name,
+ unsigned int index)
+{
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index,
+ &args);
+ return ret ? ERR_PTR(ret) : args.fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_find_reference);
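/*
 * Editor's sketch (not part of this file): resolving a named reference and
 * dropping it again. The "backlight" reference name and demo_check_backlight()
 * are hypothetical.
 */
static int demo_check_backlight(struct device *dev)
{
        struct fwnode_handle *ref;

        ref = fwnode_find_reference(dev_fwnode(dev), "backlight", 0);
        if (IS_ERR(ref))
                return PTR_ERR(ref);

        dev_info(dev, "backlight node is %savailable\n",
                 fwnode_device_is_available(ref) ? "" : "not ");

        fwnode_handle_put(ref);        /* balance the reference taken above */
        return 0;
}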
+
+/**
+ * fwnode_get_name - Return the name of a node
+ * @fwnode: The firmware node
+ *
+ * Return: a pointer to the node name, or %NULL.
+ */
+const char *fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_name);
+
+/**
+ * fwnode_get_name_prefix - Return the prefix of node for printing purposes
+ * @fwnode: The firmware node
+ *
+ * Return: the prefix of a node, intended to be printed right before the node.
+ * The prefix works also as a separator between the nodes.
+ */
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_name_prefix);
+}
+
+/**
+ * fwnode_get_parent - Return parent firmware node
+ * @fwnode: Firmware node whose parent is retrieved
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, get_parent);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_parent);
+
+/**
+ * fwnode_get_next_parent - Iterate to the node's parent
+ * @fwnode: Firmware node whose parent is retrieved
+ *
+ * This is like fwnode_get_parent() except that it drops the refcount
+ * on the passed node, making it suitable for iterating through a
+ * node's parents.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @fwnode
+ * unconditionally.
+ *
+ * Return: parent firmware node of the given node if possible or %NULL if no
+ * parent was available.
+ */
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+
+ fwnode_handle_put(fwnode);
+
+ return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
+
+/**
+ * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
+ * @fwnode: firmware node
+ *
+ * Given a firmware node (@fwnode), this function finds its closest ancestor
+ * firmware node that has a corresponding struct device and returns that struct
+ * device.
+ *
+ * The caller is responsible for calling put_device() on the returned device
+ * pointer.
+ *
+ * Return: a pointer to the device of the @fwnode's closest ancestor.
+ */
+struct device *fwnode_get_next_parent_dev(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ struct device *dev;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ dev = get_dev_from_fwnode(parent);
+ if (dev) {
+ fwnode_handle_put(parent);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * fwnode_count_parents - Return the number of parents a node has
+ * @fwnode: The node the parents of which are to be counted
+ *
+ * Return: the number of parents a node has.
+ */
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ unsigned int count = 0;
+
+ fwnode_for_each_parent_node(fwnode, parent)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_count_parents);
+
+/**
+ * fwnode_get_nth_parent - Return an nth parent of a node
+ * @fwnode: The node the parent of which is requested
+ * @depth: Distance of the parent from the node
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the nth parent of a node. If there is no parent at the requested
+ * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to
+ * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on.
+ */
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
+ unsigned int depth)
+{
+ struct fwnode_handle *parent;
+
+ if (depth == 0)
+ return fwnode_handle_get(fwnode);
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (--depth == 0)
+ return parent;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
+
+/**
+ * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
+ * @ancestor: Firmware which is tested for being an ancestor
+ * @child: Firmware which is tested for being the child
+ *
+ * A node is considered an ancestor of itself too.
+ *
+ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
+ */
+bool fwnode_is_ancestor_of(struct fwnode_handle *ancestor, struct fwnode_handle *child)
+{
+ struct fwnode_handle *parent;
+
+ if (IS_ERR_OR_NULL(ancestor))
+ return false;
+
+ if (child == ancestor)
+ return true;
+
+ fwnode_for_each_parent_node(child, parent) {
+ if (parent == ancestor) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * fwnode_get_next_child_node - Return the next child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @child
+ * unconditionally.
+ */
+struct fwnode_handle *
+fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
+
+/**
+ * fwnode_get_next_available_child_node - Return the next available child node handle for a node
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the node's child nodes or a %NULL handle.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @child
+ * unconditionally.
+ */
+struct fwnode_handle *
+fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ struct fwnode_handle *next_child = child;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return NULL;
+
+ do {
+ next_child = fwnode_get_next_child_node(fwnode, next_child);
+ if (!next_child)
+ return NULL;
+ } while (!fwnode_device_is_available(next_child));
+
+ return next_child;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node);
+
+/**
+ * device_get_next_child_node - Return the next child node handle for a device
+ * @dev: Device to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a %NULL handle.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @child
+ * unconditionally.
+ */
+struct fwnode_handle *device_get_next_child_node(struct device *dev,
+ struct fwnode_handle *child)
+{
+ const struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_handle *next;
+
+ if (IS_ERR_OR_NULL(fwnode))
+ return NULL;
+
+ /* Try to find a child in primary fwnode */
+ next = fwnode_get_next_child_node(fwnode, child);
+ if (next)
+ return next;
+
+ /* When no more children in primary, continue with secondary */
+ return fwnode_get_next_child_node(fwnode->secondary, child);
+}
+EXPORT_SYMBOL_GPL(device_get_next_child_node);
+
+/**
+ * fwnode_get_named_child_node - Return first matching named child node handle
+ * @fwnode: Firmware node to find the named child node for.
+ * @childname: String to match child node name against.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ */
+struct fwnode_handle *
+fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
+}
+EXPORT_SYMBOL_GPL(fwnode_get_named_child_node);
+
+/**
+ * device_get_named_child_node - Return first matching named child node handle
+ * @dev: Device to find the named child node for.
+ * @childname: String to match child node name against.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ */
+struct fwnode_handle *device_get_named_child_node(struct device *dev,
+ const char *childname)
+{
+ return fwnode_get_named_child_node(dev_fwnode(dev), childname);
+}
+EXPORT_SYMBOL_GPL(device_get_named_child_node);
+
+/**
+ * fwnode_handle_get - Obtain a reference to a device node
+ * @fwnode: Pointer to the device node to obtain the reference to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the fwnode handle.
+ */
+struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
+{
+ if (!fwnode_has_op(fwnode, get))
+ return fwnode;
+
+ return fwnode_call_ptr_op(fwnode, get);
+}
+EXPORT_SYMBOL_GPL(fwnode_handle_get);
+
+/**
+ * fwnode_handle_put - Drop reference to a device node
+ * @fwnode: Pointer to the device node to drop the reference to.
+ *
+ * This has to be used when terminating device_for_each_child_node() iteration
+ * with break or return to prevent stale device node references from being left
+ * behind.
+ */
+void fwnode_handle_put(struct fwnode_handle *fwnode)
+{
+ fwnode_call_void_op(fwnode, put);
+}
+EXPORT_SYMBOL_GPL(fwnode_handle_put);
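/*
 * Editor's sketch (not part of this file): when terminating a
 * device_for_each_child_node() walk early, the reference held on the current
 * child must be dropped with fwnode_handle_put(), as the comment above
 * describes. demo_has_enabled_child() is a hypothetical helper.
 */
static bool demo_has_enabled_child(struct device *dev)
{
        struct fwnode_handle *child;
        bool found = false;

        device_for_each_child_node(dev, child) {
                if (fwnode_device_is_available(child)) {
                        found = true;
                        fwnode_handle_put(child);        /* drop the iterator's reference */
                        break;
                }
        }

        return found;
}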
+
+/**
+ * fwnode_device_is_available - check if a device is available for use
+ * @fwnode: Pointer to the fwnode of the device.
+ *
+ * Return: true if device is available for use. Otherwise, returns false.
+ *
+ * For fwnode types that don't implement the .device_is_available()
+ * operation, this function returns true.
+ */
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
+{
+ if (IS_ERR_OR_NULL(fwnode))
+ return false;
+
+ if (!fwnode_has_op(fwnode, device_is_available))
+ return true;
+
+ return fwnode_call_bool_op(fwnode, device_is_available);
+}
+EXPORT_SYMBOL_GPL(fwnode_device_is_available);
+
+/**
+ * device_get_child_node_count - return the number of child nodes for a device
+ * @dev: Device to count the child nodes for
+ *
+ * Return: the number of child nodes for a given device.
+ */
+unsigned int device_get_child_node_count(struct device *dev)
+{
+ struct fwnode_handle *child;
+ unsigned int count = 0;
+
+ device_for_each_child_node(dev, child)
+ count++;
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(device_get_child_node_count);
+
+bool device_dma_supported(struct device *dev)
+{
+ return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported);
+}
+EXPORT_SYMBOL_GPL(device_dma_supported);
+
+enum dev_dma_attr device_get_dma_attr(struct device *dev)
+{
+ if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr))
+ return DEV_DMA_NOT_SUPPORTED;
+
+ return fwnode_call_int_op(dev_fwnode(dev), device_get_dma_attr);
+}
+EXPORT_SYMBOL_GPL(device_get_dma_attr);
+
+/**
+ * fwnode_get_phy_mode - Get phy mode for given firmware node
+ * @fwnode: Pointer to the given node
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int fwnode_get_phy_mode(struct fwnode_handle *fwnode)
+{
+ const char *pm;
+ int err, i;
+
+ err = fwnode_property_read_string(fwnode, "phy-mode", &pm);
+ if (err < 0)
+ err = fwnode_property_read_string(fwnode,
+ "phy-connection-type", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
+ return i;
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_mode);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev: Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or a negative errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+ return fwnode_get_phy_mode(dev_fwnode(dev));
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+/**
+ * fwnode_iomap - Maps the memory mapped IO for a given fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Index of the IO range
+ *
+ * Return: a pointer to the mapped memory.
+ */
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
+{
+ return fwnode_call_ptr_op(fwnode, iomap, index);
+}
+EXPORT_SYMBOL(fwnode_iomap);
+
+/**
+ * fwnode_irq_get - Get IRQ directly from a fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Zero-based index of the IRQ
+ *
+ * Return: Linux IRQ number on success. Negative errno on failure.
+ */
+int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
+{
+ int ret;
+
+ ret = fwnode_call_int_op(fwnode, irq_get, index);
+ /* We treat mapping errors as invalid case */
+ if (ret == 0)
+ return -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL(fwnode_irq_get);
+
+/**
+ * fwnode_irq_get_byname - Get IRQ from a fwnode using its name
+ * @fwnode: Pointer to the firmware node
+ * @name: IRQ name
+ *
+ * Description:
+ * Find a match to the string @name in the 'interrupt-names' string array
+ * in _DSD for ACPI, or of_node for Device Tree. Then get the Linux IRQ
+ * number of the IRQ resource corresponding to the index of the matched
+ * string.
+ *
+ * Return: Linux IRQ number on success, or negative errno otherwise.
+ */
+int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name)
+{
+ int index;
+
+ if (!name)
+ return -EINVAL;
+
+ index = fwnode_property_match_string(fwnode, "interrupt-names", name);
+ if (index < 0)
+ return index;
+
+ return fwnode_irq_get(fwnode, index);
+}
+EXPORT_SYMBOL(fwnode_irq_get_byname);
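/*
 * Editor's sketch (not part of this file): requesting an IRQ that firmware
 * names in "interrupt-names". Assumes <linux/interrupt.h>; the "alert" name
 * and demo_isr handler are hypothetical.
 */
static int demo_request_alert_irq(struct device *dev, irq_handler_t demo_isr)
{
        int irq;

        irq = fwnode_irq_get_byname(dev_fwnode(dev), "alert");
        if (irq < 0)
                return irq;

        return devm_request_irq(dev, irq, demo_isr, 0, dev_name(dev), dev);
}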
+
+/**
+ * fwnode_graph_get_next_endpoint - Get next endpoint firmware node
+ * @fwnode: Pointer to the parent firmware node
+ * @prev: Previous endpoint node or %NULL to get the first
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer. Note that this function also puts a reference to @prev
+ * unconditionally.
+ *
+ * Return: an endpoint firmware node pointer or %NULL if no more endpoints
+ * are available.
+ */
+struct fwnode_handle *
+fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev)
+{
+ struct fwnode_handle *ep, *port_parent = NULL;
+ const struct fwnode_handle *parent;
+
+ /*
+ * If this function is in a loop and the previous iteration returned
+ * an endpoint from fwnode->secondary, then we need to use the secondary
+ * as parent rather than @fwnode.
+ */
+ if (prev) {
+ port_parent = fwnode_graph_get_port_parent(prev);
+ parent = port_parent;
+ } else {
+ parent = fwnode;
+ }
+ if (IS_ERR_OR_NULL(parent))
+ return NULL;
+
+ ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
+ if (ep)
+ goto out_put_port_parent;
+
+ ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+
+out_put_port_parent:
+ fwnode_handle_put(port_parent);
+ return ep;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
+
+/**
+ * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint
+ * @endpoint: Endpoint firmware node of the port
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the firmware node of the device the @endpoint belongs to.
+ */
+struct fwnode_handle *
+fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
+{
+ struct fwnode_handle *port, *parent;
+
+ port = fwnode_get_parent(endpoint);
+ parent = fwnode_call_ptr_op(port, graph_get_port_parent);
+
+ fwnode_handle_put(port);
+
+ return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
+
+/**
+ * fwnode_graph_get_remote_port_parent - Return fwnode of a remote device
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote device the @fwnode points to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *endpoint, *parent;
+
+ endpoint = fwnode_graph_get_remote_endpoint(fwnode);
+ parent = fwnode_graph_get_port_parent(endpoint);
+
+ fwnode_handle_put(endpoint);
+
+ return parent;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
+
+/**
+ * fwnode_graph_get_remote_port - Return fwnode of a remote port
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote port the @fwnode points to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
+{
+ return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
+
+/**
+ * fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint
+ * @fwnode: Endpoint firmware node pointing to the remote endpoint
+ *
+ * Extracts firmware node of a remote endpoint the @fwnode points to.
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ */
+struct fwnode_handle *
+fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+ return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
+
+static bool fwnode_graph_remote_available(struct fwnode_handle *ep)
+{
+ struct fwnode_handle *dev_node;
+ bool available;
+
+ dev_node = fwnode_graph_get_remote_port_parent(ep);
+ available = fwnode_device_is_available(dev_node);
+ fwnode_handle_put(dev_node);
+
+ return available;
+}
+
+/**
+ * fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers
+ * @fwnode: parent fwnode_handle containing the graph
+ * @port: identifier of the port node
+ * @endpoint: identifier of the endpoint node under the port node
+ * @flags: fwnode lookup flags
+ *
+ * The caller is responsible for calling fwnode_handle_put() on the returned
+ * fwnode pointer.
+ *
+ * Return: the fwnode handle of the local endpoint corresponding the port and
+ * endpoint IDs or %NULL if not found.
+ *
+ * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint
+ * has not been found, look for the closest endpoint ID greater than the
+ * specified one and return the endpoint that corresponds to it, if present.
+ *
+ * Does not return endpoints that belong to disabled devices or endpoints that
+ * are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags.
+ */
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags)
+{
+ struct fwnode_handle *ep, *best_ep = NULL;
+ unsigned int best_ep_id = 0;
+ bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT;
+ bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED);
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ struct fwnode_endpoint fwnode_ep = { 0 };
+ int ret;
+
+ if (enabled_only && !fwnode_graph_remote_available(ep))
+ continue;
+
+ ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep);
+ if (ret < 0)
+ continue;
+
+ if (fwnode_ep.port != port)
+ continue;
+
+ if (fwnode_ep.id == endpoint)
+ return ep;
+
+ if (!endpoint_next)
+ continue;
+
+ /*
+ * If the endpoint that has just been found is not the first
+ * matching one and the ID of the one found previously is closer
+ * to the requested endpoint ID, skip it.
+ */
+ if (fwnode_ep.id < endpoint ||
+ (best_ep && best_ep_id < fwnode_ep.id))
+ continue;
+
+ fwnode_handle_put(best_ep);
+ best_ep = fwnode_handle_get(ep);
+ best_ep_id = fwnode_ep.id;
+ }
+
+ return best_ep;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id);
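/*
 * Editor's sketch (not part of this file): looking up port 0, endpoint 0 of a
 * device's graph and walking to the remote device node. The port/endpoint
 * numbers and demo_get_remote() are hypothetical.
 */
static struct fwnode_handle *demo_get_remote(struct device *dev)
{
        struct fwnode_handle *ep, *remote;

        ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
        if (!ep)
                return NULL;

        remote = fwnode_graph_get_remote_port_parent(ep);
        fwnode_handle_put(ep);

        return remote;        /* caller must fwnode_handle_put() the result */
}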
+
+/**
+ * fwnode_graph_get_endpoint_count - Count endpoints on a device node
+ * @fwnode: The node related to a device
+ * @flags: fwnode lookup flags
+ *
+ * Count endpoints in a device node.
+ *
+ * If FWNODE_GRAPH_DEVICE_DISABLED flag is specified, also unconnected endpoints
+ * and endpoints connected to disabled devices are counted.
+ */
+unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode,
+ unsigned long flags)
+{
+ struct fwnode_handle *ep;
+ unsigned int count = 0;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (flags & FWNODE_GRAPH_DEVICE_DISABLED ||
+ fwnode_graph_remote_available(ep))
+ count++;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_count);
+
+/**
+ * fwnode_graph_parse_endpoint - parse common endpoint node properties
+ * @fwnode: pointer to endpoint fwnode_handle
+ * @endpoint: pointer to the fwnode endpoint data structure
+ *
+ * Parse @fwnode representing a graph endpoint node and store the
+ * information in @endpoint. The caller must hold a reference to
+ * @fwnode.
+ */
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint)
+{
+ memset(endpoint, 0, sizeof(*endpoint));
+
+ return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint);
+}
+EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
+
+const void *device_get_match_data(const struct device *dev)
+{
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
+}
+EXPORT_SYMBOL_GPL(device_get_match_data);
+
+static unsigned int fwnode_graph_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
+{
+ struct fwnode_handle *node;
+ struct fwnode_handle *ep;
+ unsigned int count = 0;
+ void *ret;
+
+ fwnode_graph_for_each_endpoint(fwnode, ep) {
+ if (matches && count >= matches_len) {
+ fwnode_handle_put(ep);
+ break;
+ }
+
+ node = fwnode_graph_get_remote_port_parent(ep);
+ if (!fwnode_device_is_available(node)) {
+ fwnode_handle_put(node);
+ continue;
+ }
+
+ ret = match(node, con_id, data);
+ fwnode_handle_put(node);
+ if (ret) {
+ if (matches)
+ matches[count] = ret;
+ count++;
+ }
+ }
+ return count;
+}
+
+static unsigned int fwnode_devcon_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches,
+ unsigned int matches_len)
+{
+ struct fwnode_handle *node;
+ unsigned int count = 0;
+ unsigned int i;
+ void *ret;
+
+ for (i = 0; ; i++) {
+ if (matches && count >= matches_len)
+ break;
+
+ node = fwnode_find_reference(fwnode, con_id, i);
+ if (IS_ERR(node))
+ break;
+
+ ret = match(node, NULL, data);
+ fwnode_handle_put(node);
+ if (ret) {
+ if (matches)
+ matches[count] = ret;
+ count++;
+ }
+ }
+
+ return count;
+}
+
+/**
+ * fwnode_connection_find_match - Find connection from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ *
+ * Find a connection with unique identifier @con_id between @fwnode and another
+ * device node. @match will be used to convert the connection description to
+ * data the caller is expecting to be returned.
+ */
+void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match)
+{
+ unsigned int count;
+ void *ret;
+
+ if (!fwnode || !match)
+ return NULL;
+
+ count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ if (count)
+ return ret;
+
+ count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1);
+ return count ? ret : NULL;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_match);
+
+/**
+ * fwnode_connection_find_matches - Find connections from a device node
+ * @fwnode: Device node with the connection
+ * @con_id: Identifier for the connection
+ * @data: Data for the match function
+ * @match: Function to check and convert the connection description
+ * @matches: (Optional) array of pointers to fill with matches
+ * @matches_len: Length of @matches
+ *
+ * Find up to @matches_len connections with unique identifier @con_id between
+ * @fwnode and other device nodes. @match will be used to convert the
+ * connection description to data the caller is expecting to be returned
+ * through the @matches array.
+ *
+ * If @matches is %NULL @matches_len is ignored and the total number of resolved
+ * matches is returned.
+ *
+ * Return: Number of matches resolved, or negative errno.
+ */
+int fwnode_connection_find_matches(struct fwnode_handle *fwnode,
+ const char *con_id, void *data,
+ devcon_match_fn_t match,
+ void **matches, unsigned int matches_len)
+{
+ unsigned int count_graph;
+ unsigned int count_ref;
+
+ if (!fwnode || !match)
+ return -EINVAL;
+
+ count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ if (matches) {
+ matches += count_graph;
+ matches_len -= count_graph;
+ }
+
+ count_ref = fwnode_devcon_matches(fwnode, con_id, data, match,
+ matches, matches_len);
+
+ return count_graph + count_ref;
+}
+EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
new file mode 100644
index 000000000..159bac6c5
--- /dev/null
+++ b/drivers/base/regmap/Kconfig
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+# Generic register map support. There are no user serviceable options here,
+# this is an API intended to be used by other kernel subsystems. These
+# subsystems should select the appropriate symbols.
+
+config REGMAP
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO)
+ select IRQ_DOMAIN if REGMAP_IRQ
+ select MDIO_BUS if REGMAP_MDIO
+ bool
+
+config REGCACHE_COMPRESSED
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ bool
+
+config REGMAP_AC97
+ tristate
+
+config REGMAP_I2C
+ tristate
+ depends on I2C
+
+config REGMAP_SLIMBUS
+ tristate
+ depends on SLIMBUS
+
+config REGMAP_SPI
+ tristate
+ depends on SPI
+
+config REGMAP_SPMI
+ tristate
+ depends on SPMI
+
+config REGMAP_W1
+ tristate
+ depends on W1
+
+config REGMAP_MDIO
+ tristate
+
+config REGMAP_MMIO
+ tristate
+
+config REGMAP_IRQ
+ bool
+
+config REGMAP_SOUNDWIRE
+ tristate
+ depends on SOUNDWIRE
+
+config REGMAP_SOUNDWIRE_MBQ
+ tristate
+ depends on SOUNDWIRE
+
+config REGMAP_SCCB
+ tristate
+ depends on I2C
+
+config REGMAP_I3C
+ tristate
+ depends on I3C
+
+config REGMAP_SPI_AVMM
+ tristate
+ depends on SPI
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
new file mode 100644
index 000000000..11facb32a
--- /dev/null
+++ b/drivers/base/regmap/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+# For include/trace/define_trace.h to include trace.h
+CFLAGS_regmap.o := -I$(src)
+
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-flat.o
+obj-$(CONFIG_REGCACHE_COMPRESSED) += regcache-lzo.o
+obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
+obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
+obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
+obj-$(CONFIG_REGMAP_SLIMBUS) += regmap-slimbus.o
+obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
+obj-$(CONFIG_REGMAP_MMIO) += regmap-mmio.o
+obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
+obj-$(CONFIG_REGMAP_W1) += regmap-w1.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o
+obj-$(CONFIG_REGMAP_SOUNDWIRE_MBQ) += regmap-sdw-mbq.o
+obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o
+obj-$(CONFIG_REGMAP_I3C) += regmap-i3c.o
+obj-$(CONFIG_REGMAP_SPI_AVMM) += regmap-spi-avmm.o
+obj-$(CONFIG_REGMAP_MDIO) += regmap-mdio.o
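The bus-specific objects above are pulled in when a client driver selects the matching REGMAP_* symbol. As a hedged, self-contained sketch (hypothetical register addresses and demo_* names), a typical I2C user looks like this:

#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config demo_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .max_register = 0x7f,
};

static int demo_i2c_probe(struct i2c_client *client)
{
        struct regmap *map;
        unsigned int id;
        int ret;

        map = devm_regmap_init_i2c(client, &demo_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        ret = regmap_read(map, 0x00, &id);        /* read a (hypothetical) ID register */
        if (ret)
                return ret;

        /* set bit 0 of a (hypothetical) control register */
        return regmap_update_bits(map, 0x01, BIT(0), BIT(0));
}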
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
new file mode 100644
index 000000000..da8996e7a
--- /dev/null
+++ b/drivers/base/regmap/internal.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register map access API internal header
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ */
+
+#ifndef _REGMAP_INTERNAL_H
+#define _REGMAP_INTERNAL_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+struct regmap;
+struct regcache_ops;
+
+struct regmap_debugfs_off_cache {
+ struct list_head list;
+ off_t min;
+ off_t max;
+ unsigned int base_reg;
+ unsigned int max_reg;
+};
+
+struct regmap_format {
+ size_t buf_size;
+ size_t reg_bytes;
+ size_t pad_bytes;
+ size_t reg_downshift;
+ size_t val_bytes;
+ void (*format_write)(struct regmap *map,
+ unsigned int reg, unsigned int val);
+ void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
+ void (*format_val)(void *buf, unsigned int val, unsigned int shift);
+ unsigned int (*parse_val)(const void *buf);
+ void (*parse_inplace)(void *buf);
+};
+
+struct regmap_async {
+ struct list_head list;
+ struct regmap *map;
+ void *work_buf;
+};
+
+struct regmap {
+ union {
+ struct mutex mutex;
+ struct {
+ spinlock_t spinlock;
+ unsigned long spinlock_flags;
+ };
+ struct {
+ raw_spinlock_t raw_spinlock;
+ unsigned long raw_spinlock_flags;
+ };
+ };
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg; /* This is passed to lock/unlock functions */
+ gfp_t alloc_flags;
+ unsigned int reg_base;
+
+ struct device *dev; /* Device we do I/O on */
+ void *work_buf; /* Scratch buffer used to format I/O */
+ struct regmap_format format; /* Buffer format */
+ const struct regmap_bus *bus;
+ void *bus_context;
+ const char *name;
+
+ bool async;
+ spinlock_t async_lock;
+ wait_queue_head_t async_waitq;
+ struct list_head async_list;
+ struct list_head async_free;
+ int async_ret;
+
+#ifdef CONFIG_DEBUG_FS
+ bool debugfs_disable;
+ struct dentry *debugfs;
+ const char *debugfs_name;
+
+ unsigned int debugfs_reg_len;
+ unsigned int debugfs_val_len;
+ unsigned int debugfs_tot_len;
+
+ struct list_head debugfs_off_cache;
+ struct mutex cache_lock;
+#endif
+
+ unsigned int max_register;
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*writeable_noinc_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *wr_noinc_table;
+ const struct regmap_access_table *rd_noinc_table;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+ int (*reg_update_bits)(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val);
+ /* Bulk read/write */
+ int (*read)(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+ int (*write)(void *context, const void *data, size_t count);
+
+ bool defer_caching;
+
+ unsigned long read_flag_mask;
+ unsigned long write_flag_mask;
+
+ /* number of bits to (left) shift the reg value when formatting */
+ int reg_shift;
+ int reg_stride;
+ int reg_stride_order;
+
+ /* regcache specific members */
+ const struct regcache_ops *cache_ops;
+ enum regcache_type cache_type;
+
+ /* number of bytes in reg_defaults_raw */
+ unsigned int cache_size_raw;
+ /* number of bytes per word in reg_defaults_raw */
+ unsigned int cache_word_size;
+ /* number of entries in reg_defaults */
+ unsigned int num_reg_defaults;
+ /* number of entries in reg_defaults_raw */
+ unsigned int num_reg_defaults_raw;
+
+ /* if set, only the cache is modified not the HW */
+ bool cache_only;
+ /* if set, only the HW is modified not the cache */
+ bool cache_bypass;
+ /* if set, remember to free reg_defaults_raw */
+ bool cache_free;
+
+ struct reg_default *reg_defaults;
+ const void *reg_defaults_raw;
+ void *cache;
+ /* if set, the cache contains newer data than the HW */
+ bool cache_dirty;
+ /* if set, the HW registers are known to match map->reg_defaults */
+ bool no_sync_defaults;
+
+ struct reg_sequence *patch;
+ int patch_regs;
+
+ /* if set, converts bulk read to single read */
+ bool use_single_read;
+ /* if set, converts bulk write to single write */
+ bool use_single_write;
+ /* if set, the device supports multi write mode */
+ bool can_multi_write;
+
+ /* if set, raw reads/writes are limited to this size */
+ size_t max_raw_read;
+ size_t max_raw_write;
+
+ struct rb_root range_tree;
+ void *selector_work_buf; /* Scratch buffer used for selector */
+
+ struct hwspinlock *hwlock;
+
+ /* if set, the regmap core can sleep */
+ bool can_sleep;
+};
+
+struct regcache_ops {
+ const char *name;
+ enum regcache_type type;
+ int (*init)(struct regmap *map);
+ int (*exit)(struct regmap *map);
+#ifdef CONFIG_DEBUG_FS
+ void (*debugfs_init)(struct regmap *map);
+#endif
+ int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
+ int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
+ int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+ int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
+};
+
+bool regmap_cached(struct regmap *map, unsigned int reg);
+bool regmap_writeable(struct regmap *map, unsigned int reg);
+bool regmap_readable(struct regmap *map, unsigned int reg);
+bool regmap_volatile(struct regmap *map, unsigned int reg);
+bool regmap_precious(struct regmap *map, unsigned int reg);
+bool regmap_writeable_noinc(struct regmap *map, unsigned int reg);
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg);
+
+int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val);
+
+struct regmap_range_node {
+ struct rb_node node;
+ const char *name;
+ struct regmap *map;
+
+ unsigned int range_min;
+ unsigned int range_max;
+
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ unsigned int window_start;
+ unsigned int window_len;
+};
+
+struct regmap_field {
+ struct regmap *regmap;
+ unsigned int mask;
+ /* lsb */
+ unsigned int shift;
+ unsigned int reg;
+
+ unsigned int id_size;
+ unsigned int id_offset;
+};
+
+#ifdef CONFIG_DEBUG_FS
+extern void regmap_debugfs_initcall(void);
+extern void regmap_debugfs_init(struct regmap *map);
+extern void regmap_debugfs_exit(struct regmap *map);
+
+static inline void regmap_debugfs_disable(struct regmap *map)
+{
+ map->debugfs_disable = true;
+}
+
+#else
+static inline void regmap_debugfs_initcall(void) { }
+static inline void regmap_debugfs_init(struct regmap *map) { }
+static inline void regmap_debugfs_exit(struct regmap *map) { }
+static inline void regmap_debugfs_disable(struct regmap *map) { }
+#endif
+
+/* regcache core declarations */
+int regcache_init(struct regmap *map, const struct regmap_config *config);
+void regcache_exit(struct regmap *map);
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value);
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value);
+int regcache_sync(struct regmap *map);
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base, unsigned int start,
+ unsigned int end);
+
+static inline const void *regcache_get_val_addr(struct regmap *map,
+ const void *base,
+ unsigned int idx)
+{
+ return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val);
+int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool noinc);
+
+void regmap_async_complete_cb(struct regmap_async *async, int ret);
+
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config);
+
+extern struct regcache_ops regcache_rbtree_ops;
+extern struct regcache_ops regcache_lzo_ops;
+extern struct regcache_ops regcache_flat_ops;
+
+static inline const char *regmap_name(const struct regmap *map)
+{
+ if (map->dev)
+ return dev_name(map->dev);
+
+ return map->name;
+}
+
+static inline unsigned int regmap_get_offset(const struct regmap *map,
+ unsigned int index)
+{
+ if (map->reg_stride_order >= 0)
+ return index << map->reg_stride_order;
+ else
+ return index * map->reg_stride;
+}
+
+static inline unsigned int regcache_get_index_by_order(const struct regmap *map,
+ unsigned int reg)
+{
+ return reg >> map->reg_stride_order;
+}
+
+#endif
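
regmap_get_offset() and regcache_get_index_by_order() encode the register stride two ways: reg_stride holds the raw stride, and reg_stride_order holds log2(stride) whenever the stride is a power of two, so both conversions collapse to shifts. A small standalone model of the same arithmetic (the struct and function names are invented for illustration):

#include <assert.h>
#include <stdio.h>

/* Userspace model of the stride arithmetic used by regmap_get_offset()
 * and regcache_get_index_by_order(): with a power-of-two stride,
 * reg_stride_order is log2(stride) and both directions are shifts.
 */
struct stride_info {
	int reg_stride;       /* distance between valid registers */
	int reg_stride_order; /* log2(reg_stride), or -1 if not a power of 2 */
};

static unsigned int index_to_offset(const struct stride_info *s, unsigned int index)
{
	if (s->reg_stride_order >= 0)
		return index << s->reg_stride_order;
	return index * s->reg_stride;
}

static unsigned int reg_to_index(const struct stride_info *s, unsigned int reg)
{
	return reg >> s->reg_stride_order;
}

int main(void)
{
	struct stride_info s = { .reg_stride = 4, .reg_stride_order = 2 };

	/* Register 0x10 with a stride of 4 lands in cache slot 4. */
	assert(reg_to_index(&s, 0x10) == 4);
	assert(index_to_offset(&s, 4) == 0x10);
	printf("reg 0x10 -> index %u\n", reg_to_index(&s, 0x10));
	return 0;
}
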
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 000000000..b7e4b2464
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - flat caching support
+//
+// Copyright 2012 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static inline unsigned int regcache_flat_get_index(const struct regmap *map,
+ unsigned int reg)
+{
+ return regcache_get_index_by_order(map, reg);
+}
+
+static int regcache_flat_init(struct regmap *map)
+{
+ int i;
+ unsigned int *cache;
+
+ if (!map || map->reg_stride_order < 0 || !map->max_register)
+ return -EINVAL;
+
+ map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
+ + 1, sizeof(unsigned int), GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+
+ cache = map->cache;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ unsigned int reg = map->reg_defaults[i].reg;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = map->reg_defaults[i].def;
+ }
+
+ return 0;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ kfree(map->cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ *value = cache[index];
+
+ return 0;
+}
+
+static int regcache_flat_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ unsigned int *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ cache[index] = value;
+
+ return 0;
+}
+
+struct regcache_ops regcache_flat_ops = {
+ .type = REGCACHE_FLAT,
+ .name = "flat",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .read = regcache_flat_read,
+ .write = regcache_flat_write,
+};
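
The flat cache is a single array with one unsigned int slot per possible register index, seeded from reg_defaults at init time; it requires a power-of-two stride (init returns -EINVAL otherwise) and suits small, dense register maps. A hypothetical configuration selecting it might look like this (the bar_* names and all values are illustrative):

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical defaults for a small, dense register map; the flat cache
 * allocates one unsigned int per possible register, so it fits devices
 * whose register space is small relative to memory.
 */
static const struct reg_default bar_defaults[] = {
	{ .reg = 0x00, .def = 0x01 },
	{ .reg = 0x04, .def = 0xff },
};

static const struct regmap_config bar_regmap_config = {
	.reg_bits = 8,
	.val_bits = 32,
	.reg_stride = 4,		/* power of two, required by the flat cache */
	.max_register = 0x3c,
	.cache_type = REGCACHE_FLAT,
	.reg_defaults = bar_defaults,
	.num_reg_defaults = ARRAY_SIZE(bar_defaults),
};
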
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
new file mode 100644
index 000000000..7886303eb
--- /dev/null
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - LZO caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+
+#include <linux/device.h>
+#include <linux/lzo.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_lzo_exit(struct regmap *map);
+
+struct regcache_lzo_ctx {
+ void *wmem;
+ void *dst;
+ const void *src;
+ size_t src_len;
+ size_t dst_len;
+ size_t decompressed_size;
+ unsigned long *sync_bmp;
+ int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int regcache_lzo_block_count(struct regmap *map)
+{
+ return LZO_BLOCK_NUM;
+}
+
+static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
+{
+ lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!lzo_ctx->wmem)
+ return -ENOMEM;
+ return 0;
+}
+
+static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
+{
+ size_t compress_size;
+ int ret;
+
+ ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+ lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+ if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+ return -EINVAL;
+ lzo_ctx->dst_len = compress_size;
+ return 0;
+}
+
+static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
+{
+ size_t dst_len;
+ int ret;
+
+ dst_len = lzo_ctx->dst_len;
+ ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+ lzo_ctx->dst, &dst_len);
+ if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+ return -EINVAL;
+ return 0;
+}
+
+static int regcache_lzo_compress_cache_block(struct regmap *map,
+ struct regcache_lzo_ctx *lzo_ctx)
+{
+ int ret;
+
+ lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+ lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+ if (!lzo_ctx->dst) {
+ lzo_ctx->dst_len = 0;
+ return -ENOMEM;
+ }
+
+ ret = regcache_lzo_compress(lzo_ctx);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int regcache_lzo_decompress_cache_block(struct regmap *map,
+ struct regcache_lzo_ctx *lzo_ctx)
+{
+ int ret;
+
+ lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+ lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+ if (!lzo_ctx->dst) {
+ lzo_ctx->dst_len = 0;
+ return -ENOMEM;
+ }
+
+ ret = regcache_lzo_decompress(lzo_ctx);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int regcache_lzo_get_blkindex(struct regmap *map,
+ unsigned int reg)
+{
+ return ((reg / map->reg_stride) * map->cache_word_size) /
+ DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
+}
+
+static inline int regcache_lzo_get_blkpos(struct regmap *map,
+ unsigned int reg)
+{
+ return (reg / map->reg_stride) %
+ (DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map)) /
+ map->cache_word_size);
+}
+
+static inline int regcache_lzo_get_blksize(struct regmap *map)
+{
+ return DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
+}
+
+static int regcache_lzo_init(struct regmap *map)
+{
+ struct regcache_lzo_ctx **lzo_blocks;
+ size_t bmp_size;
+ int ret, i, blksize, blkcount;
+ const char *p, *end;
+ unsigned long *sync_bmp;
+
+ ret = 0;
+
+ blkcount = regcache_lzo_block_count(map);
+ map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
+ GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+ lzo_blocks = map->cache;
+
+ /*
+ * allocate a bitmap to be used when syncing the cache with
+ * the hardware. Each time a register is modified, the corresponding
+ * bit is set in the bitmap, so we know that we have to sync
+ * that register.
+ */
+ bmp_size = map->num_reg_defaults_raw;
+ sync_bmp = bitmap_zalloc(bmp_size, GFP_KERNEL);
+ if (!sync_bmp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* allocate the lzo blocks and initialize them */
+ for (i = 0; i < blkcount; i++) {
+ lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
+ GFP_KERNEL);
+ if (!lzo_blocks[i]) {
+ bitmap_free(sync_bmp);
+ ret = -ENOMEM;
+ goto err;
+ }
+ lzo_blocks[i]->sync_bmp = sync_bmp;
+ lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+ /* alloc the working space for the compressed block */
+ ret = regcache_lzo_prepare(lzo_blocks[i]);
+ if (ret < 0)
+ goto err;
+ }
+
+ blksize = regcache_lzo_get_blksize(map);
+ p = map->reg_defaults_raw;
+ end = map->reg_defaults_raw + map->cache_size_raw;
+ /* compress the register map and fill the lzo blocks */
+ for (i = 0; i < blkcount; i++, p += blksize) {
+ lzo_blocks[i]->src = p;
+ if (p + blksize > end)
+ lzo_blocks[i]->src_len = end - p;
+ else
+ lzo_blocks[i]->src_len = blksize;
+ ret = regcache_lzo_compress_cache_block(map,
+ lzo_blocks[i]);
+ if (ret < 0)
+ goto err;
+ lzo_blocks[i]->decompressed_size =
+ lzo_blocks[i]->src_len;
+ }
+
+ return 0;
+err:
+ regcache_lzo_exit(map);
+ return ret;
+}
+
+static int regcache_lzo_exit(struct regmap *map)
+{
+ struct regcache_lzo_ctx **lzo_blocks;
+ int i, blkcount;
+
+ lzo_blocks = map->cache;
+ if (!lzo_blocks)
+ return 0;
+
+ blkcount = regcache_lzo_block_count(map);
+ /*
+ * the pointer to the bitmap used for syncing the cache
+ * is shared amongst all lzo_blocks. Ensure it is freed
+ * only once.
+ */
+ if (lzo_blocks[0])
+ bitmap_free(lzo_blocks[0]->sync_bmp);
+ for (i = 0; i < blkcount; i++) {
+ if (lzo_blocks[i]) {
+ kfree(lzo_blocks[i]->wmem);
+ kfree(lzo_blocks[i]->dst);
+ }
+ /* each lzo_block is a pointer returned by kmalloc or NULL */
+ kfree(lzo_blocks[i]);
+ }
+ kfree(lzo_blocks);
+ map->cache = NULL;
+ return 0;
+}
+
+static int regcache_lzo_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+ int ret, blkindex, blkpos;
+ size_t tmp_dst_len;
+ void *tmp_dst;
+
+ /* index of the compressed lzo block */
+ blkindex = regcache_lzo_get_blkindex(map, reg);
+ /* register index within the decompressed block */
+ blkpos = regcache_lzo_get_blkpos(map, reg);
+ lzo_blocks = map->cache;
+ lzo_block = lzo_blocks[blkindex];
+
+ /* save the pointer and length of the compressed block */
+ tmp_dst = lzo_block->dst;
+ tmp_dst_len = lzo_block->dst_len;
+
+ /* prepare the source to be the compressed block */
+ lzo_block->src = lzo_block->dst;
+ lzo_block->src_len = lzo_block->dst_len;
+
+ /* decompress the block */
+ ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+ if (ret >= 0)
+ /* fetch the value from the cache */
+ *value = regcache_get_val(map, lzo_block->dst, blkpos);
+
+ kfree(lzo_block->dst);
+ /* restore the pointer and length of the compressed block */
+ lzo_block->dst = tmp_dst;
+ lzo_block->dst_len = tmp_dst_len;
+
+ return ret;
+}
+
+static int regcache_lzo_write(struct regmap *map,
+ unsigned int reg, unsigned int value)
+{
+ struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
+ int ret, blkindex, blkpos;
+ size_t tmp_dst_len;
+ void *tmp_dst;
+
+ /* index of the compressed lzo block */
+ blkindex = regcache_lzo_get_blkindex(map, reg);
+ /* register index within the decompressed block */
+ blkpos = regcache_lzo_get_blkpos(map, reg);
+ lzo_blocks = map->cache;
+ lzo_block = lzo_blocks[blkindex];
+
+ /* save the pointer and length of the compressed block */
+ tmp_dst = lzo_block->dst;
+ tmp_dst_len = lzo_block->dst_len;
+
+ /* prepare the source to be the compressed block */
+ lzo_block->src = lzo_block->dst;
+ lzo_block->src_len = lzo_block->dst_len;
+
+ /* decompress the block */
+ ret = regcache_lzo_decompress_cache_block(map, lzo_block);
+ if (ret < 0) {
+ kfree(lzo_block->dst);
+ goto out;
+ }
+
+ /* write the new value to the cache */
+ if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
+ kfree(lzo_block->dst);
+ goto out;
+ }
+
+ /* prepare the source to be the decompressed block */
+ lzo_block->src = lzo_block->dst;
+ lzo_block->src_len = lzo_block->dst_len;
+
+ /* compress the block */
+ ret = regcache_lzo_compress_cache_block(map, lzo_block);
+ if (ret < 0) {
+ kfree(lzo_block->dst);
+ kfree(lzo_block->src);
+ goto out;
+ }
+
+ /* set the bit so we know we have to sync this register */
+ set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
+ kfree(tmp_dst);
+ kfree(lzo_block->src);
+ return 0;
+out:
+ lzo_block->dst = tmp_dst;
+ lzo_block->dst_len = tmp_dst_len;
+ return ret;
+}
+
+static int regcache_lzo_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_lzo_ctx **lzo_blocks;
+ unsigned int val;
+ int i;
+ int ret;
+
+ lzo_blocks = map->cache;
+ i = min;
+ for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
+ lzo_blocks[0]->sync_bmp_nbits) {
+ if (i > max)
+ continue;
+
+ ret = regcache_read(map, i, &val);
+ if (ret)
+ return ret;
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, i);
+ if (ret > 0 && val == map->reg_defaults[ret].def)
+ continue;
+
+ map->cache_bypass = true;
+ ret = _regmap_write(map, i, val);
+ map->cache_bypass = false;
+ if (ret)
+ return ret;
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ i, val);
+ }
+
+ return 0;
+}
+
+struct regcache_ops regcache_lzo_ops = {
+ .type = REGCACHE_COMPRESSED,
+ .name = "lzo",
+ .init = regcache_lzo_init,
+ .exit = regcache_lzo_exit,
+ .read = regcache_lzo_read,
+ .write = regcache_lzo_write,
+ .sync = regcache_lzo_sync
+};
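
The LZO cache splits the raw default image into LZO_BLOCK_NUM equally sized blocks and keeps each block compressed, decompressing on every access. The block index and in-block position computed by regcache_lzo_get_blkindex()/regcache_lzo_get_blkpos() reduce to the arithmetic modelled below in standalone C (the struct and the example sizes are invented for illustration):

#include <stdio.h>

/* Userspace model of regcache_lzo_get_blkindex()/_get_blkpos(): the raw
 * default image (cache_size_raw bytes) is carved into LZO_BLOCK_NUM
 * equally sized compressed blocks, and a register maps to a block plus
 * an index within that block's decompressed data.
 */
#define LZO_BLOCK_NUM 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct lzo_layout {
	unsigned int cache_size_raw;  /* bytes in the raw default image */
	unsigned int cache_word_size; /* bytes per cached value */
	unsigned int reg_stride;
};

static unsigned int blk_index(const struct lzo_layout *l, unsigned int reg)
{
	unsigned int blksize = DIV_ROUND_UP(l->cache_size_raw, LZO_BLOCK_NUM);

	return ((reg / l->reg_stride) * l->cache_word_size) / blksize;
}

static unsigned int blk_pos(const struct lzo_layout *l, unsigned int reg)
{
	unsigned int blksize = DIV_ROUND_UP(l->cache_size_raw, LZO_BLOCK_NUM);

	return (reg / l->reg_stride) % (blksize / l->cache_word_size);
}

int main(void)
{
	/* 256 registers of 2 bytes each, stride 1: a 512-byte raw image. */
	struct lzo_layout l = {
		.cache_size_raw = 512,
		.cache_word_size = 2,
		.reg_stride = 1,
	};
	unsigned int reg = 100;

	/* Block size is 64 bytes, so register 100 sits in block 3, slot 4. */
	printf("reg %u -> block %u, pos %u\n",
	       reg, blk_index(&l, reg), blk_pos(&l, reg));
	return 0;
}
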
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
new file mode 100644
index 000000000..d65715b9e
--- /dev/null
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API - rbtree caching support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+ unsigned int value);
+static int regcache_rbtree_exit(struct regmap *map);
+
+struct regcache_rbtree_node {
+ /* block of adjacent registers */
+ void *block;
+ /* Which registers are present */
+ long *cache_present;
+ /* base register handled by this block */
+ unsigned int base_reg;
+ /* number of registers available in the block */
+ unsigned int blklen;
+ /* the actual rbtree node holding this block */
+ struct rb_node node;
+};
+
+struct regcache_rbtree_ctx {
+ struct rb_root root;
+ struct regcache_rbtree_node *cached_rbnode;
+};
+
+static inline void regcache_rbtree_get_base_top_reg(
+ struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
+ unsigned int *base, unsigned int *top)
+{
+ *base = rbnode->base_reg;
+ *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
+}
+
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+ struct regcache_rbtree_node *rbnode, unsigned int idx)
+{
+ return regcache_get_val(map, rbnode->block, idx);
+}
+
+static void regcache_rbtree_set_register(struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
+ unsigned int idx, unsigned int val)
+{
+ set_bit(idx, rbnode->cache_present);
+ regcache_set_val(map, rbnode->block, idx, val);
+}
+
+static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
+ unsigned int reg)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+ struct rb_node *node;
+ struct regcache_rbtree_node *rbnode;
+ unsigned int base_reg, top_reg;
+
+ rbnode = rbtree_ctx->cached_rbnode;
+ if (rbnode) {
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (reg >= base_reg && reg <= top_reg)
+ return rbnode;
+ }
+
+ node = rbtree_ctx->root.rb_node;
+ while (node) {
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (reg >= base_reg && reg <= top_reg) {
+ rbtree_ctx->cached_rbnode = rbnode;
+ return rbnode;
+ } else if (reg > top_reg) {
+ node = node->rb_right;
+ } else if (reg < base_reg) {
+ node = node->rb_left;
+ }
+ }
+
+ return NULL;
+}
+
+static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
+ struct regcache_rbtree_node *rbnode)
+{
+ struct rb_node **new, *parent;
+ struct regcache_rbtree_node *rbnode_tmp;
+ unsigned int base_reg_tmp, top_reg_tmp;
+ unsigned int base_reg;
+
+ parent = NULL;
+ new = &root->rb_node;
+ while (*new) {
+ rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node);
+ /* base and top registers of the current rbnode */
+ regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
+ &top_reg_tmp);
+ /* base register of the rbnode to be added */
+ base_reg = rbnode->base_reg;
+ parent = *new;
+ /* if this register has already been inserted, just return */
+ if (base_reg >= base_reg_tmp &&
+ base_reg <= top_reg_tmp)
+ return 0;
+ else if (base_reg > top_reg_tmp)
+ new = &((*new)->rb_right);
+ else if (base_reg < base_reg_tmp)
+ new = &((*new)->rb_left);
+ }
+
+ /* insert the node into the rbtree */
+ rb_link_node(&rbnode->node, parent, new);
+ rb_insert_color(&rbnode->node, root);
+
+ return 1;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int rbtree_show(struct seq_file *s, void *ignored)
+{
+ struct regmap *map = s->private;
+ struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+ struct regcache_rbtree_node *n;
+ struct rb_node *node;
+ unsigned int base, top;
+ size_t mem_size;
+ int nodes = 0;
+ int registers = 0;
+ int this_registers, average;
+
+ map->lock(map->lock_arg);
+
+ mem_size = sizeof(*rbtree_ctx);
+
+ for (node = rb_first(&rbtree_ctx->root); node != NULL;
+ node = rb_next(node)) {
+ n = rb_entry(node, struct regcache_rbtree_node, node);
+ mem_size += sizeof(*n);
+ mem_size += (n->blklen * map->cache_word_size);
+ mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
+
+ regcache_rbtree_get_base_top_reg(map, n, &base, &top);
+ this_registers = ((top - base) / map->reg_stride) + 1;
+ seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);
+
+ nodes++;
+ registers += this_registers;
+ }
+
+ if (nodes)
+ average = registers / nodes;
+ else
+ average = 0;
+
+ seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+ nodes, registers, average, mem_size);
+
+ map->unlock(map->lock_arg);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(rbtree);
+
+static void rbtree_debugfs_init(struct regmap *map)
+{
+ debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
+}
+#endif
+
+static int regcache_rbtree_init(struct regmap *map)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ int i;
+ int ret;
+
+ map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+ if (!map->cache)
+ return -ENOMEM;
+
+ rbtree_ctx = map->cache;
+ rbtree_ctx->root = RB_ROOT;
+ rbtree_ctx->cached_rbnode = NULL;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_rbtree_write(map,
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ regcache_rbtree_exit(map);
+ return ret;
+}
+
+static int regcache_rbtree_exit(struct regmap *map)
+{
+ struct rb_node *next;
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbtree_node;
+
+ /* if we've already been called then just return */
+ rbtree_ctx = map->cache;
+ if (!rbtree_ctx)
+ return 0;
+
+ /* free up the rbtree */
+ next = rb_first(&rbtree_ctx->root);
+ while (next) {
+ rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
+ next = rb_next(&rbtree_node->node);
+ rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+ kfree(rbtree_node->cache_present);
+ kfree(rbtree_node->block);
+ kfree(rbtree_node);
+ }
+
+ /* release the resources */
+ kfree(map->cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_rbtree_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ struct regcache_rbtree_node *rbnode;
+ unsigned int reg_tmp;
+
+ rbnode = regcache_rbtree_lookup(map, reg);
+ if (rbnode) {
+ reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
+ if (!test_bit(reg_tmp, rbnode->cache_present))
+ return -ENOENT;
+ *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
+ } else {
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+ struct regcache_rbtree_node *rbnode,
+ unsigned int base_reg,
+ unsigned int top_reg,
+ unsigned int reg,
+ unsigned int value)
+{
+ unsigned int blklen;
+ unsigned int pos, offset;
+ unsigned long *present;
+ u8 *blk;
+
+ blklen = (top_reg - base_reg) / map->reg_stride + 1;
+ pos = (reg - base_reg) / map->reg_stride;
+ offset = (rbnode->base_reg - base_reg) / map->reg_stride;
+
+ blk = krealloc(rbnode->block,
+ blklen * map->cache_word_size,
+ map->alloc_flags);
+ if (!blk)
+ return -ENOMEM;
+
+ rbnode->block = blk;
+
+ if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present),
+ map->alloc_flags);
+ if (!present)
+ return -ENOMEM;
+
+ memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+ (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+ * sizeof(*present));
+ } else {
+ present = rbnode->cache_present;
+ }
+
+ /* insert the register value in the correct place in the rbnode block */
+ if (pos == 0) {
+ memmove(blk + offset * map->cache_word_size,
+ blk, rbnode->blklen * map->cache_word_size);
+ bitmap_shift_left(present, present, offset, blklen);
+ }
+
+ /* update the rbnode block, its size and the base register */
+ rbnode->blklen = blklen;
+ rbnode->base_reg = base_reg;
+ rbnode->cache_present = present;
+
+ regcache_rbtree_set_register(map, rbnode, pos, value);
+ return 0;
+}
+
+static struct regcache_rbtree_node *
+regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+{
+ struct regcache_rbtree_node *rbnode;
+ const struct regmap_range *range;
+ int i;
+
+ rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);
+ if (!rbnode)
+ return NULL;
+
+ /* If there is a read table then use it to guess at an allocation */
+ if (map->rd_table) {
+ for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
+ if (regmap_reg_in_range(reg,
+ &map->rd_table->yes_ranges[i]))
+ break;
+ }
+
+ if (i != map->rd_table->n_yes_ranges) {
+ range = &map->rd_table->yes_ranges[i];
+ rbnode->blklen = (range->range_max - range->range_min) /
+ map->reg_stride + 1;
+ rbnode->base_reg = range->range_min;
+ }
+ }
+
+ if (!rbnode->blklen) {
+ rbnode->blklen = 1;
+ rbnode->base_reg = reg;
+ }
+
+ rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
+ map->alloc_flags);
+ if (!rbnode->block)
+ goto err_free;
+
+ rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
+ sizeof(*rbnode->cache_present),
+ map->alloc_flags);
+ if (!rbnode->cache_present)
+ goto err_free_block;
+
+ return rbnode;
+
+err_free_block:
+ kfree(rbnode->block);
+err_free:
+ kfree(rbnode);
+ return NULL;
+}
+
+static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
+ unsigned int value)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbnode, *rbnode_tmp;
+ struct rb_node *node;
+ unsigned int reg_tmp;
+ int ret;
+
+ rbtree_ctx = map->cache;
+
+ /* if we can't locate it in the cached rbnode we'll have
+ * to traverse the rbtree looking for it.
+ */
+ rbnode = regcache_rbtree_lookup(map, reg);
+ if (rbnode) {
+ reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
+ regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
+ } else {
+ unsigned int base_reg, top_reg;
+ unsigned int new_base_reg, new_top_reg;
+ unsigned int min, max;
+ unsigned int max_dist;
+ unsigned int dist, best_dist = UINT_MAX;
+
+ max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
+ map->cache_word_size;
+ if (reg < max_dist)
+ min = 0;
+ else
+ min = reg - max_dist;
+ max = reg + max_dist;
+
+ /* look for an adjacent register to the one we are about to add */
+ node = rbtree_ctx->root.rb_node;
+ while (node) {
+ rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
+ node);
+
+ regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
+ &base_reg, &top_reg);
+
+ if (base_reg <= max && top_reg >= min) {
+ if (reg < base_reg)
+ dist = base_reg - reg;
+ else if (reg > top_reg)
+ dist = reg - top_reg;
+ else
+ dist = 0;
+ if (dist < best_dist) {
+ rbnode = rbnode_tmp;
+ best_dist = dist;
+ new_base_reg = min(reg, base_reg);
+ new_top_reg = max(reg, top_reg);
+ }
+ }
+
+ /*
+ * Keep looking, we want to choose the closest block,
+ * otherwise we might end up creating overlapping
+ * blocks, which breaks the rbtree.
+ */
+ if (reg < base_reg)
+ node = node->rb_left;
+ else if (reg > top_reg)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (rbnode) {
+ ret = regcache_rbtree_insert_to_block(map, rbnode,
+ new_base_reg,
+ new_top_reg, reg,
+ value);
+ if (ret)
+ return ret;
+ rbtree_ctx->cached_rbnode = rbnode;
+ return 0;
+ }
+
+ /* We did not manage to find a place to insert it in
+ * an existing block so create a new rbnode.
+ */
+ rbnode = regcache_rbtree_node_alloc(map, reg);
+ if (!rbnode)
+ return -ENOMEM;
+ regcache_rbtree_set_register(map, rbnode,
+ (reg - rbnode->base_reg) / map->reg_stride,
+ value);
+ regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
+ rbtree_ctx->cached_rbnode = rbnode;
+ }
+
+ return 0;
+}
+
+static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct rb_node *node;
+ struct regcache_rbtree_node *rbnode;
+ unsigned int base_reg, top_reg;
+ unsigned int start, end;
+ int ret;
+
+ rbtree_ctx = map->cache;
+ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (base_reg > max)
+ break;
+ if (top_reg < min)
+ continue;
+
+ if (min > base_reg)
+ start = (min - base_reg) / map->reg_stride;
+ else
+ start = 0;
+
+ if (max < top_reg)
+ end = (max - base_reg) / map->reg_stride + 1;
+ else
+ end = rbnode->blklen;
+
+ ret = regcache_sync_block(map, rbnode->block,
+ rbnode->cache_present,
+ rbnode->base_reg, start, end);
+ if (ret != 0)
+ return ret;
+ }
+
+ return regmap_async_complete(map);
+}
+
+static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbnode;
+ struct rb_node *node;
+ unsigned int base_reg, top_reg;
+ unsigned int start, end;
+
+ rbtree_ctx = map->cache;
+ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (base_reg > max)
+ break;
+ if (top_reg < min)
+ continue;
+
+ if (min > base_reg)
+ start = (min - base_reg) / map->reg_stride;
+ else
+ start = 0;
+
+ if (max < top_reg)
+ end = (max - base_reg) / map->reg_stride + 1;
+ else
+ end = rbnode->blklen;
+
+ bitmap_clear(rbnode->cache_present, start, end - start);
+ }
+
+ return 0;
+}
+
+struct regcache_ops regcache_rbtree_ops = {
+ .type = REGCACHE_RBTREE,
+ .name = "rbtree",
+ .init = regcache_rbtree_init,
+ .exit = regcache_rbtree_exit,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = rbtree_debugfs_init,
+#endif
+ .read = regcache_rbtree_read,
+ .write = regcache_rbtree_write,
+ .sync = regcache_rbtree_sync,
+ .drop = regcache_rbtree_drop,
+};
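
When regcache_rbtree_write() misses every existing block, it first tries to grow the nearest block within max_dist registers (sized so the extension costs no more memory than a fresh rbnode would) before allocating a new node. The standalone sketch below models only the nearest-block selection, leaving out the max_dist cutoff and the real rbtree walk; all names and register numbers are illustrative.

#include <stdio.h>

/* Userspace model of the "extend the nearest block" heuristic in
 * regcache_rbtree_write(): a cache miss prefers to join whichever
 * existing block is closest to the written register.
 */
struct block {
	unsigned int base;
	unsigned int top;
};

static unsigned int block_distance(const struct block *b, unsigned int reg)
{
	if (reg < b->base)
		return b->base - reg;
	if (reg > b->top)
		return reg - b->top;
	return 0;	/* the register already falls inside the block */
}

int main(void)
{
	/* Two existing blocks and a write to register 0x2c. */
	const struct block blocks[] = {
		{ .base = 0x00, .top = 0x10 },
		{ .base = 0x30, .top = 0x40 },
	};
	unsigned int reg = 0x2c, best = ~0u, best_dist = ~0u;
	unsigned int i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
		unsigned int dist = block_distance(&blocks[i], reg);

		if (dist < best_dist) {
			best_dist = dist;
			best = i;
		}
	}

	/* Block 1 wins: it is only 4 registers away, versus 28 for block 0. */
	printf("reg %#x joins block %u (distance %u)\n", reg, best, best_dist);
	return 0;
}
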
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
new file mode 100644
index 000000000..4f3dd9316
--- /dev/null
+++ b/drivers/base/regmap/regcache.c
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register cache access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+
+#include <linux/bsearch.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#include "trace.h"
+#include "internal.h"
+
+static const struct regcache_ops *cache_types[] = {
+ &regcache_rbtree_ops,
+#if IS_ENABLED(CONFIG_REGCACHE_COMPRESSED)
+ &regcache_lzo_ops,
+#endif
+ &regcache_flat_ops,
+};
+
+static int regcache_hw_init(struct regmap *map)
+{
+ int i, j;
+ int ret;
+ int count;
+ unsigned int reg, val;
+ void *tmp_buf;
+
+ if (!map->num_reg_defaults_raw)
+ return -EINVAL;
+
+ /* calculate the size of reg_defaults */
+ for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
+ if (regmap_readable(map, i * map->reg_stride) &&
+ !regmap_volatile(map, i * map->reg_stride))
+ count++;
+
+ /* all registers are unreadable or volatile, so just bypass */
+ if (!count) {
+ map->cache_bypass = true;
+ return 0;
+ }
+
+ map->num_reg_defaults = count;
+ map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
+ GFP_KERNEL);
+ if (!map->reg_defaults)
+ return -ENOMEM;
+
+ if (!map->reg_defaults_raw) {
+ bool cache_bypass = map->cache_bypass;
+ dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+
+ /* Bypass the cache access till data read from HW */
+ map->cache_bypass = true;
+ tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ ret = regmap_raw_read(map, 0, tmp_buf,
+ map->cache_size_raw);
+ map->cache_bypass = cache_bypass;
+ if (ret == 0) {
+ map->reg_defaults_raw = tmp_buf;
+ map->cache_free = true;
+ } else {
+ kfree(tmp_buf);
+ }
+ }
+
+ /* fill the reg_defaults */
+ for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
+ reg = i * map->reg_stride;
+
+ if (!regmap_readable(map, reg))
+ continue;
+
+ if (regmap_volatile(map, reg))
+ continue;
+
+ if (map->reg_defaults_raw) {
+ val = regcache_get_val(map, map->reg_defaults_raw, i);
+ } else {
+ bool cache_bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ ret = regmap_read(map, reg, &val);
+ map->cache_bypass = cache_bypass;
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read %d: %d\n",
+ reg, ret);
+ goto err_free;
+ }
+ }
+
+ map->reg_defaults[j].reg = reg;
+ map->reg_defaults[j].def = val;
+ j++;
+ }
+
+ return 0;
+
+err_free:
+ kfree(map->reg_defaults);
+
+ return ret;
+}
+
+int regcache_init(struct regmap *map, const struct regmap_config *config)
+{
+ int ret;
+ int i;
+ void *tmp_buf;
+
+ if (map->cache_type == REGCACHE_NONE) {
+ if (config->reg_defaults || config->num_reg_defaults_raw)
+ dev_warn(map->dev,
+ "No cache used with register defaults set!\n");
+
+ map->cache_bypass = true;
+ return 0;
+ }
+
+ if (config->reg_defaults && !config->num_reg_defaults) {
+ dev_err(map->dev,
+ "Register defaults are set without the number!\n");
+ return -EINVAL;
+ }
+
+ if (config->num_reg_defaults && !config->reg_defaults) {
+ dev_err(map->dev,
+ "Register defaults number are set without the reg!\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < config->num_reg_defaults; i++)
+ if (config->reg_defaults[i].reg % map->reg_stride)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cache_types); i++)
+ if (cache_types[i]->type == map->cache_type)
+ break;
+
+ if (i == ARRAY_SIZE(cache_types)) {
+ dev_err(map->dev, "Could not match compress type: %d\n",
+ map->cache_type);
+ return -EINVAL;
+ }
+
+ map->num_reg_defaults = config->num_reg_defaults;
+ map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+ map->reg_defaults_raw = config->reg_defaults_raw;
+ map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+
+ map->cache = NULL;
+ map->cache_ops = cache_types[i];
+
+ if (!map->cache_ops->read ||
+ !map->cache_ops->write ||
+ !map->cache_ops->name)
+ return -EINVAL;
+
+ /* We still need to ensure that the reg_defaults
+ * won't vanish from under us. We'll need to make
+ * a copy of it.
+ */
+ if (config->reg_defaults) {
+ tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
+ sizeof(struct reg_default), GFP_KERNEL);
+ if (!tmp_buf)
+ return -ENOMEM;
+ map->reg_defaults = tmp_buf;
+ } else if (map->num_reg_defaults_raw) {
+ /* Some devices such as PMICs don't have cache defaults;
+ * we cope with this by reading back the HW registers and
+ * crafting the cache defaults by hand.
+ */
+ ret = regcache_hw_init(map);
+ if (ret < 0)
+ return ret;
+ if (map->cache_bypass)
+ return 0;
+ }
+
+ if (!map->max_register && map->num_reg_defaults_raw)
+ map->max_register = (map->num_reg_defaults_raw - 1) * map->reg_stride;
+
+ if (map->cache_ops->init) {
+ dev_dbg(map->dev, "Initializing %s cache\n",
+ map->cache_ops->name);
+ ret = map->cache_ops->init(map);
+ if (ret)
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
+}
+
+void regcache_exit(struct regmap *map)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return;
+
+ BUG_ON(!map->cache_ops);
+
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n",
+ map->cache_ops->name);
+ map->cache_ops->exit(map);
+ }
+}
+
+/**
+ * regcache_read - Fetch the value of a given register from the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
+{
+ int ret;
+
+ if (map->cache_type == REGCACHE_NONE)
+ return -ENOSYS;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_volatile(map, reg)) {
+ ret = map->cache_ops->read(map, reg, value);
+
+ if (ret == 0)
+ trace_regmap_reg_read_cache(map, reg, *value);
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * regcache_write - Set the value of a given register in the cache.
+ *
+ * @map: map to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_write(struct regmap *map,
+ unsigned int reg, unsigned int value)
+{
+ if (map->cache_type == REGCACHE_NONE)
+ return 0;
+
+ BUG_ON(!map->cache_ops);
+
+ if (!regmap_volatile(map, reg))
+ return map->cache_ops->write(map, reg, value);
+
+ return 0;
+}
+
+static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ /* If we don't know the chip just got reset, then sync everything. */
+ if (!map->no_sync_defaults)
+ return true;
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, reg);
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
+ return false;
+ return true;
+}
+
+static int regcache_default_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ unsigned int reg;
+
+ for (reg = min; reg <= max; reg += map->reg_stride) {
+ unsigned int val;
+ int ret;
+
+ if (regmap_volatile(map, reg) ||
+ !regmap_writeable(map, reg))
+ continue;
+
+ ret = regcache_read(map, reg, &val);
+ if (ret)
+ return ret;
+
+ if (!regcache_reg_needs_sync(map, reg, val))
+ continue;
+
+ map->cache_bypass = true;
+ ret = _regmap_write(map, reg, val);
+ map->cache_bypass = false;
+ if (ret) {
+ dev_err(map->dev, "Unable to sync register %#x. %d\n",
+ reg, ret);
+ return ret;
+ }
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
+ }
+
+ return 0;
+}
+
+static int rbtree_all(const void *key, const struct rb_node *node)
+{
+ return 0;
+}
+
+/**
+ * regcache_sync - Sync the register cache with the hardware.
+ *
+ * @map: map to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile. In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync(struct regmap *map)
+{
+ int ret = 0;
+ unsigned int i;
+ const char *name;
+ bool bypass;
+ struct rb_node *node;
+
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+
+ BUG_ON(!map->cache_ops);
+
+ map->lock(map->lock_arg);
+ /* Remember the initial bypass state */
+ bypass = map->cache_bypass;
+ dev_dbg(map->dev, "Syncing %s cache\n",
+ map->cache_ops->name);
+ name = map->cache_ops->name;
+ trace_regcache_sync(map, name, "start");
+
+ if (!map->cache_dirty)
+ goto out;
+
+ map->async = true;
+
+ /* Apply any patch first */
+ map->cache_bypass = true;
+ for (i = 0; i < map->patch_regs; i++) {
+ ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to write %x = %x: %d\n",
+ map->patch[i].reg, map->patch[i].def, ret);
+ goto out;
+ }
+ }
+ map->cache_bypass = false;
+
+ if (map->cache_ops->sync)
+ ret = map->cache_ops->sync(map, 0, map->max_register);
+ else
+ ret = regcache_default_sync(map, 0, map->max_register);
+
+ if (ret == 0)
+ map->cache_dirty = false;
+
+out:
+ /* Restore the bypass state */
+ map->async = false;
+ map->cache_bypass = bypass;
+ map->no_sync_defaults = false;
+
+ /*
+ * If we did any paging with cache bypassed and a cached
+ * paging register then the register and cache state might
+ * have gone out of sync, force writes of all the paging
+ * registers.
+ */
+ rb_for_each(node, 0, &map->range_tree, rbtree_all) {
+ struct regmap_range_node *this =
+ rb_entry(node, struct regmap_range_node, node);
+
+ /* If there's nothing in the cache there's nothing to sync */
+ if (regcache_read(map, this->selector_reg, &i) != 0)
+ continue;
+
+ ret = _regmap_write(map, this->selector_reg, i);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to write %x = %x: %d\n",
+ this->selector_reg, i, ret);
+ break;
+ }
+ }
+
+ map->unlock(map->lock_arg);
+
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map, name, "stop");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync);
+
+/**
+ * regcache_sync_region - Sync part of the register cache with the hardware.
+ *
+ * @map: map to sync.
+ * @min: first register to sync
+ * @max: last register to sync
+ *
+ * Write all non-default register values in the specified region to
+ * the hardware.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ int ret = 0;
+ const char *name;
+ bool bypass;
+
+ if (WARN_ON(map->cache_type == REGCACHE_NONE))
+ return -EINVAL;
+
+ BUG_ON(!map->cache_ops);
+
+ map->lock(map->lock_arg);
+
+ /* Remember the initial bypass state */
+ bypass = map->cache_bypass;
+
+ name = map->cache_ops->name;
+ dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
+
+ trace_regcache_sync(map, name, "start region");
+
+ if (!map->cache_dirty)
+ goto out;
+
+ map->async = true;
+
+ if (map->cache_ops->sync)
+ ret = map->cache_ops->sync(map, min, max);
+ else
+ ret = regcache_default_sync(map, min, max);
+
+out:
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ map->async = false;
+ map->no_sync_defaults = false;
+ map->unlock(map->lock_arg);
+
+ regmap_async_complete(map);
+
+ trace_regcache_sync(map, name, "stop region");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync_region);
+
+/**
+ * regcache_drop_region - Discard part of the register cache
+ *
+ * @map: map to operate on
+ * @min: first register to discard
+ * @max: last register to discard
+ *
+ * Discard part of the register cache.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ int ret = 0;
+
+ if (!map->cache_ops || !map->cache_ops->drop)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ trace_regcache_drop_region(map, min, max);
+
+ ret = map->cache_ops->drop(map, min, max);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_drop_region);
+
+/**
+ * regcache_cache_only - Put a register map into cache only mode
+ *
+ * @map: map to configure
+ * @enable: flag if changes should be written to the hardware
+ *
+ * When a register map is marked as cache only, writes to the register
+ * map API will only update the register cache; they will not cause
+ * any hardware changes. This is useful for allowing portions of
+ * drivers to act as though the device were functioning as normal when
+ * it is disabled for power saving reasons.
+ */
+void regcache_cache_only(struct regmap *map, bool enable)
+{
+ map->lock(map->lock_arg);
+ WARN_ON(map->cache_type != REGCACHE_NONE &&
+ map->cache_bypass && enable);
+ map->cache_only = enable;
+ trace_regmap_cache_only(map, enable);
+ map->unlock(map->lock_arg);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_only);
+
+/**
+ * regcache_mark_dirty - Indicate that HW registers were reset to default values
+ *
+ * @map: map to mark
+ *
+ * Inform regcache that the device has been powered down or reset, so that
+ * on resume, regcache_sync() knows to write out all non-default values
+ * stored in the cache.
+ *
+ * If this function is not called, regcache_sync() will assume that
+ * the hardware state still matches the cache state, modulo any writes that
+ * happened when cache_only was true.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+ map->lock(map->lock_arg);
+ map->cache_dirty = true;
+ map->no_sync_defaults = true;
+ map->unlock(map->lock_arg);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
+
+/**
+ * regcache_cache_bypass - Put a register map into cache bypass mode
+ *
+ * @map: map to configure
+ * @enable: flag if changes should not be written to the cache
+ *
+ * When a register map is marked with the cache bypass option, writes
+ * to the register map API will only update the hardware and not
+ * the cache directly. This is useful when syncing the cache back to
+ * the hardware.
+ */
+void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ map->lock(map->lock_arg);
+ WARN_ON(map->cache_only && enable);
+ map->cache_bypass = enable;
+ trace_regmap_cache_bypass(map, enable);
+ map->unlock(map->lock_arg);
+}
+EXPORT_SYMBOL_GPL(regcache_cache_bypass);
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+ unsigned int val)
+{
+ if (regcache_get_val(map, base, idx) == val)
+ return true;
+
+ /* Use device native format if possible */
+ if (map->format.format_val) {
+ map->format.format_val(base + (map->cache_word_size * idx),
+ val, 0);
+ return false;
+ }
+
+ switch (map->cache_word_size) {
+ case 1: {
+ u8 *cache = base;
+
+ cache[idx] = val;
+ break;
+ }
+ case 2: {
+ u16 *cache = base;
+
+ cache[idx] = val;
+ break;
+ }
+ case 4: {
+ u32 *cache = base;
+
+ cache[idx] = val;
+ break;
+ }
+#ifdef CONFIG_64BIT
+ case 8: {
+ u64 *cache = base;
+
+ cache[idx] = val;
+ break;
+ }
+#endif
+ default:
+ BUG();
+ }
+ return false;
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+ unsigned int idx)
+{
+ if (!base)
+ return -EINVAL;
+
+ /* Use device native format if possible */
+ if (map->format.parse_val)
+ return map->format.parse_val(regcache_get_val_addr(map, base,
+ idx));
+
+ switch (map->cache_word_size) {
+ case 1: {
+ const u8 *cache = base;
+
+ return cache[idx];
+ }
+ case 2: {
+ const u16 *cache = base;
+
+ return cache[idx];
+ }
+ case 4: {
+ const u32 *cache = base;
+
+ return cache[idx];
+ }
+#ifdef CONFIG_64BIT
+ case 8: {
+ const u64 *cache = base;
+
+ return cache[idx];
+ }
+#endif
+ default:
+ BUG();
+ }
+ /* unreachable */
+ return -1;
+}
+
+static int regcache_default_cmp(const void *a, const void *b)
+{
+ const struct reg_default *_a = a;
+ const struct reg_default *_b = b;
+
+ return _a->reg - _b->reg;
+}
+
+int regcache_lookup_reg(struct regmap *map, unsigned int reg)
+{
+ struct reg_default key;
+ struct reg_default *r;
+
+ key.reg = reg;
+ key.def = 0;
+
+ r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
+ sizeof(struct reg_default), regcache_default_cmp);
+
+ if (r)
+ return r - map->reg_defaults;
+ else
+ return -ENOENT;
+}
+
+static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
+{
+ if (!cache_present)
+ return true;
+
+ return test_bit(idx, cache_present);
+}
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i, regtmp, val;
+ int ret;
+
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ if (!regcache_reg_present(cache_present, i) ||
+ !regmap_writeable(map, regtmp))
+ continue;
+
+ val = regcache_get_val(map, block, i);
+ if (!regcache_reg_needs_sync(map, regtmp, val))
+ continue;
+
+ map->cache_bypass = true;
+
+ ret = _regmap_write(map, regtmp, val);
+
+ map->cache_bypass = false;
+ if (ret != 0) {
+ dev_err(map->dev, "Unable to sync register %#x. %d\n",
+ regtmp, ret);
+ return ret;
+ }
+ dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+ regtmp, val);
+ }
+
+ return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+ unsigned int base, unsigned int cur)
+{
+ size_t val_bytes = map->format.val_bytes;
+ int ret, count;
+
+ if (*data == NULL)
+ return 0;
+
+ count = (cur - base) / map->reg_stride;
+
+ dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+ count * val_bytes, count, base, cur - map->reg_stride);
+
+ map->cache_bypass = true;
+
+ ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
+ if (ret)
+ dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
+ base, cur - map->reg_stride, ret);
+
+ map->cache_bypass = false;
+
+ *data = NULL;
+
+ return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base, unsigned int start,
+ unsigned int end)
+{
+ unsigned int i, val;
+ unsigned int regtmp = 0;
+ unsigned int base = 0;
+ const void *data = NULL;
+ int ret;
+
+ for (i = start; i < end; i++) {
+ regtmp = block_base + (i * map->reg_stride);
+
+ if (!regcache_reg_present(cache_present, i) ||
+ !regmap_writeable(map, regtmp)) {
+ ret = regcache_sync_block_raw_flush(map, &data,
+ base, regtmp);
+ if (ret != 0)
+ return ret;
+ continue;
+ }
+
+ val = regcache_get_val(map, block, i);
+ if (!regcache_reg_needs_sync(map, regtmp, val)) {
+ ret = regcache_sync_block_raw_flush(map, &data,
+ base, regtmp);
+ if (ret != 0)
+ return ret;
+ continue;
+ }
+
+ if (!data) {
+ data = regcache_get_val_addr(map, block, i);
+ base = regtmp;
+ }
+ }
+
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+ map->reg_stride);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
+ unsigned int block_base, unsigned int start,
+ unsigned int end)
+{
+ if (regmap_can_raw_write(map) && !map->use_single_write)
+ return regcache_sync_block_raw(map, block, cache_present,
+ block_base, start, end);
+ else
+ return regcache_sync_block_single(map, block, cache_present,
+ block_base, start, end);
+}
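
Taken together, regcache_cache_only(), regcache_mark_dirty() and regcache_sync() give drivers the usual power-down/power-up pattern: park writes in the cache while the device is off, then replay everything that differs from the defaults once it is back. A minimal sketch of that pattern, assuming a hypothetical baz_priv driver structure:

#include <linux/regmap.h>

/* Hypothetical driver-private data; only the regmap matters here. */
struct baz_priv {
	struct regmap *regmap;
};

/* The device loses register state while powered down: keep serving
 * writes from the cache, and remember that the hardware will come
 * back at its reset defaults.
 */
static void baz_power_off(struct baz_priv *priv)
{
	regcache_cache_only(priv->regmap, true);
	regcache_mark_dirty(priv->regmap);
	/* ... cut regulators/clocks for the device here ... */
}

/* On power-up, replay every cached value that differs from the defaults. */
static int baz_power_on(struct baz_priv *priv)
{
	/* ... re-enable regulators/clocks here ... */
	regcache_cache_only(priv->regmap, false);
	return regcache_sync(priv->regmap);
}
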
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
new file mode 100644
index 000000000..b9f76bdf7
--- /dev/null
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - AC'97 support
+//
+// Copyright 2013 Linaro Ltd. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <sound/ac97_codec.h>
+
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AC97_RESET:
+ case AC97_POWERDOWN:
+ case AC97_INT_PAGING:
+ case AC97_EXTENDED_ID:
+ case AC97_EXTENDED_STATUS:
+ case AC97_EXTENDED_MID:
+ case AC97_EXTENDED_MSTATUS:
+ case AC97_GPIO_STATUS:
+ case AC97_MISC_AFE:
+ case AC97_VENDOR_ID1:
+ case AC97_VENDOR_ID2:
+ case AC97_CODEC_CLASS_REV:
+ case AC97_PCI_SVID:
+ case AC97_PCI_SID:
+ case AC97_FUNC_SELECT:
+ case AC97_FUNC_INFO:
+ case AC97_SENSE_INFO:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile);
+
+static int regmap_ac97_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct snd_ac97 *ac97 = context;
+
+ *val = ac97->bus->ops->read(ac97, reg);
+
+ return 0;
+}
+
+static int regmap_ac97_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct snd_ac97 *ac97 = context;
+
+ ac97->bus->ops->write(ac97, reg, val);
+
+ return 0;
+}
+
+static const struct regmap_bus ac97_regmap_bus = {
+ .reg_write = regmap_ac97_reg_write,
+ .reg_read = regmap_ac97_reg_read,
+};
+
+struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_ac97);
+
+struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_ac97);
+
+MODULE_LICENSE("GPL v2");
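
A codec driver using this helper only supplies its snd_ac97 handle and a regmap_config, typically wiring regmap_ac97_default_volatile() in as the volatile callback. A hypothetical configuration is sketched below; the qux_* names and register bounds are illustrative.

#include <linux/regmap.h>
#include <sound/ac97_codec.h>

/* Hypothetical AC'97 codec map: 16-bit registers at even offsets, with
 * the stock volatile-register list exported above.
 */
static const struct regmap_config qux_ac97_regmap_config = {
	.reg_bits = 16,
	.val_bits = 16,
	.reg_stride = 2,
	.max_register = 0x7e,
	.volatile_reg = regmap_ac97_default_volatile,
	.cache_type = REGCACHE_RBTREE,
};

static struct regmap *qux_ac97_init(struct snd_ac97 *ac97)
{
	/* Managed variant: the map is released along with the codec device. */
	return devm_regmap_init_ac97(ac97, &qux_ac97_regmap_config);
}
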
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
new file mode 100644
index 000000000..1e3d205ce
--- /dev/null
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - debugfs
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/list.h>
+
+#include "internal.h"
+
+struct regmap_debugfs_node {
+ struct regmap *map;
+ struct list_head link;
+};
+
+static unsigned int dummy_index;
+static struct dentry *regmap_debugfs_root;
+static LIST_HEAD(regmap_debugfs_early_list);
+static DEFINE_MUTEX(regmap_debugfs_early_lock);
+
+/* Calculate the length of a fixed format */
+static size_t regmap_calc_reg_len(int max_val)
+{
+ return snprintf(NULL, 0, "%x", max_val);
+}
+
+static ssize_t regmap_name_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+ const char *name = "nodev";
+ int ret;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (map->dev && map->dev->driver)
+ name = map->dev->driver->name;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
+ if (ret >= PAGE_SIZE) {
+ kfree(buf);
+ return ret;
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_name_fops = {
+ .open = simple_open,
+ .read = regmap_name_read_file,
+ .llseek = default_llseek,
+};
+
+static void regmap_debugfs_free_dump_cache(struct regmap *map)
+{
+ struct regmap_debugfs_off_cache *c;
+
+ while (!list_empty(&map->debugfs_off_cache)) {
+ c = list_first_entry(&map->debugfs_off_cache,
+ struct regmap_debugfs_off_cache,
+ list);
+ list_del(&c->list);
+ kfree(c);
+ }
+}
+
+static bool regmap_printable(struct regmap *map, unsigned int reg)
+{
+ if (regmap_precious(map, reg))
+ return false;
+
+ if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
+ return false;
+
+ return true;
+}
+
+/*
+ * Work out where the start offset maps into register numbers, bearing
+ * in mind that we suppress hidden registers.
+ */
+static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
+ unsigned int base,
+ loff_t from,
+ loff_t *pos)
+{
+ struct regmap_debugfs_off_cache *c = NULL;
+ loff_t p = 0;
+ unsigned int i, ret;
+ unsigned int fpos_offset;
+ unsigned int reg_offset;
+
+ /* Suppress the cache if we're using a subrange */
+ if (base)
+ return base;
+
+ /*
+	 * If we don't have a cache, build one so we don't have to do a
+ * linear scan each time.
+ */
+ mutex_lock(&map->cache_lock);
+ i = base;
+ if (list_empty(&map->debugfs_off_cache)) {
+ for (; i <= map->max_register; i += map->reg_stride) {
+ /* Skip unprinted registers, closing off cache entry */
+ if (!regmap_printable(map, i)) {
+ if (c) {
+ c->max = p - 1;
+ c->max_reg = i - map->reg_stride;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ c = NULL;
+ }
+
+ continue;
+ }
+
+ /* No cache entry? Start a new one */
+ if (!c) {
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
+ return base;
+ }
+ c->min = p;
+ c->base_reg = i;
+ }
+
+ p += map->debugfs_tot_len;
+ }
+ }
+
+ /* Close the last entry off if we didn't scan beyond it */
+ if (c) {
+ c->max = p - 1;
+ c->max_reg = i - map->reg_stride;
+ list_add_tail(&c->list,
+ &map->debugfs_off_cache);
+ }
+
+ /*
+ * This should never happen; we return above if we fail to
+ * allocate and we should never be in this code if there are
+ * no registers at all.
+ */
+ WARN_ON(list_empty(&map->debugfs_off_cache));
+ ret = base;
+
+ /* Find the relevant block:offset */
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (from >= c->min && from <= c->max) {
+ fpos_offset = from - c->min;
+ reg_offset = fpos_offset / map->debugfs_tot_len;
+ *pos = c->min + (reg_offset * map->debugfs_tot_len);
+ mutex_unlock(&map->cache_lock);
+ return c->base_reg + (reg_offset * map->reg_stride);
+ }
+
+ *pos = c->max;
+ ret = c->max_reg;
+ }
+ mutex_unlock(&map->cache_lock);
+
+ return ret;
+}
+
+static inline void regmap_calc_tot_len(struct regmap *map,
+ void *buf, size_t count)
+{
+ /* Calculate the length of a fixed format */
+ if (!map->debugfs_tot_len) {
+ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
+ map->debugfs_val_len = 2 * map->format.val_bytes;
+ map->debugfs_tot_len = map->debugfs_reg_len +
+ map->debugfs_val_len + 3; /* : \n */
+ }
+}
+
+static int regmap_next_readable_reg(struct regmap *map, int reg)
+{
+ struct regmap_debugfs_off_cache *c;
+ int ret = -EINVAL;
+
+ if (regmap_printable(map, reg + map->reg_stride)) {
+ ret = reg + map->reg_stride;
+ } else {
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ if (reg > c->max_reg)
+ continue;
+ if (reg < c->base_reg) {
+ ret = c->base_reg;
+ break;
+ }
+ }
+ mutex_unlock(&map->cache_lock);
+ }
+ return ret;
+}
+
+static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
+ unsigned int to, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ size_t buf_pos = 0;
+ loff_t p = *ppos;
+ ssize_t ret;
+ int i;
+ char *buf;
+ unsigned int val, start_reg;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ regmap_calc_tot_len(map, buf, count);
+
+ /* Work out which register we're starting at */
+ start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
+
+ for (i = start_reg; i >= 0 && i <= to;
+ i = regmap_next_readable_reg(map, i)) {
+
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+ if (buf_pos + map->debugfs_tot_len > count)
+ break;
+
+ /* Format the register */
+ snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
+ map->debugfs_reg_len, i - from);
+ buf_pos += map->debugfs_reg_len + 2;
+
+ /* Format the value, write all X if we can't read */
+ ret = regmap_read(map, i, &val);
+ if (ret == 0)
+ snprintf(buf + buf_pos, count - buf_pos,
+ "%.*x", map->debugfs_val_len, val);
+ else
+ memset(buf + buf_pos, 'X',
+ map->debugfs_val_len);
+ buf_pos += 2 * map->format.val_bytes;
+
+ buf[buf_pos++] = '\n';
+ }
+ p += map->debugfs_tot_len;
+ }
+
+ ret = buf_pos;
+
+ if (copy_to_user(user_buf, buf, buf_pos)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ *ppos += buf_pos;
+
+out:
+ kfree(buf);
+ return ret;
+}
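+
+/*
+ * Note added for illustration (not in the original source): the dump built
+ * above is one fixed-width "<reg>: <value>" line per printable register,
+ * with values that cannot be read rendered as 'X' characters. For an
+ * 8-bit map with max_register 0xff the output might look like:
+ *
+ *	00: 1a
+ *	01: XX
+ *	02: 40
+ */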
+
+static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+
+ return regmap_read_debugfs(map, 0, map->max_register, user_buf,
+ count, ppos);
+}
+
+#undef REGMAP_ALLOW_WRITE_DEBUGFS
+#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+/*
+ * This can be dangerous, especially with clients such as PMICs, so no
+ * real compile-time configuration option is provided for this feature;
+ * people who want to use it need to modify the source code directly.
+ */
+static ssize_t regmap_map_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ size_t buf_size;
+ char *start = buf;
+ unsigned long reg, value;
+ struct regmap *map = file->private_data;
+ int ret;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ while (*start == ' ')
+ start++;
+ reg = simple_strtoul(start, &start, 16);
+ while (*start == ' ')
+ start++;
+ if (kstrtoul(start, 16, &value))
+ return -EINVAL;
+
+ /* Userspace has been fiddling around behind the kernel's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ ret = regmap_write(map, reg, value);
+ if (ret < 0)
+ return ret;
+ return buf_size;
+}
+#else
+#define regmap_map_write_file NULL
+#endif
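+
+/*
+ * For illustration (not part of the original source): when the write handler
+ * above is compiled in, it expects a single "<reg> <value>" pair in
+ * hexadecimal, so writing "1c 3f" to the 'registers' file performs
+ * regmap_write(map, 0x1c, 0x3f) and taints the kernel with TAINT_USER.
+ */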
+
+static const struct file_operations regmap_map_fops = {
+ .open = simple_open,
+ .read = regmap_map_read_file,
+ .write = regmap_map_write_file,
+ .llseek = default_llseek,
+};
+
+static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap_range_node *range = file->private_data;
+ struct regmap *map = range->map;
+
+ return regmap_read_debugfs(map, range->range_min, range->range_max,
+ user_buf, count, ppos);
+}
+
+static const struct file_operations regmap_range_fops = {
+ .open = simple_open,
+ .read = regmap_range_read_file,
+ .llseek = default_llseek,
+};
+
+static ssize_t regmap_reg_ranges_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+ struct regmap_debugfs_off_cache *c;
+ loff_t p = 0;
+ size_t buf_pos = 0;
+ char *buf;
+ char *entry;
+ int ret;
+ unsigned int entry_len;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ if (count > (PAGE_SIZE << (MAX_ORDER - 1)))
+ count = PAGE_SIZE << (MAX_ORDER - 1);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!entry) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* While we are at it, build the register dump cache
+ * now so the read() operation on the `registers' file
+ * can benefit from using the cache. We do not care
+ * about the file position information that is contained
+ * in the cache, just about the actual register blocks */
+ regmap_calc_tot_len(map, buf, count);
+ regmap_debugfs_get_dump_start(map, 0, *ppos, &p);
+
+	/* Reset the file pointer, as the fixed format of the `registers'
+ * file is not compatible with the `range' file */
+ p = 0;
+ mutex_lock(&map->cache_lock);
+ list_for_each_entry(c, &map->debugfs_off_cache, list) {
+ entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
+ c->base_reg, c->max_reg);
+ if (p >= *ppos) {
+ if (buf_pos + entry_len > count)
+ break;
+ memcpy(buf + buf_pos, entry, entry_len);
+ buf_pos += entry_len;
+ }
+ p += entry_len;
+ }
+ mutex_unlock(&map->cache_lock);
+
+ kfree(entry);
+ ret = buf_pos;
+
+ if (copy_to_user(user_buf, buf, buf_pos)) {
+ ret = -EFAULT;
+ goto out_buf;
+ }
+
+ *ppos += buf_pos;
+out_buf:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_reg_ranges_fops = {
+ .open = simple_open,
+ .read = regmap_reg_ranges_read_file,
+ .llseek = default_llseek,
+};
+
+static int regmap_access_show(struct seq_file *s, void *ignored)
+{
+ struct regmap *map = s->private;
+ int i, reg_len;
+
+ reg_len = regmap_calc_reg_len(map->max_register);
+
+ for (i = 0; i <= map->max_register; i += map->reg_stride) {
+ /* Ignore registers which are neither readable nor writable */
+ if (!regmap_readable(map, i) && !regmap_writeable(map, i))
+ continue;
+
+ /* Format the register */
+ seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
+ regmap_readable(map, i) ? 'y' : 'n',
+ regmap_writeable(map, i) ? 'y' : 'n',
+ regmap_volatile(map, i) ? 'y' : 'n',
+ regmap_precious(map, i) ? 'y' : 'n');
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(regmap_access);
+
+static ssize_t regmap_cache_only_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_only);
+ bool new_val, require_sync = false;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+	/* Ignore malformed data, like debugfs_write_file_bool() does */
+ if (err)
+ return count;
+
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
+
+ map->lock(map->lock_arg);
+
+ if (new_val && !map->cache_only) {
+ dev_warn(map->dev, "debugfs cache_only=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!new_val && map->cache_only) {
+ dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
+ require_sync = true;
+ }
+ map->cache_only = new_val;
+
+ map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
+
+ if (require_sync) {
+ err = regcache_sync(map);
+ if (err)
+ dev_err(map->dev, "Failed to sync cache %d\n", err);
+ }
+
+ return count;
+}
+
+static const struct file_operations regmap_cache_only_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_only_write_file,
+};
+
+static ssize_t regmap_cache_bypass_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct regmap *map = container_of(file->private_data,
+ struct regmap, cache_bypass);
+ bool new_val;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+	/* Ignore malformed data, like debugfs_write_file_bool() does */
+ if (err)
+ return count;
+
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
+
+ map->lock(map->lock_arg);
+
+ if (new_val && !map->cache_bypass) {
+ dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ } else if (!new_val && map->cache_bypass) {
+ dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
+ }
+ map->cache_bypass = new_val;
+
+ map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
+
+ return count;
+}
+
+static const struct file_operations regmap_cache_bypass_fops = {
+ .open = simple_open,
+ .read = debugfs_read_file_bool,
+ .write = regmap_cache_bypass_write_file,
+};
+
+void regmap_debugfs_init(struct regmap *map)
+{
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
+ const char *devname = "dummy";
+ const char *name = map->name;
+
+ /*
+ * Userspace can initiate reads from the hardware over debugfs.
+ * Normally internal regmap structures and buffers are protected with
+ * a mutex or a spinlock, but if the regmap owner decided to disable
+ * all locking mechanisms, this is no longer the case. For safety:
+ * don't create the debugfs entries if locking is disabled.
+ */
+ if (map->debugfs_disable) {
+ dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
+ return;
+ }
+
+ /* If we don't have the debugfs root yet, postpone init */
+ if (!regmap_debugfs_root) {
+ struct regmap_debugfs_node *node;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return;
+ node->map = map;
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_add(&node->link, &regmap_debugfs_early_list);
+ mutex_unlock(&regmap_debugfs_early_lock);
+ return;
+ }
+
+ INIT_LIST_HEAD(&map->debugfs_off_cache);
+ mutex_init(&map->cache_lock);
+
+ if (map->dev)
+ devname = dev_name(map->dev);
+
+ if (name) {
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
+ name = map->debugfs_name;
+ } else {
+ name = devname;
+ }
+
+ if (!strcmp(name, "dummy")) {
+ kfree(map->debugfs_name);
+ map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
+ dummy_index);
+ if (!map->debugfs_name)
+ return;
+ name = map->debugfs_name;
+ dummy_index++;
+ }
+
+ map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
+
+ debugfs_create_file("name", 0400, map->debugfs,
+ map, &regmap_name_fops);
+
+ debugfs_create_file("range", 0400, map->debugfs,
+ map, &regmap_reg_ranges_fops);
+
+ if (map->max_register || regmap_readable(map, 0)) {
+ umode_t registers_mode;
+
+#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
+ registers_mode = 0600;
+#else
+ registers_mode = 0400;
+#endif
+
+ debugfs_create_file("registers", registers_mode, map->debugfs,
+ map, &regmap_map_fops);
+ debugfs_create_file("access", 0400, map->debugfs,
+ map, &regmap_access_fops);
+ }
+
+ if (map->cache_type) {
+ debugfs_create_file("cache_only", 0600, map->debugfs,
+ &map->cache_only, &regmap_cache_only_fops);
+ debugfs_create_bool("cache_dirty", 0400, map->debugfs,
+ &map->cache_dirty);
+ debugfs_create_file("cache_bypass", 0600, map->debugfs,
+ &map->cache_bypass,
+ &regmap_cache_bypass_fops);
+ }
+
+ next = rb_first(&map->range_tree);
+ while (next) {
+ range_node = rb_entry(next, struct regmap_range_node, node);
+
+ if (range_node->name)
+ debugfs_create_file(range_node->name, 0400,
+ map->debugfs, range_node,
+ &regmap_range_fops);
+
+ next = rb_next(&range_node->node);
+ }
+
+ if (map->cache_ops && map->cache_ops->debugfs_init)
+ map->cache_ops->debugfs_init(map);
+}
+
+void regmap_debugfs_exit(struct regmap *map)
+{
+ if (map->debugfs) {
+ debugfs_remove_recursive(map->debugfs);
+ mutex_lock(&map->cache_lock);
+ regmap_debugfs_free_dump_cache(map);
+ mutex_unlock(&map->cache_lock);
+ kfree(map->debugfs_name);
+ map->debugfs_name = NULL;
+ } else {
+ struct regmap_debugfs_node *node, *tmp;
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
+ link) {
+ if (node->map == map) {
+ list_del(&node->link);
+ kfree(node);
+ }
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
+ }
+}
+
+void regmap_debugfs_initcall(void)
+{
+ struct regmap_debugfs_node *node, *tmp;
+
+ regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
+
+ mutex_lock(&regmap_debugfs_early_lock);
+ list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
+ regmap_debugfs_init(node->map);
+ list_del(&node->link);
+ kfree(node);
+ }
+ mutex_unlock(&regmap_debugfs_early_lock);
+}
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
new file mode 100644
index 000000000..3ec611dc0
--- /dev/null
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - I2C support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_byte_data(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (val > 0xff || reg > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static const struct regmap_bus regmap_smbus_byte = {
+ .reg_write = regmap_smbus_byte_reg_write,
+ .reg_read = regmap_smbus_byte_reg_read,
+};
+
+static int regmap_smbus_word_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_word_data(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (val > 0xffff || reg > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static const struct regmap_bus regmap_smbus_word = {
+ .reg_write = regmap_smbus_word_reg_write,
+ .reg_read = regmap_smbus_word_reg_read,
+};
+
+static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg > 0xff)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_word_swapped(i2c, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+
+ return 0;
+}
+
+static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (val > 0xffff || reg > 0xff)
+ return -EINVAL;
+
+ return i2c_smbus_write_word_swapped(i2c, reg, val);
+}
+
+static const struct regmap_bus regmap_smbus_word_swapped = {
+ .reg_write = regmap_smbus_word_write_swapped,
+ .reg_read = regmap_smbus_word_read_swapped,
+};
+
+static int regmap_i2c_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(i2c, data, count);
+ if (ret == count)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int regmap_i2c_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct i2c_msg xfer[2];
+ int ret;
+
+	/* If the I2C controller can't do a gather, tell the core; it
+	 * will substitute in a linear write for us.
+	 */
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
+ return -ENOTSUPP;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = reg_size;
+ xfer[0].buf = (void *)reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_NOSTART;
+ xfer[1].len = val_size;
+ xfer[1].buf = (void *)val;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ return 0;
+ if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static int regmap_i2c_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ struct i2c_msg xfer[2];
+ int ret;
+
+ xfer[0].addr = i2c->addr;
+ xfer[0].flags = 0;
+ xfer[0].len = reg_size;
+ xfer[0].buf = (void *)reg;
+
+ xfer[1].addr = i2c->addr;
+ xfer[1].flags = I2C_M_RD;
+ xfer[1].len = val_size;
+ xfer[1].buf = val;
+
+ ret = i2c_transfer(i2c->adapter, xfer, 2);
+ if (ret == 2)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static const struct regmap_bus regmap_i2c = {
+ .write = regmap_i2c_write,
+ .gather_write = regmap_i2c_gather_write,
+ .read = regmap_i2c_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static int regmap_i2c_smbus_i2c_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 1)
+ return -EINVAL;
+
+ --count;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ ((u8 *)data + 1));
+}
+
+static int regmap_i2c_smbus_i2c_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+
+ if (reg_size != 1 || val_size < 1)
+ return -EINVAL;
+
+ ret = i2c_smbus_read_i2c_block_data(i2c, ((u8 *)reg)[0], val_size, val);
+ if (ret == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static const struct regmap_bus regmap_i2c_smbus_i2c_block = {
+ .write = regmap_i2c_smbus_i2c_write,
+ .read = regmap_i2c_smbus_i2c_read,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 1,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 1,
+};
+
+static int regmap_i2c_smbus_i2c_write_reg16(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ if (count < 2)
+ return -EINVAL;
+
+ count--;
+ return i2c_smbus_write_i2c_block_data(i2c, ((u8 *)data)[0], count,
+ (u8 *)data + 1);
+}
+
+static int regmap_i2c_smbus_i2c_read_reg16(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret, count, len = val_size;
+
+ if (reg_size != 2)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(i2c, ((u16 *)reg)[0] & 0xff,
+ ((u16 *)reg)[0] >> 8);
+ if (ret < 0)
+ return ret;
+
+ count = 0;
+ do {
+ /* Current Address Read */
+ ret = i2c_smbus_read_byte(i2c);
+ if (ret < 0)
+ break;
+
+ *((u8 *)val++) = ret;
+ count++;
+ len--;
+ } while (len > 0);
+
+ if (count == val_size)
+ return 0;
+ else if (ret < 0)
+ return ret;
+ else
+ return -EIO;
+}
+
+static const struct regmap_bus regmap_i2c_smbus_i2c_block_reg16 = {
+ .write = regmap_i2c_smbus_i2c_write_reg16,
+ .read = regmap_i2c_smbus_i2c_read_reg16,
+ .max_raw_read = I2C_SMBUS_BLOCK_MAX - 2,
+ .max_raw_write = I2C_SMBUS_BLOCK_MAX - 2,
+};
+
+static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ const struct i2c_adapter_quirks *quirks;
+ const struct regmap_bus *bus = NULL;
+ struct regmap_bus *ret_bus;
+ u16 max_read = 0, max_write = 0;
+
+ if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
+ bus = &regmap_i2c;
+ else if (config->val_bits == 8 && config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ bus = &regmap_i2c_smbus_i2c_block;
+ else if (config->val_bits == 8 && config->reg_bits == 16 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ bus = &regmap_i2c_smbus_i2c_block_reg16;
+ else if (config->val_bits == 16 && config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_WORD_DATA))
+ switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
+ case REGMAP_ENDIAN_LITTLE:
+ bus = &regmap_smbus_word;
+ break;
+ case REGMAP_ENDIAN_BIG:
+ bus = &regmap_smbus_word_swapped;
+ break;
+ default: /* everything else is not supported */
+ break;
+ }
+ else if (config->val_bits == 8 && config->reg_bits == 8 &&
+ i2c_check_functionality(i2c->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ bus = &regmap_smbus_byte;
+
+ if (!bus)
+ return ERR_PTR(-ENOTSUPP);
+
+ quirks = i2c->adapter->quirks;
+ if (quirks) {
+ if (quirks->max_read_len &&
+ (bus->max_raw_read == 0 || bus->max_raw_read > quirks->max_read_len))
+ max_read = quirks->max_read_len;
+
+ if (quirks->max_write_len &&
+ (bus->max_raw_write == 0 || bus->max_raw_write > quirks->max_write_len))
+ max_write = quirks->max_write_len;
+
+ if (max_read || max_write) {
+ ret_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
+ if (!ret_bus)
+ return ERR_PTR(-ENOMEM);
+ ret_bus->free_on_exit = true;
+ ret_bus->max_raw_read = max_read;
+ ret_bus->max_raw_write = max_write;
+ bus = ret_bus;
+ }
+ }
+
+ return bus;
+}
+
+struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_i2c);
+
+struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i2c);
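+
+/*
+ * Illustrative usage sketch (not part of the original file): client drivers
+ * use the devm_regmap_init_i2c() wrapper from <linux/regmap.h>; the bus
+ * selection in regmap_get_i2c_bus() then picks the best transfer method the
+ * adapter supports. The config values here are hypothetical:
+ *
+ *	static const struct regmap_config foo_i2c_regmap_cfg = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *		.max_register = 0x3f,
+ *	};
+ *
+ *	map = devm_regmap_init_i2c(client, &foo_i2c_regmap_cfg);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */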
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
new file mode 100644
index 000000000..0328b0b34
--- /dev/null
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+
+#include <linux/regmap.h>
+#include <linux/i3c/device.h>
+#include <linux/i3c/master.h>
+#include <linux/module.h>
+
+static int regmap_i3c_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_priv_xfer xfers[] = {
+ {
+ .rnw = false,
+ .len = count,
+ .data.out = data,
+ },
+ };
+
+ return i3c_device_do_priv_xfers(i3c, xfers, 1);
+}
+
+static int regmap_i3c_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct i3c_device *i3c = dev_to_i3cdev(dev);
+ struct i3c_priv_xfer xfers[2];
+
+ xfers[0].rnw = false;
+ xfers[0].len = reg_size;
+ xfers[0].data.out = reg;
+
+ xfers[1].rnw = true;
+ xfers[1].len = val_size;
+ xfers[1].data.in = val;
+
+ return i3c_device_do_priv_xfers(i3c, xfers, 2);
+}
+
+static const struct regmap_bus regmap_i3c = {
+ .write = regmap_i3c_write,
+ .read = regmap_i3c_read,
+};
+
+struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __devm_regmap_init(&i3c->dev, &regmap_i3c, &i3c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
+
+MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
+MODULE_DESCRIPTION("Regmap I3C Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
new file mode 100644
index 000000000..18fc1c436
--- /dev/null
+++ b/drivers/base/regmap/regmap-irq.c
@@ -0,0 +1,1313 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// regmap based irq_chip
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+ struct mutex lock;
+ struct irq_chip irq_chip;
+
+ struct regmap *map;
+ const struct regmap_irq_chip *chip;
+
+ int irq_base;
+ struct irq_domain *domain;
+
+ int irq;
+ int wake_count;
+
+ unsigned int mask_base;
+ unsigned int unmask_base;
+
+ void *status_reg_buf;
+ unsigned int *main_status_buf;
+ unsigned int *status_buf;
+ unsigned int *mask_buf;
+ unsigned int *mask_buf_def;
+ unsigned int *wake_buf;
+ unsigned int *type_buf;
+ unsigned int *type_buf_def;
+ unsigned int **virt_buf;
+ unsigned int **config_buf;
+
+ unsigned int irq_reg_stride;
+
+ unsigned int (*get_irq_reg)(struct regmap_irq_chip_data *data,
+ unsigned int base, int index);
+
+ unsigned int clear_status:1;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+ int irq)
+{
+ return &data->chip->irqs[irq];
+}
+
+static bool regmap_irq_can_bulk_read_status(struct regmap_irq_chip_data *data)
+{
+ struct regmap *map = data->map;
+
+ /*
+	 * While it's possible that a user-defined ->get_irq_reg() callback
+	 * might be linear enough to support bulk reads, most of the time it
+	 * won't be. Therefore, only allow them if the default callback is used.
+ */
+ return data->irq_reg_stride == 1 && map->reg_stride == 1 &&
+ data->get_irq_reg == regmap_irq_get_irq_reg_linear &&
+ !map->use_single_read;
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->lock);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ int i, j, ret;
+ u32 reg;
+ u32 val;
+
+ if (d->chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0)
+ dev_err(map->dev, "IRQ sync failed to resume: %d\n",
+ ret);
+ }
+
+ if (d->clear_status) {
+ for (i = 0; i < d->chip->num_regs; i++) {
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
+
+ ret = regmap_read(map, reg, &val);
+ if (ret)
+ dev_err(d->map->dev,
+ "Failed to clear the interrupt status bits\n");
+ }
+
+ d->clear_status = false;
+ }
+
+ /*
+	 * If there's been a change in the mask, write it back to the
+	 * hardware. We rely on the regmap core cache to
+ * suppress pointless writes.
+ */
+ for (i = 0; i < d->chip->num_regs; i++) {
+ if (d->mask_base) {
+ reg = d->get_irq_reg(d, d->mask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ reg);
+ }
+
+ if (d->unmask_base) {
+ reg = d->get_irq_reg(d, d->unmask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], ~d->mask_buf[i]);
+ if (ret)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ reg);
+ }
+
+ reg = d->get_irq_reg(d, d->chip->wake_base, i);
+ if (d->wake_buf) {
+ if (d->chip->wake_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ ~d->wake_buf[i]);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev,
+ "Failed to sync wakes in %x: %d\n",
+ reg, ret);
+ }
+
+ if (!d->chip->init_ack_masked)
+ continue;
+ /*
+		 * Ack all the masked interrupts unconditionally; otherwise a
+		 * masked interrupt that was never acked would be ignored in
+		 * the irq handler and could trigger an irq storm.
+ */
+ if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
+ reg = d->get_irq_reg(d, d->chip->ack_base, i);
+
+			/* some chips ack by writing 0 */
+ if (d->chip->ack_invert)
+ ret = regmap_write(map, reg, ~d->mask_buf[i]);
+ else
+ ret = regmap_write(map, reg, d->mask_buf[i]);
+ if (d->chip->clear_ack) {
+ if (d->chip->ack_invert && !ret)
+ ret = regmap_write(map, reg, UINT_MAX);
+ else if (!ret)
+ ret = regmap_write(map, reg, 0);
+ }
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ }
+ }
+
+ /* Don't update the type bits if we're using mask bits for irq type. */
+ if (!d->chip->type_in_mask) {
+ for (i = 0; i < d->chip->num_type_reg; i++) {
+ if (!d->type_buf_def[i])
+ continue;
+ reg = d->get_irq_reg(d, d->chip->type_base, i);
+ if (d->chip->type_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->type_buf_def[i], ~d->type_buf[i]);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->type_buf_def[i], d->type_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to sync type in %x\n",
+ reg);
+ }
+ }
+
+ if (d->chip->num_virt_regs) {
+ for (i = 0; i < d->chip->num_virt_regs; i++) {
+ for (j = 0; j < d->chip->num_regs; j++) {
+ reg = d->get_irq_reg(d, d->chip->virt_reg_base[i],
+ j);
+ ret = regmap_write(map, reg, d->virt_buf[i][j]);
+ if (ret != 0)
+ dev_err(d->map->dev,
+ "Failed to write virt 0x%x: %d\n",
+ reg, ret);
+ }
+ }
+ }
+
+ for (i = 0; i < d->chip->num_config_bases; i++) {
+ for (j = 0; j < d->chip->num_config_regs; j++) {
+ reg = d->get_irq_reg(d, d->chip->config_base[i], j);
+ ret = regmap_write(map, reg, d->config_buf[i][j]);
+ if (ret)
+ dev_err(d->map->dev,
+ "Failed to write config %x: %d\n",
+ reg, ret);
+ }
+ }
+
+ if (d->chip->runtime_pm)
+ pm_runtime_put(map->dev);
+
+ /* If we've changed our wakeup count propagate it to the parent */
+ if (d->wake_count < 0)
+ for (i = d->wake_count; i < 0; i++)
+ irq_set_irq_wake(d->irq, 0);
+ else if (d->wake_count > 0)
+ for (i = 0; i < d->wake_count; i++)
+ irq_set_irq_wake(d->irq, 1);
+
+ d->wake_count = 0;
+
+ mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+ unsigned int reg = irq_data->reg_offset / map->reg_stride;
+ unsigned int mask;
+
+ /*
+ * The type_in_mask flag means that the underlying hardware uses
+ * separate mask bits for each interrupt trigger type, but we want
+ * to have a single logical interrupt with a configurable type.
+ *
+ * If the interrupt we're enabling defines any supported types
+ * then instead of using the regular mask bits for this interrupt,
+ * use the value previously written to the type buffer at the
+ * corresponding offset in regmap_irq_set_type().
+ */
+ if (d->chip->type_in_mask && irq_data->type.types_supported)
+ mask = d->type_buf[reg] & irq_data->mask;
+ else
+ mask = irq_data->mask;
+
+ if (d->chip->clear_on_unmask)
+ d->clear_status = true;
+
+ d->mask_buf[reg] &= ~mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+ d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
+}
+
+static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+ int reg, ret;
+ const struct regmap_irq_type *t = &irq_data->type;
+
+ if ((t->types_supported & type) != type)
+ return 0;
+
+ reg = t->type_reg_offset / map->reg_stride;
+
+ if (t->type_reg_mask)
+ d->type_buf[reg] &= ~t->type_reg_mask;
+ else
+ d->type_buf[reg] &= ~(t->type_falling_val |
+ t->type_rising_val |
+ t->type_level_low_val |
+ t->type_level_high_val);
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ d->type_buf[reg] |= t->type_falling_val;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ d->type_buf[reg] |= t->type_rising_val;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ d->type_buf[reg] |= (t->type_falling_val |
+ t->type_rising_val);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ d->type_buf[reg] |= t->type_level_high_val;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ d->type_buf[reg] |= t->type_level_low_val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (d->chip->set_type_virt) {
+ ret = d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
+ reg);
+ if (ret)
+ return ret;
+ }
+
+ if (d->chip->set_type_config) {
+ ret = d->chip->set_type_config(d->config_buf, type,
+ irq_data, reg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+ if (on) {
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ &= ~irq_data->mask;
+ d->wake_count++;
+ } else {
+ if (d->wake_buf)
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ |= irq_data->mask;
+ d->wake_count--;
+ }
+
+ return 0;
+}
+
+static const struct irq_chip regmap_irq_chip = {
+ .irq_bus_lock = regmap_irq_lock,
+ .irq_bus_sync_unlock = regmap_irq_sync_unlock,
+ .irq_disable = regmap_irq_disable,
+ .irq_enable = regmap_irq_enable,
+ .irq_set_type = regmap_irq_set_type,
+ .irq_set_wake = regmap_irq_set_wake,
+};
+
+static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
+ unsigned int b)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ struct regmap_irq_sub_irq_map *subreg;
+ unsigned int reg;
+ int i, ret = 0;
+
+ if (!chip->sub_reg_offsets) {
+ reg = data->get_irq_reg(data, chip->status_base, b);
+ ret = regmap_read(map, reg, &data->status_buf[b]);
+ } else {
+ /*
+ * Note we can't use ->get_irq_reg() here because the offsets
+ * in 'subreg' are *not* interchangeable with indices.
+ */
+ subreg = &chip->sub_reg_offsets[b];
+ for (i = 0; i < subreg->num_regs; i++) {
+ unsigned int offset = subreg->offset[i];
+ unsigned int index = offset / map->reg_stride;
+
+ if (chip->not_fixed_stride)
+ ret = regmap_read(map,
+ chip->status_base + offset,
+ &data->status_buf[b]);
+ else
+ ret = regmap_read(map,
+ chip->status_base + offset,
+ &data->status_buf[index]);
+
+ if (ret)
+ break;
+ }
+ }
+ return ret;
+}
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ bool handled = false;
+ u32 reg;
+
+ if (chip->handle_pre_irq)
+ chip->handle_pre_irq(chip->irq_drv_data);
+
+ if (chip->runtime_pm) {
+ ret = pm_runtime_get_sync(map->dev);
+ if (ret < 0) {
+ dev_err(map->dev, "IRQ thread failed to resume: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ /*
+	 * Read only the registers with active IRQs if the chip has a 'main
+	 * status register'. Otherwise read in all the statuses, using a
+	 * single bulk read if possible in order to reduce I/O overhead.
+ */
+
+ if (chip->num_main_regs) {
+ unsigned int max_main_bits;
+ unsigned long size;
+
+ size = chip->num_regs * sizeof(unsigned int);
+
+ max_main_bits = (chip->num_main_status_bits) ?
+ chip->num_main_status_bits : chip->num_regs;
+ /* Clear the status buf as we don't read all status regs */
+ memset(data->status_buf, 0, size);
+
+		/* We could support bulk reads for the main status registers,
+		 * but devices with very many main status registers are not
+		 * expected, so only single reads are supported for the sake
+		 * of simplicity; bulk reads can be added later if needed.
+ */
+ for (i = 0; i < chip->num_main_regs; i++) {
+ /*
+ * For not_fixed_stride, don't use ->get_irq_reg().
+ * It would produce an incorrect result.
+ */
+ if (data->chip->not_fixed_stride)
+ reg = chip->main_status +
+ i * map->reg_stride * data->irq_reg_stride;
+ else
+ reg = data->get_irq_reg(data,
+ chip->main_status, i);
+
+ ret = regmap_read(map, reg, &data->main_status_buf[i]);
+ if (ret) {
+ dev_err(map->dev,
+ "Failed to read IRQ status %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ /* Read sub registers with active IRQs */
+ for (i = 0; i < chip->num_main_regs; i++) {
+ unsigned int b;
+ const unsigned long mreg = data->main_status_buf[i];
+
+ for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
+ if (i * map->format.val_bytes * 8 + b >
+ max_main_bits)
+ break;
+ ret = read_sub_irq_data(data, b);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ }
+ } else if (regmap_irq_can_bulk_read_status(data)) {
+
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+
+ BUG_ON(!data->status_reg_buf);
+
+ ret = regmap_bulk_read(map, chip->status_base,
+ data->status_reg_buf,
+ chip->num_regs);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n",
+ ret);
+ goto exit;
+ }
+
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ goto exit;
+ }
+ }
+
+ } else {
+ for (i = 0; i < data->chip->num_regs; i++) {
+ unsigned int reg = data->get_irq_reg(data,
+ data->chip->status_base, i);
+ ret = regmap_read(map, reg, &data->status_buf[i]);
+
+ if (ret != 0) {
+ dev_err(map->dev,
+ "Failed to read IRQ status: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+ }
+
+ if (chip->status_invert)
+ for (i = 0; i < data->chip->num_regs; i++)
+ data->status_buf[i] = ~data->status_buf[i];
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+ * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
+ data->status_buf[i] &= ~data->mask_buf[i];
+
+ if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
+ reg = data->get_irq_reg(data, data->chip->ack_base, i);
+
+ if (chip->ack_invert)
+ ret = regmap_write(map, reg,
+ ~data->status_buf[i]);
+ else
+ ret = regmap_write(map, reg,
+ data->status_buf[i]);
+ if (chip->clear_ack) {
+ if (chip->ack_invert && !ret)
+ ret = regmap_write(map, reg, UINT_MAX);
+ else if (!ret)
+ ret = regmap_write(map, reg, 0);
+ }
+ if (ret != 0)
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ }
+ }
+
+ for (i = 0; i < chip->num_irqs; i++) {
+ if (data->status_buf[chip->irqs[i].reg_offset /
+ map->reg_stride] & chip->irqs[i].mask) {
+ handle_nested_irq(irq_find_mapping(data->domain, i));
+ handled = true;
+ }
+ }
+
+exit:
+ if (chip->runtime_pm)
+ pm_runtime_put(map->dev);
+
+ if (chip->handle_post_irq)
+ chip->handle_post_irq(chip->irq_drv_data);
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct regmap_irq_chip_data *data = h->host_data;
+
+ irq_set_chip_data(virq, data);
+ irq_set_chip(virq, &data->irq_chip);
+ irq_set_nested_thread(virq, 1);
+ irq_set_parent(virq, data->irq);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops regmap_domain_ops = {
+ .map = regmap_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * regmap_irq_get_irq_reg_linear() - Linear IRQ register mapping callback.
+ * @data: Data for the &struct regmap_irq_chip
+ * @base: Base register
+ * @index: Register index
+ *
+ * Returns the register address corresponding to the given @base and @index
+ * by the formula ``base + index * regmap_stride * irq_reg_stride``.
+ */
+unsigned int regmap_irq_get_irq_reg_linear(struct regmap_irq_chip_data *data,
+ unsigned int base, int index)
+{
+ const struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+
+ /*
+ * FIXME: This is for backward compatibility and should be removed
+ * when not_fixed_stride is dropped (it's only used by qcom-pm8008).
+ */
+ if (chip->not_fixed_stride && chip->sub_reg_offsets) {
+ struct regmap_irq_sub_irq_map *subreg;
+
+ subreg = &chip->sub_reg_offsets[0];
+ return base + subreg->offset[0];
+ }
+
+ return base + index * map->reg_stride * data->irq_reg_stride;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_irq_reg_linear);
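+
+/*
+ * Worked example (added for illustration): with base 0x10, map->reg_stride
+ * of 1 and irq_reg_stride of 2, the linear mapping above returns
+ * 0x10 + 3 * 1 * 2 = 0x16 for index 3.
+ */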
+
+/**
+ * regmap_irq_set_type_config_simple() - Simple IRQ type configuration callback.
+ * @buf: Buffer containing configuration register values; this is a 2D array of
+ * `num_config_bases` rows, each of `num_config_regs` elements.
+ * @type: The requested IRQ type.
+ * @irq_data: The IRQ being configured.
+ * @idx: Index of the irq's config registers within each array `buf[i]`
+ *
+ * This is a &struct regmap_irq_chip->set_type_config callback suitable for
+ * chips with one config register. Register values are updated according to
+ * the &struct regmap_irq_type data associated with an IRQ.
+ */
+int regmap_irq_set_type_config_simple(unsigned int **buf, unsigned int type,
+ const struct regmap_irq *irq_data, int idx)
+{
+ const struct regmap_irq_type *t = &irq_data->type;
+
+ if (t->type_reg_mask)
+ buf[0][idx] &= ~t->type_reg_mask;
+ else
+ buf[0][idx] &= ~(t->type_falling_val |
+ t->type_rising_val |
+ t->type_level_low_val |
+ t->type_level_high_val);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_FALLING:
+ buf[0][idx] |= t->type_falling_val;
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ buf[0][idx] |= t->type_rising_val;
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ buf[0][idx] |= (t->type_falling_val |
+ t->type_rising_val);
+ break;
+
+ case IRQ_TYPE_LEVEL_HIGH:
+ buf[0][idx] |= t->type_level_high_val;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ buf[0][idx] |= t->type_level_low_val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_set_type_config_simple);
+
+/**
+ * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
+ *
+ * @fwnode: The firmware node where the IRQ domain should be added to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache. The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data *d;
+ int i;
+ int ret = -ENOMEM;
+ int num_type_reg;
+ int num_regs;
+ u32 reg;
+
+ if (chip->num_regs <= 0)
+ return -EINVAL;
+
+ if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
+ return -EINVAL;
+
+ for (i = 0; i < chip->num_irqs; i++) {
+ if (chip->irqs[i].reg_offset % map->reg_stride)
+ return -EINVAL;
+ if (chip->irqs[i].reg_offset / map->reg_stride >=
+ chip->num_regs)
+ return -EINVAL;
+ }
+
+ if (chip->not_fixed_stride) {
+ dev_warn(map->dev, "not_fixed_stride is deprecated; use ->get_irq_reg() instead");
+
+ for (i = 0; i < chip->num_regs; i++)
+ if (chip->sub_reg_offsets[i].num_regs != 1)
+ return -EINVAL;
+ }
+
+ if (chip->num_type_reg)
+ dev_warn(map->dev, "type registers are deprecated; use config registers instead");
+
+ if (chip->num_virt_regs || chip->virt_reg_base || chip->set_type_virt)
+ dev_warn(map->dev, "virtual registers are deprecated; use config registers instead");
+
+ if (irq_base) {
+ irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+ if (irq_base < 0) {
+ dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ return irq_base;
+ }
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ if (chip->num_main_regs) {
+ d->main_status_buf = kcalloc(chip->num_main_regs,
+ sizeof(*d->main_status_buf),
+ GFP_KERNEL);
+
+ if (!d->main_status_buf)
+ goto err_alloc;
+ }
+
+ d->status_buf = kcalloc(chip->num_regs, sizeof(*d->status_buf),
+ GFP_KERNEL);
+ if (!d->status_buf)
+ goto err_alloc;
+
+ d->mask_buf = kcalloc(chip->num_regs, sizeof(*d->mask_buf),
+ GFP_KERNEL);
+ if (!d->mask_buf)
+ goto err_alloc;
+
+ d->mask_buf_def = kcalloc(chip->num_regs, sizeof(*d->mask_buf_def),
+ GFP_KERNEL);
+ if (!d->mask_buf_def)
+ goto err_alloc;
+
+ if (chip->wake_base) {
+ d->wake_buf = kcalloc(chip->num_regs, sizeof(*d->wake_buf),
+ GFP_KERNEL);
+ if (!d->wake_buf)
+ goto err_alloc;
+ }
+
+ /*
+ * Use num_config_regs if defined, otherwise fall back to num_type_reg
+ * to maintain backward compatibility.
+ */
+ num_type_reg = chip->num_config_regs ? chip->num_config_regs
+ : chip->num_type_reg;
+ num_regs = chip->type_in_mask ? chip->num_regs : num_type_reg;
+ if (num_regs) {
+ d->type_buf_def = kcalloc(num_regs,
+ sizeof(*d->type_buf_def), GFP_KERNEL);
+ if (!d->type_buf_def)
+ goto err_alloc;
+
+ d->type_buf = kcalloc(num_regs, sizeof(*d->type_buf),
+ GFP_KERNEL);
+ if (!d->type_buf)
+ goto err_alloc;
+ }
+
+ if (chip->num_virt_regs) {
+ /*
+ * Create virt_buf[chip->num_extra_config_regs][chip->num_regs]
+ */
+ d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
+ GFP_KERNEL);
+ if (!d->virt_buf)
+ goto err_alloc;
+
+ for (i = 0; i < chip->num_virt_regs; i++) {
+ d->virt_buf[i] = kcalloc(chip->num_regs,
+ sizeof(**d->virt_buf),
+ GFP_KERNEL);
+ if (!d->virt_buf[i])
+ goto err_alloc;
+ }
+ }
+
+ if (chip->num_config_bases && chip->num_config_regs) {
+ /*
+ * Create config_buf[num_config_bases][num_config_regs]
+ */
+ d->config_buf = kcalloc(chip->num_config_bases,
+ sizeof(*d->config_buf), GFP_KERNEL);
+ if (!d->config_buf)
+ goto err_alloc;
+
+ for (i = 0; i < chip->num_config_bases; i++) {
+ d->config_buf[i] = kcalloc(chip->num_config_regs,
+ sizeof(**d->config_buf),
+ GFP_KERNEL);
+ if (!d->config_buf[i])
+ goto err_alloc;
+ }
+ }
+
+ d->irq_chip = regmap_irq_chip;
+ d->irq_chip.name = chip->name;
+ d->irq = irq;
+ d->map = map;
+ d->chip = chip;
+ d->irq_base = irq_base;
+
+ if (chip->mask_base && chip->unmask_base &&
+ !chip->mask_unmask_non_inverted) {
+ /*
+ * Chips that specify both mask_base and unmask_base used to
+ * get inverted mask behavior by default, with no way to ask
+ * for the normal, non-inverted behavior. This "inverted by
+ * default" behavior is deprecated, but we have to support it
+ * until existing drivers have been fixed.
+ *
+ * Existing drivers should be updated by swapping mask_base
+ * and unmask_base and setting mask_unmask_non_inverted=true.
+ * New drivers should always set the flag.
+ */
+ dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
+
+ /* Might as well warn about mask_invert while we're at it... */
+ if (chip->mask_invert)
+ dev_warn(map->dev, "mask_invert=true ignored");
+
+ d->mask_base = chip->unmask_base;
+ d->unmask_base = chip->mask_base;
+ } else if (chip->mask_invert) {
+ /*
+ * Swap the roles of mask_base and unmask_base if the bits are
+ * inverted. This is deprecated, drivers should use unmask_base
+ * directly.
+ */
+ dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");
+
+ d->mask_base = chip->unmask_base;
+ d->unmask_base = chip->mask_base;
+ } else {
+ d->mask_base = chip->mask_base;
+ d->unmask_base = chip->unmask_base;
+ }
+
+ if (chip->irq_reg_stride)
+ d->irq_reg_stride = chip->irq_reg_stride;
+ else
+ d->irq_reg_stride = 1;
+
+ if (chip->get_irq_reg)
+ d->get_irq_reg = chip->get_irq_reg;
+ else
+ d->get_irq_reg = regmap_irq_get_irq_reg_linear;
+
+ if (regmap_irq_can_bulk_read_status(d)) {
+ d->status_reg_buf = kmalloc_array(chip->num_regs,
+ map->format.val_bytes,
+ GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+ }
+
+ mutex_init(&d->lock);
+
+ for (i = 0; i < chip->num_irqs; i++)
+ d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
+ |= chip->irqs[i].mask;
+
+ /* Mask all the interrupts by default */
+ for (i = 0; i < chip->num_regs; i++) {
+ d->mask_buf[i] = d->mask_buf_def[i];
+
+ if (d->mask_base) {
+ reg = d->get_irq_reg(d, d->mask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
+
+ if (d->unmask_base) {
+ reg = d->get_irq_reg(d, d->unmask_base, i);
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i], ~d->mask_buf[i]);
+ if (ret) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
+
+ if (!chip->init_ack_masked)
+ continue;
+
+		/* Ack interrupts that are masked but currently set */
+ reg = d->get_irq_reg(d, d->chip->status_base, i);
+ ret = regmap_read(map, reg, &d->status_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n",
+ ret);
+ goto err_alloc;
+ }
+
+ if (chip->status_invert)
+ d->status_buf[i] = ~d->status_buf[i];
+
+ if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
+ reg = d->get_irq_reg(d, d->chip->ack_base, i);
+ if (chip->ack_invert)
+ ret = regmap_write(map, reg,
+ ~(d->status_buf[i] & d->mask_buf[i]));
+ else
+ ret = regmap_write(map, reg,
+ d->status_buf[i] & d->mask_buf[i]);
+ if (chip->clear_ack) {
+ if (chip->ack_invert && !ret)
+ ret = regmap_write(map, reg, UINT_MAX);
+ else if (!ret)
+ ret = regmap_write(map, reg, 0);
+ }
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
+ }
+
+ /* Wake is disabled by default */
+ if (d->wake_buf) {
+ for (i = 0; i < chip->num_regs; i++) {
+ d->wake_buf[i] = d->mask_buf_def[i];
+ reg = d->get_irq_reg(d, d->chip->wake_base, i);
+
+ if (chip->wake_invert)
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ 0);
+ else
+ ret = regmap_update_bits(d->map, reg,
+ d->mask_buf_def[i],
+ d->wake_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
+ }
+
+ if (chip->num_type_reg && !chip->type_in_mask) {
+ for (i = 0; i < chip->num_type_reg; ++i) {
+ reg = d->get_irq_reg(d, d->chip->type_base, i);
+
+ ret = regmap_read(map, reg, &d->type_buf_def[i]);
+
+ if (d->chip->type_invert)
+ d->type_buf_def[i] = ~d->type_buf_def[i];
+
+ if (ret) {
+ dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
+ }
+
+ if (irq_base)
+ d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
+ irq_base, 0,
+ &regmap_domain_ops, d);
+ else
+ d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
+ &regmap_domain_ops, d);
+ if (!d->domain) {
+ dev_err(map->dev, "Failed to create IRQ domain\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
+ irq_flags | IRQF_ONESHOT,
+ chip->name, d);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+ irq, chip->name, ret);
+ goto err_domain;
+ }
+
+ *data = d;
+
+ return 0;
+
+err_domain:
+ /* Should really dispose of the domain but... */
+err_alloc:
+ kfree(d->type_buf);
+ kfree(d->type_buf_def);
+ kfree(d->wake_buf);
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_buf);
+ kfree(d->status_reg_buf);
+ if (d->virt_buf) {
+ for (i = 0; i < chip->num_virt_regs; i++)
+ kfree(d->virt_buf[i]);
+ kfree(d->virt_buf);
+ }
+ if (d->config_buf) {
+ for (i = 0; i < chip->num_config_bases; i++)
+ kfree(d->config_buf[i]);
+ kfree(d->config_buf);
+ }
+ kfree(d);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
+
+/**
+ * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
+ * node of the regmap is used.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
+ irq_flags, irq_base, chip, data);
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
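+
+/*
+ * Illustrative usage sketch (not part of the original file): a typical MFD
+ * driver describes its interrupt registers and then registers the chip
+ * roughly as below; all register addresses and IRQ indices are hypothetical:
+ *
+ *	static const struct regmap_irq foo_irqs[] = {
+ *		REGMAP_IRQ_REG(FOO_IRQ_GPIO, 0, BIT(0)),
+ *		REGMAP_IRQ_REG(FOO_IRQ_ADC,  0, BIT(1)),
+ *	};
+ *
+ *	static const struct regmap_irq_chip foo_irq_chip = {
+ *		.name = "foo",
+ *		.status_base = FOO_REG_IRQ_STATUS,
+ *		.mask_base = FOO_REG_IRQ_MASK,
+ *		.ack_base = FOO_REG_IRQ_STATUS,
+ *		.num_regs = 1,
+ *		.irqs = foo_irqs,
+ *		.num_irqs = ARRAY_SIZE(foo_irqs),
+ *	};
+ *
+ *	ret = regmap_add_irq_chip(map, i2c->irq, IRQF_TRIGGER_LOW, 0,
+ *				  &foo_irq_chip, &irq_data);
+ */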
+
+/**
+ * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all mapped IRQs on the chip.
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+ unsigned int virq;
+ int i, hwirq;
+
+ if (!d)
+ return;
+
+ free_irq(irq, d);
+
+	/* Dispose of all virtual irqs from the irq domain before removing it */
+ for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+		/* Ignore hwirq if there are holes in the IRQ list */
+ if (!d->chip->irqs[hwirq].mask)
+ continue;
+
+ /*
+		 * Find the virtual irq of hwirq on the chip and, if it
+		 * is mapped, dispose of it.
+ */
+ virq = irq_find_mapping(d->domain, hwirq);
+ if (virq)
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(d->domain);
+ kfree(d->type_buf);
+ kfree(d->type_buf_def);
+ kfree(d->wake_buf);
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ if (d->config_buf) {
+ for (i = 0; i < d->chip->num_config_bases; i++)
+ kfree(d->config_buf[i]);
+ kfree(d->config_buf);
+ }
+ kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+static void devm_regmap_irq_chip_release(struct device *dev, void *res)
+{
+ struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;
+
+ regmap_del_irq_chip(d->irq, d);
+}
+
+static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
+{
+ struct regmap_irq_chip_data **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+/**
+ * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
+ *
+ * @dev: The device that the irq_chip belongs to.
+ * @fwnode: The firmware node where the IRQ domain should be added to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip_fwnode(struct device *dev,
+ struct fwnode_handle *fwnode,
+ struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data **ptr, *d;
+ int ret;
+
+ ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
+ chip, &d);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = d;
+ devres_add(dev, ptr);
+ *data = d;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
+
+/**
+ * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
+ *
+ * @dev: The device that the irq_chip belongs to.
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Allocate at specific IRQ number if irq_base > 0.
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * The &regmap_irq_chip_data will be automatically released when the device is
+ * unbound.
+ */
+int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
+ int irq_flags, int irq_base,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
+ irq, irq_flags, irq_base, chip,
+ data);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
+
+/**
+ * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
+ *
+ * @dev: Device for which the resource was allocated.
+ * @irq: Primary IRQ for the device.
+ * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
+ *
+ * A resource managed version of regmap_del_irq_chip().
+ */
+void devm_regmap_del_irq_chip(struct device *dev, int irq,
+ struct regmap_irq_chip_data *data)
+{
+ int rc;
+
+ WARN_ON(irq != data->irq);
+ rc = devres_release(dev, devm_regmap_irq_chip_release,
+ devm_regmap_irq_chip_match, data);
+
+ if (rc != 0)
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
+ *
+ * @data: regmap irq controller to operate on.
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+ WARN_ON(!data->irq_base);
+ return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
+
+/**
+ * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
+ *
+ * @data: regmap irq controller to operate on.
+ * @irq: index of the interrupt requested in the chip IRQs.
+ *
+ * Useful for drivers to request their own IRQs.
+ */
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
+{
+ /* Handle holes in the IRQ list */
+ if (!data->chip->irqs[irq].mask)
+ return -EINVAL;
+
+ return irq_create_mapping(data->domain, irq);
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
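
Continuing the hypothetical mydev example from above, the usual follow-up looks roughly like this: translate the chip-relative index into a Linux IRQ number and request it. mydev_alarm_handler and mydev are placeholders, not part of this code.

    int virq = regmap_irq_get_virq(irq_data, MYDEV_IRQ_ALARM);

    if (virq < 0)
            return virq;

    /* Request the mapped interrupt like any other threaded IRQ. */
    ret = devm_request_threaded_irq(dev, virq, NULL, mydev_alarm_handler,
                                    IRQF_ONESHOT, "mydev-alarm", mydev);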
+
+/**
+ * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
+ *
+ * @data: regmap_irq controller to operate on.
+ *
+ * Useful for drivers to request their own IRQs and for integration
+ * with subsystems. For ease of integration a NULL @data is accepted
+ * and simply yields a NULL domain, so devices can call this even if
+ * no IRQ controller was registered.
+ */
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
+{
+ if (data)
+ return data->domain;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
new file mode 100644
index 000000000..f7293040a
--- /dev/null
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/errno.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#define REGVAL_MASK GENMASK(15, 0)
+#define REGNUM_C22_MASK GENMASK(4, 0)
+/* Clause-45 mask includes the device type (5 bits) and the actual register number (16 bits) */
+#define REGNUM_C45_MASK GENMASK(20, 0)
+
+static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int *val)
+{
+ int ret;
+
+ ret = mdiodev_read(mdio_dev, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & REGVAL_MASK;
+ return 0;
+}
+
+static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
+{
+ return mdiodev_write(mdio_dev, reg, val);
+}
+
+static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_read(mdio_dev, reg, val);
+}
+
+static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
+ return mdiodev_write(mdio_dev, reg, val);
+}
+
+static const struct regmap_bus regmap_mdio_c22_bus = {
+ .reg_write = regmap_mdio_c22_write,
+ .reg_read = regmap_mdio_c22_read,
+};
+
+static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C45_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_read(mdio_dev, MII_ADDR_C45 | reg, val);
+}
+
+static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mdio_device *mdio_dev = context;
+
+ if (unlikely(reg & ~REGNUM_C45_MASK))
+ return -ENXIO;
+
+ return regmap_mdio_write(mdio_dev, MII_ADDR_C45 | reg, val);
+}
+
+static const struct regmap_bus regmap_mdio_c45_bus = {
+ .reg_write = regmap_mdio_c45_write,
+ .reg_read = regmap_mdio_c45_read,
+};
+
+struct regmap *__regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config, struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus;
+
+ if (config->reg_bits == 5 && config->val_bits == 16)
+ bus = &regmap_mdio_c22_bus;
+ else if (config->reg_bits == 21 && config->val_bits == 16)
+ bus = &regmap_mdio_c45_bus;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mdio);
+
+struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
+ const struct regmap_config *config, struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus;
+
+ if (config->reg_bits == 5 && config->val_bits == 16)
+ bus = &regmap_mdio_c22_bus;
+ else if (config->reg_bits == 21 && config->val_bits == 16)
+ bus = &regmap_mdio_c45_bus;
+ else
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return __devm_regmap_init(&mdio_dev->dev, bus, mdio_dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
+
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_LICENSE("GPL v2");
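
As a usage sketch only: a Clause-22 client would pass a config matching the reg_bits/val_bits check in __regmap_init_mdio() above. The structure name and max_register value are assumptions for illustration.

    static const struct regmap_config mydev_c22_regmap_config = {
            .reg_bits = 5,          /* Clause-22 register number, 0..31 */
            .val_bits = 16,
            .max_register = 31,
    };

    /* In probe(), given a struct mdio_device *mdiodev: */
    map = devm_regmap_init_mdio(mdiodev, &mydev_c22_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);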
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
new file mode 100644
index 000000000..3ccdd86a9
--- /dev/null
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - MMIO support
+//
+// Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/swab.h>
+
+#include "internal.h"
+
+struct regmap_mmio_context {
+ void __iomem *regs;
+ unsigned int val_bytes;
+ bool big_endian;
+
+ bool attached_clk;
+ struct clk *clk;
+
+ void (*reg_write)(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val);
+ unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+ unsigned int reg);
+};
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+ switch (reg_bits) {
+ case 8:
+ case 16:
+ case 32:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int regmap_mmio_get_min_stride(size_t val_bits)
+{
+ int min_stride;
+
+ switch (val_bits) {
+ case 8:
+ /* The core treats 0 as 1 */
+ min_stride = 0;
+ break;
+ case 16:
+ min_stride = 2;
+ break;
+ case 32:
+ min_stride = 4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return min_stride;
+}
+
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write8_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writeb_relaxed(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite8(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite8(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew_relaxed(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite16le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writew(swab16(val), ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite16be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel_relaxed(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite32le(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite32(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+ unsigned int reg,
+ unsigned int val)
+{
+ writel(swab32(val), ctx->regs + reg);
+}
+
+static void regmap_mmio_iowrite32be(struct regmap_mmio_context *ctx,
+ unsigned int reg, unsigned int val)
+{
+ iowrite32be(val, ctx->regs + reg);
+}
+
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ ctx->reg_write(ctx, reg, val);
+
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return 0;
+}
+
+static int regmap_mmio_noinc_write(void *context, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+ int i;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * There are no native, assembly-optimized single-register write
+ * operations for big endian, so fall back to emulation if this
+ * is needed. (Single bytes are fine; they are not affected by
+ * endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ {
+ const u16 *valp = (const u16 *)val;
+ for (i = 0; i < val_count; i++)
+ writew(swab16(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+ case 4:
+ {
+ const u32 *valp = (const u32 *)val;
+ for (i = 0; i < val_count; i++)
+ writel(swab32(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#ifdef CONFIG_64BIT
+ case 8:
+ {
+ const u64 *valp = (const u64 *)val;
+ for (i = 0; i < val_count; i++)
+ writeq(swab64(valp[i]), ctx->regs + reg);
+ goto out_clk;
+ }
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ writesb(ctx->regs + reg, (const u8 *)val, val_count);
+ break;
+ case 2:
+ writesw(ctx->regs + reg, (const u16 *)val, val_count);
+ break;
+ case 4:
+ writesl(ctx->regs + reg, (const u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ writesq(ctx->regs + reg, (const u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read8_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readb_relaxed(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_ioread8(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread8(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readw_relaxed(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_ioread16le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return swab16(readw(ctx->regs + reg));
+}
+
+static unsigned int regmap_mmio_ioread16be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le_relaxed(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return readl_relaxed(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_ioread32le(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return swab32(readl(ctx->regs + reg));
+}
+
+static unsigned int regmap_mmio_ioread32be(struct regmap_mmio_context *ctx,
+ unsigned int reg)
+{
+ return ioread32be(ctx->regs + reg);
+}
+
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ *val = ctx->reg_read(ctx, reg);
+
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return 0;
+}
+
+static int regmap_mmio_noinc_read(void *context, unsigned int reg,
+ void *val, size_t val_count)
+{
+ struct regmap_mmio_context *ctx = context;
+ int ret = 0;
+
+ if (!IS_ERR(ctx->clk)) {
+ ret = clk_enable(ctx->clk);
+ if (ret < 0)
+ return ret;
+ }
+
+ switch (ctx->val_bytes) {
+ case 1:
+ readsb(ctx->regs + reg, (u8 *)val, val_count);
+ break;
+ case 2:
+ readsw(ctx->regs + reg, (u16 *)val, val_count);
+ break;
+ case 4:
+ readsl(ctx->regs + reg, (u32 *)val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ readsq(ctx->regs + reg, (u64 *)val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out_clk;
+ }
+
+ /*
+ * There are no native, assembly-optimized single-register read
+ * operations for big endian, so swap the bytes that were just read
+ * if this is needed. (Single bytes are fine; they are not affected
+ * by endianness.)
+ */
+ if (ctx->big_endian && (ctx->val_bytes > 1)) {
+ switch (ctx->val_bytes) {
+ case 2:
+ swab16_array(val, val_count);
+ break;
+ case 4:
+ swab32_array(val, val_count);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ swab64_array(val, val_count);
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+out_clk:
+ if (!IS_ERR(ctx->clk))
+ clk_disable(ctx->clk);
+
+ return ret;
+}
+
+
+static void regmap_mmio_free_context(void *context)
+{
+ struct regmap_mmio_context *ctx = context;
+
+ if (!IS_ERR(ctx->clk)) {
+ clk_unprepare(ctx->clk);
+ if (!ctx->attached_clk)
+ clk_put(ctx->clk);
+ }
+ kfree(context);
+}
+
+static const struct regmap_bus regmap_mmio = {
+ .fast_io = true,
+ .reg_write = regmap_mmio_write,
+ .reg_read = regmap_mmio_read,
+ .reg_noinc_write = regmap_mmio_noinc_write,
+ .reg_noinc_read = regmap_mmio_noinc_read,
+ .free_context = regmap_mmio_free_context,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ struct regmap_mmio_context *ctx;
+ int min_stride;
+ int ret;
+
+ ret = regmap_mmio_regbits_check(config->reg_bits);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (config->pad_bits)
+ return ERR_PTR(-EINVAL);
+
+ min_stride = regmap_mmio_get_min_stride(config->val_bits);
+ if (min_stride < 0)
+ return ERR_PTR(min_stride);
+
+ if (config->reg_stride < min_stride)
+ return ERR_PTR(-EINVAL);
+
+ if (config->use_relaxed_mmio && config->io_port)
+ return ERR_PTR(-EINVAL);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->regs = regs;
+ ctx->val_bytes = config->val_bits / 8;
+ ctx->clk = ERR_PTR(-ENODEV);
+
+ switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ switch (config->val_bits) {
+ case 8:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else if (config->use_relaxed_mmio) {
+ ctx->reg_read = regmap_mmio_read8_relaxed;
+ ctx->reg_write = regmap_mmio_write8_relaxed;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
+ break;
+ case 16:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16le;
+ ctx->reg_write = regmap_mmio_iowrite16le;
+ } else if (config->use_relaxed_mmio) {
+ ctx->reg_read = regmap_mmio_read16le_relaxed;
+ ctx->reg_write = regmap_mmio_write16le_relaxed;
+ } else {
+ ctx->reg_read = regmap_mmio_read16le;
+ ctx->reg_write = regmap_mmio_write16le;
+ }
+ break;
+ case 32:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32le;
+ ctx->reg_write = regmap_mmio_iowrite32le;
+ } else if (config->use_relaxed_mmio) {
+ ctx->reg_read = regmap_mmio_read32le_relaxed;
+ ctx->reg_write = regmap_mmio_write32le_relaxed;
+ } else {
+ ctx->reg_read = regmap_mmio_read32le;
+ ctx->reg_write = regmap_mmio_write32le;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+ case REGMAP_ENDIAN_NATIVE:
+#endif
+ ctx->big_endian = true;
+ switch (config->val_bits) {
+ case 8:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread8;
+ ctx->reg_write = regmap_mmio_iowrite8;
+ } else {
+ ctx->reg_read = regmap_mmio_read8;
+ ctx->reg_write = regmap_mmio_write8;
+ }
+ break;
+ case 16:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread16be;
+ ctx->reg_write = regmap_mmio_iowrite16be;
+ } else {
+ ctx->reg_read = regmap_mmio_read16be;
+ ctx->reg_write = regmap_mmio_write16be;
+ }
+ break;
+ case 32:
+ if (config->io_port) {
+ ctx->reg_read = regmap_mmio_ioread32be;
+ ctx->reg_write = regmap_mmio_iowrite32be;
+ } else {
+ ctx->reg_read = regmap_mmio_read32be;
+ ctx->reg_write = regmap_mmio_write32be;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (clk_id == NULL)
+ return ctx;
+
+ ctx->clk = clk_get(dev, clk_id);
+ if (IS_ERR(ctx->clk)) {
+ ret = PTR_ERR(ctx->clk);
+ goto err_free;
+ }
+
+ ret = clk_prepare(ctx->clk);
+ if (ret < 0) {
+ clk_put(ctx->clk);
+ goto err_free;
+ }
+
+ return ctx;
+
+err_free:
+ kfree(ctx);
+
+ return ERR_PTR(ret);
+}
+
+struct regmap *__regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_mmio_clk);
+
+struct regmap *__devm_regmap_init_mmio_clk(struct device *dev,
+ const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap_mmio_context *ctx;
+
+ ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ return __devm_regmap_init(dev, &regmap_mmio, ctx, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_mmio_clk);
+
+int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ ctx->clk = clk;
+ ctx->attached_clk = true;
+
+ return clk_prepare(ctx->clk);
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_attach_clk);
+
+void regmap_mmio_detach_clk(struct regmap *map)
+{
+ struct regmap_mmio_context *ctx = map->bus_context;
+
+ clk_unprepare(ctx->clk);
+
+ ctx->attached_clk = false;
+ ctx->clk = NULL;
+}
+EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+
+MODULE_LICENSE("GPL v2");
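
A minimal usage sketch for the MMIO bus, assuming a platform device with 32-bit registers at a 4-byte stride; the max_register value and variable names are illustrative only.

    static const struct regmap_config mydev_mmio_regmap_config = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
            .max_register = 0xfc,   /* assumed last register offset */
    };

    /* In probe(): */
    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
            return PTR_ERR(base);

    map = devm_regmap_init_mmio(&pdev->dev, base, &mydev_mmio_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);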
diff --git a/drivers/base/regmap/regmap-sccb.c b/drivers/base/regmap/regmap-sccb.c
new file mode 100644
index 000000000..986af26d8
--- /dev/null
+++ b/drivers/base/regmap/regmap-sccb.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+// Register map access API - SCCB support
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "internal.h"
+
+/**
+ * sccb_is_available - Check if the adapter supports SCCB protocol
+ * @adap: I2C adapter
+ *
+ * Return true if the I2C adapter is capable of using SCCB helper functions,
+ * false otherwise.
+ */
+static bool sccb_is_available(struct i2c_adapter *adap)
+{
+ u32 needed_funcs = I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
+
+ /*
+ * If we ever want to support hardware that does SCCB natively, we will
+ * introduce an sccb_xfer() callback in struct i2c_algorithm and check
+ * for it here.
+ */
+
+ return (i2c_get_functionality(adap) & needed_funcs) == needed_funcs;
+}
+
+/**
+ * regmap_sccb_read - Read data from SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * This executes the 2-phase write transmission cycle that is followed by a
+ * 2-phase read transmission cycle. Returns a negative errno on failure, or
+ * zero on success.
+ */
+static int regmap_sccb_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+ int ret;
+ union i2c_smbus_data data;
+
+ i2c_lock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &data);
+ if (ret < 0)
+ goto out;
+
+ *val = data.byte;
+out:
+ i2c_unlock_bus(i2c->adapter, I2C_LOCK_SEGMENT);
+
+ return ret;
+}
+
+/**
+ * regmap_sccb_write - Write data to SCCB slave device
+ * @context: Device that will be interacted with
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * This executes the SCCB 3-phase write transmission cycle. Returns a
+ * negative errno on failure, or zero on success.
+ */
+static int regmap_sccb_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct i2c_client *i2c = to_i2c_client(dev);
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static const struct regmap_bus regmap_sccb_bus = {
+ .reg_write = regmap_sccb_write,
+ .reg_read = regmap_sccb_read,
+};
+
+static const struct regmap_bus *regmap_get_sccb_bus(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ if (config->val_bits == 8 && config->reg_bits == 8 &&
+ sccb_is_available(i2c->adapter))
+ return &regmap_sccb_bus;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sccb);
+
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_sccb_bus(i2c, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&i2c->dev, bus, &i2c->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sccb);
+
+MODULE_LICENSE("GPL v2");
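
For illustration, a sensor driver would typically set up the SCCB regmap like this from its I2C probe; only the 8-bit register / 8-bit value combination checked above is accepted. The structure name is hypothetical.

    static const struct regmap_config mysensor_regmap_config = {
            .reg_bits = 8,
            .val_bits = 8,
    };

    /* In the sensor's i2c probe(), given struct i2c_client *client: */
    map = devm_regmap_init_sccb(client, &mysensor_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);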
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
new file mode 100644
index 000000000..388c3a087
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2020 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/soundwire/sdw.h>
+#include <linux/soundwire/sdw_registers.h>
+#include "internal.h"
+
+static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int ret;
+
+ ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg), (val >> 8) & 0xff);
+ if (ret < 0)
+ return ret;
+
+ return sdw_write_no_pm(slave, reg, val & 0xff);
+}
+
+static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read0;
+ int read1;
+
+ read0 = sdw_read_no_pm(slave, reg);
+ if (read0 < 0)
+ return read0;
+
+ read1 = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
+ if (read1 < 0)
+ return read1;
+
+ *val = (read1 << 8) | read0;
+
+ return 0;
+}
+
+static const struct regmap_bus regmap_sdw_mbq = {
+ .reg_read = regmap_sdw_mbq_read,
+ .reg_write = regmap_sdw_mbq_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
+{
+ /* MBQ-based controls are only 16 bits wide for now */
+ if (config->val_bits != 16)
+ return -ENOTSUPP;
+
+ /* Registers are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_mbq_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw_mbq,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
+
+struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_mbq_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
+
+MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
+MODULE_LICENSE("GPL");
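
A usage sketch matching the config check above: SDCA MBQ controls are addressed with 32-bit registers and carry 16-bit values. The structure name is hypothetical.

    static const struct regmap_config mycodec_mbq_regmap_config = {
            .reg_bits = 32,
            .val_bits = 16,
    };

    /* In the SoundWire codec driver, given struct sdw_slave *slave: */
    map = devm_regmap_init_sdw_mbq(slave, &mycodec_mbq_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);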
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
new file mode 100644
index 000000000..81b0327f7
--- /dev/null
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-17 Intel Corporation.
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/soundwire/sdw.h>
+#include "internal.h"
+
+static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+ return sdw_write_no_pm(slave, reg, val);
+}
+
+static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+ read = sdw_read_no_pm(slave, reg);
+ if (read < 0)
+ return read;
+
+ *val = read;
+ return 0;
+}
+
+static const struct regmap_bus regmap_sdw = {
+ .reg_read = regmap_sdw_read,
+ .reg_write = regmap_sdw_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+ /* All registers are 8 bits wide, as per the MIPI SoundWire 1.0 spec */
+ if (config->val_bits != 8)
+ return -ENOTSUPP;
+
+ /* Registers are 32 bits wide */
+ if (config->reg_bits != 32)
+ return -ENOTSUPP;
+
+ if (config->pad_bits != 0)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+struct regmap *__regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_sdw);
+
+struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ int ret;
+
+ ret = regmap_sdw_config_check(config);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return __devm_regmap_init(&sdw->dev, &regmap_sdw,
+ &sdw->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
+
+MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_LICENSE("GPL v2");
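
A usage sketch matching the config check above (32-bit register addresses, 8-bit values); the structure name is hypothetical.

    static const struct regmap_config mycodec_sdw_regmap_config = {
            .reg_bits = 32,
            .val_bits = 8,
    };

    /* In the SoundWire codec driver, given struct sdw_slave *slave: */
    map = devm_regmap_init_sdw(slave, &mycodec_sdw_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);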
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
new file mode 100644
index 000000000..8075db788
--- /dev/null
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017, Linaro Ltd.
+
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static int regmap_slimbus_write(void *context, const void *data, size_t count)
+{
+ struct slim_device *sdev = context;
+
+ return slim_write(sdev, *(u16 *)data, count - 2, (u8 *)data + 2);
+}
+
+static int regmap_slimbus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct slim_device *sdev = context;
+
+ return slim_read(sdev, *(u16 *)reg, val_size, val);
+}
+
+static const struct regmap_bus regmap_slimbus_bus = {
+ .write = regmap_slimbus_write,
+ .read = regmap_slimbus_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct regmap_bus *regmap_get_slimbus(struct slim_device *slim,
+ const struct regmap_config *config)
+{
+ if (config->val_bits == 8 && config->reg_bits == 16)
+ return &regmap_slimbus_bus;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
+
+struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_slimbus(slimbus, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
+
+MODULE_LICENSE("GPL v2");
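
A usage sketch matching regmap_get_slimbus() above, which only accepts 16-bit registers with 8-bit values; the names are hypothetical.

    static const struct regmap_config mycodec_slim_regmap_config = {
            .reg_bits = 16,
            .val_bits = 8,
    };

    /* In the SLIMbus device driver, given struct slim_device *sdev: */
    map = devm_regmap_init_slimbus(sdev, &mycodec_slim_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);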
diff --git a/drivers/base/regmap/regmap-spi-avmm.c b/drivers/base/regmap/regmap-spi-avmm.c
new file mode 100644
index 000000000..4c2b94b3e
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi-avmm.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPI AVMM support
+//
+// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/swab.h>
+
+/*
+ * This driver implements the regmap operations for a generic SPI
+ * master to access the registers of an SPI slave chip that contains
+ * an Avalon bus.
+ *
+ * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
+ * in the spi slave chip. The IP acts as a bridge to convert encoded streams of
+ * bytes from the host to the internal register read/write on Avalon bus. In
+ * order to issue register access requests to the slave chip, the host should
+ * send formatted bytes that conform to the transfer protocol.
+ * The transfer protocol contains 3 layers: transaction layer, packet layer
+ * and physical layer.
+ *
+ * Reference Documents could be found at:
+ * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
+ *
+ * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
+ * introduction to the protocol.
+ *
+ * Chapter "Avalon Packets to Transactions Converter Core" describes
+ * the transaction layer.
+ *
+ * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
+ * describes the packet layer.
+ *
+ * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
+ * physical layer.
+ *
+ *
+ * When host issues a regmap read/write, the driver will transform the request
+ * to byte stream layer by layer. It formats the register addr, value and
+ * length to the transaction layer request, then converts the request to packet
+ * layer bytes stream and then to physical layer bytes stream. Finally the
+ * driver sends the formatted byte stream over SPI bus to the slave chip.
+ *
+ * The spi-avmm IP on the slave chip decodes the byte stream and initiates
+ * register read/write on its internal Avalon bus, and then encodes the
+ * response to byte stream and sends back to host.
+ *
+ * The driver receives the byte stream, reverses the 3 layers transformation,
+ * and finally gets the response value (read out data for register read,
+ * successful written size for register write).
+ */
+
+#define PKT_SOP 0x7a
+#define PKT_EOP 0x7b
+#define PKT_CHANNEL 0x7c
+#define PKT_ESC 0x7d
+
+#define PHY_IDLE 0x4a
+#define PHY_ESC 0x4d
+
+#define TRANS_CODE_WRITE 0x0
+#define TRANS_CODE_SEQ_WRITE 0x4
+#define TRANS_CODE_READ 0x10
+#define TRANS_CODE_SEQ_READ 0x14
+#define TRANS_CODE_NO_TRANS 0x7f
+
+#define SPI_AVMM_XFER_TIMEOUT (msecs_to_jiffies(200))
+
+/* slave's register addr is 32 bits */
+#define SPI_AVMM_REG_SIZE 4UL
+/* slave's register value is 32 bits */
+#define SPI_AVMM_VAL_SIZE 4UL
+
+/*
+ * The max rx size could be larger, but to keep buffer consumption
+ * reasonable we limit transfers to 1KB at most.
+ */
+#define MAX_READ_CNT 256UL
+#define MAX_WRITE_CNT 1UL
+
+struct trans_req_header {
+ u8 code;
+ u8 rsvd;
+ __be16 size;
+ __be32 addr;
+} __packed;
+
+struct trans_resp_header {
+ u8 r_code;
+ u8 rsvd;
+ __be16 size;
+} __packed;
+
+#define TRANS_REQ_HD_SIZE (sizeof(struct trans_req_header))
+#define TRANS_RESP_HD_SIZE (sizeof(struct trans_resp_header))
+
+/*
+ * In transaction layer,
+ * the write request format is: Transaction request header + data
+ * the read request format is: Transaction request header
+ * the write response format is: Transaction response header
+ * the read response format is: pure data, no Transaction response header
+ */
+#define TRANS_WR_TX_SIZE(n) (TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
+#define TRANS_RD_TX_SIZE TRANS_REQ_HD_SIZE
+#define TRANS_TX_MAX TRANS_WR_TX_SIZE(MAX_WRITE_CNT)
+
+#define TRANS_RD_RX_SIZE(n) (SPI_AVMM_VAL_SIZE * (n))
+#define TRANS_WR_RX_SIZE TRANS_RESP_HD_SIZE
+#define TRANS_RX_MAX TRANS_RD_RX_SIZE(MAX_READ_CNT)
+
+/* tx & rx share one transaction layer buffer */
+#define TRANS_BUF_SIZE ((TRANS_TX_MAX > TRANS_RX_MAX) ? \
+ TRANS_TX_MAX : TRANS_RX_MAX)
+
+/*
+ * In tx phase, the host prepares all the phy layer bytes of a request in the
+ * phy buffer and sends them in a batch.
+ *
+ * The packet layer and physical layer define several special chars for
+ * various purposes; when a transaction layer byte matches one of these
+ * special chars, it must be escaped. The escape rule is: escape char
+ * first, followed by the byte XOR'ed with 0x20.
+ *
+ * This macro defines the max possible length of the phy data. In the worst
+ * case, all transaction layer bytes need to be escaped (so the data length
+ * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
+ * we should make sure the length is aligned to SPI BPW.
+ */
+#define PHY_TX_MAX ALIGN(2 * TRANS_TX_MAX + 4, 4)
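
To make the escape rule above concrete: the transaction byte 0x7a collides with PKT_SOP and is sent as the pair 0x7d 0x5a (PKT_ESC, then 0x7a ^ 0x20); 0x4a collides with PHY_IDLE and becomes 0x4d 0x6a. A standalone sketch of that rule, for illustration only (the driver open-codes it in br_pkt_phy_tx_prepare()):

    /* Emit one transaction byte into the phy stream, escaping if needed. */
    static void escape_byte(u8 in, u8 *out, unsigned int *n)
    {
            if (in == PKT_SOP || in == PKT_EOP ||
                in == PKT_CHANNEL || in == PKT_ESC) {
                    out[(*n)++] = PKT_ESC;
                    out[(*n)++] = in ^ 0x20;
            } else if (in == PHY_IDLE || in == PHY_ESC) {
                    out[(*n)++] = PHY_ESC;
                    out[(*n)++] = in ^ 0x20;
            } else {
                    out[(*n)++] = in;
            }
    }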
+
+/*
+ * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from the slave,
+ * so the max length of the rx byte stream is unpredictable. The driver
+ * therefore reads words one by one and parses each word immediately into
+ * the transaction layer buffer. Only one word's worth of phy buffer is
+ * used for rx.
+ */
+#define PHY_BUF_SIZE PHY_TX_MAX
+
+/**
+ * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
+ *
+ * @spi: spi slave associated with this bridge.
+ * @word_len: bytes of word for spi transfer.
+ * @trans_len: length of valid data in trans_buf.
+ * @phy_len: length of valid data in phy_buf.
+ * @trans_buf: the bridge buffer for transaction layer data.
+ * @phy_buf: the bridge buffer for physical layer data.
+ * @swap_words: the word swapping cb for phy data. NULL if not needed.
+ *
+ * As a device's registers are implemented in the AVMM bus address space,
+ * the driver must issue formatted requests to the SPI slave to AVMM bus
+ * master bridge to perform register accesses.
+ */
+struct spi_avmm_bridge {
+ struct spi_device *spi;
+ unsigned char word_len;
+ unsigned int trans_len;
+ unsigned int phy_len;
+ /* bridge buffer used in translation between protocol layers */
+ char trans_buf[TRANS_BUF_SIZE];
+ char phy_buf[PHY_BUF_SIZE];
+ void (*swap_words)(void *buf, unsigned int len);
+};
+
+static void br_swap_words_32(void *buf, unsigned int len)
+{
+ swab32_array(buf, len / 4);
+}
+
+/*
+ * Format transaction layer data in br->trans_buf according to the register
+ * access request, Store valid transaction layer data length in br->trans_len.
+ */
+static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
+ u32 *wr_val, u32 count)
+{
+ struct trans_req_header *header;
+ unsigned int trans_len;
+ u8 code;
+ __le32 *data;
+ int i;
+
+ if (is_read) {
+ if (count == 1)
+ code = TRANS_CODE_READ;
+ else
+ code = TRANS_CODE_SEQ_READ;
+ } else {
+ if (count == 1)
+ code = TRANS_CODE_WRITE;
+ else
+ code = TRANS_CODE_SEQ_WRITE;
+ }
+
+ header = (struct trans_req_header *)br->trans_buf;
+ header->code = code;
+ header->rsvd = 0;
+ header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
+ header->addr = cpu_to_be32(reg);
+
+ trans_len = TRANS_REQ_HD_SIZE;
+
+ if (!is_read) {
+ trans_len += SPI_AVMM_VAL_SIZE * count;
+ if (trans_len > sizeof(br->trans_buf))
+ return -ENOMEM;
+
+ data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);
+
+ for (i = 0; i < count; i++)
+ *data++ = cpu_to_le32(*wr_val++);
+ }
+
+ /* Store valid trans data length for next layer */
+ br->trans_len = trans_len;
+
+ return 0;
+}
+
+/*
+ * Convert transaction layer data (in br->trans_buf) to phy layer data, store
+ * them in br->phy_buf. Pad the phy_buf aligned with SPI's BPW. Store valid phy
+ * layer data length in br->phy_len.
+ *
+ * phy_buf len should be aligned with SPI's BPW. Spare bytes should be padded
+ * with PHY_IDLE, then the slave will just drop them.
+ *
+ * The driver does not simply pad 4a at the tail. The concern is that the
+ * driver does not store MISO data during the tx phase; if it pads 4a at the
+ * tail and the slave is fast enough to respond during the padding time,
+ * those rx bytes are lost. In the following case, 7a,7c,00 would be lost.
+ * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
+ * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
+ *
+ * So the driver moves the EOP and the bytes after it to the end of the
+ * aligned size, then fills the hole with PHY_IDLE. As follows:
+ * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
+ * after pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
+ * Then, if the slave does not get the entire packet before the tx phase is
+ * over, it cannot respond to anything either.
+ */
+static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
+{
+ char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
+ unsigned int aligned_phy_len, move_size;
+ bool need_esc = false;
+
+ tb = br->trans_buf;
+ tb_end = tb + br->trans_len;
+ pb = br->phy_buf;
+ pb_limit = pb + ARRAY_SIZE(br->phy_buf);
+
+ *pb++ = PKT_SOP;
+
+ /*
+ * The driver doesn't support multiple channels so the channel number
+ * is always 0.
+ */
+ *pb++ = PKT_CHANNEL;
+ *pb++ = 0x0;
+
+ for (; pb < pb_limit && tb < tb_end; pb++) {
+ if (need_esc) {
+ *pb = *tb++ ^ 0x20;
+ need_esc = false;
+ continue;
+ }
+
+ /* EOP should be inserted before the last valid char */
+ if (tb == tb_end - 1 && !pb_eop) {
+ *pb = PKT_EOP;
+ pb_eop = pb;
+ continue;
+ }
+
+ /*
+ * insert an ESCAPE char if the data value equals any special
+ * char.
+ */
+ switch (*tb) {
+ case PKT_SOP:
+ case PKT_EOP:
+ case PKT_CHANNEL:
+ case PKT_ESC:
+ *pb = PKT_ESC;
+ need_esc = true;
+ break;
+ case PHY_IDLE:
+ case PHY_ESC:
+ *pb = PHY_ESC;
+ need_esc = true;
+ break;
+ default:
+ *pb = *tb++;
+ break;
+ }
+ }
+
+ /* The phy buffer is used up but transaction layer data remains */
+ if (tb < tb_end)
+ return -ENOMEM;
+
+ /* Store valid phy data length for spi transfer */
+ br->phy_len = pb - br->phy_buf;
+
+ if (br->word_len == 1)
+ return 0;
+
+ /* Do phy buf padding if word_len > 1 byte. */
+ aligned_phy_len = ALIGN(br->phy_len, br->word_len);
+ if (aligned_phy_len > sizeof(br->phy_buf))
+ return -ENOMEM;
+
+ if (aligned_phy_len == br->phy_len)
+ return 0;
+
+ /* move EOP and bytes after EOP to the end of aligned size */
+ move_size = pb - pb_eop;
+ memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);
+
+ /* fill the hole with PHY_IDLEs */
+ memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);
+
+ /* update the phy data length */
+ br->phy_len = aligned_phy_len;
+
+ return 0;
+}
+
+/*
+ * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will
+ * ignore rx in tx phase.
+ */
+static int br_do_tx(struct spi_avmm_bridge *br)
+{
+ /* reorder words for spi transfer */
+ if (br->swap_words)
+ br->swap_words(br->phy_buf, br->phy_len);
+
+ /* send all data in phy_buf */
+ return spi_write(br->spi, br->phy_buf, br->phy_len);
+}
+
+/*
+ * This function reads the rx byte stream from SPI word by word and converts
+ * it to transaction layer data in br->trans_buf. It also stores the length
+ * of the rx transaction layer data in br->trans_len.
+ *
+ * The slave may send an unknown number of PHY_IDLEs in the rx phase, so we
+ * cannot prepare a fixed-length buffer to receive all of the rx data in a
+ * batch. We have to read word by word and convert each word to transaction
+ * layer data as it arrives.
+ */
+static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
+{
+ bool eop_found = false, channel_found = false, esc_found = false;
+ bool valid_word = false, last_try = false;
+ struct device *dev = &br->spi->dev;
+ char *pb, *tb_limit, *tb = NULL;
+ unsigned long poll_timeout;
+ int ret, i;
+
+ tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
+ pb = br->phy_buf;
+ poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
+ while (tb < tb_limit) {
+ ret = spi_read(br->spi, pb, br->word_len);
+ if (ret)
+ return ret;
+
+ /* reorder the word back */
+ if (br->swap_words)
+ br->swap_words(pb, br->word_len);
+
+ valid_word = false;
+ for (i = 0; i < br->word_len; i++) {
+ /* drop everything before first SOP */
+ if (!tb && pb[i] != PKT_SOP)
+ continue;
+
+ /* drop PHY_IDLE */
+ if (pb[i] == PHY_IDLE)
+ continue;
+
+ valid_word = true;
+
+ /*
+ * We don't support multiple channels, so error out if
+ * a non-zero channel number is found.
+ */
+ if (channel_found) {
+ if (pb[i] != 0) {
+ dev_err(dev, "%s channel num != 0\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ channel_found = false;
+ continue;
+ }
+
+ switch (pb[i]) {
+ case PKT_SOP:
+ /*
+ * reset the parsing if a second SOP appears.
+ */
+ tb = br->trans_buf;
+ eop_found = false;
+ channel_found = false;
+ esc_found = false;
+ break;
+ case PKT_EOP:
+ /*
+ * No special char is expected after ESC char.
+ * No special char (except ESC & PHY_IDLE) is
+ * expected after EOP char.
+ *
+ * The special chars are all dropped.
+ */
+ if (esc_found || eop_found)
+ return -EFAULT;
+
+ eop_found = true;
+ break;
+ case PKT_CHANNEL:
+ if (esc_found || eop_found)
+ return -EFAULT;
+
+ channel_found = true;
+ break;
+ case PKT_ESC:
+ case PHY_ESC:
+ if (esc_found)
+ return -EFAULT;
+
+ esc_found = true;
+ break;
+ default:
+ /* Record the normal byte in trans_buf. */
+ if (esc_found) {
+ *tb++ = pb[i] ^ 0x20;
+ esc_found = false;
+ } else {
+ *tb++ = pb[i];
+ }
+
+ /*
+ * We got the last normal byte after EOP, so it
+ * is time to finish. Normally the function
+ * should return here.
+ */
+ if (eop_found) {
+ br->trans_len = tb - br->trans_buf;
+ return 0;
+ }
+ }
+ }
+
+ if (valid_word) {
+ /* update poll timeout when we get valid word */
+ poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
+ last_try = false;
+ } else {
+ /*
+ * We time out when rx stays invalid for some time. But
+ * it is possible we are scheduled out for a long time
+ * after a spi_read, so a SW timeout may fire when we
+ * are scheduled back in even though the HW has worked
+ * fine and has been ready for a long time. So do one
+ * extra read: if we get a valid word we can continue
+ * rx, otherwise a real HW issue has occurred.
+ */
+ if (last_try)
+ return -ETIMEDOUT;
+
+ if (time_after(jiffies, poll_timeout))
+ last_try = true;
+ }
+ }
+
+ /*
+ * We have used up the entire transaction layer buffer but cannot find
+ * the end of the byte stream.
+ */
+ dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
+ __func__);
+
+ return -EFAULT;
+}
+
+/*
+ * For read transactions, the avmm bus will directly return register values
+ * without transaction response header.
+ */
+static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
+ u32 *val, unsigned int expected_count)
+{
+ unsigned int i, trans_len = br->trans_len;
+ __le32 *data;
+
+ if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
+ return -EFAULT;
+
+ data = (__le32 *)br->trans_buf;
+ for (i = 0; i < expected_count; i++)
+ *val++ = le32_to_cpu(*data++);
+
+ return 0;
+}
+
+/*
+ * For write transactions, the slave will return a transaction response
+ * header.
+ */
+static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
+ unsigned int expected_count)
+{
+ unsigned int trans_len = br->trans_len;
+ struct trans_resp_header *resp;
+ u8 code;
+ u16 val_len;
+
+ if (trans_len != TRANS_RESP_HD_SIZE)
+ return -EFAULT;
+
+ resp = (struct trans_resp_header *)br->trans_buf;
+
+ code = resp->r_code ^ 0x80;
+ val_len = be16_to_cpu(resp->size);
+ if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
+ return -EFAULT;
+
+ /* error out if the trans code doesn't align with the val size */
+ if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
+ (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_reg_access(void *context, bool is_read, unsigned int reg,
+ unsigned int *value, unsigned int count)
+{
+ struct spi_avmm_bridge *br = context;
+ int ret;
+
+ /* invalidate bridge buffers first */
+ br->trans_len = 0;
+ br->phy_len = 0;
+
+ ret = br_trans_tx_prepare(br, is_read, reg, value, count);
+ if (ret)
+ return ret;
+
+ ret = br_pkt_phy_tx_prepare(br);
+ if (ret)
+ return ret;
+
+ ret = br_do_tx(br);
+ if (ret)
+ return ret;
+
+ ret = br_do_rx_and_pkt_phy_parse(br);
+ if (ret)
+ return ret;
+
+ if (is_read)
+ return br_rd_trans_rx_parse(br, value, count);
+ else
+ return br_wr_trans_rx_parse(br, count);
+}
+
+static int regmap_spi_avmm_gather_write(void *context,
+ const void *reg_buf, size_t reg_len,
+ const void *val_buf, size_t val_len)
+{
+ if (reg_len != SPI_AVMM_REG_SIZE)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
+ return -EINVAL;
+
+ return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
+ val_len / SPI_AVMM_VAL_SIZE);
+}
+
+static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
+{
+ if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
+ return -EINVAL;
+
+ return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
+ data + SPI_AVMM_REG_SIZE,
+ bytes - SPI_AVMM_REG_SIZE);
+}
+
+static int regmap_spi_avmm_read(void *context,
+ const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ if (reg_len != SPI_AVMM_REG_SIZE)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
+ return -EINVAL;
+
+ return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
+ (val_len / SPI_AVMM_VAL_SIZE));
+}
+
+static struct spi_avmm_bridge *
+spi_avmm_bridge_ctx_gen(struct spi_device *spi)
+{
+ struct spi_avmm_bridge *br;
+
+ if (!spi)
+ return ERR_PTR(-ENODEV);
+
+ /* Only support BPW == 8 or 32 now. Try 32 BPW first. */
+ spi->mode = SPI_MODE_1;
+ spi->bits_per_word = 32;
+ if (spi_setup(spi)) {
+ spi->bits_per_word = 8;
+ if (spi_setup(spi))
+ return ERR_PTR(-EINVAL);
+ }
+
+ br = kzalloc(sizeof(*br), GFP_KERNEL);
+ if (!br)
+ return ERR_PTR(-ENOMEM);
+
+ br->spi = spi;
+ br->word_len = spi->bits_per_word / 8;
+ if (br->word_len == 4) {
+ /*
+ * The protocol requires little endian byte order but MSB
+ * first, so the driver needs to swap the byte order word
+ * by word if the word length is greater than 1 byte.
+ */
+ br->swap_words = br_swap_words_32;
+ }
+
+ return br;
+}
+
+static void spi_avmm_bridge_ctx_free(void *context)
+{
+ kfree(context);
+}
+
+static const struct regmap_bus regmap_spi_avmm_bus = {
+ .write = regmap_spi_avmm_write,
+ .gather_write = regmap_spi_avmm_gather_write,
+ .read = regmap_spi_avmm_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
+ .max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
+ .free_context = spi_avmm_bridge_ctx_free,
+};
+
+struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct spi_avmm_bridge *bridge;
+ struct regmap *map;
+
+ bridge = spi_avmm_bridge_ctx_gen(spi);
+ if (IS_ERR(bridge))
+ return ERR_CAST(bridge);
+
+ map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
+ bridge, config, lock_key, lock_name);
+ if (IS_ERR(map)) {
+ spi_avmm_bridge_ctx_free(bridge);
+ return ERR_CAST(map);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);
+
+struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct spi_avmm_bridge *bridge;
+ struct regmap *map;
+
+ bridge = spi_avmm_bridge_ctx_gen(spi);
+ if (IS_ERR(bridge))
+ return ERR_CAST(bridge);
+
+ map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
+ bridge, config, lock_key, lock_name);
+ if (IS_ERR(map)) {
+ spi_avmm_bridge_ctx_free(bridge);
+ return ERR_CAST(map);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);
+
+MODULE_LICENSE("GPL v2");
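
A usage sketch for a driver sitting on top of this bridge, assuming the 32-bit register and value widths fixed by SPI_AVMM_REG_SIZE/SPI_AVMM_VAL_SIZE; the names are illustrative.

    static const struct regmap_config myfpga_avmm_regmap_config = {
            .reg_bits = 32,
            .val_bits = 32,
            .reg_stride = 4,
    };

    /* In the SPI driver's probe(), given struct spi_device *spi: */
    map = devm_regmap_init_spi_avmm(spi, &myfpga_avmm_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);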
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
new file mode 100644
index 000000000..37ab23a9d
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPI support
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+struct regmap_async_spi {
+ struct regmap_async core;
+ struct spi_message m;
+ struct spi_transfer t[2];
+};
+
+static void regmap_spi_complete(void *data)
+{
+ struct regmap_async_spi *async = data;
+
+ regmap_async_complete_cb(&async->core, async->m.status);
+}
+
+static int regmap_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static int regmap_spi_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_message m;
+ struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
+ { .tx_buf = val, .len = val_len, }, };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int regmap_spi_async_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *a)
+{
+ struct regmap_async_spi *async = container_of(a,
+ struct regmap_async_spi,
+ core);
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ async->t[0].tx_buf = reg;
+ async->t[0].len = reg_len;
+ async->t[1].tx_buf = val;
+ async->t[1].len = val_len;
+
+ spi_message_init(&async->m);
+ spi_message_add_tail(&async->t[0], &async->m);
+ if (val)
+ spi_message_add_tail(&async->t[1], &async->m);
+
+ async->m.complete = regmap_spi_complete;
+ async->m.context = async;
+
+ return spi_async(spi, &async->m);
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+ struct regmap_async_spi *async_spi;
+
+ async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
+ if (!async_spi)
+ return NULL;
+
+ return &async_spi->core;
+}
+
+static int regmap_spi_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static const struct regmap_bus regmap_spi = {
+ .write = regmap_spi_write,
+ .gather_write = regmap_spi_gather_write,
+ .async_write = regmap_spi_async_write,
+ .async_alloc = regmap_spi_async_alloc,
+ .read = regmap_spi_read,
+ .read_flag_mask = 0x80,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ size_t max_size = spi_max_transfer_size(spi);
+ size_t max_msg_size, reg_reserve_size;
+ struct regmap_bus *bus;
+
+ if (max_size != SIZE_MAX) {
+ bus = kmemdup(&regmap_spi, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ max_msg_size = spi_max_message_size(spi);
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ + config->pad_bits / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
+ bus->free_on_exit = true;
+ bus->max_raw_read = max_size;
+ bus->max_raw_write = max_size;
+
+ return bus;
+ }
+
+ return &regmap_spi;
+}
+
+struct regmap *__regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spi);
+
+struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
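+
+/*
+ * Usage sketch (illustrative only, with a hypothetical "foo" SPI client):
+ * drivers normally reach this code through the devm_regmap_init_spi()
+ * wrapper from <linux/regmap.h> rather than calling the __ variants:
+ *
+ *	static const struct regmap_config foo_regmap_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *		.max_register = 0x7f,
+ *	};
+ *
+ *	map = devm_regmap_init_spi(spi, &foo_regmap_config);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */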
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
new file mode 100644
index 000000000..cdf12d2aa
--- /dev/null
+++ b/drivers/base/regmap/regmap-spmi.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - SPMI support
+//
+// Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+//
+// Based on regmap-i2c.c:
+// Copyright 2011 Wolfson Microelectronics plc
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/regmap.h>
+#include <linux/spmi.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+static int regmap_spmi_base_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ while (val_size-- && !err)
+ err = spmi_register_read(context, addr++, val++);
+
+ return err;
+}
+
+static int regmap_spmi_base_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ const u8 *data = val;
+ u8 addr = *(u8 *)reg;
+ int err = 0;
+
+ BUG_ON(reg_size != 1);
+
+ /*
+ * SPMI defines a more bandwidth-efficient 'Register 0 Write' sequence;
+ * use it when possible.
+ */
+ if (addr == 0 && val_size) {
+ err = spmi_register_zero_write(context, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+ while (val_size) {
+ err = spmi_register_write(context, addr, *data);
+ if (err)
+ goto err_out;
+
+ data++;
+ addr++;
+ val_size--;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_base_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 1);
+ return regmap_spmi_base_gather_write(context, data, 1, data + 1,
+ count - 1);
+}
+
+static const struct regmap_bus regmap_spmi_base = {
+ .read = regmap_spmi_base_read,
+ .write = regmap_spmi_base_write,
+ .gather_write = regmap_spmi_base_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+struct regmap *__regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_base);
+
+struct regmap *__devm_regmap_init_spmi_base(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_base, sdev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_base);
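+
+/*
+ * Usage sketch (illustrative, hypothetical "foo" SPMI function driver):
+ * the base register space is byte addressed with byte-wide values, so a
+ * matching config and the devm_regmap_init_spmi_base() wrapper from
+ * <linux/regmap.h> would typically look like:
+ *
+ *	static const struct regmap_config foo_base_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *	};
+ *
+ *	map = devm_regmap_init_spmi_base(sdev, &foo_base_config);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */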
+
+static int regmap_spmi_ext_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
+ BUG_ON(reg_size != 2);
+
+ addr = *(u16 *)reg;
+
+ /*
+ * Split accesses into two to take advantage of the more
+ * bandwidth-efficient 'Extended Register Read' command when possible
+ */
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_read(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_readl(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_ext_gather_write(void *context,
+ const void *reg, size_t reg_size,
+ const void *val, size_t val_size)
+{
+ int err = 0;
+ size_t len;
+ u16 addr;
+
+ BUG_ON(reg_size != 2);
+
+ addr = *(u16 *)reg;
+
+ while (addr <= 0xFF && val_size) {
+ len = min_t(size_t, val_size, 16);
+
+ err = spmi_ext_register_write(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+ while (val_size) {
+ len = min_t(size_t, val_size, 8);
+
+ err = spmi_ext_register_writel(context, addr, val, len);
+ if (err)
+ goto err_out;
+
+ addr += len;
+ val += len;
+ val_size -= len;
+ }
+
+err_out:
+ return err;
+}
+
+static int regmap_spmi_ext_write(void *context, const void *data,
+ size_t count)
+{
+ BUG_ON(count < 2);
+ return regmap_spmi_ext_gather_write(context, data, 2, data + 2,
+ count - 2);
+}
+
+static const struct regmap_bus regmap_spmi_ext = {
+ .read = regmap_spmi_ext_read,
+ .write = regmap_spmi_ext_write,
+ .gather_write = regmap_spmi_ext_gather_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+struct regmap *__regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_spmi_ext);
+
+struct regmap *__devm_regmap_init_spmi_ext(struct spmi_device *sdev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ return __devm_regmap_init(&sdev->dev, &regmap_spmi_ext, sdev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_spmi_ext);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
new file mode 100644
index 000000000..3a8b402db
--- /dev/null
+++ b/drivers/base/regmap/regmap-w1.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API - W1 (1-Wire) support
+//
+// Copyright (c) 2017 Radioavionica Corporation
+// Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
+
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/w1.h>
+
+#include "internal.h"
+
+#define W1_CMD_READ_DATA 0x69
+#define W1_CMD_WRITE_DATA 0x6C
+
+/*
+ * 1-Wire slave registers with 8 bit address and 8 bit data
+ */
+
+static int w1_reg_a8_v8_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct device *dev = context;
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret = 0;
+
+ if (reg > 255)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(sl->master, W1_CMD_READ_DATA);
+ w1_write_8(sl->master, reg);
+ *val = w1_read_8(sl->master);
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+static int w1_reg_a8_v8_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct device *dev = context;
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret = 0;
+
+ if (reg > 255)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+ w1_write_8(sl->master, reg);
+ w1_write_8(sl->master, val);
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+/*
+ * 1-Wire slave registers with 8 bit address and 16 bit data
+ */
+
+static int w1_reg_a8_v16_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret = 0;
+
+ if (reg > 255)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(sl->master, W1_CMD_READ_DATA);
+ w1_write_8(sl->master, reg);
+ *val = w1_read_8(sl->master);
+ *val |= w1_read_8(sl->master)<<8;
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+static int w1_reg_a8_v16_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret = 0;
+
+ if (reg > 255)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+ w1_write_8(sl->master, reg);
+ w1_write_8(sl->master, val & 0x00FF);
+ w1_write_8(sl->master, val>>8 & 0x00FF);
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+/*
+ * 1-Wire slave registers with 16 bit address and 16 bit data
+ */
+
+static int w1_reg_a16_v16_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct device *dev = context;
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret = 0;
+
+ if (reg > 65535)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(sl->master, W1_CMD_READ_DATA);
+ w1_write_8(sl->master, reg & 0x00FF);
+ w1_write_8(sl->master, reg>>8 & 0x00FF);
+ *val = w1_read_8(sl->master);
+ *val |= w1_read_8(sl->master)<<8;
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+static int w1_reg_a16_v16_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct device *dev = context;
+ struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+ int ret = 0;
+
+ if (reg > 65535)
+ return -EINVAL;
+
+ mutex_lock(&sl->master->bus_mutex);
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(sl->master, W1_CMD_WRITE_DATA);
+ w1_write_8(sl->master, reg & 0x00FF);
+ w1_write_8(sl->master, reg>>8 & 0x00FF);
+ w1_write_8(sl->master, val & 0x00FF);
+ w1_write_8(sl->master, val>>8 & 0x00FF);
+ } else {
+ ret = -ENODEV;
+ }
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
+/*
+ * Various types of supported bus addressing
+ */
+
+static const struct regmap_bus regmap_w1_bus_a8_v8 = {
+ .reg_read = w1_reg_a8_v8_read,
+ .reg_write = w1_reg_a8_v8_write,
+};
+
+static const struct regmap_bus regmap_w1_bus_a8_v16 = {
+ .reg_read = w1_reg_a8_v16_read,
+ .reg_write = w1_reg_a8_v16_write,
+};
+
+static const struct regmap_bus regmap_w1_bus_a16_v16 = {
+ .reg_read = w1_reg_a16_v16_read,
+ .reg_write = w1_reg_a16_v16_write,
+};
+
+static const struct regmap_bus *regmap_get_w1_bus(struct device *w1_dev,
+ const struct regmap_config *config)
+{
+ if (config->reg_bits == 8 && config->val_bits == 8)
+ return &regmap_w1_bus_a8_v8;
+
+ if (config->reg_bits == 8 && config->val_bits == 16)
+ return &regmap_w1_bus_a8_v16;
+
+ if (config->reg_bits == 16 && config->val_bits == 16)
+ return &regmap_w1_bus_a16_v16;
+
+ return ERR_PTR(-ENOTSUPP);
+}
+
+struct regmap *__regmap_init_w1(struct device *w1_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+
+ const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(w1_dev, bus, w1_dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__regmap_init_w1);
+
+struct regmap *__devm_regmap_init_w1(struct device *w1_dev,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+
+ const struct regmap_bus *bus = regmap_get_w1_bus(w1_dev, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(w1_dev, bus, w1_dev, config,
+ lock_key, lock_name);
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init_w1);
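+
+/*
+ * Usage sketch (illustrative, hypothetical 1-Wire slave driver): the
+ * reg_bits/val_bits pair in the config selects one of the buses above via
+ * regmap_get_w1_bus(); for an 8-bit address, 16-bit value device:
+ *
+ *	static const struct regmap_config foo_w1_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 16,
+ *	};
+ *
+ *	map = devm_regmap_init_w1(&sl->dev, &foo_w1_config);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */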
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
new file mode 100644
index 000000000..140af27f5
--- /dev/null
+++ b/drivers/base/regmap/regmap.c
@@ -0,0 +1,3515 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Register map access API
+//
+// Copyright 2011 Wolfson Microelectronics plc
+//
+// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/property.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#include "internal.h"
+
+/*
+ * Sometimes for failures during very early init the trace
+ * infrastructure isn't available early enough to be used. For this
+ * sort of problem defining LOG_DEVICE will add printks for basic
+ * register I/O on a specific device.
+ */
+#undef LOG_DEVICE
+
+#ifdef LOG_DEVICE
+static inline bool regmap_should_log(struct regmap *map)
+{
+ return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
+}
+#else
+static inline bool regmap_should_log(struct regmap *map) { return false; }
+#endif
+
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool force_write);
+
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+ unsigned int *val);
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val);
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val);
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+ unsigned int val);
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val);
+
+bool regmap_reg_in_ranges(unsigned int reg,
+ const struct regmap_range *ranges,
+ unsigned int nranges)
+{
+ const struct regmap_range *r;
+ int i;
+
+ for (i = 0, r = ranges; i < nranges; i++, r++)
+ if (regmap_reg_in_range(reg, r))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
+
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+ const struct regmap_access_table *table)
+{
+ /* Check "no ranges" first */
+ if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
+ return false;
+
+ /* In case zero "yes ranges" are supplied, any reg is OK */
+ if (!table->n_yes_ranges)
+ return true;
+
+ return regmap_reg_in_ranges(reg, table->yes_ranges,
+ table->n_yes_ranges);
+}
+EXPORT_SYMBOL_GPL(regmap_check_range_table);
+
+bool regmap_writeable(struct regmap *map, unsigned int reg)
+{
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ if (map->writeable_reg)
+ return map->writeable_reg(map->dev, reg);
+
+ if (map->wr_table)
+ return regmap_check_range_table(map, reg, map->wr_table);
+
+ return true;
+}
+
+bool regmap_cached(struct regmap *map, unsigned int reg)
+{
+ int ret;
+ unsigned int val;
+
+ if (map->cache_type == REGCACHE_NONE)
+ return false;
+
+ if (!map->cache_ops)
+ return false;
+
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ map->lock(map->lock_arg);
+ ret = regcache_read(map, reg, &val);
+ map->unlock(map->lock_arg);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+bool regmap_readable(struct regmap *map, unsigned int reg)
+{
+ if (!map->reg_read)
+ return false;
+
+ if (map->max_register && reg > map->max_register)
+ return false;
+
+ if (map->format.format_write)
+ return false;
+
+ if (map->readable_reg)
+ return map->readable_reg(map->dev, reg);
+
+ if (map->rd_table)
+ return regmap_check_range_table(map, reg, map->rd_table);
+
+ return true;
+}
+
+bool regmap_volatile(struct regmap *map, unsigned int reg)
+{
+ if (!map->format.format_write && !regmap_readable(map, reg))
+ return false;
+
+ if (map->volatile_reg)
+ return map->volatile_reg(map->dev, reg);
+
+ if (map->volatile_table)
+ return regmap_check_range_table(map, reg, map->volatile_table);
+
+ if (map->cache_ops)
+ return false;
+ else
+ return true;
+}
+
+bool regmap_precious(struct regmap *map, unsigned int reg)
+{
+ if (!regmap_readable(map, reg))
+ return false;
+
+ if (map->precious_reg)
+ return map->precious_reg(map->dev, reg);
+
+ if (map->precious_table)
+ return regmap_check_range_table(map, reg, map->precious_table);
+
+ return false;
+}
+
+bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
+{
+ if (map->writeable_noinc_reg)
+ return map->writeable_noinc_reg(map->dev, reg);
+
+ if (map->wr_noinc_table)
+ return regmap_check_range_table(map, reg, map->wr_noinc_table);
+
+ return true;
+}
+
+bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
+{
+ if (map->readable_noinc_reg)
+ return map->readable_noinc_reg(map->dev, reg);
+
+ if (map->rd_noinc_table)
+ return regmap_check_range_table(map, reg, map->rd_noinc_table);
+
+ return true;
+}
+
+static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
+ size_t num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
+ return false;
+
+ return true;
+}
+
+static void regmap_format_12_20_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[0] = reg >> 4;
+ out[1] = (reg << 4) | (val >> 16);
+ out[2] = val >> 8;
+ out[3] = val;
+}
+
+
+static void regmap_format_2_6_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ *out = (reg << 6) | val;
+}
+
+static void regmap_format_4_12_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ __be16 *out = map->work_buf;
+ *out = cpu_to_be16((reg << 12) | val);
+}
+
+static void regmap_format_7_9_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ __be16 *out = map->work_buf;
+ *out = cpu_to_be16((reg << 9) | val);
+}
+
+static void regmap_format_7_17_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = val >> 8;
+ out[0] = (val >> 16) | (reg << 1);
+}
+
+static void regmap_format_10_14_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = (val >> 8) | (reg << 6);
+ out[0] = reg >> 2;
+}
+
+static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
+{
+ u8 *b = buf;
+
+ b[0] = val << shift;
+}
+
+static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_be16(val << shift, buf);
+}
+
+static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_le16(val << shift, buf);
+}
+
+static void regmap_format_16_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ u16 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_be24(val << shift, buf);
+}
+
+static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_be32(val << shift, buf);
+}
+
+static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_le32(val << shift, buf);
+}
+
+static void regmap_format_32_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ u32 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+#ifdef CONFIG_64BIT
+static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_be64((u64) val << shift, buf);
+}
+
+static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
+{
+ put_unaligned_le64((u64) val << shift, buf);
+}
+
+static void regmap_format_64_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ u64 v = (u64) val << shift;
+
+ memcpy(buf, &v, sizeof(v));
+}
+#endif
+
+static void regmap_parse_inplace_noop(void *buf)
+{
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+ const u8 *b = buf;
+
+ return b[0];
+}
+
+static unsigned int regmap_parse_16_be(const void *buf)
+{
+ return get_unaligned_be16(buf);
+}
+
+static unsigned int regmap_parse_16_le(const void *buf)
+{
+ return get_unaligned_le16(buf);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
+{
+ u16 v = get_unaligned_be16(buf);
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static void regmap_parse_16_le_inplace(void *buf)
+{
+ u16 v = get_unaligned_le16(buf);
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static unsigned int regmap_parse_16_native(const void *buf)
+{
+ u16 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
+}
+
+static unsigned int regmap_parse_24_be(const void *buf)
+{
+ return get_unaligned_be24(buf);
+}
+
+static unsigned int regmap_parse_32_be(const void *buf)
+{
+ return get_unaligned_be32(buf);
+}
+
+static unsigned int regmap_parse_32_le(const void *buf)
+{
+ return get_unaligned_le32(buf);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
+{
+ u32 v = get_unaligned_be32(buf);
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static void regmap_parse_32_le_inplace(void *buf)
+{
+ u32 v = get_unaligned_le32(buf);
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static unsigned int regmap_parse_32_native(const void *buf)
+{
+ u32 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
+}
+
+#ifdef CONFIG_64BIT
+static unsigned int regmap_parse_64_be(const void *buf)
+{
+ return get_unaligned_be64(buf);
+}
+
+static unsigned int regmap_parse_64_le(const void *buf)
+{
+ return get_unaligned_le64(buf);
+}
+
+static void regmap_parse_64_be_inplace(void *buf)
+{
+ u64 v = get_unaligned_be64(buf);
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static void regmap_parse_64_le_inplace(void *buf)
+{
+ u64 v = get_unaligned_le64(buf);
+
+ memcpy(buf, &v, sizeof(v));
+}
+
+static unsigned int regmap_parse_64_native(const void *buf)
+{
+ u64 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
+}
+#endif
+
+static void regmap_lock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
+}
+
+static void regmap_lock_hwlock_irqsave(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
+ &map->spinlock_flags);
+}
+
+static void regmap_unlock_hwlock(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irq(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irq(map->hwlock);
+}
+
+static void regmap_unlock_hwlock_irqrestore(void *__map)
+{
+ struct regmap *map = __map;
+
+ hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
+}
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
+
+static void regmap_lock_mutex(void *__map)
+{
+ struct regmap *map = __map;
+ mutex_lock(&map->mutex);
+}
+
+static void regmap_unlock_mutex(void *__map)
+{
+ struct regmap *map = __map;
+ mutex_unlock(&map->mutex);
+}
+
+static void regmap_lock_spinlock(void *__map)
+__acquires(&map->spinlock)
+{
+ struct regmap *map = __map;
+ unsigned long flags;
+
+ spin_lock_irqsave(&map->spinlock, flags);
+ map->spinlock_flags = flags;
+}
+
+static void regmap_unlock_spinlock(void *__map)
+__releases(&map->spinlock)
+{
+ struct regmap *map = __map;
+ spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
+}
+
+static void regmap_lock_raw_spinlock(void *__map)
+__acquires(&map->raw_spinlock)
+{
+ struct regmap *map = __map;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&map->raw_spinlock, flags);
+ map->raw_spinlock_flags = flags;
+}
+
+static void regmap_unlock_raw_spinlock(void *__map)
+__releases(&map->raw_spinlock)
+{
+ struct regmap *map = __map;
+ raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
+}
+
+static void dev_get_regmap_release(struct device *dev, void *res)
+{
+ /*
+ * We don't actually have anything to do here; the goal here
+ * is not to manage the regmap but to provide a simple way to
+ * get the regmap back given a struct device.
+ */
+}
+
+static bool _regmap_range_add(struct regmap *map,
+ struct regmap_range_node *data)
+{
+ struct rb_root *root = &map->range_tree;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct regmap_range_node *this =
+ rb_entry(*new, struct regmap_range_node, node);
+
+ parent = *new;
+ if (data->range_max < this->range_min)
+ new = &((*new)->rb_left);
+ else if (data->range_min > this->range_max)
+ new = &((*new)->rb_right);
+ else
+ return false;
+ }
+
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ return true;
+}
+
+static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
+ unsigned int reg)
+{
+ struct rb_node *node = map->range_tree.rb_node;
+
+ while (node) {
+ struct regmap_range_node *this =
+ rb_entry(node, struct regmap_range_node, node);
+
+ if (reg < this->range_min)
+ node = node->rb_left;
+ else if (reg > this->range_max)
+ node = node->rb_right;
+ else
+ return this;
+ }
+
+ return NULL;
+}
+
+static void regmap_range_exit(struct regmap *map)
+{
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
+
+ next = rb_first(&map->range_tree);
+ while (next) {
+ range_node = rb_entry(next, struct regmap_range_node, node);
+ next = rb_next(&range_node->node);
+ rb_erase(&range_node->node, &map->range_tree);
+ kfree(range_node);
+ }
+
+ kfree(map->selector_work_buf);
+}
+
+static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
+{
+ if (config->name) {
+ const char *name = kstrdup_const(config->name, GFP_KERNEL);
+
+ if (!name)
+ return -ENOMEM;
+
+ kfree_const(map->name);
+ map->name = name;
+ }
+
+ return 0;
+}
+
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config)
+{
+ struct regmap **m;
+ int ret;
+
+ map->dev = dev;
+
+ ret = regmap_set_name(map, config);
+ if (ret)
+ return ret;
+
+ regmap_debugfs_exit(map);
+ regmap_debugfs_init(map);
+
+ /* Add a devres resource for dev_get_regmap() */
+ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+ if (!m) {
+ regmap_debugfs_exit(map);
+ return -ENOMEM;
+ }
+ *m = map;
+ devres_add(dev, m);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
+
+static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ enum regmap_endian endian;
+
+ /* Retrieve the endianness specification from the regmap config */
+ endian = config->reg_format_endian;
+
+ /* If the regmap config specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Retrieve the endianness specification from the bus config */
+ if (bus && bus->reg_format_endian_default)
+ endian = bus->reg_format_endian_default;
+
+ /* If the bus specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Use this if no other value was found */
+ return REGMAP_ENDIAN_BIG;
+}
+
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
+ enum regmap_endian endian;
+
+ /* Retrieve the endianness specification from the regmap config */
+ endian = config->val_format_endian;
+
+ /* If the regmap config specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* If the firmware node exists, try to get the endianness from it */
+ if (fwnode_property_read_bool(fwnode, "big-endian"))
+ endian = REGMAP_ENDIAN_BIG;
+ else if (fwnode_property_read_bool(fwnode, "little-endian"))
+ endian = REGMAP_ENDIAN_LITTLE;
+ else if (fwnode_property_read_bool(fwnode, "native-endian"))
+ endian = REGMAP_ENDIAN_NATIVE;
+
+ /* If the endianness was specified in fwnode, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Retrieve the endianness specification from the bus config */
+ if (bus && bus->val_format_endian_default)
+ endian = bus->val_format_endian_default;
+
+ /* If the bus specified a non-default value, use that */
+ if (endian != REGMAP_ENDIAN_DEFAULT)
+ return endian;
+
+ /* Use this if no other value was found */
+ return REGMAP_ENDIAN_BIG;
+}
+EXPORT_SYMBOL_GPL(regmap_get_val_endian);
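+
+/*
+ * Illustrative example (hypothetical "foo" driver): the lookup order above
+ * is config, then firmware node ("big-endian", "little-endian" or
+ * "native-endian" properties), then the bus default, then big endian. A
+ * driver that must force little-endian values regardless of those defaults
+ * can simply set it in its config:
+ *
+ *	static const struct regmap_config foo_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 16,
+ *		.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ *	};
+ */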
+
+struct regmap *__regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap *map;
+ int ret = -EINVAL;
+ enum regmap_endian reg_endian, val_endian;
+ int i, j;
+
+ if (!config)
+ goto err;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = regmap_set_name(map, config);
+ if (ret)
+ goto err_map;
+
+ ret = -EINVAL; /* Later error paths rely on this */
+
+ if (config->disable_locking) {
+ map->lock = map->unlock = regmap_lock_unlock_none;
+ map->can_sleep = config->can_sleep;
+ regmap_debugfs_disable(map);
+ } else if (config->lock && config->unlock) {
+ map->lock = config->lock;
+ map->unlock = config->unlock;
+ map->lock_arg = config->lock_arg;
+ map->can_sleep = config->can_sleep;
+ } else if (config->use_hwlock) {
+ map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
+ if (!map->hwlock) {
+ ret = -ENXIO;
+ goto err_name;
+ }
+
+ switch (config->hwlock_mode) {
+ case HWLOCK_IRQSTATE:
+ map->lock = regmap_lock_hwlock_irqsave;
+ map->unlock = regmap_unlock_hwlock_irqrestore;
+ break;
+ case HWLOCK_IRQ:
+ map->lock = regmap_lock_hwlock_irq;
+ map->unlock = regmap_unlock_hwlock_irq;
+ break;
+ default:
+ map->lock = regmap_lock_hwlock;
+ map->unlock = regmap_unlock_hwlock;
+ break;
+ }
+
+ map->lock_arg = map;
+ } else {
+ if ((bus && bus->fast_io) ||
+ config->fast_io) {
+ if (config->use_raw_spinlock) {
+ raw_spin_lock_init(&map->raw_spinlock);
+ map->lock = regmap_lock_raw_spinlock;
+ map->unlock = regmap_unlock_raw_spinlock;
+ lockdep_set_class_and_name(&map->raw_spinlock,
+ lock_key, lock_name);
+ } else {
+ spin_lock_init(&map->spinlock);
+ map->lock = regmap_lock_spinlock;
+ map->unlock = regmap_unlock_spinlock;
+ lockdep_set_class_and_name(&map->spinlock,
+ lock_key, lock_name);
+ }
+ } else {
+ mutex_init(&map->mutex);
+ map->lock = regmap_lock_mutex;
+ map->unlock = regmap_unlock_mutex;
+ map->can_sleep = true;
+ lockdep_set_class_and_name(&map->mutex,
+ lock_key, lock_name);
+ }
+ map->lock_arg = map;
+ }
+
+ /*
+ * When we write in fast paths with regmap_bulk_write(), don't allocate
+ * scratch buffers with sleeping allocations.
+ */
+ if ((bus && bus->fast_io) || config->fast_io)
+ map->alloc_flags = GFP_ATOMIC;
+ else
+ map->alloc_flags = GFP_KERNEL;
+
+ map->reg_base = config->reg_base;
+
+ map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
+ map->format.pad_bytes = config->pad_bits / 8;
+ map->format.reg_downshift = config->reg_downshift;
+ map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
+ map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+ config->val_bits + config->pad_bits, 8);
+ map->reg_shift = config->pad_bits % 8;
+ if (config->reg_stride)
+ map->reg_stride = config->reg_stride;
+ else
+ map->reg_stride = 1;
+ if (is_power_of_2(map->reg_stride))
+ map->reg_stride_order = ilog2(map->reg_stride);
+ else
+ map->reg_stride_order = -1;
+ map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
+ map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
+ map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
+ if (bus) {
+ map->max_raw_read = bus->max_raw_read;
+ map->max_raw_write = bus->max_raw_write;
+ } else if (config->max_raw_read && config->max_raw_write) {
+ map->max_raw_read = config->max_raw_read;
+ map->max_raw_write = config->max_raw_write;
+ }
+ map->dev = dev;
+ map->bus = bus;
+ map->bus_context = bus_context;
+ map->max_register = config->max_register;
+ map->wr_table = config->wr_table;
+ map->rd_table = config->rd_table;
+ map->volatile_table = config->volatile_table;
+ map->precious_table = config->precious_table;
+ map->wr_noinc_table = config->wr_noinc_table;
+ map->rd_noinc_table = config->rd_noinc_table;
+ map->writeable_reg = config->writeable_reg;
+ map->readable_reg = config->readable_reg;
+ map->volatile_reg = config->volatile_reg;
+ map->precious_reg = config->precious_reg;
+ map->writeable_noinc_reg = config->writeable_noinc_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
+ map->cache_type = config->cache_type;
+
+ spin_lock_init(&map->async_lock);
+ INIT_LIST_HEAD(&map->async_list);
+ INIT_LIST_HEAD(&map->async_free);
+ init_waitqueue_head(&map->async_waitq);
+
+ if (config->read_flag_mask ||
+ config->write_flag_mask ||
+ config->zero_flag_mask) {
+ map->read_flag_mask = config->read_flag_mask;
+ map->write_flag_mask = config->write_flag_mask;
+ } else if (bus) {
+ map->read_flag_mask = bus->read_flag_mask;
+ }
+
+ if (config && config->read && config->write) {
+ map->reg_read = _regmap_bus_read;
+ if (config->reg_update_bits)
+ map->reg_update_bits = config->reg_update_bits;
+
+ /* Bulk read/write */
+ map->read = config->read;
+ map->write = config->write;
+
+ reg_endian = REGMAP_ENDIAN_NATIVE;
+ val_endian = REGMAP_ENDIAN_NATIVE;
+ } else if (!bus) {
+ map->reg_read = config->reg_read;
+ map->reg_write = config->reg_write;
+ map->reg_update_bits = config->reg_update_bits;
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
+ } else if (!bus->read || !bus->write) {
+ map->reg_read = _regmap_bus_reg_read;
+ map->reg_write = _regmap_bus_reg_write;
+ map->reg_update_bits = bus->reg_update_bits;
+
+ map->defer_caching = false;
+ goto skip_format_initialization;
+ } else {
+ map->reg_read = _regmap_bus_read;
+ map->reg_update_bits = bus->reg_update_bits;
+ /* Bulk read/write */
+ map->read = bus->read;
+ map->write = bus->write;
+
+ reg_endian = regmap_get_reg_endian(bus, config);
+ val_endian = regmap_get_val_endian(dev, bus, config);
+ }
+
+ switch (config->reg_bits + map->reg_shift) {
+ case 2:
+ switch (config->val_bits) {
+ case 6:
+ map->format.format_write = regmap_format_2_6_write;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 4:
+ switch (config->val_bits) {
+ case 12:
+ map->format.format_write = regmap_format_4_12_write;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 7:
+ switch (config->val_bits) {
+ case 9:
+ map->format.format_write = regmap_format_7_9_write;
+ break;
+ case 17:
+ map->format.format_write = regmap_format_7_17_write;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 10:
+ switch (config->val_bits) {
+ case 14:
+ map->format.format_write = regmap_format_10_14_write;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 12:
+ switch (config->val_bits) {
+ case 20:
+ map->format.format_write = regmap_format_12_20_write;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 8:
+ map->format.format_reg = regmap_format_8;
+ break;
+
+ case 16:
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_16_be;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_reg = regmap_format_16_le;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_16_native;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 24:
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_24_be;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+ case 32:
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_32_be;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_reg = regmap_format_32_le;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_32_native;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+
+#ifdef CONFIG_64BIT
+ case 64:
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_64_be;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_reg = regmap_format_64_le;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_64_native;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+#endif
+
+ default:
+ goto err_hwlock;
+ }
+
+ if (val_endian == REGMAP_ENDIAN_NATIVE)
+ map->format.parse_inplace = regmap_parse_inplace_noop;
+
+ switch (config->val_bits) {
+ case 8:
+ map->format.format_val = regmap_format_8;
+ map->format.parse_val = regmap_parse_8;
+ map->format.parse_inplace = regmap_parse_inplace_noop;
+ break;
+ case 16:
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_16_be;
+ map->format.parse_val = regmap_parse_16_be;
+ map->format.parse_inplace = regmap_parse_16_be_inplace;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_val = regmap_format_16_le;
+ map->format.parse_val = regmap_parse_16_le;
+ map->format.parse_inplace = regmap_parse_16_le_inplace;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_16_native;
+ map->format.parse_val = regmap_parse_16_native;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+ case 24:
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_24_be;
+ map->format.parse_val = regmap_parse_24_be;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+ case 32:
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_32_be;
+ map->format.parse_val = regmap_parse_32_be;
+ map->format.parse_inplace = regmap_parse_32_be_inplace;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_val = regmap_format_32_le;
+ map->format.parse_val = regmap_parse_32_le;
+ map->format.parse_inplace = regmap_parse_32_le_inplace;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_32_native;
+ map->format.parse_val = regmap_parse_32_native;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+#ifdef CONFIG_64BIT
+ case 64:
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_64_be;
+ map->format.parse_val = regmap_parse_64_be;
+ map->format.parse_inplace = regmap_parse_64_be_inplace;
+ break;
+ case REGMAP_ENDIAN_LITTLE:
+ map->format.format_val = regmap_format_64_le;
+ map->format.parse_val = regmap_parse_64_le;
+ map->format.parse_inplace = regmap_parse_64_le_inplace;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_64_native;
+ map->format.parse_val = regmap_parse_64_native;
+ break;
+ default:
+ goto err_hwlock;
+ }
+ break;
+#endif
+ }
+
+ if (map->format.format_write) {
+ if ((reg_endian != REGMAP_ENDIAN_BIG) ||
+ (val_endian != REGMAP_ENDIAN_BIG))
+ goto err_hwlock;
+ map->use_single_write = true;
+ }
+
+ if (!map->format.format_write &&
+ !(map->format.format_reg && map->format.format_val))
+ goto err_hwlock;
+
+ map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
+ if (map->work_buf == NULL) {
+ ret = -ENOMEM;
+ goto err_hwlock;
+ }
+
+ if (map->format.format_write) {
+ map->defer_caching = false;
+ map->reg_write = _regmap_bus_formatted_write;
+ } else if (map->format.format_val) {
+ map->defer_caching = true;
+ map->reg_write = _regmap_bus_raw_write;
+ }
+
+skip_format_initialization:
+
+ map->range_tree = RB_ROOT;
+ for (i = 0; i < config->num_ranges; i++) {
+ const struct regmap_range_cfg *range_cfg = &config->ranges[i];
+ struct regmap_range_node *new;
+
+ /* Sanity check */
+ if (range_cfg->range_max < range_cfg->range_min) {
+ dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
+ range_cfg->range_max, range_cfg->range_min);
+ goto err_range;
+ }
+
+ if (range_cfg->range_max > map->max_register) {
+ dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
+ range_cfg->range_max, map->max_register);
+ goto err_range;
+ }
+
+ if (range_cfg->selector_reg > map->max_register) {
+ dev_err(map->dev,
+ "Invalid range %d: selector out of map\n", i);
+ goto err_range;
+ }
+
+ if (range_cfg->window_len == 0) {
+ dev_err(map->dev, "Invalid range %d: window_len 0\n",
+ i);
+ goto err_range;
+ }
+
+ /* Make sure that this register range has no selector
+ or data window within its boundary */
+ for (j = 0; j < config->num_ranges; j++) {
+ unsigned int sel_reg = config->ranges[j].selector_reg;
+ unsigned int win_min = config->ranges[j].window_start;
+ unsigned int win_max = win_min +
+ config->ranges[j].window_len - 1;
+
+ /* Allow data window inside its own virtual range */
+ if (j == i)
+ continue;
+
+ if (range_cfg->range_min <= sel_reg &&
+ sel_reg <= range_cfg->range_max) {
+ dev_err(map->dev,
+ "Range %d: selector for %d in window\n",
+ i, j);
+ goto err_range;
+ }
+
+ if (!(win_max < range_cfg->range_min ||
+ win_min > range_cfg->range_max)) {
+ dev_err(map->dev,
+ "Range %d: window for %d in window\n",
+ i, j);
+ goto err_range;
+ }
+ }
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new == NULL) {
+ ret = -ENOMEM;
+ goto err_range;
+ }
+
+ new->map = map;
+ new->name = range_cfg->name;
+ new->range_min = range_cfg->range_min;
+ new->range_max = range_cfg->range_max;
+ new->selector_reg = range_cfg->selector_reg;
+ new->selector_mask = range_cfg->selector_mask;
+ new->selector_shift = range_cfg->selector_shift;
+ new->window_start = range_cfg->window_start;
+ new->window_len = range_cfg->window_len;
+
+ if (!_regmap_range_add(map, new)) {
+ dev_err(map->dev, "Failed to add range %d\n", i);
+ kfree(new);
+ goto err_range;
+ }
+
+ if (map->selector_work_buf == NULL) {
+ map->selector_work_buf =
+ kzalloc(map->format.buf_size, GFP_KERNEL);
+ if (map->selector_work_buf == NULL) {
+ ret = -ENOMEM;
+ goto err_range;
+ }
+ }
+ }
+
+ ret = regcache_init(map, config);
+ if (ret != 0)
+ goto err_range;
+
+ if (dev) {
+ ret = regmap_attach_dev(dev, map, config);
+ if (ret != 0)
+ goto err_regcache;
+ } else {
+ regmap_debugfs_init(map);
+ }
+
+ return map;
+
+err_regcache:
+ regcache_exit(map);
+err_range:
+ regmap_range_exit(map);
+ kfree(map->work_buf);
+err_hwlock:
+ if (map->hwlock)
+ hwspin_lock_free(map->hwlock);
+err_name:
+ kfree_const(map->name);
+err_map:
+ kfree(map);
+err:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(__regmap_init);
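+
+/*
+ * Note: callers do not normally use __regmap_init() directly; the
+ * bus-specific wrappers in <linux/regmap.h> such as devm_regmap_init_i2c(),
+ * devm_regmap_init_spi() or devm_regmap_init_mmio() supply the appropriate
+ * regmap_bus and bus context (see for example regmap-spi.c above).
+ */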
+
+static void devm_regmap_release(struct device *dev, void *res)
+{
+ regmap_exit(*(struct regmap **)res);
+}
+
+struct regmap *__devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name)
+{
+ struct regmap **ptr, *regmap;
+
+ ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ regmap = __regmap_init(dev, bus, bus_context, config,
+ lock_key, lock_name);
+ if (!IS_ERR(regmap)) {
+ *ptr = regmap;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(__devm_regmap_init);
+
+static void regmap_field_init(struct regmap_field *rm_field,
+ struct regmap *regmap, struct reg_field reg_field)
+{
+ rm_field->regmap = regmap;
+ rm_field->reg = reg_field.reg;
+ rm_field->shift = reg_field.lsb;
+ rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
+
+ WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
+
+ rm_field->id_size = reg_field.id_size;
+ rm_field->id_offset = reg_field.id_offset;
+}
+
+/**
+ * devm_regmap_field_alloc() - Allocate and initialise a register field.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field will be automatically freed
+ * by the device management code.
+ */
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+ struct regmap *regmap, struct reg_field reg_field)
+{
+ struct regmap_field *rm_field = devm_kzalloc(dev,
+ sizeof(*rm_field), GFP_KERNEL);
+ if (!rm_field)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_field_init(rm_field, regmap, reg_field);
+
+ return rm_field;
+
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
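+
+/*
+ * Usage sketch (illustrative, hypothetical "foo" driver): fields are
+ * usually described with the REG_FIELD() helper from <linux/regmap.h> and
+ * then accessed with regmap_field_read()/regmap_field_write():
+ *
+ *	static const struct reg_field foo_enable_field = REG_FIELD(0x10, 3, 3);
+ *
+ *	foo->enable = devm_regmap_field_alloc(dev, foo->regmap,
+ *					      foo_enable_field);
+ *	if (IS_ERR(foo->enable))
+ *		return PTR_ERR(foo->enable);
+ *
+ *	regmap_field_write(foo->enable, 1);
+ */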
+
+
+/**
+ * regmap_field_bulk_alloc() - Allocate and initialise bulk register fields.
+ *
+ * @regmap: regmap bank in which this register field is located.
+ * @rm_field: regmap register fields within the bank.
+ * @reg_field: Register fields within the bank.
+ * @num_fields: Number of register fields.
+ *
+ * The return value will be -ENOMEM on error or zero on success.
+ * Newly allocated regmap_fields should be freed by calling
+ * regmap_field_bulk_free().
+ */
+int regmap_field_bulk_alloc(struct regmap *regmap,
+ struct regmap_field **rm_field,
+ const struct reg_field *reg_field,
+ int num_fields)
+{
+ struct regmap_field *rf;
+ int i;
+
+ rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_fields; i++) {
+ regmap_field_init(&rf[i], regmap, reg_field[i]);
+ rm_field[i] = &rf[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
+
+/**
+ * devm_regmap_field_bulk_alloc() - Allocate and initialise bulk register
+ * fields.
+ *
+ * @dev: Device that will be interacted with
+ * @regmap: regmap bank in which this register field is located.
+ * @rm_field: regmap register fields within the bank.
+ * @reg_field: Register fields within the bank.
+ * @num_fields: Number of register fields.
+ *
+ * The return value will be -ENOMEM on error or zero on success.
+ * Newly allocated regmap_fields will be automatically freed by the
+ * device management code.
+ */
+int devm_regmap_field_bulk_alloc(struct device *dev,
+ struct regmap *regmap,
+ struct regmap_field **rm_field,
+ const struct reg_field *reg_field,
+ int num_fields)
+{
+ struct regmap_field *rf;
+ int i;
+
+ rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_fields; i++) {
+ regmap_field_init(&rf[i], regmap, reg_field[i]);
+ rm_field[i] = &rf[i];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
+
+/**
+ * regmap_field_bulk_free() - Free register fields allocated using
+ * regmap_field_bulk_alloc().
+ *
+ * @field: regmap fields which should be freed.
+ */
+void regmap_field_bulk_free(struct regmap_field *field)
+{
+ kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
+
+/**
+ * devm_regmap_field_bulk_free() - Free bulk register fields allocated using
+ * devm_regmap_field_bulk_alloc().
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ *
+ * Free register fields allocated using devm_regmap_field_bulk_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per device-driver life-cycle.
+ */
+void devm_regmap_field_bulk_free(struct device *dev,
+ struct regmap_field *field)
+{
+ devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
+
+/**
+ * devm_regmap_field_free() - Free a register field allocated using
+ * devm_regmap_field_alloc.
+ *
+ * @dev: Device that will be interacted with
+ * @field: regmap field which should be freed.
+ *
+ * Free register field allocated using devm_regmap_field_alloc(). Usually
+ * drivers need not call this function, as the memory allocated via devm
+ * will be freed as per device-driver life-cycle.
+ */
+void devm_regmap_field_free(struct device *dev,
+ struct regmap_field *field)
+{
+ devm_kfree(dev, field);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_field_free);
+
+/**
+ * regmap_field_alloc() - Allocate and initialise a register field.
+ *
+ * @regmap: regmap bank in which this register field is located.
+ * @reg_field: Register field within the bank.
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap_field. The regmap_field should be freed by the
+ * user once they have finished working with it, using regmap_field_free().
+ */
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+ struct reg_field reg_field)
+{
+ struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
+
+ if (!rm_field)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_field_init(rm_field, regmap, reg_field);
+
+ return rm_field;
+}
+EXPORT_SYMBOL_GPL(regmap_field_alloc);
+
+/**
+ * regmap_field_free() - Free register field allocated using
+ * regmap_field_alloc.
+ *
+ * @field: regmap field which should be freed.
+ */
+void regmap_field_free(struct regmap_field *field)
+{
+ kfree(field);
+}
+EXPORT_SYMBOL_GPL(regmap_field_free);
+
+/**
+ * regmap_reinit_cache() - Reinitialise the current register cache
+ *
+ * @map: Register map to operate on.
+ * @config: New configuration. Only the cache data will be used.
+ *
+ * Discard any existing register cache for the map and initialize a
+ * new cache. This can be used to restore the cache to defaults or to
+ * update the cache configuration to reflect runtime discovery of the
+ * hardware.
+ *
+ * No explicit locking is done here, the user needs to ensure that
+ * this function will not race with other calls to regmap.
+ */
+int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
+{
+ int ret;
+
+ regcache_exit(map);
+ regmap_debugfs_exit(map);
+
+ map->max_register = config->max_register;
+ map->writeable_reg = config->writeable_reg;
+ map->readable_reg = config->readable_reg;
+ map->volatile_reg = config->volatile_reg;
+ map->precious_reg = config->precious_reg;
+ map->writeable_noinc_reg = config->writeable_noinc_reg;
+ map->readable_noinc_reg = config->readable_noinc_reg;
+ map->cache_type = config->cache_type;
+
+ ret = regmap_set_name(map, config);
+ if (ret)
+ return ret;
+
+ regmap_debugfs_init(map);
+
+ map->cache_bypass = false;
+ map->cache_only = false;
+
+ return regcache_init(map, config);
+}
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
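+
+/*
+ * Usage sketch (illustrative, hypothetical "foo" driver): a typical caller
+ * is a driver that has just performed a full chip reset and wants the
+ * cache to start again from the hardware defaults:
+ *
+ *	foo_chip_reset(foo);
+ *	ret = regmap_reinit_cache(foo->regmap, &foo_regmap_config);
+ *	if (ret)
+ *		return ret;
+ */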
+
+/**
+ * regmap_exit() - Free a previously allocated register map
+ *
+ * @map: Register map to operate on.
+ */
+void regmap_exit(struct regmap *map)
+{
+ struct regmap_async *async;
+
+ regcache_exit(map);
+ regmap_debugfs_exit(map);
+ regmap_range_exit(map);
+ if (map->bus && map->bus->free_context)
+ map->bus->free_context(map->bus_context);
+ kfree(map->work_buf);
+ while (!list_empty(&map->async_free)) {
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ list_del(&async->list);
+ kfree(async->work_buf);
+ kfree(async);
+ }
+ if (map->hwlock)
+ hwspin_lock_free(map->hwlock);
+ if (map->lock == regmap_lock_mutex)
+ mutex_destroy(&map->mutex);
+ kfree_const(map->name);
+ kfree(map->patch);
+ if (map->bus && map->bus->free_on_exit)
+ kfree(map->bus);
+ kfree(map);
+}
+EXPORT_SYMBOL_GPL(regmap_exit);
+
+static int dev_get_regmap_match(struct device *dev, void *res, void *data)
+{
+ struct regmap **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ /* If the user didn't specify a name, match any */
+ if (data)
+ return (*r)->name && !strcmp((*r)->name, data);
+ else
+ return 1;
+}
+
+/**
+ * dev_get_regmap() - Obtain the regmap (if any) for a device
+ *
+ * @dev: Device to retrieve the map for
+ * @name: Optional name for the register map, usually NULL.
+ *
+ * Returns the regmap for the device if one is present, or NULL. If
+ * name is specified then it must match the name specified when
+ * registering the device, if it is NULL then the first regmap found
+ * will be used. Devices with multiple register maps are very rare;
+ * generic code should normally not need to specify a name.
+ */
+struct regmap *dev_get_regmap(struct device *dev, const char *name)
+{
+ struct regmap **r = devres_find(dev, dev_get_regmap_release,
+ dev_get_regmap_match, (void *)name);
+
+ if (!r)
+ return NULL;
+ return *r;
+}
+EXPORT_SYMBOL_GPL(dev_get_regmap);
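+
+/*
+ * Usage sketch (illustrative): a common pattern is a child of an MFD
+ * device picking up the regmap registered by its parent:
+ *
+ *	map = dev_get_regmap(pdev->dev.parent, NULL);
+ *	if (!map)
+ *		return -ENODEV;
+ */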
+
+/**
+ * regmap_get_device() - Obtain the device from a regmap
+ *
+ * @map: Register map to operate on.
+ *
+ * Returns the underlying device that the regmap has been created for.
+ */
+struct device *regmap_get_device(struct regmap *map)
+{
+ return map->dev;
+}
+EXPORT_SYMBOL_GPL(regmap_get_device);
+
+static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+ struct regmap_range_node *range,
+ unsigned int val_num)
+{
+ void *orig_work_buf;
+ unsigned int win_offset;
+ unsigned int win_page;
+ bool page_chg;
+ int ret;
+
+ win_offset = (*reg - range->range_min) % range->window_len;
+ win_page = (*reg - range->range_min) / range->window_len;
+
+ if (val_num > 1) {
+ /* Bulk write shouldn't cross range boundary */
+ if (*reg + val_num - 1 > range->range_max)
+ return -EINVAL;
+
+ /* ... or single page boundary */
+ if (val_num > range->window_len - win_offset)
+ return -EINVAL;
+ }
+
+ /* It is possible to have the selector register inside the data window.
+ In that case, the selector register is located on every page and
+ needs no page switching when accessed alone. */
+ if (val_num > 1 ||
+ range->window_start + win_offset != range->selector_reg) {
+ /* Use separate work_buf during page switching */
+ orig_work_buf = map->work_buf;
+ map->work_buf = map->selector_work_buf;
+
+ ret = _regmap_update_bits(map, range->selector_reg,
+ range->selector_mask,
+ win_page << range->selector_shift,
+ &page_chg, false);
+
+ map->work_buf = orig_work_buf;
+
+ if (ret != 0)
+ return ret;
+ }
+
+ *reg = range->window_start + win_offset;
+
+ return 0;
+}
+
+static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
+ unsigned long mask)
+{
+ u8 *buf;
+ int i;
+
+ if (!mask || !map->work_buf)
+ return;
+
+ buf = map->work_buf;
+
+ for (i = 0; i < max_bytes; i++)
+ buf[i] |= (mask >> (8 * i)) & 0xff;
+}
+
+static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool noinc)
+{
+ struct regmap_range_node *range;
+ unsigned long flags;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
+ void *buf;
+ int ret = -ENOTSUPP;
+ size_t len;
+ int i;
+
+ /* Check for unwritable or noinc registers in range
+ * before we start
+ */
+ if (!regmap_writeable_noinc(map, reg)) {
+ for (i = 0; i < val_len / map->format.val_bytes; i++) {
+ unsigned int element =
+ reg + regmap_get_offset(map, i);
+ if (!regmap_writeable(map, element) ||
+ regmap_writeable_noinc(map, element))
+ return -EINVAL;
+ }
+ }
+
+ if (!map->cache_bypass && map->format.parse_val) {
+ unsigned int ival, offset;
+ int val_bytes = map->format.val_bytes;
+
+ /* Cache the last written value for noinc writes */
+ i = noinc ? val_len - val_bytes : 0;
+ for (; i < val_len; i += val_bytes) {
+ ival = map->format.parse_val(val + i);
+ offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
+ ret = regcache_write(map, reg + offset, ival);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+ reg + offset, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ int val_num = val_len / map->format.val_bytes;
+ int win_offset = (reg - range->range_min) % range->window_len;
+ int win_residue = range->window_len - win_offset;
+
+ /* If the write goes beyond the end of the window split it */
+ while (val_num > win_residue) {
+ dev_dbg(map->dev, "Writing window %d/%zu\n",
+ win_residue, val_len / map->format.val_bytes);
+ ret = _regmap_raw_write_impl(map, reg, val,
+ win_residue *
+ map->format.val_bytes, noinc);
+ if (ret != 0)
+ return ret;
+
+ reg += win_residue;
+ val_num -= win_residue;
+ val += win_residue * map->format.val_bytes;
+ val_len -= win_residue * map->format.val_bytes;
+
+ win_offset = (reg - range->range_min) %
+ range->window_len;
+ win_residue = range->window_len - win_offset;
+ }
+
+ ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
+ if (ret != 0)
+ return ret;
+ }
+
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ map->format.format_reg(map->work_buf, reg, map->reg_shift);
+ regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
+ map->write_flag_mask);
+
+ /*
+ * Essentially all I/O mechanisms will be faster with a single
+ * buffer to write. Since register syncs often generate raw
+ * writes of single registers, optimise that case.
+ */
+ if (val != work_val && val_len == map->format.val_bytes) {
+ memcpy(work_val, val, map->format.val_bytes);
+ val = work_val;
+ }
+
+ if (map->async && map->bus && map->bus->async_write) {
+ struct regmap_async *async;
+
+ trace_regmap_async_write_start(map, reg, val_len);
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ async = list_first_entry_or_null(&map->async_free,
+ struct regmap_async,
+ list);
+ if (async)
+ list_del(&async->list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ if (!async) {
+ async = map->bus->async_alloc();
+ if (!async)
+ return -ENOMEM;
+
+ async->work_buf = kzalloc(map->format.buf_size,
+ GFP_KERNEL | GFP_DMA);
+ if (!async->work_buf) {
+ kfree(async);
+ return -ENOMEM;
+ }
+ }
+
+ async->map = map;
+
+ /* If the caller supplied the value we can use it safely. */
+ memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
+ map->format.reg_bytes + map->format.val_bytes);
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_add_tail(&async->list, &map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ if (val != work_val)
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len, async);
+ else
+ ret = map->bus->async_write(map->bus_context,
+ async->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len, NULL, 0, async);
+
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to schedule write: %d\n",
+ ret);
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ list_move(&async->list, &map->async_free);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+ }
+
+ return ret;
+ }
+
+ trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
+
+ /* If we're doing a single register write we can probably just
+ * send the work_buf directly, otherwise try to do a gather
+ * write.
+ */
+ if (val == work_val)
+ ret = map->write(map->bus_context, map->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len);
+ else if (map->bus && map->bus->gather_write)
+ ret = map->bus->gather_write(map->bus_context, map->work_buf,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ val, val_len);
+ else
+ ret = -ENOTSUPP;
+
+ /* If that didn't work fall back on linearising by hand. */
+ if (ret == -ENOTSUPP) {
+ len = map->format.reg_bytes + map->format.pad_bytes + val_len;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, map->work_buf, map->format.reg_bytes);
+ memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
+ val, val_len);
+ ret = map->write(map->bus_context, buf, len);
+
+ kfree(buf);
+ } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+ /* regcache_drop_region() takes lock that we already have,
+ * thus call map->cache_ops->drop() directly
+ */
+ if (map->cache_ops && map->cache_ops->drop)
+ map->cache_ops->drop(map, reg, reg + 1);
+ }
+
+ trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
+
+ return ret;
+}
+
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+ return map->write && map->format.format_val && map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+
+/**
+ * regmap_get_raw_read_max - Get the maximum size we can read
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_read_max(struct regmap *map)
+{
+ return map->max_raw_read;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
+
+/**
+ * regmap_get_raw_write_max - Get the maximum size we can write
+ *
+ * @map: Map to check.
+ */
+size_t regmap_get_raw_write_max(struct regmap *map)
+{
+ return map->max_raw_write;
+}
+EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
+
+static int _regmap_bus_formatted_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+ struct regmap_range_node *range;
+ struct regmap *map = context;
+
+ WARN_ON(!map->format.format_write);
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range, 1);
+ if (ret != 0)
+ return ret;
+ }
+
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ map->format.format_write(map, reg, val);
+
+ trace_regmap_hw_write_start(map, reg, 1);
+
+ ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
+
+ trace_regmap_hw_write_done(map, reg, 1);
+
+ return ret;
+}
+
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct regmap *map = context;
+
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ return map->bus->reg_write(map->bus_context, reg, val);
+}
+
+static int _regmap_bus_raw_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct regmap *map = context;
+
+ WARN_ON(!map->format.format_val);
+
+ map->format.format_val(map->work_buf + map->format.reg_bytes
+ + map->format.pad_bytes, val, 0);
+ return _regmap_raw_write_impl(map, reg,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
+ map->format.val_bytes,
+ false);
+}
+
+static inline void *_regmap_map_get_context(struct regmap *map)
+{
+ return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
+}
+
+int _regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+ void *context = _regmap_map_get_context(map);
+
+ if (!regmap_writeable(map, reg))
+ return -EIO;
+
+ if (!map->cache_bypass && !map->defer_caching) {
+ ret = regcache_write(map, reg, val);
+ if (ret != 0)
+ return ret;
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ ret = map->reg_write(context, reg, val);
+ if (ret == 0) {
+ if (regmap_should_log(map))
+ dev_info(map->dev, "%x <= %x\n", reg, val);
+
+ trace_regmap_reg_write(map, reg, val);
+ }
+
+ return ret;
+}
+
+/**
+ * regmap_write() - Write a value to a single register
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_write(map, reg, val);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write);
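+
+/*
+ * Illustrative usage sketch (not part of this file): a typical caller has a
+ * regmap from one of the devm_regmap_init_*() helpers and only checks the
+ * return code. The register address 0x10, the value and the 'map'/'dev'
+ * variables below are hypothetical.
+ *
+ *	ret = regmap_write(map, 0x10, 0x01);
+ *	if (ret)
+ *		dev_err(dev, "failed to write register: %d\n", ret);
+ */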
+
+/**
+ * regmap_write_async() - Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_write(map, reg, val);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len, bool noinc)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ size_t chunk_count, chunk_bytes;
+ size_t chunk_regs = val_count;
+ int ret, i;
+
+ if (!val_count)
+ return -EINVAL;
+
+ if (map->use_single_write)
+ chunk_regs = 1;
+ else if (map->max_raw_write && val_len > map->max_raw_write)
+ chunk_regs = map->max_raw_write / val_bytes;
+
+ chunk_count = val_count / chunk_regs;
+ chunk_bytes = chunk_regs * val_bytes;
+
+ /* Write as many bytes as possible with chunk_size */
+ for (i = 0; i < chunk_count; i++) {
+ ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
+ if (ret)
+ return ret;
+
+ reg += regmap_get_offset(map, chunk_regs);
+ val += chunk_bytes;
+ val_len -= chunk_bytes;
+ }
+
+ /* Write remaining bytes */
+ if (val_len)
+ ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
+
+ return ret;
+}
+
+/**
+ * regmap_raw_write() - Write raw values to one or more registers
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ if (!regmap_can_raw_write(map))
+ return -EINVAL;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write);
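+
+/*
+ * Illustrative usage sketch (not part of this file): data passed to
+ * regmap_raw_write() must already be laid out in device byte order, for
+ * example a firmware blob. The base register 0x40 and the buffer contents
+ * are hypothetical.
+ *
+ *	static const u8 fw_patch[] = { 0xde, 0xad, 0xbe, 0xef };
+ *
+ *	ret = regmap_raw_write(map, 0x40, fw_patch, sizeof(fw_patch));
+ */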
+
+static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
+ void *val, unsigned int val_len, bool write)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ unsigned int lastval;
+ u8 *u8p;
+ u16 *u16p;
+ u32 *u32p;
+#ifdef CONFIG_64BIT
+ u64 *u64p;
+#endif
+ int ret;
+ int i;
+
+ switch (val_bytes) {
+ case 1:
+ u8p = val;
+ if (write)
+ lastval = (unsigned int)u8p[val_count - 1];
+ break;
+ case 2:
+ u16p = val;
+ if (write)
+ lastval = (unsigned int)u16p[val_count - 1];
+ break;
+ case 4:
+ u32p = val;
+ if (write)
+ lastval = (unsigned int)u32p[val_count - 1];
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ u64p = val;
+ if (write)
+ lastval = (unsigned int)u64p[val_count - 1];
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ /*
+	 * Update the cache with the last value we write; the rest has just
+	 * gone down into the hardware FIFO. We can't cache FIFOs. This makes
+ * sure a single read from the cache will work.
+ */
+ if (write) {
+ if (!map->cache_bypass && !map->defer_caching) {
+ ret = regcache_write(map, reg, lastval);
+ if (ret != 0)
+ return ret;
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+ ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
+ } else {
+ ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
+ }
+
+ if (!ret && regmap_should_log(map)) {
+ dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
+ for (i = 0; i < val_count; i++) {
+ switch (val_bytes) {
+ case 1:
+ pr_cont("%x", u8p[i]);
+ break;
+ case 2:
+ pr_cont("%x", u16p[i]);
+ break;
+ case 4:
+ pr_cont("%x", u32p[i]);
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ pr_cont("%llx", u64p[i]);
+ break;
+#endif
+ default:
+ break;
+ }
+ if (i == (val_count - 1))
+ pr_cont("]\n");
+ else
+ pr_cont(",");
+ }
+ }
+
+	return ret;
+}
+
+/**
+ * regmap_noinc_write(): Write data to a register without incrementing the
+ * register number
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Pointer to data buffer
+ * @val_len: Length of the data buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk bus write operations will write a
+ * range of registers. Some devices have certain registers for which a write
+ * operation can write to an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple writes as required to write val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ size_t write_len;
+ int ret;
+
+ if (!map->write && !(map->bus && map->bus->reg_noinc_write))
+ return -EINVAL;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_len == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /*
+ * Use the accelerated operation if we can. The val drops the const
+ * typing in order to facilitate code reuse in regmap_noinc_readwrite().
+ */
+ if (map->bus->reg_noinc_write) {
+ ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
+ goto out_unlock;
+ }
+
+ while (val_len) {
+ if (map->max_raw_write && map->max_raw_write < val_len)
+ write_len = map->max_raw_write;
+ else
+ write_len = val_len;
+ ret = _regmap_raw_write(map, reg, val, write_len, true);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + write_len;
+ val_len -= write_len;
+ }
+
+out_unlock:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_write);
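+
+/*
+ * Illustrative usage sketch (not part of this file): pushing a buffer into a
+ * device FIFO exposed at a single register. The FIFO address 0x80 and the
+ * tx_buf/tx_len variables are hypothetical, and the driver's regmap_config
+ * must report the register as volatile and writeable-noinc (for instance via
+ * the volatile_reg and writeable_noinc_reg callbacks).
+ *
+ *	ret = regmap_noinc_write(map, 0x80, tx_buf, tx_len);
+ */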
+
+/**
+ * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
+ * register field.
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the update asynchronously
+ * @force: Boolean indicating whether to write even if the value is unchanged
+ *
+ * Perform a read/modify/write cycle on the register field with change,
+ * async, force option.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits_base(struct regmap_field *field,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits_base(field->regmap, field->reg,
+ mask, val << field->shift,
+ change, async, force);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
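+
+/*
+ * Illustrative usage sketch (not part of this file): drivers normally reach
+ * this through wrappers such as regmap_field_write() after allocating a
+ * field. The register 0x04 and the single-bit field at bit 0 are
+ * hypothetical.
+ *
+ *	static const struct reg_field enable_field = REG_FIELD(0x04, 0, 0);
+ *	struct regmap_field *f;
+ *
+ *	f = devm_regmap_field_alloc(dev, map, enable_field);
+ *	if (IS_ERR(f))
+ *		return PTR_ERR(f);
+ *	ret = regmap_field_write(f, 1);
+ */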
+
+/**
+ * regmap_field_test_bits() - Check if all specified bits are set in a
+ * register field.
+ *
+ * @field: Register field to operate on
+ * @bits: Bits to test
+ *
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying
+ * regmap_field_read() fails.
+ */
+int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
+{
+	unsigned int val;
+	int ret;
+
+ ret = regmap_field_read(field, &val);
+ if (ret)
+ return ret;
+
+ return (val & bits) == bits;
+}
+EXPORT_SYMBOL_GPL(regmap_field_test_bits);
+
+/**
+ * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
+ * register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the update asynchronously
+ * @force: Boolean indicating whether to write even if the value is unchanged
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ mask = (mask << field->shift) & field->mask;
+
+ return regmap_update_bits_base(field->regmap,
+ field->reg + (field->id_offset * id),
+ mask, val << field->shift,
+ change, async, force);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
+
+/**
+ * regmap_bulk_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @reg: First register to be written to
+ * @val: Block of data to be written, in native register size for device
+ * @val_count: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of
+ * data to the device either in a single transfer or multiple transfers.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret = 0, i;
+ size_t val_bytes = map->format.val_bytes;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ /*
+ * Some devices don't support bulk write, for them we have a series of
+ * single write operations.
+ */
+ if (!map->write || !map->format.parse_inplace) {
+ map->lock(map->lock_arg);
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
+
+ switch (val_bytes) {
+ case 1:
+ ival = *(u8 *)(val + (i * val_bytes));
+ break;
+ case 2:
+ ival = *(u16 *)(val + (i * val_bytes));
+ break;
+ case 4:
+ ival = *(u32 *)(val + (i * val_bytes));
+ break;
+#ifdef CONFIG_64BIT
+ case 8:
+ ival = *(u64 *)(val + (i * val_bytes));
+ break;
+#endif
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = _regmap_write(map,
+ reg + regmap_get_offset(map, i),
+ ival);
+ if (ret != 0)
+ goto out;
+ }
+out:
+ map->unlock(map->lock_arg);
+ } else {
+ void *wval;
+
+ wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
+ if (!wval)
+ return -ENOMEM;
+
+ for (i = 0; i < val_count * val_bytes; i += val_bytes)
+ map->format.parse_inplace(wval + i);
+
+ ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
+
+ kfree(wval);
+ }
+
+ if (!ret)
+ trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_write);
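+
+/*
+ * Illustrative usage sketch (not part of this file): writing four consecutive
+ * 16-bit registers from native-endian values; the core handles any byte-order
+ * formatting. The base register 0x20 and the values are hypothetical and
+ * assume a map with 16-bit values.
+ *
+ *	u16 coeffs[4] = { 0x0001, 0x0002, 0x0003, 0x0004 };
+ *
+ *	ret = regmap_bulk_write(map, 0x20, coeffs, ARRAY_SIZE(coeffs));
+ */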
+
+/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * the (register,newvalue) pairs in regs have not been formatted, but
+ * they are all in the same page and have been changed to being page
+ * relative. The page register has been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+ const struct reg_sequence *regs,
+ size_t num_regs)
+{
+ int ret;
+ void *buf;
+ int i;
+ u8 *u8;
+ size_t val_bytes = map->format.val_bytes;
+ size_t reg_bytes = map->format.reg_bytes;
+ size_t pad_bytes = map->format.pad_bytes;
+ size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+ size_t len = pair_size * num_regs;
+
+ if (!len)
+ return -EINVAL;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* We have to linearise by hand. */
+
+ u8 = buf;
+
+ for (i = 0; i < num_regs; i++) {
+ unsigned int reg = regs[i].reg;
+ unsigned int val = regs[i].def;
+ trace_regmap_hw_write_start(map, reg, 1);
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ map->format.format_reg(u8, reg, map->reg_shift);
+ u8 += reg_bytes + pad_bytes;
+ map->format.format_val(u8, val, 0);
+ u8 += val_bytes;
+ }
+ u8 = buf;
+ *u8 |= map->write_flag_mask;
+
+ ret = map->write(map->bus_context, buf, len);
+
+ kfree(buf);
+
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ trace_regmap_hw_write_done(map, reg, 1);
+ }
+ return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+ unsigned int reg,
+ struct regmap_range_node *range)
+{
+ unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+ return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+ struct reg_sequence *regs,
+ size_t num_regs)
+{
+ int ret;
+ int i, n;
+ struct reg_sequence *base;
+ unsigned int this_page = 0;
+ unsigned int page_change = 0;
+ /*
+	 * the set of registers is not necessarily in order, but
+ * since the order of write must be preserved this algorithm
+ * chops the set each time the page changes. This also applies
+ * if there is a delay required at any point in the sequence.
+ */
+ base = regs;
+ for (i = 0, n = 0; i < num_regs; i++, n++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ unsigned int win_page = _regmap_register_page(map, reg,
+ range);
+
+ if (i == 0)
+ this_page = win_page;
+ if (win_page != this_page) {
+ this_page = win_page;
+ page_change = 1;
+ }
+ }
+
+ /* If we have both a page change and a delay make sure to
+ * write the regs and apply the delay before we change the
+ * page.
+ */
+
+ if (page_change || regs[i].delay_us) {
+
+ /* For situations where the first write requires
+ * a delay we need to make sure we don't call
+ * raw_multi_reg_write with n=0
+ * This can't occur with page breaks as we
+ * never write on the first iteration
+ */
+ if (regs[i].delay_us && i == 0)
+ n = 1;
+
+ ret = _regmap_raw_multi_reg_write(map, base, n);
+ if (ret != 0)
+ return ret;
+
+ if (regs[i].delay_us) {
+ if (map->can_sleep)
+ fsleep(regs[i].delay_us);
+ else
+ udelay(regs[i].delay_us);
+ }
+
+ base += n;
+ n = 0;
+
+ if (page_change) {
+ ret = _regmap_select_page(map,
+ &base[n].reg,
+ range, 1);
+ if (ret != 0)
+ return ret;
+
+ page_change = 0;
+ }
+
+ }
+
+ }
+ if (n > 0)
+ return _regmap_raw_multi_reg_write(map, base, n);
+ return 0;
+}
+
+static int _regmap_multi_reg_write(struct regmap *map,
+ const struct reg_sequence *regs,
+ size_t num_regs)
+{
+ int i;
+ int ret;
+
+ if (!map->can_multi_write) {
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0)
+ return ret;
+
+ if (regs[i].delay_us) {
+ if (map->can_sleep)
+ fsleep(regs[i].delay_us);
+ else
+ udelay(regs[i].delay_us);
+ }
+ }
+ return 0;
+ }
+
+ if (!map->format.parse_inplace)
+ return -EINVAL;
+
+ if (map->writeable_reg)
+ for (i = 0; i < num_regs; i++) {
+ int reg = regs[i].reg;
+ if (!map->writeable_reg(map->dev, reg))
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ }
+
+ if (!map->cache_bypass) {
+ for (i = 0; i < num_regs; i++) {
+ unsigned int val = regs[i].def;
+ unsigned int reg = regs[i].reg;
+ ret = regcache_write(map, reg, val);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %x ret: %d\n",
+ reg, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
+ WARN_ON(!map->bus);
+
+ for (i = 0; i < num_regs; i++) {
+ unsigned int reg = regs[i].reg;
+ struct regmap_range_node *range;
+
+ /* Coalesce all the writes between a page break or a delay
+ * in a sequence
+ */
+ range = _regmap_range_lookup(map, reg);
+ if (range || regs[i].delay_us) {
+ size_t len = sizeof(struct reg_sequence)*num_regs;
+ struct reg_sequence *base = kmemdup(regs, len,
+ GFP_KERNEL);
+ if (!base)
+ return -ENOMEM;
+ ret = _regmap_range_multi_paged_reg_write(map, base,
+ num_regs);
+ kfree(base);
+
+ return ret;
+ }
+ }
+ return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
+/**
+ * regmap_multi_reg_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
+ * addressed. However, this alternative block multi write mode will send
+ * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
+ * must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
+ int num_regs)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
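+
+/*
+ * Illustrative usage sketch (not part of this file): a scattered init
+ * sequence expressed as (register, value) pairs; the optional third field of
+ * struct reg_sequence is a delay in microseconds applied after that write.
+ * All addresses, values and the delay are hypothetical.
+ *
+ *	static const struct reg_sequence init_seq[] = {
+ *		{ 0x00, 0x01 },
+ *		{ 0x31, 0x80, 10 },
+ *		{ 0x02, 0xff },
+ *	};
+ *
+ *	ret = regmap_multi_reg_write(map, init_seq, ARRAY_SIZE(init_seq));
+ */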
+
+/**
+ * regmap_multi_reg_write_bypassed() - Write multiple registers to the
+ * device but not the cache
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * Write multiple registers to the device but not the cache where the set
+ * of registers is supplied in any order.
+ *
+ * This function is intended to be used for writing a large block of data
+ * atomically to the device in single transfer for those I2C client devices
+ * that implement this alternative block write mode.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_sequence *regs,
+ int num_regs)
+{
+ int ret;
+ bool bypass;
+
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+ map->cache_bypass = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->cache_bypass = bypass;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
+
+/**
+ * regmap_raw_write_async() - Write raw values to one or more registers
+ * asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Initial register to write to
+ * @val: Block of data to be written, laid out for direct transmission to the
+ * device. Must be valid until regmap_async_complete() is called.
+ * @val_len: Length of data pointed to by val.
+ *
+ * This function is intended to be used for things like firmware
+ * download where a large block of data needs to be transferred to the
+ * device. No formatting will be done on the data provided.
+ *
+ * If supported by the underlying bus the write will be scheduled
+ * asynchronously, helping maximise I/O speed on higher speed buses
+ * like SPI. regmap_async_complete() can be called to ensure that all
+ * asynchronous writes have been completed.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ int ret;
+
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ map->async = true;
+
+ ret = _regmap_raw_write(map, reg, val, val_len, false);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_write_async);
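+
+/*
+ * Illustrative usage sketch (not part of this file): scheduling a large
+ * transfer asynchronously and collecting the result later. The buffer must
+ * stay valid until regmap_async_complete() has returned; the base register
+ * 0x100 and the blob/blob_len variables are hypothetical.
+ *
+ *	ret = regmap_raw_write_async(map, 0x100, blob, blob_len);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	ret = regmap_async_complete(map);
+ */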
+
+static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ unsigned int val_len, bool noinc)
+{
+ struct regmap_range_node *range;
+ int ret;
+
+ if (!map->read)
+ return -EINVAL;
+
+ range = _regmap_range_lookup(map, reg);
+ if (range) {
+ ret = _regmap_select_page(map, &reg, range,
+ noinc ? 1 : val_len / map->format.val_bytes);
+ if (ret != 0)
+ return ret;
+ }
+
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ map->format.format_reg(map->work_buf, reg, map->reg_shift);
+ regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
+ map->read_flag_mask);
+ trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
+
+ ret = map->read(map->bus_context, map->work_buf,
+ map->format.reg_bytes + map->format.pad_bytes,
+ val, val_len);
+
+ trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
+
+ return ret;
+}
+
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct regmap *map = context;
+
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ return map->bus->reg_read(map->bus_context, reg, val);
+}
+
+static int _regmap_bus_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct regmap *map = context;
+ void *work_val = map->work_buf + map->format.reg_bytes +
+ map->format.pad_bytes;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
+ if (ret == 0)
+ *val = map->format.parse_val(work_val);
+
+ return ret;
+}
+
+static int _regmap_read(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ void *context = _regmap_map_get_context(map);
+
+ if (!map->cache_bypass) {
+ ret = regcache_read(map, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+
+ if (map->cache_only)
+ return -EBUSY;
+
+ if (!regmap_readable(map, reg))
+ return -EIO;
+
+ ret = map->reg_read(context, reg, val);
+ if (ret == 0) {
+ if (regmap_should_log(map))
+ dev_info(map->dev, "%x => %x\n", reg, *val);
+
+ trace_regmap_reg_read(map, reg, *val);
+
+ if (!map->cache_bypass)
+ regcache_write(map, reg, *val);
+ }
+
+ return ret;
+}
+
+/**
+ * regmap_read() - Read a value from a single register
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+ int ret;
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ ret = _regmap_read(map, reg, val);
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read);
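+
+/*
+ * Illustrative usage sketch (not part of this file): reading back a status
+ * register; the address 0x0c is hypothetical.
+ *
+ *	unsigned int status;
+ *
+ *	ret = regmap_read(map, 0x0c, &status);
+ *	if (ret)
+ *		return ret;
+ */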
+
+/**
+ * regmap_raw_read() - Read raw data from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value
+ * @val_len: Size of data to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_len)
+{
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ unsigned int v;
+ int ret, i;
+
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_count == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
+ map->cache_type == REGCACHE_NONE) {
+ size_t chunk_count, chunk_bytes;
+ size_t chunk_regs = val_count;
+
+ if (!map->read) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ if (map->use_single_read)
+ chunk_regs = 1;
+ else if (map->max_raw_read && val_len > map->max_raw_read)
+ chunk_regs = map->max_raw_read / val_bytes;
+
+ chunk_count = val_count / chunk_regs;
+ chunk_bytes = chunk_regs * val_bytes;
+
+ /* Read bytes that fit into whole chunks */
+ for (i = 0; i < chunk_count; i++) {
+ ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
+ if (ret != 0)
+ goto out;
+
+ reg += regmap_get_offset(map, chunk_regs);
+ val += chunk_bytes;
+ val_len -= chunk_bytes;
+ }
+
+ /* Read remaining bytes */
+ if (val_len) {
+ ret = _regmap_raw_read(map, reg, val, val_len, false);
+ if (ret != 0)
+ goto out;
+ }
+ } else {
+ /* Otherwise go word by word for the cache; should be low
+ * cost as we expect to hit the cache.
+ */
+ for (i = 0; i < val_count; i++) {
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+ &v);
+ if (ret != 0)
+ goto out;
+
+ map->format.format_val(val + (i * val_bytes), v, 0);
+ }
+ }
+
+ out:
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_raw_read);
+
+/**
+ * regmap_noinc_read(): Read data from a register without incrementing the
+ * register number
+ *
+ * @map: Register map to read from
+ * @reg: Register to read from
+ * @val: Pointer to data buffer
+ * @val_len: Length of output buffer in bytes.
+ *
+ * The regmap API usually assumes that bulk read operations will read a
+ * range of registers. Some devices have certain registers for which a read
+ * operation will read from an internal FIFO.
+ *
+ * The target register must be volatile but registers after it can be
+ * completely unrelated cacheable registers.
+ *
+ * This will attempt multiple reads as required to read val_len bytes.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ size_t read_len;
+ int ret;
+
+ if (!map->read)
+ return -ENOTSUPP;
+
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_len == 0)
+ return -EINVAL;
+
+ map->lock(map->lock_arg);
+
+ if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Use the accelerated operation if we can */
+ if (map->bus->reg_noinc_read) {
+ /*
+ * We have not defined the FIFO semantics for cache, as the
+ * cache is just one value deep. Should we return the last
+ * written value? Just avoid this by always reading the FIFO
+ * even when using cache. Cache only will not work.
+ */
+ if (map->cache_only) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
+ goto out_unlock;
+ }
+
+ while (val_len) {
+ if (map->max_raw_read && map->max_raw_read < val_len)
+ read_len = map->max_raw_read;
+ else
+ read_len = val_len;
+ ret = _regmap_raw_read(map, reg, val, read_len, true);
+ if (ret)
+ goto out_unlock;
+ val = ((u8 *)val) + read_len;
+ val_len -= read_len;
+ }
+
+out_unlock:
+ map->unlock(map->lock_arg);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_noinc_read);
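+
+/*
+ * Illustrative usage sketch (not part of this file): draining a receive FIFO
+ * exposed at a single register. The address 0x84 and the rx_buf/rx_len
+ * variables are hypothetical, and the driver's regmap_config must report the
+ * register as volatile and readable-noinc (for instance via the volatile_reg
+ * and readable_noinc_reg callbacks).
+ *
+ *	ret = regmap_noinc_read(map, 0x84, rx_buf, rx_len);
+ */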
+
+/**
+ * regmap_field_read(): Read a value from a single register field
+ *
+ * @field: Register field to read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_read(struct regmap_field *field, unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+ ret = regmap_read(field->regmap, field->reg, &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_field_read);
+
+/**
+ * regmap_fields_read() - Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ if (id >= field->id_size)
+ return -EINVAL;
+
+ ret = regmap_read(field->regmap,
+ field->reg + (field->id_offset * id),
+ &reg_val);
+ if (ret != 0)
+ return ret;
+
+ reg_val &= field->mask;
+ reg_val >>= field->shift;
+ *val = reg_val;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
+/**
+ * regmap_bulk_read() - Read multiple registers from the device
+ *
+ * @map: Register map to read from
+ * @reg: First register to be read from
+ * @val: Pointer to store read value, in native register size for device
+ * @val_count: Number of registers to read
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret, i;
+ size_t val_bytes = map->format.val_bytes;
+ bool vol = regmap_volatile_range(map, reg, val_count);
+
+ if (!IS_ALIGNED(reg, map->reg_stride))
+ return -EINVAL;
+ if (val_count == 0)
+ return -EINVAL;
+
+ if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
+ ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < val_count * val_bytes; i += val_bytes)
+ map->format.parse_inplace(val + i);
+ } else {
+#ifdef CONFIG_64BIT
+ u64 *u64 = val;
+#endif
+ u32 *u32 = val;
+ u16 *u16 = val;
+ u8 *u8 = val;
+
+ map->lock(map->lock_arg);
+
+ for (i = 0; i < val_count; i++) {
+ unsigned int ival;
+
+ ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+ &ival);
+ if (ret != 0)
+ goto out;
+
+ switch (map->format.val_bytes) {
+#ifdef CONFIG_64BIT
+ case 8:
+ u64[i] = ival;
+ break;
+#endif
+ case 4:
+ u32[i] = ival;
+ break;
+ case 2:
+ u16[i] = ival;
+ break;
+ case 1:
+ u8[i] = ival;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ map->unlock(map->lock_arg);
+ }
+
+ if (!ret)
+ trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_read);
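+
+/*
+ * Illustrative usage sketch (not part of this file): reading a block of
+ * measurement registers into native-endian storage; the base register 0x50
+ * is hypothetical and the map is assumed to use 32-bit values.
+ *
+ *	u32 samples[8];
+ *
+ *	ret = regmap_bulk_read(map, 0x50, samples, ARRAY_SIZE(samples));
+ */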
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool force_write)
+{
+ int ret;
+ unsigned int tmp, orig;
+
+ if (change)
+ *change = false;
+
+ if (regmap_volatile(map, reg) && map->reg_update_bits) {
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
+ ret = map->reg_update_bits(map->bus_context, reg, mask, val);
+ if (ret == 0 && change)
+ *change = true;
+ } else {
+ ret = _regmap_read(map, reg, &orig);
+ if (ret != 0)
+ return ret;
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (force_write || (tmp != orig)) {
+ ret = _regmap_write(map, reg, tmp);
+ if (ret == 0 && change)
+ *change = true;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ * @async: Boolean indicating whether to perform the update asynchronously
+ * @force: Boolean indicating whether to write even if the value is unchanged
+ *
+ * Perform a read/modify/write cycle on a register map with change, async, force
+ * options.
+ *
+ * If async is true:
+ *
+ * With most buses the read must be done synchronously so this is most useful
+ * for devices with a cache which do not need to interact with the hardware to
+ * determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_base(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change, bool async, bool force)
+{
+ int ret;
+
+ map->lock(map->lock_arg);
+
+ map->async = async;
+
+ ret = _regmap_update_bits(map, reg, mask, val, change, force);
+
+ map->async = false;
+
+ map->unlock(map->lock_arg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_base);
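+
+/*
+ * Illustrative usage sketch (not part of this file): most callers use the
+ * regmap_update_bits() wrapper, which passes NULL/false/false for the
+ * change/async/force arguments. The register 0x08 and the bit are
+ * hypothetical.
+ *
+ *	ret = regmap_update_bits(map, 0x08, BIT(3), BIT(3));
+ */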
+
+/**
+ * regmap_test_bits() - Check if all specified bits are set in a register.
+ *
+ * @map: Register map to operate on
+ * @reg: Register to read from
+ * @bits: Bits to test
+ *
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
+ */
+int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
+{
+	unsigned int val;
+	int ret;
+
+ ret = regmap_read(map, reg, &val);
+ if (ret)
+ return ret;
+
+ return (val & bits) == bits;
+}
+EXPORT_SYMBOL_GPL(regmap_test_bits);
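+
+/*
+ * Illustrative usage sketch (not part of this file): checking a ready bit;
+ * the register 0x0c and BIT(0) are hypothetical.
+ *
+ *	ret = regmap_test_bits(map, 0x0c, BIT(0));
+ *	if (ret < 0)
+ *		return ret;
+ *	if (ret)
+ *		dev_dbg(dev, "device ready\n");
+ */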
+
+void regmap_async_complete_cb(struct regmap_async *async, int ret)
+{
+ struct regmap *map = async->map;
+ bool wake;
+
+ trace_regmap_async_io_complete(map);
+
+ spin_lock(&map->async_lock);
+ list_move(&async->list, &map->async_free);
+ wake = list_empty(&map->async_list);
+
+ if (ret != 0)
+ map->async_ret = ret;
+
+ spin_unlock(&map->async_lock);
+
+ if (wake)
+ wake_up(&map->async_waitq);
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
+
+static int regmap_async_is_done(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = list_empty(&map->async_list);
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ return ret;
+}
+
+/**
+ * regmap_async_complete - Ensure all asynchronous I/O has completed.
+ *
+ * @map: Map to operate on.
+ *
+ * Blocks until any pending asynchronous I/O has completed. Returns
+ * an error code for any failed I/O operations.
+ */
+int regmap_async_complete(struct regmap *map)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Nothing to do with no async support */
+ if (!map->bus || !map->bus->async_write)
+ return 0;
+
+ trace_regmap_async_complete_start(map);
+
+ wait_event(map->async_waitq, regmap_async_is_done(map));
+
+ spin_lock_irqsave(&map->async_lock, flags);
+ ret = map->async_ret;
+ map->async_ret = 0;
+ spin_unlock_irqrestore(&map->async_lock, flags);
+
+ trace_regmap_async_complete_done(map);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_async_complete);
+
+/**
+ * regmap_register_patch - Register and apply register updates to be applied
+ * on device initialisation
+ *
+ * @map: Register map to apply updates to.
+ * @regs: Values to update.
+ * @num_regs: Number of entries in regs.
+ *
+ * Register a set of register updates to be applied to the device
+ * whenever the device registers are synchronised with the cache and
+ * apply them immediately. Typically this is used to apply
+ * corrections to the device defaults on startup, such
+ * as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
+ */
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
+ int num_regs)
+{
+ struct reg_sequence *p;
+ int ret;
+ bool bypass;
+
+ if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
+ num_regs))
+ return 0;
+
+ p = krealloc(map->patch,
+ sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
+ GFP_KERNEL);
+ if (p) {
+ memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
+ map->patch = p;
+ map->patch_regs += num_regs;
+ } else {
+ return -ENOMEM;
+ }
+
+ map->lock(map->lock_arg);
+
+ bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+ map->async = true;
+
+ ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+ map->async = false;
+ map->cache_bypass = bypass;
+
+ map->unlock(map->lock_arg);
+
+ regmap_async_complete(map);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_register_patch);
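+
+/*
+ * Illustrative usage sketch (not part of this file): registering
+ * vendor-supplied fixups so they are written now and re-applied by every
+ * regcache_sync(). The register addresses and values are hypothetical.
+ *
+ *	static const struct reg_sequence errata[] = {
+ *		{ 0x7f, 0x01 },
+ *		{ 0x56, 0xaa },
+ *	};
+ *
+ *	ret = regmap_register_patch(map, errata, ARRAY_SIZE(errata));
+ */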
+
+/**
+ * regmap_get_val_bytes() - Report the size of a register value
+ *
+ * @map: Register map to operate on.
+ *
+ * Report the size of a register value, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_val_bytes(struct regmap *map)
+{
+ if (map->format.format_write)
+ return -EINVAL;
+
+ return map->format.val_bytes;
+}
+EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+
+/**
+ * regmap_get_max_register() - Report the max register value
+ *
+ * @map: Register map to operate on.
+ *
+ * Report the max register value, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_max_register(struct regmap *map)
+{
+ return map->max_register ? map->max_register : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regmap_get_max_register);
+
+/**
+ * regmap_get_reg_stride() - Report the register address stride
+ *
+ * @map: Register map to operate on.
+ *
+ * Report the register address stride, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_reg_stride(struct regmap *map)
+{
+ return map->reg_stride;
+}
+EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
+
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ *val = map->format.parse_val(buf);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
+static int __init regmap_initcall(void)
+{
+ regmap_debugfs_initcall();
+
+ return 0;
+}
+postcore_initcall(regmap_initcall);
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
new file mode 100644
index 000000000..704e106e5
--- /dev/null
+++ b/drivers/base/regmap/trace.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+#include "internal.h"
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(map, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, regmap_name(map) )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s reg=%x val=%x", __get_str(name), __entry->reg, __entry->val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(map, reg, val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(map, reg, val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read_cache,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(map, reg, val)
+);
+
+DECLARE_EVENT_CLASS(regmap_bulk,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len),
+
+ TP_STRUCT__entry(
+ __string(name, regmap_name(map))
+ __field(unsigned int, reg)
+ __dynamic_array(char, buf, val_len)
+ __field(int, val_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->reg = reg;
+ __entry->val_len = val_len;
+ memcpy(__get_dynamic_array(buf), val, val_len);
+ ),
+
+ TP_printk("%s reg=%x val=%s", __get_str(name), __entry->reg,
+ __print_hex(__get_dynamic_array(buf), __entry->val_len))
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_write,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
+);
+
+DEFINE_EVENT(regmap_bulk, regmap_bulk_read,
+
+ TP_PROTO(struct regmap *map, unsigned int reg,
+ const void *val, int val_len),
+
+ TP_ARGS(map, reg, val, val_len)
+);
+
+DECLARE_EVENT_CLASS(regmap_block,
+
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+ TP_ARGS(map, reg, count),
+
+ TP_STRUCT__entry(
+ __string( name, regmap_name(map) )
+ __field( unsigned int, reg )
+ __field( int, count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->reg = reg;
+ __entry->count = count;
+ ),
+
+ TP_printk("%s reg=%x count=%d", __get_str(name), __entry->reg, __entry->count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+ TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+ TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+ TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+ TP_ARGS(map, reg, count)
+);
+
+TRACE_EVENT(regcache_sync,
+
+ TP_PROTO(struct regmap *map, const char *type,
+ const char *status),
+
+ TP_ARGS(map, type, status),
+
+ TP_STRUCT__entry(
+ __string( name, regmap_name(map) )
+ __string( status, status )
+ __string( type, type )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __assign_str(status, status);
+ __assign_str(type, type);
+ ),
+
+ TP_printk("%s type=%s status=%s", __get_str(name),
+ __get_str(type), __get_str(status))
+);
+
+DECLARE_EVENT_CLASS(regmap_bool,
+
+ TP_PROTO(struct regmap *map, bool flag),
+
+ TP_ARGS(map, flag),
+
+ TP_STRUCT__entry(
+ __string( name, regmap_name(map) )
+ __field( int, flag )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->flag = flag;
+ ),
+
+ TP_printk("%s flag=%d", __get_str(name), __entry->flag)
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_only,
+
+ TP_PROTO(struct regmap *map, bool flag),
+
+ TP_ARGS(map, flag)
+);
+
+DEFINE_EVENT(regmap_bool, regmap_cache_bypass,
+
+ TP_PROTO(struct regmap *map, bool flag),
+
+ TP_ARGS(map, flag)
+);
+
+DECLARE_EVENT_CLASS(regmap_async,
+
+ TP_PROTO(struct regmap *map),
+
+ TP_ARGS(map),
+
+ TP_STRUCT__entry(
+ __string( name, regmap_name(map) )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+DEFINE_EVENT(regmap_block, regmap_async_write_start,
+
+ TP_PROTO(struct regmap *map, unsigned int reg, int count),
+
+ TP_ARGS(map, reg, count)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_io_complete,
+
+ TP_PROTO(struct regmap *map),
+
+ TP_ARGS(map)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_start,
+
+ TP_PROTO(struct regmap *map),
+
+ TP_ARGS(map)
+);
+
+DEFINE_EVENT(regmap_async, regmap_async_complete_done,
+
+ TP_PROTO(struct regmap *map),
+
+ TP_ARGS(map)
+);
+
+TRACE_EVENT(regcache_drop_region,
+
+ TP_PROTO(struct regmap *map, unsigned int from,
+ unsigned int to),
+
+ TP_ARGS(map, from, to),
+
+ TP_STRUCT__entry(
+ __string( name, regmap_name(map) )
+ __field( unsigned int, from )
+ __field( unsigned int, to )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, regmap_name(map));
+ __entry->from = from;
+ __entry->to = to;
+ ),
+
+ TP_printk("%s %u-%u", __get_str(name), __entry->from, __entry->to)
+);
+
+#endif /* _TRACE_REGMAP_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 000000000..22130b5f7
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/err.h>
+#include <linux/glob.h>
+
+static DEFINE_IDA(soc_ida);
+
+/* Prototype to allow declarations of DEVICE_ATTR(<foo>) before soc_info_show */
+static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+
+struct soc_device {
+ struct device dev;
+ struct soc_device_attribute *attr;
+ int soc_dev_num;
+};
+
+static struct bus_type soc_bus_type = {
+ .name = "soc",
+};
+
+static DEVICE_ATTR(machine, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(family, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(serial_number, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(soc_id, 0444, soc_info_show, NULL);
+static DEVICE_ATTR(revision, 0444, soc_info_show, NULL);
+
+struct device *soc_device_to_device(struct soc_device *soc_dev)
+{
+ return &soc_dev->dev;
+}
+
+static umode_t soc_attribute_mode(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ if ((attr == &dev_attr_machine.attr) && soc_dev->attr->machine)
+ return attr->mode;
+ if ((attr == &dev_attr_family.attr) && soc_dev->attr->family)
+ return attr->mode;
+ if ((attr == &dev_attr_revision.attr) && soc_dev->attr->revision)
+ return attr->mode;
+ if ((attr == &dev_attr_serial_number.attr) && soc_dev->attr->serial_number)
+ return attr->mode;
+ if ((attr == &dev_attr_soc_id.attr) && soc_dev->attr->soc_id)
+ return attr->mode;
+
+ /* Unknown or unfilled attribute */
+ return 0;
+}
+
+static ssize_t soc_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+ const char *output;
+
+ if (attr == &dev_attr_machine)
+ output = soc_dev->attr->machine;
+ else if (attr == &dev_attr_family)
+ output = soc_dev->attr->family;
+ else if (attr == &dev_attr_revision)
+ output = soc_dev->attr->revision;
+ else if (attr == &dev_attr_serial_number)
+ output = soc_dev->attr->serial_number;
+ else if (attr == &dev_attr_soc_id)
+ output = soc_dev->attr->soc_id;
+ else
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", output);
+}
+
+static struct attribute *soc_attr[] = {
+ &dev_attr_machine.attr,
+ &dev_attr_family.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_soc_id.attr,
+ &dev_attr_revision.attr,
+ NULL,
+};
+
+static const struct attribute_group soc_attr_group = {
+ .attrs = soc_attr,
+ .is_visible = soc_attribute_mode,
+};
+
+static void soc_release(struct device *dev)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
+ kfree(soc_dev->dev.groups);
+ kfree(soc_dev);
+}
+
+static struct soc_device_attribute *early_soc_dev_attr;
+
+struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
+{
+ struct soc_device *soc_dev;
+ const struct attribute_group **soc_attr_groups;
+ int ret;
+
+ if (!soc_bus_type.p) {
+ if (early_soc_dev_attr)
+ return ERR_PTR(-EBUSY);
+ early_soc_dev_attr = soc_dev_attr;
+ return NULL;
+ }
+
+ soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
+ if (!soc_dev) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ soc_attr_groups = kcalloc(3, sizeof(*soc_attr_groups), GFP_KERNEL);
+ if (!soc_attr_groups) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+ soc_attr_groups[0] = &soc_attr_group;
+ soc_attr_groups[1] = soc_dev_attr->custom_attr_group;
+
+ /* Fetch a unique (reclaimable) SOC ID. */
+ ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto out3;
+ soc_dev->soc_dev_num = ret;
+
+ soc_dev->attr = soc_dev_attr;
+ soc_dev->dev.bus = &soc_bus_type;
+ soc_dev->dev.groups = soc_attr_groups;
+ soc_dev->dev.release = soc_release;
+
+ dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
+
+ ret = device_register(&soc_dev->dev);
+ if (ret) {
+ put_device(&soc_dev->dev);
+ return ERR_PTR(ret);
+ }
+
+ return soc_dev;
+
+out3:
+ kfree(soc_attr_groups);
+out2:
+ kfree(soc_dev);
+out1:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(soc_device_register);
+
+/* Ensure soc_dev->attr is freed after calling soc_device_unregister. */
+void soc_device_unregister(struct soc_device *soc_dev)
+{
+ device_unregister(&soc_dev->dev);
+ early_soc_dev_attr = NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_unregister);
+
+static int __init soc_bus_register(void)
+{
+ int ret;
+
+ ret = bus_register(&soc_bus_type);
+ if (ret)
+ return ret;
+
+ if (early_soc_dev_attr)
+ return PTR_ERR(soc_device_register(early_soc_dev_attr));
+
+ return 0;
+}
+core_initcall(soc_bus_register);
+
+static int soc_device_match_attr(const struct soc_device_attribute *attr,
+ const struct soc_device_attribute *match)
+{
+ if (match->machine &&
+ (!attr->machine || !glob_match(match->machine, attr->machine)))
+ return 0;
+
+ if (match->family &&
+ (!attr->family || !glob_match(match->family, attr->family)))
+ return 0;
+
+ if (match->revision &&
+ (!attr->revision || !glob_match(match->revision, attr->revision)))
+ return 0;
+
+ if (match->soc_id &&
+ (!attr->soc_id || !glob_match(match->soc_id, attr->soc_id)))
+ return 0;
+
+ return 1;
+}
+
+static int soc_device_match_one(struct device *dev, void *arg)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ return soc_device_match_attr(soc_dev->attr, arg);
+}
+
+/*
+ * soc_device_match - identify the SoC in the machine
+ * @matches: zero-terminated array of possible matches
+ *
+ * returns the first matching entry of the argument array, or NULL
+ * if none of them match.
+ *
+ * This function is meant as a helper in place of of_match_node()
+ * in cases where either no device tree is available or the information
+ * in a device node is insufficient to identify a particular variant
+ * by its compatible strings or other properties. For new devices,
+ * the DT binding should always provide unique compatible strings
+ * that allow the use of of_match_node() instead.
+ *
+ * The calling function can use the .data entry of the
+ * soc_device_attribute to pass a structure or function pointer for
+ * each entry.
+ */
+const struct soc_device_attribute *soc_device_match(
+ const struct soc_device_attribute *matches)
+{
+ int ret;
+
+ if (!matches)
+ return NULL;
+
+ while (matches->machine || matches->family || matches->revision ||
+ matches->soc_id) {
+ ret = bus_for_each_dev(&soc_bus_type, NULL, (void *)matches,
+ soc_device_match_one);
+ if (ret < 0 && early_soc_dev_attr)
+ ret = soc_device_match_attr(early_soc_dev_attr,
+ matches);
+ if (ret < 0)
+ return NULL;
+ if (ret)
+ return matches;
+
+ matches++;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(soc_device_match);
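+
+/*
+ * Illustrative usage sketch (not part of this file): selecting quirks by SoC
+ * revision using glob matching. The table is terminated by the empty entry;
+ * the family and revision strings are hypothetical and apply_rev1_quirks()
+ * stands in for driver-specific handling.
+ *
+ *	static const struct soc_device_attribute quirk_table[] = {
+ *		{ .family = "ExampleSoC", .revision = "1.*" },
+ *		{ }
+ *	};
+ *
+ *	if (soc_device_match(quirk_table))
+ *		apply_rev1_quirks();
+ */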
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
new file mode 100644
index 000000000..44153caa8
--- /dev/null
+++ b/drivers/base/swnode.c
@@ -0,0 +1,1177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Software nodes for the firmware node framework.
+ *
+ * Copyright (C) 2018, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "base.h"
+
+struct swnode {
+ struct kobject kobj;
+ struct fwnode_handle fwnode;
+ const struct software_node *node;
+ int id;
+
+ /* hierarchy */
+ struct ida child_ids;
+ struct list_head entry;
+ struct list_head children;
+ struct swnode *parent;
+
+ unsigned int allocated:1;
+ unsigned int managed:1;
+};
+
+static DEFINE_IDA(swnode_root_ids);
+static struct kset *swnode_kset;
+
+#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct swnode, kobj)
+
+static const struct fwnode_operations software_node_ops;
+
+bool is_software_node(const struct fwnode_handle *fwnode)
+{
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &software_node_ops;
+}
+EXPORT_SYMBOL_GPL(is_software_node);
+
+#define to_swnode(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_swnode_fwnode = __fwnode; \
+ \
+ is_software_node(__to_swnode_fwnode) ? \
+ container_of(__to_swnode_fwnode, \
+ struct swnode, fwnode) : NULL; \
+ })
+
+static inline struct swnode *dev_to_swnode(struct device *dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ if (!fwnode)
+ return NULL;
+
+ if (!is_software_node(fwnode))
+ fwnode = fwnode->secondary;
+
+ return to_swnode(fwnode);
+}
+
+static struct swnode *
+software_node_to_swnode(const struct software_node *node)
+{
+ struct swnode *swnode = NULL;
+ struct kobject *k;
+
+ if (!node)
+ return NULL;
+
+ spin_lock(&swnode_kset->list_lock);
+
+ list_for_each_entry(k, &swnode_kset->list, entry) {
+ swnode = kobj_to_swnode(k);
+ if (swnode->node == node)
+ break;
+ swnode = NULL;
+ }
+
+ spin_unlock(&swnode_kset->list_lock);
+
+ return swnode;
+}
+
+const struct software_node *to_software_node(const struct fwnode_handle *fwnode)
+{
+ const struct swnode *swnode = to_swnode(fwnode);
+
+ return swnode ? swnode->node : NULL;
+}
+EXPORT_SYMBOL_GPL(to_software_node);
+
+struct fwnode_handle *software_node_fwnode(const struct software_node *node)
+{
+ struct swnode *swnode = software_node_to_swnode(node);
+
+ return swnode ? &swnode->fwnode : NULL;
+}
+EXPORT_SYMBOL_GPL(software_node_fwnode);
+
+/* -------------------------------------------------------------------------- */
+/* property_entry processing */
+
+static const struct property_entry *
+property_entry_get(const struct property_entry *prop, const char *name)
+{
+ if (!prop)
+ return NULL;
+
+ for (; prop->name; prop++)
+ if (!strcmp(name, prop->name))
+ return prop;
+
+ return NULL;
+}
+
+static const void *property_get_pointer(const struct property_entry *prop)
+{
+ if (!prop->length)
+ return NULL;
+
+ return prop->is_inline ? &prop->value : prop->pointer;
+}
+
+static const void *property_entry_find(const struct property_entry *props,
+ const char *propname, size_t length)
+{
+ const struct property_entry *prop;
+ const void *pointer;
+
+ prop = property_entry_get(props, propname);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+ pointer = property_get_pointer(prop);
+ if (!pointer)
+ return ERR_PTR(-ENODATA);
+ if (length > prop->length)
+ return ERR_PTR(-EOVERFLOW);
+ return pointer;
+}
+
+static int
+property_entry_count_elems_of_size(const struct property_entry *props,
+ const char *propname, size_t length)
+{
+ const struct property_entry *prop;
+
+ prop = property_entry_get(props, propname);
+ if (!prop)
+ return -EINVAL;
+
+ return prop->length / length;
+}
+
+static int property_entry_read_int_array(const struct property_entry *props,
+ const char *name,
+ unsigned int elem_size, void *val,
+ size_t nval)
+{
+ const void *pointer;
+ size_t length;
+
+ if (!val)
+ return property_entry_count_elems_of_size(props, name,
+ elem_size);
+
+ if (!is_power_of_2(elem_size) || elem_size > sizeof(u64))
+ return -ENXIO;
+
+ length = nval * elem_size;
+
+ pointer = property_entry_find(props, name, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(val, pointer, length);
+ return 0;
+}
+
+static int property_entry_read_string_array(const struct property_entry *props,
+ const char *propname,
+ const char **strings, size_t nval)
+{
+ const void *pointer;
+ size_t length;
+ int array_len;
+
+ /* Find out the array length. */
+ array_len = property_entry_count_elems_of_size(props, propname,
+ sizeof(const char *));
+ if (array_len < 0)
+ return array_len;
+
+ /* Return how many there are if strings is NULL. */
+ if (!strings)
+ return array_len;
+
+ array_len = min_t(size_t, nval, array_len);
+ length = array_len * sizeof(*strings);
+
+ pointer = property_entry_find(props, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(strings, pointer, length);
+
+ return array_len;
+}
+
+static void property_entry_free_data(const struct property_entry *p)
+{
+ const char * const *src_str;
+ size_t i, nval;
+
+ if (p->type == DEV_PROP_STRING) {
+ src_str = property_get_pointer(p);
+ nval = p->length / sizeof(*src_str);
+ for (i = 0; i < nval; i++)
+ kfree(src_str[i]);
+ }
+
+ if (!p->is_inline)
+ kfree(p->pointer);
+
+ kfree(p->name);
+}
+
+static bool property_copy_string_array(const char **dst_ptr,
+ const char * const *src_ptr,
+ size_t nval)
+{
+ int i;
+
+ for (i = 0; i < nval; i++) {
+ dst_ptr[i] = kstrdup(src_ptr[i], GFP_KERNEL);
+ if (!dst_ptr[i] && src_ptr[i]) {
+ while (--i >= 0)
+ kfree(dst_ptr[i]);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int property_entry_copy_data(struct property_entry *dst,
+ const struct property_entry *src)
+{
+ const void *pointer = property_get_pointer(src);
+ void *dst_ptr;
+ size_t nval;
+
+ /*
+ * Properties with no data should not be marked as stored
+ * out of line.
+ */
+ if (!src->is_inline && !src->length)
+ return -ENODATA;
+
+ /*
+ * Reference properties are never stored inline as
+ * they are too big.
+ */
+ if (src->type == DEV_PROP_REF && src->is_inline)
+ return -EINVAL;
+
+ if (src->length <= sizeof(dst->value)) {
+ dst_ptr = &dst->value;
+ dst->is_inline = true;
+ } else {
+ dst_ptr = kmalloc(src->length, GFP_KERNEL);
+ if (!dst_ptr)
+ return -ENOMEM;
+ dst->pointer = dst_ptr;
+ }
+
+ if (src->type == DEV_PROP_STRING) {
+ nval = src->length / sizeof(const char *);
+ if (!property_copy_string_array(dst_ptr, pointer, nval)) {
+ if (!dst->is_inline)
+ kfree(dst->pointer);
+ return -ENOMEM;
+ }
+ } else {
+ memcpy(dst_ptr, pointer, src->length);
+ }
+
+ dst->length = src->length;
+ dst->type = src->type;
+ dst->name = kstrdup(src->name, GFP_KERNEL);
+ if (!dst->name) {
+ property_entry_free_data(dst);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * property_entries_dup - duplicate array of properties
+ * @properties: array of properties to copy
+ *
+ * This function creates a deep copy of the given NULL-terminated array
+ * of property entries.
+ */
+struct property_entry *
+property_entries_dup(const struct property_entry *properties)
+{
+ struct property_entry *p;
+ int i, n = 0;
+ int ret;
+
+ if (!properties)
+ return NULL;
+
+ while (properties[n].name)
+ n++;
+
+ p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < n; i++) {
+ ret = property_entry_copy_data(&p[i], &properties[i]);
+ if (ret) {
+ while (--i >= 0)
+ property_entry_free_data(&p[i]);
+ kfree(p);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(property_entries_dup);
+
+/**
+ * property_entries_free - free previously allocated array of properties
+ * @properties: array of properties to destroy
+ *
+ * This function frees given NULL-terminated array of property entries,
+ * along with their data.
+ */
+void property_entries_free(const struct property_entry *properties)
+{
+ const struct property_entry *p;
+
+ if (!properties)
+ return;
+
+ for (p = properties; p->name; p++)
+ property_entry_free_data(p);
+
+ kfree(properties);
+}
+EXPORT_SYMBOL_GPL(property_entries_free);
+
+/* -------------------------------------------------------------------------- */
+/* fwnode operations */
+
+static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ kobject_get(&swnode->kobj);
+
+ return &swnode->fwnode;
+}
+
+static void software_node_put(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ kobject_put(&swnode->kobj);
+}
+
+static bool software_node_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ return !!property_entry_get(swnode->node->properties, propname);
+}
+
+static int software_node_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ return property_entry_read_int_array(swnode->node->properties, propname,
+ elem_size, val, nval);
+}
+
+static int software_node_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char **val, size_t nval)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ return property_entry_read_string_array(swnode->node->properties,
+ propname, val, nval);
+}
+
+static const char *
+software_node_get_name(const struct fwnode_handle *fwnode)
+{
+ const struct swnode *swnode = to_swnode(fwnode);
+
+ return kobject_name(&swnode->kobj);
+}
+
+static const char *
+software_node_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ const char *prefix;
+
+ parent = fwnode_get_parent(fwnode);
+ if (!parent)
+ return "";
+
+ /* Figure out the prefix from the parents. */
+ while (is_software_node(parent))
+ parent = fwnode_get_next_parent(parent);
+
+ prefix = fwnode_get_name_prefix(parent);
+ fwnode_handle_put(parent);
+
+ /* Guess something if prefix was NULL. */
+ return prefix ?: "/";
+}
+
+static struct fwnode_handle *
+software_node_get_parent(const struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ if (!swnode || !swnode->parent)
+ return NULL;
+
+ return fwnode_handle_get(&swnode->parent->fwnode);
+}
+
+static struct fwnode_handle *
+software_node_get_next_child(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ struct swnode *p = to_swnode(fwnode);
+ struct swnode *c = to_swnode(child);
+
+ if (!p || list_empty(&p->children) ||
+ (c && list_is_last(&c->entry, &p->children))) {
+ fwnode_handle_put(child);
+ return NULL;
+ }
+
+ if (c)
+ c = list_next_entry(c, entry);
+ else
+ c = list_first_entry(&p->children, struct swnode, entry);
+
+ fwnode_handle_put(child);
+ return fwnode_handle_get(&c->fwnode);
+}
+
+static struct fwnode_handle *
+software_node_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ struct swnode *child;
+
+ if (!swnode || list_empty(&swnode->children))
+ return NULL;
+
+ list_for_each_entry(child, &swnode->children, entry) {
+ if (!strcmp(childname, kobject_name(&child->kobj))) {
+ kobject_get(&child->kobj);
+ return &child->fwnode;
+ }
+ }
+ return NULL;
+}
+
+static int
+software_node_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const struct software_node_ref_args *ref_array;
+ const struct software_node_ref_args *ref;
+ const struct property_entry *prop;
+ struct fwnode_handle *refnode;
+ u32 nargs_prop_val;
+ int error;
+ int i;
+
+ prop = property_entry_get(swnode->node->properties, propname);
+ if (!prop)
+ return -ENOENT;
+
+ if (prop->type != DEV_PROP_REF)
+ return -EINVAL;
+
+ /*
+ * We expect that references are never stored inline, even
+ * single ones, as they are too big.
+ */
+ if (prop->is_inline)
+ return -EINVAL;
+
+ if (index * sizeof(*ref) >= prop->length)
+ return -ENOENT;
+
+ ref_array = prop->pointer;
+ ref = &ref_array[index];
+
+ refnode = software_node_fwnode(ref->node);
+ if (!refnode)
+ return -ENOENT;
+
+ if (nargs_prop) {
+ error = property_entry_read_int_array(ref->node->properties,
+ nargs_prop, sizeof(u32),
+ &nargs_prop_val, 1);
+ if (error)
+ return error;
+
+ nargs = nargs_prop_val;
+ }
+
+ if (nargs > NR_FWNODE_REFERENCE_ARGS)
+ return -EINVAL;
+
+ if (!args)
+ return 0;
+
+ args->fwnode = software_node_get(refnode);
+ args->nargs = nargs;
+
+ for (i = 0; i < nargs; i++)
+ args->args[i] = ref->args[i];
+
+ return 0;
+}
+
+static struct fwnode_handle *
+swnode_graph_find_next_port(const struct fwnode_handle *parent,
+ struct fwnode_handle *port)
+{
+ struct fwnode_handle *old = port;
+
+ while ((port = software_node_get_next_child(parent, old))) {
+ /*
+ * fwnode ports have naming style "port@", so we search for any
+ * children that follow that convention.
+ */
+ if (!strncmp(to_swnode(port)->node->name, "port@",
+ strlen("port@")))
+ return port;
+ old = port;
+ }
+
+ return NULL;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ struct fwnode_handle *parent;
+ struct fwnode_handle *port;
+
+ if (!swnode)
+ return NULL;
+
+ if (endpoint) {
+ port = software_node_get_parent(endpoint);
+ parent = software_node_get_parent(port);
+ } else {
+ parent = software_node_get_named_child_node(fwnode, "ports");
+ if (!parent)
+ parent = software_node_get(&swnode->fwnode);
+
+ port = swnode_graph_find_next_port(parent, NULL);
+ }
+
+ for (; port; port = swnode_graph_find_next_port(parent, port)) {
+ endpoint = software_node_get_next_child(port, endpoint);
+ if (endpoint) {
+ fwnode_handle_put(port);
+ break;
+ }
+ }
+
+ fwnode_handle_put(parent);
+
+ return endpoint;
+}
+
+static struct fwnode_handle *
+software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const struct software_node_ref_args *ref;
+ const struct property_entry *prop;
+
+ if (!swnode)
+ return NULL;
+
+ prop = property_entry_get(swnode->node->properties, "remote-endpoint");
+ if (!prop || prop->type != DEV_PROP_REF || prop->is_inline)
+ return NULL;
+
+ ref = prop->pointer;
+
+ return software_node_get(software_node_fwnode(ref[0].node));
+}
+
+static struct fwnode_handle *
+software_node_graph_get_port_parent(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ swnode = swnode->parent;
+ if (swnode && !strcmp(swnode->node->name, "ports"))
+ swnode = swnode->parent;
+
+ return swnode ? software_node_get(&swnode->fwnode) : NULL;
+}
+
+static int
+software_node_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+ const char *parent_name = swnode->parent->node->name;
+ int ret;
+
+ if (strlen("port@") >= strlen(parent_name) ||
+ strncmp(parent_name, "port@", strlen("port@")))
+ return -EINVAL;
+
+ /* Ports are named "port@n"; we need to extract the n. */
+ ret = kstrtou32(parent_name + strlen("port@"), 10, &endpoint->port);
+ if (ret)
+ return ret;
+
+ endpoint->id = swnode->id;
+ endpoint->local_fwnode = fwnode;
+
+ return 0;
+}
+
+static const struct fwnode_operations software_node_ops = {
+ .get = software_node_get,
+ .put = software_node_put,
+ .property_present = software_node_property_present,
+ .property_read_int_array = software_node_read_int_array,
+ .property_read_string_array = software_node_read_string_array,
+ .get_name = software_node_get_name,
+ .get_name_prefix = software_node_get_name_prefix,
+ .get_parent = software_node_get_parent,
+ .get_next_child_node = software_node_get_next_child,
+ .get_named_child_node = software_node_get_named_child_node,
+ .get_reference_args = software_node_get_reference_args,
+ .graph_get_next_endpoint = software_node_graph_get_next_endpoint,
+ .graph_get_remote_endpoint = software_node_graph_get_remote_endpoint,
+ .graph_get_port_parent = software_node_graph_get_port_parent,
+ .graph_parse_endpoint = software_node_graph_parse_endpoint,
+};
+
+/* -------------------------------------------------------------------------- */
+
+/**
+ * software_node_find_by_name - Find software node by name
+ * @parent: Parent of the software node
+ * @name: Name of the software node
+ *
+ * The function will find a node that is a child of @parent and that is
+ * named @name. If no node is found, the function returns NULL.
+ *
+ * NOTE: you will need to drop the reference with fwnode_handle_put() after use.
+ */
+const struct software_node *
+software_node_find_by_name(const struct software_node *parent, const char *name)
+{
+ struct swnode *swnode = NULL;
+ struct kobject *k;
+
+ if (!name)
+ return NULL;
+
+ spin_lock(&swnode_kset->list_lock);
+
+ list_for_each_entry(k, &swnode_kset->list, entry) {
+ swnode = kobj_to_swnode(k);
+ if (parent == swnode->node->parent && swnode->node->name &&
+ !strcmp(name, swnode->node->name)) {
+ kobject_get(&swnode->kobj);
+ break;
+ }
+ swnode = NULL;
+ }
+
+ spin_unlock(&swnode_kset->list_lock);
+
+ return swnode ? swnode->node : NULL;
+}
+EXPORT_SYMBOL_GPL(software_node_find_by_name);
+
+static struct software_node *software_node_alloc(const struct property_entry *properties)
+{
+ struct property_entry *props;
+ struct software_node *node;
+
+ props = property_entries_dup(properties);
+ if (IS_ERR(props))
+ return ERR_CAST(props);
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ property_entries_free(props);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ node->properties = props;
+
+ return node;
+}
+
+static void software_node_free(const struct software_node *node)
+{
+ property_entries_free(node->properties);
+ kfree(node);
+}
+
+static void software_node_release(struct kobject *kobj)
+{
+ struct swnode *swnode = kobj_to_swnode(kobj);
+
+ if (swnode->parent) {
+ ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ list_del(&swnode->entry);
+ } else {
+ ida_simple_remove(&swnode_root_ids, swnode->id);
+ }
+
+ if (swnode->allocated)
+ software_node_free(swnode->node);
+
+ ida_destroy(&swnode->child_ids);
+ kfree(swnode);
+}
+
+static struct kobj_type software_node_type = {
+ .release = software_node_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct fwnode_handle *
+swnode_register(const struct software_node *node, struct swnode *parent,
+ unsigned int allocated)
+{
+ struct swnode *swnode;
+ int ret;
+
+ swnode = kzalloc(sizeof(*swnode), GFP_KERNEL);
+ if (!swnode)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_simple_get(parent ? &parent->child_ids : &swnode_root_ids,
+ 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(swnode);
+ return ERR_PTR(ret);
+ }
+
+ swnode->id = ret;
+ swnode->node = node;
+ swnode->parent = parent;
+ swnode->kobj.kset = swnode_kset;
+ fwnode_init(&swnode->fwnode, &software_node_ops);
+
+ ida_init(&swnode->child_ids);
+ INIT_LIST_HEAD(&swnode->entry);
+ INIT_LIST_HEAD(&swnode->children);
+
+ if (node->name)
+ ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
+ parent ? &parent->kobj : NULL,
+ "%s", node->name);
+ else
+ ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
+ parent ? &parent->kobj : NULL,
+ "node%d", swnode->id);
+ if (ret) {
+ kobject_put(&swnode->kobj);
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Assign the flag only in the successful case, so
+ * the above kobject_put() won't mess up with properties.
+ */
+ swnode->allocated = allocated;
+
+ if (parent)
+ list_add_tail(&swnode->entry, &parent->children);
+
+ kobject_uevent(&swnode->kobj, KOBJ_ADD);
+ return &swnode->fwnode;
+}
+
+/**
+ * software_node_register_nodes - Register an array of software nodes
+ * @nodes: Zero terminated array of software nodes to be registered
+ *
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it, either outside
+ * of this function or by ordering the array such that the parent comes
+ * before the child.
+ */
+int software_node_register_nodes(const struct software_node *nodes)
+{
+ int ret;
+ int i;
+
+ for (i = 0; nodes[i].name; i++) {
+ const struct software_node *parent = nodes[i].parent;
+
+ if (parent && !software_node_to_swnode(parent)) {
+ ret = -EINVAL;
+ goto err_unregister_nodes;
+ }
+
+ ret = software_node_register(&nodes[i]);
+ if (ret)
+ goto err_unregister_nodes;
+ }
+
+ return 0;
+
+err_unregister_nodes:
+ software_node_unregister_nodes(nodes);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(software_node_register_nodes);
+
+/**
+ * software_node_unregister_nodes - Unregister an array of software nodes
+ * @nodes: Zero terminated array of software nodes to be unregistered
+ *
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
+ *
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * parents will be unregistered before their children, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
+ */
+void software_node_unregister_nodes(const struct software_node *nodes)
+{
+ unsigned int i = 0;
+
+ while (nodes[i].name)
+ i++;
+
+ while (i--)
+ software_node_unregister(&nodes[i]);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
+
+/**
+ * software_node_register_node_group - Register a group of software nodes
+ * @node_group: NULL terminated array of software node pointers to be registered
+ *
+ * Register multiple software nodes at once. If any node in the array
+ * has its .parent pointer set (which can only be to another software_node),
+ * then its parent **must** have been registered before it, either outside
+ * of this function or by ordering the array such that the parent comes
+ * before the child.
+ */
+int software_node_register_node_group(const struct software_node **node_group)
+{
+ unsigned int i;
+ int ret;
+
+ if (!node_group)
+ return 0;
+
+ for (i = 0; node_group[i]; i++) {
+ ret = software_node_register(node_group[i]);
+ if (ret) {
+ software_node_unregister_node_group(node_group);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(software_node_register_node_group);
+
+/**
+ * software_node_unregister_node_group - Unregister a group of software nodes
+ * @node_group: NULL terminated array of software node pointers to be unregistered
+ *
+ * Unregister multiple software nodes at once. If parent pointers are set up
+ * in any of the software nodes then the array **must** be ordered such that
+ * parents come before their children.
+ *
+ * NOTE: If you are uncertain whether the array is ordered such that
+ * parents will be unregistered before their children, it is wiser to
+ * remove the nodes individually, in the correct order (child before
+ * parent).
+ */
+void software_node_unregister_node_group(
+ const struct software_node **node_group)
+{
+ unsigned int i = 0;
+
+ if (!node_group)
+ return;
+
+ while (node_group[i])
+ i++;
+
+ while (i--)
+ software_node_unregister(node_group[i]);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister_node_group);
+
+/**
+ * software_node_register - Register static software node
+ * @node: The software node to be registered
+ */
+int software_node_register(const struct software_node *node)
+{
+ struct swnode *parent = software_node_to_swnode(node->parent);
+
+ if (software_node_to_swnode(node))
+ return -EEXIST;
+
+ if (node->parent && !parent)
+ return -EINVAL;
+
+ return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
+}
+EXPORT_SYMBOL_GPL(software_node_register);
+
+/**
+ * software_node_unregister - Unregister static software node
+ * @node: The software node to be unregistered
+ */
+void software_node_unregister(const struct software_node *node)
+{
+ struct swnode *swnode;
+
+ swnode = software_node_to_swnode(node);
+ if (swnode)
+ fwnode_remove_software_node(&swnode->fwnode);
+}
+EXPORT_SYMBOL_GPL(software_node_unregister);
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent)
+{
+ struct fwnode_handle *fwnode;
+ struct software_node *node;
+ struct swnode *p;
+
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ p = to_swnode(parent);
+ if (parent && !p)
+ return ERR_PTR(-EINVAL);
+
+ node = software_node_alloc(properties);
+ if (IS_ERR(node))
+ return ERR_CAST(node);
+
+ node->parent = p ? p->node : NULL;
+
+ fwnode = swnode_register(node, p, 1);
+ if (IS_ERR(fwnode))
+ software_node_free(node);
+
+ return fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_create_software_node);
+
+void fwnode_remove_software_node(struct fwnode_handle *fwnode)
+{
+ struct swnode *swnode = to_swnode(fwnode);
+
+ if (!swnode)
+ return;
+
+ kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
+
+/**
+ * device_add_software_node - Assign software node to a device
+ * @dev: The device the software node is meant for.
+ * @node: The software node.
+ *
+ * This function will make @node the secondary firmware node pointer of @dev. If
+ * @dev has no primary node, then @node will become the primary node. The
+ * function will register @node automatically if it wasn't already registered.
+ */
+int device_add_software_node(struct device *dev, const struct software_node *node)
+{
+ struct swnode *swnode;
+ int ret;
+
+ /* Only one software node per device. */
+ if (dev_to_swnode(dev))
+ return -EBUSY;
+
+ swnode = software_node_to_swnode(node);
+ if (swnode) {
+ kobject_get(&swnode->kobj);
+ } else {
+ ret = software_node_register(node);
+ if (ret)
+ return ret;
+
+ swnode = software_node_to_swnode(node);
+ }
+
+ set_secondary_fwnode(dev, &swnode->fwnode);
+
+ /*
+ * If the device has been fully registered by the time this function is
+ * called, software_node_notify() must be called separately so that the
+ * symlinks get created and the reference count of the node is kept in
+ * balance.
+ */
+ if (device_is_registered(dev))
+ software_node_notify(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_add_software_node);
+
+/**
+ * device_remove_software_node - Remove device's software node
+ * @dev: The device with the software node.
+ *
+ * This function will unregister the software node of @dev.
+ */
+void device_remove_software_node(struct device *dev)
+{
+ struct swnode *swnode;
+
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
+ return;
+
+ if (device_is_registered(dev))
+ software_node_notify_remove(dev);
+
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(device_remove_software_node);
+
+/**
+ * device_create_managed_software_node - Create a software node for a device
+ * @dev: The device the software node is assigned to.
+ * @properties: Device properties for the software node.
+ * @parent: Parent of the software node.
+ *
+ * Creates a software node as a managed resource for @dev, which means the
+ * lifetime of the newly created software node is tied to the lifetime of @dev.
+ * Software nodes created with this function should not be reused or shared
+ * because of that. The function takes a deep copy of @properties for the
+ * software node.
+ *
+ * Since the new software node is assigned directly to @dev, and since it should
+ * not be shared, it is not returned to the caller. The function returns 0 on
+ * success and a negative errno in case of an error.
+ */
+int device_create_managed_software_node(struct device *dev,
+ const struct property_entry *properties,
+ const struct software_node *parent)
+{
+ struct fwnode_handle *p = software_node_fwnode(parent);
+ struct fwnode_handle *fwnode;
+
+ if (parent && !p)
+ return -EINVAL;
+
+ fwnode = fwnode_create_software_node(properties, p);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
+
+ to_swnode(fwnode)->managed = true;
+ set_secondary_fwnode(dev, fwnode);
+
+ if (device_is_registered(dev))
+ software_node_notify(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+
+void software_node_notify(struct device *dev)
+{
+ struct swnode *swnode;
+ int ret;
+
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
+ return;
+
+ ret = sysfs_create_link(&dev->kobj, &swnode->kobj, "software_node");
+ if (ret)
+ return;
+
+ ret = sysfs_create_link(&swnode->kobj, &dev->kobj, dev_name(dev));
+ if (ret) {
+ sysfs_remove_link(&dev->kobj, "software_node");
+ return;
+ }
+
+ kobject_get(&swnode->kobj);
+}
+
+void software_node_notify_remove(struct device *dev)
+{
+ struct swnode *swnode;
+
+ swnode = dev_to_swnode(dev);
+ if (!swnode)
+ return;
+
+ sysfs_remove_link(&swnode->kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "software_node");
+ kobject_put(&swnode->kobj);
+
+ if (swnode->managed) {
+ set_secondary_fwnode(dev, NULL);
+ kobject_put(&swnode->kobj);
+ }
+}
+
+static int __init software_node_init(void)
+{
+ swnode_kset = kset_create_and_add("software_nodes", NULL, kernel_kobj);
+ if (!swnode_kset)
+ return -ENOMEM;
+ return 0;
+}
+postcore_initcall(software_node_init);
+
+static void __exit software_node_exit(void)
+{
+ ida_destroy(&swnode_root_ids);
+ kset_unregister(swnode_kset);
+}
+__exitcall(software_node_exit);
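A hedged sketch of the managed software node API documented above, as a driver might use it to attach extra properties to a device; the property names and my_add_props() are invented for illustration and are not part of this commit:

#include <linux/device.h>
#include <linux/property.h>

static int my_add_props(struct device *dev)
{
	const struct property_entry props[] = {
		PROPERTY_ENTRY_U32("example,bus-width", 4),
		PROPERTY_ENTRY_STRING("example,label", "demo"),
		{ }	/* terminator */
	};

	/*
	 * The entries are deep-copied into a new software node that becomes
	 * the device's secondary fwnode and is torn down with the device.
	 */
	return device_create_managed_software_node(dev, props, NULL);
}

Because the node is managed and must not be shared, the helper deliberately does not hand the created fwnode back to the caller.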
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
new file mode 100644
index 000000000..13db1f78d
--- /dev/null
+++ b/drivers/base/syscore.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * syscore.c - Execution of system core operations.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ */
+
+#include <linux/syscore_ops.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <trace/events/power.h>
+
+static LIST_HEAD(syscore_ops_list);
+static DEFINE_MUTEX(syscore_ops_lock);
+
+/**
+ * register_syscore_ops - Register a set of system core operations.
+ * @ops: System core operations to register.
+ */
+void register_syscore_ops(struct syscore_ops *ops)
+{
+ mutex_lock(&syscore_ops_lock);
+ list_add_tail(&ops->node, &syscore_ops_list);
+ mutex_unlock(&syscore_ops_lock);
+}
+EXPORT_SYMBOL_GPL(register_syscore_ops);
+
+/**
+ * unregister_syscore_ops - Unregister a set of system core operations.
+ * @ops: System core operations to unregister.
+ */
+void unregister_syscore_ops(struct syscore_ops *ops)
+{
+ mutex_lock(&syscore_ops_lock);
+ list_del(&ops->node);
+ mutex_unlock(&syscore_ops_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * syscore_suspend - Execute all the registered system core suspend callbacks.
+ *
+ * This function is executed with only one CPU on-line and interrupts disabled.
+ */
+int syscore_suspend(void)
+{
+ struct syscore_ops *ops;
+ int ret = 0;
+
+ trace_suspend_resume(TPS("syscore_suspend"), 0, true);
+ pm_pr_dbg("Checking wakeup interrupts\n");
+
+ /* Return error code if there are any wakeup interrupts pending. */
+ if (pm_wakeup_pending())
+ return -EBUSY;
+
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled before system core suspend.\n");
+
+ list_for_each_entry_reverse(ops, &syscore_ops_list, node)
+ if (ops->suspend) {
+ pm_pr_dbg("Calling %pS\n", ops->suspend);
+ ret = ops->suspend();
+ if (ret)
+ goto err_out;
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pS\n", ops->suspend);
+ }
+
+ trace_suspend_resume(TPS("syscore_suspend"), 0, false);
+ return 0;
+
+ err_out:
+ pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+
+ list_for_each_entry_continue(ops, &syscore_ops_list, node)
+ if (ops->resume)
+ ops->resume();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(syscore_suspend);
+
+/**
+ * syscore_resume - Execute all the registered system core resume callbacks.
+ *
+ * This function is executed with only one CPU on-line and interrupts disabled.
+ */
+void syscore_resume(void)
+{
+ struct syscore_ops *ops;
+
+ trace_suspend_resume(TPS("syscore_resume"), 0, true);
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled before system core resume.\n");
+
+ list_for_each_entry(ops, &syscore_ops_list, node)
+ if (ops->resume) {
+ pm_pr_dbg("Calling %pS\n", ops->resume);
+ ops->resume();
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pS\n", ops->resume);
+ }
+ trace_suspend_resume(TPS("syscore_resume"), 0, false);
+}
+EXPORT_SYMBOL_GPL(syscore_resume);
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * syscore_shutdown - Execute all the registered system core shutdown callbacks.
+ */
+void syscore_shutdown(void)
+{
+ struct syscore_ops *ops;
+
+ mutex_lock(&syscore_ops_lock);
+
+ list_for_each_entry_reverse(ops, &syscore_ops_list, node)
+ if (ops->shutdown) {
+ if (initcall_debug)
+ pr_info("PM: Calling %pS\n", ops->shutdown);
+ ops->shutdown();
+ }
+
+ mutex_unlock(&syscore_ops_lock);
+}
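A minimal sketch of how a subsystem might hook into these operations, assuming invented callback names (my_syscore_suspend(), my_syscore_resume() and my_syscore_ops are illustrative, not part of this commit):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int my_syscore_suspend(void)
{
	/*
	 * Called late in suspend, with one CPU on-line and interrupts
	 * disabled. A non-zero return aborts the suspend and the ops that
	 * were already suspended are resumed.
	 */
	return 0;
}

static void my_syscore_resume(void)
{
	/* Undo whatever my_syscore_suspend() did. */
}

static struct syscore_ops my_syscore_ops = {
	.suspend = my_syscore_suspend,
	.resume  = my_syscore_resume,
};

static int __init my_syscore_init(void)
{
	register_syscore_ops(&my_syscore_ops);
	return 0;
}
core_initcall(my_syscore_init);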
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 000000000..2f3fa31a9
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+config TEST_ASYNC_DRIVER_PROBE
+ tristate "Build kernel module to test asynchronous driver probing"
+ depends on m
+ help
+ Enabling this option produces a kernel module that allows
+ testing asynchronous driver probing by the device core.
+ The module name will be test_async_driver_probe.ko
+
+ If unsure, say N.
+config DRIVER_PE_KUNIT_TEST
+ bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
+ depends on KUNIT=y
+ default KUNIT_ALL_TESTS
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 000000000..7f76fee6f
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
+
+obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
+CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
new file mode 100644
index 000000000..6071d5bc1
--- /dev/null
+++ b/drivers/base/test/property-entry-test.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0
+// Unit tests for property entries API
+//
+// Copyright 2019 Google LLC.
+
+#include <kunit/test.h>
+#include <linux/property.h>
+#include <linux/types.h>
+
+static void pe_test_uints(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8("prop-u8", 8),
+ PROPERTY_ENTRY_U16("prop-u16", 16),
+ PROPERTY_ENTRY_U32("prop-u32", 32),
+ PROPERTY_ENTRY_U64("prop-u64", 64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[2];
+ u16 val_u16, array_u16[2];
+ u32 val_u32, array_u32[2];
+ u64 val_u64, array_u64[2];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_count_u8(node, "prop-u8");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u8, 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u16, 16);
+
+ error = fwnode_property_count_u16(node, "prop-u16");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u32, 32);
+
+ error = fwnode_property_count_u32(node, "prop-u32");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u64, 64);
+
+ error = fwnode_property_count_u64(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* Count 64-bit values as 16-bit */
+ error = fwnode_property_count_u16(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 4);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_uint_arrays(struct kunit *test)
+{
+ static const u8 a_u8[10] = { 8, 9 };
+ static const u16 a_u16[10] = { 16, 17 };
+ static const u32 a_u32[10] = { 32, 33 };
+ static const u64 a_u64[10] = { 64, 65 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("prop-u8", a_u8),
+ PROPERTY_ENTRY_U16_ARRAY("prop-u16", a_u16),
+ PROPERTY_ENTRY_U32_ARRAY("prop-u32", a_u32),
+ PROPERTY_ENTRY_U64_ARRAY("prop-u64", a_u64),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ u8 val_u8, array_u8[32];
+ u16 val_u16, array_u16[32];
+ u32 val_u32, array_u32[32];
+ u64 val_u64, array_u64[32];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_u8(node, "prop-u8", &val_u8);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u8, 8);
+
+ error = fwnode_property_count_u8(node, "prop-u8");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u8[0], 8);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u8[0], 8);
+ KUNIT_EXPECT_EQ(test, array_u8[1], 9);
+
+ error = fwnode_property_read_u8_array(node, "prop-u8", array_u8, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8(node, "no-prop-u8", &val_u8);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u8_array(node, "no-prop-u8", array_u8, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "prop-u16", &val_u16);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u16, 16);
+
+ error = fwnode_property_count_u16(node, "prop-u16");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u16[0], 16);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u16[0], 16);
+ KUNIT_EXPECT_EQ(test, array_u16[1], 17);
+
+ error = fwnode_property_read_u16_array(node, "prop-u16", array_u16, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16(node, "no-prop-u16", &val_u16);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u16_array(node, "no-prop-u16", array_u16, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "prop-u32", &val_u32);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u32, 32);
+
+ error = fwnode_property_count_u32(node, "prop-u32");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u32[0], 32);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u32[0], 32);
+ KUNIT_EXPECT_EQ(test, array_u32[1], 33);
+
+ error = fwnode_property_read_u32_array(node, "prop-u32", array_u32, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32(node, "no-prop-u32", &val_u32);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u32_array(node, "no-prop-u32", array_u32, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "prop-u64", &val_u64);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, val_u64, 64);
+
+ error = fwnode_property_count_u64(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 10);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 1);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u64[0], 64);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 2);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_EQ(test, array_u64[0], 64);
+ KUNIT_EXPECT_EQ(test, array_u64[1], 65);
+
+ error = fwnode_property_read_u64_array(node, "prop-u64", array_u64, 17);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64(node, "no-prop-u64", &val_u64);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_u64_array(node, "no-prop-u64", array_u64, 1);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* Count 64-bit values as 16-bit */
+ error = fwnode_property_count_u16(node, "prop-u64");
+ KUNIT_EXPECT_EQ(test, error, 40);
+
+ /* Other way around */
+ error = fwnode_property_count_u64(node, "prop-u16");
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_strings(struct kunit *test)
+{
+ static const char *strings[] = {
+ "string-a",
+ "string-b",
+ };
+
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING("str", "single"),
+ PROPERTY_ENTRY_STRING("empty", ""),
+ PROPERTY_ENTRY_STRING_ARRAY("strs", strings),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ const char *str;
+ const char *strs[10];
+ int error;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_read_string(node, "str", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "single");
+
+ error = fwnode_property_string_array_count(node, "str");
+ KUNIT_EXPECT_EQ(test, error, 1);
+
+ error = fwnode_property_read_string_array(node, "str", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ /* asking for more data returns what we have */
+ error = fwnode_property_read_string_array(node, "str", strs, 2);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "single");
+
+ error = fwnode_property_read_string(node, "no-str", &str);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_read_string_array(node, "no-str", strs, 1);
+ KUNIT_EXPECT_LT(test, error, 0);
+
+ error = fwnode_property_read_string(node, "empty", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "");
+
+ error = fwnode_property_string_array_count(node, "strs");
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 3);
+ KUNIT_EXPECT_EQ(test, error, 2);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+ KUNIT_EXPECT_STREQ(test, strs[1], "string-b");
+
+ error = fwnode_property_read_string_array(node, "strs", strs, 1);
+ KUNIT_EXPECT_EQ(test, error, 1);
+ KUNIT_EXPECT_STREQ(test, strs[0], "string-a");
+
+ /* NULL argument -> returns size */
+ error = fwnode_property_read_string_array(node, "strs", NULL, 0);
+ KUNIT_EXPECT_EQ(test, error, 2);
+
+ /* accessing array as single value */
+ error = fwnode_property_read_string(node, "strs", &str);
+ KUNIT_EXPECT_EQ(test, error, 0);
+ KUNIT_EXPECT_STREQ(test, str, "string-a");
+
+ fwnode_remove_software_node(node);
+}
+
+static void pe_test_bool(struct kunit *test)
+{
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_BOOL("prop"),
+ { }
+ };
+
+ struct fwnode_handle *node;
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ KUNIT_EXPECT_TRUE(test, fwnode_property_read_bool(node, "prop"));
+ KUNIT_EXPECT_FALSE(test, fwnode_property_read_bool(node, "not-prop"));
+
+ fwnode_remove_software_node(node);
+}
+
+/* Verifies that a small u8 array is stored inline when the property is copied */
+static void pe_test_move_inline_u8(struct kunit *test)
+{
+ static const u8 u8_array_small[8] = { 1, 2, 3, 4 };
+ static const u8 u8_array_big[128] = { 5, 6, 7, 8 };
+ static const struct property_entry entries[] = {
+ PROPERTY_ENTRY_U8_ARRAY("small", u8_array_small),
+ PROPERTY_ENTRY_U8_ARRAY("big", u8_array_big),
+ { }
+ };
+
+ struct property_entry *copy;
+ const u8 *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ data_ptr = (u8 *)&copy[0].value;
+ KUNIT_EXPECT_EQ(test, data_ptr[0], 1);
+ KUNIT_EXPECT_EQ(test, data_ptr[1], 2);
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_EQ(test, data_ptr[0], 5);
+ KUNIT_EXPECT_EQ(test, data_ptr[1], 6);
+
+ property_entries_free(copy);
+}
+
+/* Verifies that a single-entry string array is stored inline when the property is copied */
+static void pe_test_move_inline_str(struct kunit *test)
+{
+ static char *str_array_small[] = { "a" };
+ static char *str_array_big[] = { "b", "c", "d", "e" };
+ static char *str_array_small_empty[] = { "" };
+ static struct property_entry entries[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("small", str_array_small),
+ PROPERTY_ENTRY_STRING_ARRAY("big", str_array_big),
+ PROPERTY_ENTRY_STRING_ARRAY("small-empty", str_array_small_empty),
+ { }
+ };
+
+ struct property_entry *copy;
+ const char * const *data_ptr;
+
+ copy = property_entries_dup(entries);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, copy);
+
+ KUNIT_EXPECT_TRUE(test, copy[0].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[0].value.str[0], "a");
+
+ KUNIT_EXPECT_FALSE(test, copy[1].is_inline);
+ data_ptr = copy[1].pointer;
+ KUNIT_EXPECT_STREQ(test, data_ptr[0], "b");
+ KUNIT_EXPECT_STREQ(test, data_ptr[1], "c");
+
+ KUNIT_EXPECT_TRUE(test, copy[2].is_inline);
+ KUNIT_EXPECT_STREQ(test, copy[2].value.str[0], "");
+
+ property_entries_free(copy);
+}
+
+/* Handling of reference properties */
+static void pe_test_reference(struct kunit *test)
+{
+ static const struct software_node nodes[] = {
+ { .name = "1", },
+ { .name = "2", },
+ { }
+ };
+
+ static const struct software_node_ref_args refs[] = {
+ SOFTWARE_NODE_REFERENCE(&nodes[0]),
+ SOFTWARE_NODE_REFERENCE(&nodes[1], 3, 4),
+ };
+
+ const struct property_entry entries[] = {
+ PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
+ PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
+ { }
+ };
+
+ struct fwnode_handle *node;
+ struct fwnode_reference_args ref;
+ int error;
+
+ error = software_node_register_nodes(nodes);
+ KUNIT_ASSERT_EQ(test, error, 0);
+
+ node = fwnode_create_software_node(entries, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, node);
+
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 1, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+
+ /* asking for more args, padded with zero data */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 3, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[2], 0LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-2", NULL,
+ 2, 1, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ /* array of references */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 0, 0, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
+
+ /* second reference in the array */
+ error = fwnode_property_get_reference_args(node, "ref-3", NULL,
+ 2, 1, &ref);
+ KUNIT_ASSERT_EQ(test, error, 0);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
+ KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
+ KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
+
+ /* wrong index */
+ error = fwnode_property_get_reference_args(node, "ref-1", NULL,
+ 0, 2, &ref);
+ KUNIT_EXPECT_NE(test, error, 0);
+
+ fwnode_remove_software_node(node);
+ software_node_unregister_nodes(nodes);
+}
+
+static struct kunit_case property_entry_test_cases[] = {
+ KUNIT_CASE(pe_test_uints),
+ KUNIT_CASE(pe_test_uint_arrays),
+ KUNIT_CASE(pe_test_strings),
+ KUNIT_CASE(pe_test_bool),
+ KUNIT_CASE(pe_test_move_inline_u8),
+ KUNIT_CASE(pe_test_move_inline_str),
+ KUNIT_CASE(pe_test_reference),
+ { }
+};
+
+static struct kunit_suite property_entry_test_suite = {
+ .name = "property-entry",
+ .test_cases = property_entry_test_cases,
+};
+
+kunit_test_suite(property_entry_test_suite);
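For contrast with the tests above, a hedged consumer-side sketch of how a driver reads the same kinds of properties through the generic device property accessors; the property names and my_probe() are invented for illustration:

#include <linux/device.h>
#include <linux/property.h>

static int my_probe(struct device *dev)
{
	u32 width;
	const char *label;

	/* Fall back to defaults when a property is absent. */
	if (device_property_read_u32(dev, "example,bus-width", &width))
		width = 1;
	if (device_property_read_string(dev, "example,label", &label))
		label = "unknown";

	dev_info(dev, "bus-width=%u label=%s\n", width, label);
	return 0;
}

The same calls work whether the properties come from a software node, devicetree or ACPI, which is what makes software nodes convenient for board files and unit tests such as the one above.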
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 000000000..3465800ba
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Google, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/numa.h>
+#include <linux/nodemask.h>
+#include <linux/topology.h>
+
+#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
+#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+
+static atomic_t warnings, errors, timeout, async_completed;
+
+static int test_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ /*
+ * Determine if we have hit the "timeout" limit for the test; if we
+ * have, report it as an error. Otherwise we will sleep for the
+ * required amount of time and then report completion.
+ */
+ if (atomic_read(&timeout)) {
+ dev_err(dev, "async probe took too long\n");
+ atomic_inc(&errors);
+ } else {
+ dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_dbg(&pdev->dev, "done sleeping\n");
+ }
+
+ /*
+ * Report NUMA mismatch if device node is set and we are not
+ * performing an async init on that node.
+ */
+ if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) {
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ dev_to_node(dev) != numa_node_id()) {
+ dev_warn(dev, "NUMA node mismatch %d != %d\n",
+ dev_to_node(dev), numa_node_id());
+ atomic_inc(&warnings);
+ }
+
+ atomic_inc(&async_completed);
+ }
+
+ return 0;
+}
+
+static struct platform_driver async_driver = {
+ .driver = {
+ .name = "test_async_driver",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+ .driver = {
+ .name = "test_sync_driver",
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_device *async_dev[NR_CPUS * 2];
+static struct platform_device *sync_dev[2];
+
+static struct platform_device *
+test_platform_device_register_node(char *name, int id, int nid)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc(name, id);
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
+
+ if (nid != NUMA_NO_NODE)
+ set_dev_node(&pdev->dev, nid);
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
+
+ return pdev;
+}
+
+static int __init test_async_probe_init(void)
+{
+ struct platform_device **pdev = NULL;
+ int async_id = 0, sync_id = 0;
+ unsigned long long duration;
+ ktime_t calltime;
+ int err, nid, cpu;
+
+ pr_info("registering first set of asynchronous devices...\n");
+
+ for_each_online_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+ pdev = &async_dev[async_id];
+ *pdev = test_platform_device_register_node("test_async_driver",
+ async_id,
+ nid);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create async_dev: %d\n", err);
+ goto err_unregister_async_devs;
+ }
+
+ async_id++;
+ }
+
+ pr_info("registering asynchronous driver...\n");
+ calltime = ktime_get();
+ err = platform_driver_register(&async_driver);
+ if (err) {
+ pr_err("Failed to register async_driver: %d\n", err);
+ goto err_unregister_async_devs;
+ }
+
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering second set of asynchronous devices...\n");
+ calltime = ktime_get();
+ for_each_online_cpu(cpu) {
+ nid = cpu_to_node(cpu);
+ pdev = &async_dev[async_id];
+
+ *pdev = test_platform_device_register_node("test_async_driver",
+ async_id,
+ nid);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create async_dev: %d\n", err);
+ goto err_unregister_async_driver;
+ }
+
+ async_id++;
+ }
+
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
+ dev_info(&(*pdev)->dev,
+ "registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ dev_err(&(*pdev)->dev,
+ "test failed: probe took too long\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering first synchronous device...\n");
+ nid = cpu_to_node(cpu);
+ pdev = &sync_dev[sync_id];
+
+ *pdev = test_platform_device_register_node("test_sync_driver",
+ sync_id,
+ NUMA_NO_NODE);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create sync_dev: %d\n", err);
+ goto err_unregister_async_driver;
+ }
+
+ sync_id++;
+
+ pr_info("registering synchronous driver...\n");
+ calltime = ktime_get();
+ err = platform_driver_register(&sync_driver);
+ if (err) {
+ pr_err("Failed to register async_driver: %d\n", err);
+ goto err_unregister_sync_devs;
+ }
+
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ dev_err(&(*pdev)->dev,
+ "test failed: probe was too quick\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_sync_driver;
+ }
+
+ pr_info("registering second synchronous device...\n");
+ pdev = &sync_dev[sync_id];
+ calltime = ktime_get();
+
+ *pdev = test_platform_device_register_node("test_sync_driver",
+ sync_id,
+ NUMA_NO_NODE);
+ if (IS_ERR(*pdev)) {
+ err = PTR_ERR(*pdev);
+ *pdev = NULL;
+ pr_err("failed to create sync_dev: %d\n", err);
+ goto err_unregister_sync_driver;
+ }
+
+ sync_id++;
+
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
+ dev_info(&(*pdev)->dev,
+ "registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ dev_err(&(*pdev)->dev,
+ "test failed: probe was too quick\n");
+ err = -ETIMEDOUT;
+ goto err_unregister_sync_driver;
+ }
+
+ /*
+ * The async events should have completed while we were taking care
+ * of the synchronous events. We will now terminate any outstanding
+ * asynchronous probe calls by forcing a timeout and then remove the
+ * driver before we return, which should flush the pending
+ * asynchronous probe calls.
+ *
+ * Otherwise, if they completed without errors or warnings, report
+ * successful completion.
+ */
+ if (atomic_read(&async_completed) != async_id) {
+ pr_err("async events still pending, forcing timeout\n");
+ atomic_inc(&timeout);
+ err = -ETIMEDOUT;
+ } else if (!atomic_read(&errors) && !atomic_read(&warnings)) {
+ pr_info("completed successfully\n");
+ return 0;
+ }
+
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+err_unregister_sync_devs:
+ while (sync_id--)
+ platform_device_unregister(sync_dev[sync_id]);
+err_unregister_async_driver:
+ platform_driver_unregister(&async_driver);
+err_unregister_async_devs:
+ while (async_id--)
+ platform_device_unregister(async_dev[async_id]);
+
+ /*
+ * If err is already set, count that as an additional error for the
+ * test. Otherwise report an invalid argument error without counting
+ * it, since we should only have reached this point as a result of
+ * errors or warnings reported by the probe routine.
+ */
+ if (err)
+ atomic_inc(&errors);
+ else
+ err = -EINVAL;
+
+ pr_err("Test failed with %d errors and %d warnings\n",
+ atomic_read(&errors), atomic_read(&warnings));
+
+ return err;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+ int id = 2;
+
+ platform_driver_unregister(&async_driver);
+ platform_driver_unregister(&sync_driver);
+
+ while (id--)
+ platform_device_unregister(sync_dev[id]);
+
+ id = NR_CPUS * 2;
+ while (id--)
+ platform_device_unregister(async_dev[id]);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
new file mode 100644
index 000000000..89f98be5c
--- /dev/null
+++ b/drivers/base/topology.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/base/topology.c - Populate sysfs with cpu topology information
+ *
+ * Written by: Zhang Yanmin, Intel Corporation
+ *
+ * Copyright (C) 2006, Intel Corp.
+ *
+ * All rights reserved.
+ */
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <linux/topology.h>
+
+#define define_id_show_func(name, fmt) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return sysfs_emit(buf, fmt "\n", topology_##name(dev->id)); \
+}
+
+#define define_siblings_read_func(name, mask) \
+static ssize_t name##_read(struct file *file, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ struct device *dev = kobj_to_dev(kobj); \
+ \
+ return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \
+ off, count); \
+} \
+ \
+static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ struct device *dev = kobj_to_dev(kobj); \
+ \
+ return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \
+ off, count); \
+}
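+
+/*
+ * For illustration only: define_id_show_func(physical_package_id, "%d")
+ * below expands to roughly the following show routine (a sketch of the
+ * macro expansion, not an additional definition):
+ *
+ *   static ssize_t physical_package_id_show(struct device *dev,
+ *                                           struct device_attribute *attr,
+ *                                           char *buf)
+ *   {
+ *           return sysfs_emit(buf, "%d\n",
+ *                             topology_physical_package_id(dev->id));
+ *   }
+ */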
+
+define_id_show_func(physical_package_id, "%d");
+static DEVICE_ATTR_RO(physical_package_id);
+
+#ifdef TOPOLOGY_DIE_SYSFS
+define_id_show_func(die_id, "%d");
+static DEVICE_ATTR_RO(die_id);
+#endif
+
+#ifdef TOPOLOGY_CLUSTER_SYSFS
+define_id_show_func(cluster_id, "%d");
+static DEVICE_ATTR_RO(cluster_id);
+#endif
+
+define_id_show_func(core_id, "%d");
+static DEVICE_ATTR_RO(core_id);
+
+define_id_show_func(ppin, "0x%llx");
+static DEVICE_ATTR_ADMIN_RO(ppin);
+
+define_siblings_read_func(thread_siblings, sibling_cpumask);
+static BIN_ATTR_RO(thread_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(thread_siblings_list, CPULIST_FILE_MAX_BYTES);
+
+define_siblings_read_func(core_cpus, sibling_cpumask);
+static BIN_ATTR_RO(core_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(core_cpus_list, CPULIST_FILE_MAX_BYTES);
+
+define_siblings_read_func(core_siblings, core_cpumask);
+static BIN_ATTR_RO(core_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(core_siblings_list, CPULIST_FILE_MAX_BYTES);
+
+#ifdef TOPOLOGY_CLUSTER_SYSFS
+define_siblings_read_func(cluster_cpus, cluster_cpumask);
+static BIN_ATTR_RO(cluster_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(cluster_cpus_list, CPULIST_FILE_MAX_BYTES);
+#endif
+
+#ifdef TOPOLOGY_DIE_SYSFS
+define_siblings_read_func(die_cpus, die_cpumask);
+static BIN_ATTR_RO(die_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(die_cpus_list, CPULIST_FILE_MAX_BYTES);
+#endif
+
+define_siblings_read_func(package_cpus, core_cpumask);
+static BIN_ATTR_RO(package_cpus, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(package_cpus_list, CPULIST_FILE_MAX_BYTES);
+
+#ifdef TOPOLOGY_BOOK_SYSFS
+define_id_show_func(book_id, "%d");
+static DEVICE_ATTR_RO(book_id);
+define_siblings_read_func(book_siblings, book_cpumask);
+static BIN_ATTR_RO(book_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(book_siblings_list, CPULIST_FILE_MAX_BYTES);
+#endif
+
+#ifdef TOPOLOGY_DRAWER_SYSFS
+define_id_show_func(drawer_id, "%d");
+static DEVICE_ATTR_RO(drawer_id);
+define_siblings_read_func(drawer_siblings, drawer_cpumask);
+static BIN_ATTR_RO(drawer_siblings, CPUMAP_FILE_MAX_BYTES);
+static BIN_ATTR_RO(drawer_siblings_list, CPULIST_FILE_MAX_BYTES);
+#endif
+
+static struct bin_attribute *bin_attrs[] = {
+ &bin_attr_core_cpus,
+ &bin_attr_core_cpus_list,
+ &bin_attr_thread_siblings,
+ &bin_attr_thread_siblings_list,
+ &bin_attr_core_siblings,
+ &bin_attr_core_siblings_list,
+#ifdef TOPOLOGY_CLUSTER_SYSFS
+ &bin_attr_cluster_cpus,
+ &bin_attr_cluster_cpus_list,
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
+ &bin_attr_die_cpus,
+ &bin_attr_die_cpus_list,
+#endif
+ &bin_attr_package_cpus,
+ &bin_attr_package_cpus_list,
+#ifdef TOPOLOGY_BOOK_SYSFS
+ &bin_attr_book_siblings,
+ &bin_attr_book_siblings_list,
+#endif
+#ifdef TOPOLOGY_DRAWER_SYSFS
+ &bin_attr_drawer_siblings,
+ &bin_attr_drawer_siblings_list,
+#endif
+ NULL
+};
+
+static struct attribute *default_attrs[] = {
+ &dev_attr_physical_package_id.attr,
+#ifdef TOPOLOGY_DIE_SYSFS
+ &dev_attr_die_id.attr,
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
+ &dev_attr_cluster_id.attr,
+#endif
+ &dev_attr_core_id.attr,
+#ifdef TOPOLOGY_BOOK_SYSFS
+ &dev_attr_book_id.attr,
+#endif
+#ifdef TOPOLOGY_DRAWER_SYSFS
+ &dev_attr_drawer_id.attr,
+#endif
+ &dev_attr_ppin.attr,
+ NULL
+};
+
+static umode_t topology_is_visible(struct kobject *kobj,
+ struct attribute *attr, int unused)
+{
+ if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id))
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group topology_attr_group = {
+ .attrs = default_attrs,
+ .bin_attrs = bin_attrs,
+ .is_visible = topology_is_visible,
+ .name = "topology"
+};
+
+/* Add/Remove cpu_topology interface for CPU device */
+static int topology_add_dev(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ return sysfs_create_group(&dev->kobj, &topology_attr_group);
+}
+
+static int topology_remove_dev(unsigned int cpu)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ sysfs_remove_group(&dev->kobj, &topology_attr_group);
+ return 0;
+}
+
+static int __init topology_sysfs_init(void)
+{
+ return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
+ "base/topology:prepare", topology_add_dev,
+ topology_remove_dev);
+}
+
+device_initcall(topology_sysfs_init);
diff --git a/drivers/base/trace.c b/drivers/base/trace.c
new file mode 100644
index 000000000..b24b0a309
--- /dev/null
+++ b/drivers/base/trace.c
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device core Trace Support
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
new file mode 100644
index 000000000..3192e18f8
--- /dev/null
+++ b/drivers/base/trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Device core Trace Support
+ * Copyright (C) 2021, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dev
+
+#if !defined(__DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __DEV_TRACE_H
+
+#include <linux/device.h>
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+DECLARE_EVENT_CLASS(devres,
+ TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size),
+ TP_ARGS(dev, op, node, name, size),
+ TP_STRUCT__entry(
+ __string(devname, dev_name(dev))
+ __field(struct device *, dev)
+ __field(const char *, op)
+ __field(void *, node)
+ __field(const char *, name)
+ __field(size_t, size)
+ ),
+ TP_fast_assign(
+ __assign_str(devname, dev_name(dev));
+ __entry->op = op;
+ __entry->node = node;
+ __entry->name = name;
+ __entry->size = size;
+ ),
+ TP_printk("%s %3s %p %s (%zu bytes)", __get_str(devname),
+ __entry->op, __entry->node, __entry->name, __entry->size)
+);
+
+DEFINE_EVENT(devres, devres_log,
+ TP_PROTO(struct device *dev, const char *op, void *node, const char *name, size_t size),
+ TP_ARGS(dev, op, node, name, size)
+);
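+
+/*
+ * A minimal usage sketch (not part of this header): DEFINE_EVENT() above
+ * generates a trace_devres_log() call that a caller can emit as follows,
+ * where the "ADD" op string and the node, name and size values are
+ * illustrative and supplied by the caller:
+ *
+ *   trace_devres_log(dev, "ADD", node, name, size);
+ */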
+
+#endif /* __DEV_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
new file mode 100644
index 000000000..09ee2a1e3
--- /dev/null
+++ b/drivers/base/transport_class.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * transport_class.c - implementation of generic transport classes
+ * using attribute_containers
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * The basic idea here is to allow any "device controller" (which
+ * would most often be a Host Bus Adapter) to use the services of one
+ * or more transport classes for performing transport-specific
+ * services. Transport-specific services are things that the generic
+ * command layer doesn't want to know about (speed settings, line
+ * conditioning, etc.), but which the user might be interested in.
+ * Thus, the HBAs use the routines exported by the transport classes
+ * to perform these functions. The transport classes export certain
+ * values to the user via sysfs using attribute containers.
+ *
+ * Note: because not every HBA will care about every transport
+ * attribute, there's a many-to-one relationship that goes like this:
+ *
+ * transport class<-----attribute container<----class device
+ *
+ * Usually the attribute container is per-HBA, but the design doesn't
+ * mandate that. Although most of the services will be specific to
+ * the actual external storage connection used by the HBA, the generic
+ * transport class is framed entirely in terms of generic devices to
+ * allow it to be used by any physical HBA in the system.
+ */
+#include <linux/export.h>
+#include <linux/attribute_container.h>
+#include <linux/transport_class.h>
+
+static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev);
+
+/**
+ * transport_class_register - register an initial transport class
+ *
+ * @tclass: a pointer to the transport class structure to be initialised
+ *
+ * The transport class contains an embedded class which is used to
+ * identify it. The caller should zero the structure and initialise
+ * the embedded class with the transport class's unique name; the
+ * DECLARE_TRANSPORT_CLASS() macro does this for you (declared
+ * classes still must be registered). A usage sketch follows this
+ * function.
+ *
+ * Returns 0 on success or error on failure.
+ */
+int transport_class_register(struct transport_class *tclass)
+{
+ return class_register(&tclass->class);
+}
+EXPORT_SYMBOL_GPL(transport_class_register);
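+
+/*
+ * A minimal usage sketch (hypothetical "foo" transport, not part of this
+ * file): DECLARE_TRANSPORT_CLASS() from <linux/transport_class.h> fills in
+ * the embedded class name and the setup/remove/configure callbacks, and
+ * the declared class is then registered from the owning module's init
+ * path:
+ *
+ *   static DECLARE_TRANSPORT_CLASS(foo_transport_class, "foo_transport",
+ *                                  foo_setup, foo_remove, foo_configure);
+ *
+ *   static int __init foo_transport_init(void)
+ *   {
+ *           return transport_class_register(&foo_transport_class);
+ *   }
+ */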
+
+/**
+ * transport_class_unregister - unregister a previously registered class
+ *
+ * @tclass: The transport class to unregister
+ *
+ * Must be called prior to deallocating the memory for the transport
+ * class.
+ */
+void transport_class_unregister(struct transport_class *tclass)
+{
+ class_unregister(&tclass->class);
+}
+EXPORT_SYMBOL_GPL(transport_class_unregister);
+
+static int anon_transport_dummy_function(struct transport_container *tc,
+ struct device *dev,
+ struct device *cdev)
+{
+ /* do nothing */
+ return 0;
+}
+
+/**
+ * anon_transport_class_register - register an anonymous class
+ *
+ * @atc: The anon transport class to register
+ *
+ * The anonymous transport class contains both a transport class and a
+ * container. The idea of an anonymous class is that it never
+ * actually has any device attributes associated with it (and thus
+ * saves on container storage). So it can only be used for triggering
+ * events. Zero the structure first and then use
+ * DECLARE_ANON_TRANSPORT_CLASS() to initialise the anon transport
+ * class storage; a usage sketch follows this function.
+ */
+int anon_transport_class_register(struct anon_transport_class *atc)
+{
+ int error;
+ atc->container.class = &atc->tclass.class;
+ attribute_container_set_no_classdevs(&atc->container);
+ error = attribute_container_register(&atc->container);
+ if (error)
+ return error;
+ atc->tclass.setup = anon_transport_dummy_function;
+ atc->tclass.remove = anon_transport_dummy_function;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(anon_transport_class_register);
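+
+/*
+ * A minimal usage sketch (hypothetical names, not part of this file): the
+ * anon class is declared with a device match callback and a configure
+ * callback via DECLARE_ANON_TRANSPORT_CLASS() from
+ * <linux/transport_class.h> and then registered; it is only ever used to
+ * trigger events such as configure:
+ *
+ *   static DECLARE_ANON_TRANSPORT_CLASS(foo_device_class,
+ *                                       foo_dev_match, foo_configure);
+ *
+ *   static int __init foo_init(void)
+ *   {
+ *           return anon_transport_class_register(&foo_device_class);
+ *   }
+ */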
+
+/**
+ * anon_transport_class_unregister - unregister an anon class
+ *
+ * @atc: Pointer to the anon transport class to unregister
+ *
+ * Must be called prior to deallocating the memory for the anon
+ * transport class.
+ */
+void anon_transport_class_unregister(struct anon_transport_class *atc)
+{
+ if (unlikely(attribute_container_unregister(&atc->container)))
+ BUG();
+}
+EXPORT_SYMBOL_GPL(anon_transport_class_unregister);
+
+static int transport_setup_classdev(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev)
+{
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+
+ if (tclass->setup)
+ tclass->setup(tcont, dev, classdev);
+
+ return 0;
+}
+
+/**
+ * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+ * the HBA itself or a device remote across the HBA bus). This
+ * routine is simply a trigger point to see if any set of transport
+ * classes wishes to associate with the added device. This allocates
+ * storage for the class device and initialises it, but does not yet
+ * add it to the system or add attributes to it (you do this with
+ * transport_add_device). If you have no need for a separate setup
+ * and add operations, use transport_register_device (see
+ * transport_class.h).
+ */
+
+void transport_setup_device(struct device *dev)
+{
+ attribute_container_add_device(dev, transport_setup_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_setup_device);
+
+static int transport_add_class_device(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev)
+{
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ int error = attribute_container_add_class_device(classdev);
+ struct transport_container *tcont =
+ attribute_container_to_transport_container(cont);
+
+ if (error)
+ goto err_remove;
+
+ if (tcont->statistics) {
+ error = sysfs_create_group(&classdev->kobj, tcont->statistics);
+ if (error)
+ goto err_del;
+ }
+
+ return 0;
+
+err_del:
+ attribute_container_class_device_del(classdev);
+err_remove:
+ if (tclass->remove)
+ tclass->remove(tcont, dev, classdev);
+
+ return error;
+}
+
+
+/**
+ * transport_add_device - declare a new dev for transport class association
+ *
+ * @dev: the generic device representing the entity being added
+ *
+ * Usually, dev represents some component in the HBA system (either
+ * the HBA itself or a device remote across the HBA bus). This
+ * routine is simply a trigger point used to add the device to the
+ * system and register attributes for it.
+ */
+int transport_add_device(struct device *dev)
+{
+ return attribute_container_device_trigger_safe(dev,
+ transport_add_class_device,
+ transport_remove_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_add_device);
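+
+/*
+ * A minimal usage sketch (hypothetical HBA probe path, not part of this
+ * file): setup allocates and initialises the class devices, add makes them
+ * visible; drivers that do not need the two steps separately can call
+ * transport_register_device() from <linux/transport_class.h>, which does
+ * both:
+ *
+ *   transport_setup_device(&hba->dev);
+ *   ... finish initialising the HBA ...
+ *   error = transport_add_device(&hba->dev);
+ */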
+
+static int transport_configure(struct attribute_container *cont,
+ struct device *dev,
+ struct device *cdev)
+{
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+ struct transport_container *tcont = attribute_container_to_transport_container(cont);
+
+ if (tclass->configure)
+ tclass->configure(tcont, dev, cdev);
+
+ return 0;
+}
+
+/**
+ * transport_configure_device - configure an already set up device
+ *
+ * @dev: generic device representing device to be configured
+ *
+ * The idea of configure is simply to provide a point within the setup
+ * process to allow the transport class to extract information from a
+ * device after it has been setup. This is used in SCSI because we
+ * have to have a setup device to begin using the HBA, but after we
+ * send the initial inquiry, we use configure to extract the device
+ * parameters. The device need not have been added to be configured.
+ */
+void transport_configure_device(struct device *dev)
+{
+ attribute_container_device_trigger(dev, transport_configure);
+}
+EXPORT_SYMBOL_GPL(transport_configure_device);
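+
+/*
+ * A minimal usage sketch (continuing the hypothetical HBA example, not
+ * part of this file): once the device has been probed far enough that the
+ * transport parameters are known, the driver triggers configure so the
+ * transport class can pick them up:
+ *
+ *   transport_configure_device(&hba->dev);
+ */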
+
+static int transport_remove_classdev(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev)
+{
+ struct transport_container *tcont =
+ attribute_container_to_transport_container(cont);
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+
+ if (tclass->remove)
+ tclass->remove(tcont, dev, classdev);
+
+ if (tclass->remove != anon_transport_dummy_function) {
+ if (tcont->statistics)
+ sysfs_remove_group(&classdev->kobj, tcont->statistics);
+ attribute_container_class_device_del(classdev);
+ }
+
+ return 0;
+}
+
+
+/**
+ * transport_remove_device - remove the visibility of a device
+ *
+ * @dev: generic device to remove
+ *
+ * This call removes the visibility of the device (to the user from
+ * sysfs), but does not destroy it. To eliminate a device entirely
+ * you must also call transport_destroy_device. If you don't need to
+ * do remove and destroy as separate operations, use
+ * transport_unregister_device() (see transport_class.h) which will
+ * perform both calls for you.
+ */
+void transport_remove_device(struct device *dev)
+{
+ attribute_container_device_trigger(dev, transport_remove_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_remove_device);
+
+static void transport_destroy_classdev(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev)
+{
+ struct transport_class *tclass = class_to_transport_class(cont->class);
+
+ if (tclass->remove != anon_transport_dummy_function)
+ put_device(classdev);
+}
+
+
+/**
+ * transport_destroy_device - destroy a removed device
+ *
+ * @dev: device to eliminate from the transport class.
+ *
+ * This call triggers the elimination of storage associated with the
+ * transport classdev. Note: all it really does is relinquish a
+ * reference to the classdev. The memory will not be freed until the
+ * last reference is dropped. Note also that the classdev retains a
+ * reference count on dev, so dev too will remain for as long as the
+ * transport class device remains around.
+ */
+void transport_destroy_device(struct device *dev)
+{
+ attribute_container_remove_device(dev, transport_destroy_classdev);
+}
+EXPORT_SYMBOL_GPL(transport_destroy_device);
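+
+/*
+ * A minimal teardown sketch (mirroring the hypothetical HBA example above,
+ * not part of this file): removal and destruction are separate steps;
+ * transport_unregister_device() from <linux/transport_class.h> performs
+ * both when they are not needed separately:
+ *
+ *   transport_remove_device(&hba->dev);
+ *   ... tear down the rest of the HBA ...
+ *   transport_destroy_device(&hba->dev);
+ */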