Diffstat
64 files changed, 17443 insertions, 0 deletions
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig new file mode 100644 index 000000000..80b5fd44a --- /dev/null +++ b/drivers/of/Kconfig @@ -0,0 +1,97 @@ +# SPDX-License-Identifier: GPL-2.0 +config DTC + bool + +menuconfig OF + bool "Device Tree and Open Firmware support" + help + This option enables the device tree infrastructure. + It is automatically selected by platforms that need it or can + be enabled manually for unittests, overlays or + compile-coverage. + +if OF + +config OF_UNITTEST + bool "Device Tree runtime unit tests" + depends on !SPARC + select IRQ_DOMAIN + select OF_EARLY_FLATTREE + select OF_RESOLVE + help + This option builds in test cases for the device tree infrastructure + that are executed once at boot time, and the results dumped to the + console. + + If unsure, say N here, but this option is safe to enable. + +config OF_ALL_DTBS + bool "Build all Device Tree Blobs" + depends on COMPILE_TEST + select DTC + help + This option builds all possible Device Tree Blobs (DTBs) for the + current architecture. + + If unsure, say N here, but this option is safe to enable. + +config OF_FLATTREE + bool + select DTC + select LIBFDT + select CRC32 + +config OF_EARLY_FLATTREE + bool + select DMA_DECLARE_COHERENT if HAS_DMA + select OF_FLATTREE + +config OF_PROMTREE + bool + +config OF_KOBJ + def_bool SYSFS + +# Hardly any platforms need this. It is safe to select, but only do so if you +# need it. +config OF_DYNAMIC + bool "Support for dynamic device trees" if OF_UNITTEST + select OF_KOBJ + help + On some platforms, the device tree can be manipulated at runtime. + While this option is selected automatically on such platforms, you + can enable it manually to improve device tree unit test coverage. + +config OF_ADDRESS + def_bool y + depends on !SPARC && (HAS_IOMEM || UML) + +config OF_IRQ + def_bool y + depends on !SPARC && IRQ_DOMAIN + +config OF_RESERVED_MEM + def_bool OF_EARLY_FLATTREE + +config OF_RESOLVE + bool + +config OF_OVERLAY + bool "Device Tree overlays" + select OF_DYNAMIC + select OF_FLATTREE + select OF_RESOLVE + help + Overlays are a method to dynamically modify part of the kernel's + device tree with dynamically loaded data. + While this option is selected automatically when needed, you can + enable it manually to improve device tree unit test coverage. 
+ +config OF_NUMA + bool + +config OF_DMA_DEFAULT_COHERENT + # arches should select this if DMA is coherent by default for OF devices + bool + +endif # OF diff --git a/drivers/of/Makefile b/drivers/of/Makefile new file mode 100644 index 000000000..e0360a443 --- /dev/null +++ b/drivers/of/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y = base.o device.o platform.o property.o +obj-$(CONFIG_OF_KOBJ) += kobj.o +obj-$(CONFIG_OF_DYNAMIC) += dynamic.o +obj-$(CONFIG_OF_FLATTREE) += fdt.o +obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o +obj-$(CONFIG_OF_PROMTREE) += pdt.o +obj-$(CONFIG_OF_ADDRESS) += address.o +obj-$(CONFIG_OF_IRQ) += irq.o +obj-$(CONFIG_OF_UNITTEST) += unittest.o +obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o +obj-$(CONFIG_OF_RESOLVE) += resolver.o +obj-$(CONFIG_OF_OVERLAY) += overlay.o +obj-$(CONFIG_OF_NUMA) += of_numa.o + +ifdef CONFIG_KEXEC_FILE +ifdef CONFIG_OF_FLATTREE +obj-y += kexec.o +endif +endif + +obj-$(CONFIG_OF_UNITTEST) += unittest-data/ diff --git a/drivers/of/address.c b/drivers/of/address.c new file mode 100644 index 000000000..67763e5b8 --- /dev/null +++ b/drivers/of/address.c @@ -0,0 +1,1111 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "OF: " fmt + +#include <linux/device.h> +#include <linux/fwnode.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/logic_pio.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/dma-direct.h> /* for bus_dma_region */ + +#include "of_private.h" + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 +#define OF_CHECK_ADDR_COUNT(na) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS) +#define OF_CHECK_COUNTS(na, ns) (OF_CHECK_ADDR_COUNT(na) && (ns) > 0) + +static struct of_bus *of_match_bus(struct device_node *np); +static int __of_address_to_resource(struct device_node *dev, int index, + int bar_no, struct resource *r); +static bool of_mmio_is_nonposted(struct device_node *np); + +/* Debug utility */ +#ifdef DEBUG +static void of_dump_addr(const char *s, const __be32 *addr, int na) +{ + pr_debug("%s", s); + while (na--) + pr_cont(" %08x", be32_to_cpu(*(addr++))); + pr_cont("\n"); +} +#else +static void of_dump_addr(const char *s, const __be32 *addr, int na) { } +#endif + +/* Callbacks for bus specific translators */ +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *parent); + void (*count_cells)(struct device_node *child, + int *addrc, int *sizec); + u64 (*map)(__be32 *addr, const __be32 *range, + int na, int ns, int pna); + int (*translate)(__be32 *addr, u64 offset, int na); + bool has_flags; + unsigned int (*get_flags)(const __be32 *addr); +}; + +/* + * Default translator (generic bus) + */ + +static void of_bus_default_count_cells(struct device_node *dev, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = of_n_addr_cells(dev); + if (sizec) + *sizec = of_n_size_cells(dev); +} + +static u64 of_bus_default_map(__be32 *addr, const __be32 *range, + int na, int ns, int pna) +{ + u64 cp, s, da; + + cp = of_read_number(range, na); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr, na); + + pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", cp, s, da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int of_bus_default_translate(__be32 *addr, u64 offset, int na) +{ + u64 a = of_read_number(addr, na); + memset(addr, 
0, na * 4); + a += offset; + if (na > 1) + addr[na - 2] = cpu_to_be32(a >> 32); + addr[na - 1] = cpu_to_be32(a & 0xffffffffu); + + return 0; +} + +static unsigned int of_bus_default_get_flags(const __be32 *addr) +{ + return IORESOURCE_MEM; +} + +#ifdef CONFIG_PCI +static unsigned int of_bus_pci_get_flags(const __be32 *addr) +{ + unsigned int flags = 0; + u32 w = be32_to_cpup(addr); + + if (!IS_ENABLED(CONFIG_PCI)) + return 0; + + switch((w >> 24) & 0x03) { + case 0x01: + flags |= IORESOURCE_IO; + break; + case 0x02: /* 32 bits */ + flags |= IORESOURCE_MEM; + break; + + case 0x03: /* 64 bits */ + flags |= IORESOURCE_MEM | IORESOURCE_MEM_64; + break; + } + if (w & 0x40000000) + flags |= IORESOURCE_PREFETCH; + return flags; +} + +/* + * PCI bus specific translator + */ + +static bool of_node_is_pcie(struct device_node *np) +{ + bool is_pcie = of_node_name_eq(np, "pcie"); + + if (is_pcie) + pr_warn_once("%pOF: Missing device_type\n", np); + + return is_pcie; +} + +static int of_bus_pci_match(struct device_node *np) +{ + /* + * "pciex" is PCI Express + * "vci" is for the /chaos bridge on 1st-gen PCI powermacs + * "ht" is hypertransport + * + * If none of the device_type match, and that the node name is + * "pcie", accept the device as PCI (with a warning). + */ + return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") || + of_node_is_type(np, "vci") || of_node_is_type(np, "ht") || + of_node_is_pcie(np); +} + +static void of_bus_pci_count_cells(struct device_node *np, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 3; + if (sizec) + *sizec = 2; +} + +static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns, + int pna) +{ + u64 cp, s, da; + unsigned int af, rf; + + af = of_bus_pci_get_flags(addr); + rf = of_bus_pci_get_flags(range); + + /* Check address type match */ + if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO)) + return OF_BAD_ADDR; + + /* Read address values, skipping high cell */ + cp = of_read_number(range + 1, na - 1); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr + 1, na - 1); + + pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", cp, s, da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int of_bus_pci_translate(__be32 *addr, u64 offset, int na) +{ + return of_bus_default_translate(addr + 1, offset, na - 1); +} +#endif /* CONFIG_PCI */ + +int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + + if (!IS_ENABLED(CONFIG_PCI)) + return -ENOSYS; + + return __of_address_to_resource(dev, -1, bar, r); +} +EXPORT_SYMBOL_GPL(of_pci_address_to_resource); + +/* + * of_pci_range_to_resource - Create a resource from an of_pci_range + * @range: the PCI range that describes the resource + * @np: device node where the range belongs to + * @res: pointer to a valid resource that will be updated to + * reflect the values contained in the range. + * + * Returns EINVAL if the range cannot be converted to resource. + * + * Note that if the range is an IO range, the resource will be converted + * using pci_address_to_pio() which can fail if it is called too early or + * if the range cannot be matched to any host bridge IO space (our case here). + * To guard against that we try to register the IO range first. + * If that fails we know that pci_address_to_pio() will do too. 
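 *
 * Illustrative caller sketch (not part of the change itself; "parser",
 * "range", "res", "err" and "np" are hypothetical locals): a host bridge
 * driver typically walks its "ranges" with of_pci_range_parser_init() and
 * the for_each_of_pci_range() helper from <linux/of_address.h>, skipping
 * entries that cannot be converted:
 *
 *	for_each_of_pci_range(&parser, &range) {
 *		err = of_pci_range_to_resource(&range, np, &res);
 *		if (err)
 *			continue;
 *	}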
+ */ +int of_pci_range_to_resource(struct of_pci_range *range, + struct device_node *np, struct resource *res) +{ + int err; + res->flags = range->flags; + res->parent = res->child = res->sibling = NULL; + res->name = np->full_name; + + if (!IS_ENABLED(CONFIG_PCI)) + return -ENOSYS; + + if (res->flags & IORESOURCE_IO) { + unsigned long port; + err = pci_register_io_range(&np->fwnode, range->cpu_addr, + range->size); + if (err) + goto invalid_range; + port = pci_address_to_pio(range->cpu_addr); + if (port == (unsigned long)-1) { + err = -EINVAL; + goto invalid_range; + } + res->start = port; + } else { + if ((sizeof(resource_size_t) < 8) && + upper_32_bits(range->cpu_addr)) { + err = -EINVAL; + goto invalid_range; + } + + res->start = range->cpu_addr; + } + res->end = res->start + range->size - 1; + return 0; + +invalid_range: + res->start = (resource_size_t)OF_BAD_ADDR; + res->end = (resource_size_t)OF_BAD_ADDR; + return err; +} +EXPORT_SYMBOL(of_pci_range_to_resource); + +/* + * ISA bus specific translator + */ + +static int of_bus_isa_match(struct device_node *np) +{ + return of_node_name_eq(np, "isa"); +} + +static void of_bus_isa_count_cells(struct device_node *child, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 2; + if (sizec) + *sizec = 1; +} + +static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns, + int pna) +{ + u64 cp, s, da; + + /* Check address type match */ + if ((addr[0] ^ range[0]) & cpu_to_be32(1)) + return OF_BAD_ADDR; + + /* Read address values, skipping high cell */ + cp = of_read_number(range + 1, na - 1); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr + 1, na - 1); + + pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", cp, s, da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int of_bus_isa_translate(__be32 *addr, u64 offset, int na) +{ + return of_bus_default_translate(addr + 1, offset, na - 1); +} + +static unsigned int of_bus_isa_get_flags(const __be32 *addr) +{ + unsigned int flags = 0; + u32 w = be32_to_cpup(addr); + + if (w & 1) + flags |= IORESOURCE_IO; + else + flags |= IORESOURCE_MEM; + return flags; +} + +/* + * Array of bus specific translators + */ + +static struct of_bus of_busses[] = { +#ifdef CONFIG_PCI + /* PCI */ + { + .name = "pci", + .addresses = "assigned-addresses", + .match = of_bus_pci_match, + .count_cells = of_bus_pci_count_cells, + .map = of_bus_pci_map, + .translate = of_bus_pci_translate, + .has_flags = true, + .get_flags = of_bus_pci_get_flags, + }, +#endif /* CONFIG_PCI */ + /* ISA */ + { + .name = "isa", + .addresses = "reg", + .match = of_bus_isa_match, + .count_cells = of_bus_isa_count_cells, + .map = of_bus_isa_map, + .translate = of_bus_isa_translate, + .has_flags = true, + .get_flags = of_bus_isa_get_flags, + }, + /* Default */ + { + .name = "default", + .addresses = "reg", + .match = NULL, + .count_cells = of_bus_default_count_cells, + .map = of_bus_default_map, + .translate = of_bus_default_translate, + .get_flags = of_bus_default_get_flags, + }, +}; + +static struct of_bus *of_match_bus(struct device_node *np) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(of_busses); i++) + if (!of_busses[i].match || of_busses[i].match(np)) + return &of_busses[i]; + BUG(); + return NULL; +} + +static int of_empty_ranges_quirk(struct device_node *np) +{ + if (IS_ENABLED(CONFIG_PPC)) { + /* To save cycles, we cache the result for global "Mac" setting */ + static int quirk_state = -1; + + /* PA-SEMI sdc DT bug */ + if (of_device_is_compatible(np, "1682m-sdc")) + 
return true; + + /* Make quirk cached */ + if (quirk_state < 0) + quirk_state = + of_machine_is_compatible("Power Macintosh") || + of_machine_is_compatible("MacRISC"); + return quirk_state; + } + return false; +} + +static int of_translate_one(struct device_node *parent, struct of_bus *bus, + struct of_bus *pbus, __be32 *addr, + int na, int ns, int pna, const char *rprop) +{ + const __be32 *ranges; + unsigned int rlen; + int rone; + u64 offset = OF_BAD_ADDR; + + /* + * Normally, an absence of a "ranges" property means we are + * crossing a non-translatable boundary, and thus the addresses + * below the current cannot be converted to CPU physical ones. + * Unfortunately, while this is very clear in the spec, it's not + * what Apple understood, and they do have things like /uni-n or + * /ht nodes with no "ranges" property and a lot of perfectly + * useable mapped devices below them. Thus we treat the absence of + * "ranges" as equivalent to an empty "ranges" property which means + * a 1:1 translation at that level. It's up to the caller not to try + * to translate addresses that aren't supposed to be translated in + * the first place. --BenH. + * + * As far as we know, this damage only exists on Apple machines, so + * This code is only enabled on powerpc. --gcl + * + * This quirk also applies for 'dma-ranges' which frequently exist in + * child nodes without 'dma-ranges' in the parent nodes. --RobH + */ + ranges = of_get_property(parent, rprop, &rlen); + if (ranges == NULL && !of_empty_ranges_quirk(parent) && + strcmp(rprop, "dma-ranges")) { + pr_debug("no ranges; cannot translate\n"); + return 1; + } + if (ranges == NULL || rlen == 0) { + offset = of_read_number(addr, na); + memset(addr, 0, pna * 4); + pr_debug("empty ranges; 1:1 translation\n"); + goto finish; + } + + pr_debug("walking ranges...\n"); + + /* Now walk through the ranges */ + rlen /= 4; + rone = na + pna + ns; + for (; rlen >= rone; rlen -= rone, ranges += rone) { + offset = bus->map(addr, ranges, na, ns, pna); + if (offset != OF_BAD_ADDR) + break; + } + if (offset == OF_BAD_ADDR) { + pr_debug("not found !\n"); + return 1; + } + memcpy(addr, ranges + na, 4 * pna); + + finish: + of_dump_addr("parent translation for:", addr, pna); + pr_debug("with offset: %llx\n", offset); + + /* Translate it into parent bus space */ + return pbus->translate(addr, offset, pna); +} + +/* + * Translate an address from the device-tree into a CPU physical address, + * this walks up the tree and applies the various bus mappings on the + * way. + * + * Note: We consider that crossing any level with #size-cells == 0 to mean + * that translation is impossible (that is we are not dealing with a value + * that can be mapped to a cpu physical address). This is not really specified + * that way, but this is traditionally the way IBM at least do things + * + * Whenever the translation fails, the *host pointer will be set to the + * device that had registered logical PIO mapping, and the return code is + * relative to that node. 
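 *
 * A worked example with hypothetical numbers (one address cell and one
 * size cell assumed on both sides): given a parent level with
 *	ranges = <0x0  0x80000000  0x100000>;
 * a child address of 0x1000 maps to CPU physical address 0x80001000,
 * while a child address at or beyond 0x100000 misses every ranges entry
 * and the walk ends with OF_BAD_ADDR.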
+ */ +static u64 __of_translate_address(struct device_node *dev, + struct device_node *(*get_parent)(const struct device_node *), + const __be32 *in_addr, const char *rprop, + struct device_node **host) +{ + struct device_node *parent = NULL; + struct of_bus *bus, *pbus; + __be32 addr[OF_MAX_ADDR_CELLS]; + int na, ns, pna, pns; + u64 result = OF_BAD_ADDR; + + pr_debug("** translation for device %pOF **\n", dev); + + /* Increase refcount at current level */ + of_node_get(dev); + + *host = NULL; + /* Get parent & match bus type */ + parent = get_parent(dev); + if (parent == NULL) + goto bail; + bus = of_match_bus(parent); + + /* Count address cells & copy address locally */ + bus->count_cells(dev, &na, &ns); + if (!OF_CHECK_COUNTS(na, ns)) { + pr_debug("Bad cell count for %pOF\n", dev); + goto bail; + } + memcpy(addr, in_addr, na * 4); + + pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n", + bus->name, na, ns, parent); + of_dump_addr("translating address:", addr, na); + + /* Translate */ + for (;;) { + struct logic_pio_hwaddr *iorange; + + /* Switch to parent bus */ + of_node_put(dev); + dev = parent; + parent = get_parent(dev); + + /* If root, we have finished */ + if (parent == NULL) { + pr_debug("reached root node\n"); + result = of_read_number(addr, na); + break; + } + + /* + * For indirectIO device which has no ranges property, get + * the address from reg directly. + */ + iorange = find_io_range_by_fwnode(&dev->fwnode); + if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) { + result = of_read_number(addr + 1, na - 1); + pr_debug("indirectIO matched(%pOF) 0x%llx\n", + dev, result); + *host = of_node_get(dev); + break; + } + + /* Get new parent bus and counts */ + pbus = of_match_bus(parent); + pbus->count_cells(dev, &pna, &pns); + if (!OF_CHECK_COUNTS(pna, pns)) { + pr_err("Bad cell count for %pOF\n", dev); + break; + } + + pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n", + pbus->name, pna, pns, parent); + + /* Apply bus translation */ + if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop)) + break; + + /* Complete the move up one level */ + na = pna; + ns = pns; + bus = pbus; + + of_dump_addr("one level translation:", addr, na); + } + bail: + of_node_put(parent); + of_node_put(dev); + + return result; +} + +u64 of_translate_address(struct device_node *dev, const __be32 *in_addr) +{ + struct device_node *host; + u64 ret; + + ret = __of_translate_address(dev, of_get_parent, + in_addr, "ranges", &host); + if (host) { + of_node_put(host); + return OF_BAD_ADDR; + } + + return ret; +} +EXPORT_SYMBOL(of_translate_address); + +#ifdef CONFIG_HAS_DMA +struct device_node *__of_get_dma_parent(const struct device_node *np) +{ + struct of_phandle_args args; + int ret, index; + + index = of_property_match_string(np, "interconnect-names", "dma-mem"); + if (index < 0) + return of_get_parent(np); + + ret = of_parse_phandle_with_args(np, "interconnects", + "#interconnect-cells", + index, &args); + if (ret < 0) + return of_get_parent(np); + + return of_node_get(args.np); +} +#endif + +static struct device_node *of_get_next_dma_parent(struct device_node *np) +{ + struct device_node *parent; + + parent = __of_get_dma_parent(np); + of_node_put(np); + + return parent; +} + +u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr) +{ + struct device_node *host; + u64 ret; + + ret = __of_translate_address(dev, __of_get_dma_parent, + in_addr, "dma-ranges", &host); + + if (host) { + of_node_put(host); + return OF_BAD_ADDR; + } + + return ret; +} 
+EXPORT_SYMBOL(of_translate_dma_address); + +const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no, + u64 *size, unsigned int *flags) +{ + const __be32 *prop; + unsigned int psize; + struct device_node *parent; + struct of_bus *bus; + int onesize, i, na, ns; + + /* Get parent & match bus type */ + parent = of_get_parent(dev); + if (parent == NULL) + return NULL; + bus = of_match_bus(parent); + if (strcmp(bus->name, "pci") && (bar_no >= 0)) { + of_node_put(parent); + return NULL; + } + bus->count_cells(dev, &na, &ns); + of_node_put(parent); + if (!OF_CHECK_ADDR_COUNT(na)) + return NULL; + + /* Get "reg" or "assigned-addresses" property */ + prop = of_get_property(dev, bus->addresses, &psize); + if (prop == NULL) + return NULL; + psize /= 4; + + onesize = na + ns; + for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) { + u32 val = be32_to_cpu(prop[0]); + /* PCI bus matches on BAR number instead of index */ + if (((bar_no >= 0) && ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0))) || + ((index >= 0) && (i == index))) { + if (size) + *size = of_read_number(prop + na, ns); + if (flags) + *flags = bus->get_flags(prop); + return prop; + } + } + return NULL; +} +EXPORT_SYMBOL(__of_get_address); + +static int parser_init(struct of_pci_range_parser *parser, + struct device_node *node, const char *name) +{ + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->na = of_bus_n_addr_cells(node); + parser->ns = of_bus_n_size_cells(node); + parser->dma = !strcmp(name, "dma-ranges"); + parser->bus = of_match_bus(node); + + parser->range = of_get_property(node, name, &rlen); + if (parser->range == NULL) + return -ENOENT; + + parser->end = parser->range + rlen / sizeof(__be32); + + return 0; +} + +int of_pci_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + return parser_init(parser, node, "ranges"); +} +EXPORT_SYMBOL_GPL(of_pci_range_parser_init); + +int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + return parser_init(parser, node, "dma-ranges"); +} +EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init); +#define of_dma_range_parser_init of_pci_dma_range_parser_init + +struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, + struct of_pci_range *range) +{ + int na = parser->na; + int ns = parser->ns; + int np = parser->pna + na + ns; + int busflag_na = 0; + + if (!range) + return NULL; + + if (!parser->range || parser->range + np > parser->end) + return NULL; + + range->flags = parser->bus->get_flags(parser->range); + + /* A extra cell for resource flags */ + if (parser->bus->has_flags) + busflag_na = 1; + + range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na); + + if (parser->dma) + range->cpu_addr = of_translate_dma_address(parser->node, + parser->range + na); + else + range->cpu_addr = of_translate_address(parser->node, + parser->range + na); + range->size = of_read_number(parser->range + parser->pna + na, ns); + + parser->range += np; + + /* Now consume following elements while they are contiguous */ + while (parser->range + np <= parser->end) { + u32 flags = 0; + u64 bus_addr, cpu_addr, size; + + flags = parser->bus->get_flags(parser->range); + bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na); + if (parser->dma) + cpu_addr = of_translate_dma_address(parser->node, + parser->range + na); + else + cpu_addr = of_translate_address(parser->node, + parser->range + na); + size = 
of_read_number(parser->range + parser->pna + na, ns); + + if (flags != range->flags) + break; + if (bus_addr != range->bus_addr + range->size || + cpu_addr != range->cpu_addr + range->size) + break; + + range->size += size; + parser->range += np; + } + + return range; +} +EXPORT_SYMBOL_GPL(of_pci_range_parser_one); + +static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr, + u64 size) +{ + u64 taddr; + unsigned long port; + struct device_node *host; + + taddr = __of_translate_address(dev, of_get_parent, + in_addr, "ranges", &host); + if (host) { + /* host-specific port access */ + port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size); + of_node_put(host); + } else { + /* memory-mapped I/O range */ + port = pci_address_to_pio(taddr); + } + + if (port == (unsigned long)-1) + return OF_BAD_ADDR; + + return port; +} + +static int __of_address_to_resource(struct device_node *dev, int index, int bar_no, + struct resource *r) +{ + u64 taddr; + const __be32 *addrp; + u64 size; + unsigned int flags; + const char *name = NULL; + + addrp = __of_get_address(dev, index, bar_no, &size, &flags); + if (addrp == NULL) + return -EINVAL; + + /* Get optional "reg-names" property to add a name to a resource */ + if (index >= 0) + of_property_read_string_index(dev, "reg-names", index, &name); + + if (flags & IORESOURCE_MEM) + taddr = of_translate_address(dev, addrp); + else if (flags & IORESOURCE_IO) + taddr = of_translate_ioport(dev, addrp, size); + else + return -EINVAL; + + if (taddr == OF_BAD_ADDR) + return -EINVAL; + memset(r, 0, sizeof(struct resource)); + + if (of_mmio_is_nonposted(dev)) + flags |= IORESOURCE_MEM_NONPOSTED; + + r->start = taddr; + r->end = taddr + size - 1; + r->flags = flags; + r->name = name ? name : dev->full_name; + + return 0; +} + +/** + * of_address_to_resource - Translate device tree address and return as resource + * @dev: Caller's Device Node + * @index: Index into the array + * @r: Pointer to resource array + * + * Note that if your address is a PIO address, the conversion will fail if + * the physical address can't be internally converted to an IO token with + * pci_address_to_pio(), that is because it's either called too early or it + * can't be matched to any host bridge IO space + */ +int of_address_to_resource(struct device_node *dev, int index, + struct resource *r) +{ + return __of_address_to_resource(dev, index, -1, r); +} +EXPORT_SYMBOL_GPL(of_address_to_resource); + +/** + * of_iomap - Maps the memory mapped IO for a given device_node + * @np: the device whose io range will be mapped + * @index: index of the io range + * + * Returns a pointer to the mapped memory + */ +void __iomem *of_iomap(struct device_node *np, int index) +{ + struct resource res; + + if (of_address_to_resource(np, index, &res)) + return NULL; + + if (res.flags & IORESOURCE_MEM_NONPOSTED) + return ioremap_np(res.start, resource_size(&res)); + else + return ioremap(res.start, resource_size(&res)); +} +EXPORT_SYMBOL(of_iomap); + +/* + * of_io_request_and_map - Requests a resource and maps the memory mapped IO + * for a given device_node + * @device: the device whose io range will be mapped + * @index: index of the io range + * @name: name "override" for the memory region request or NULL + * + * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded + * error code on failure. 
Usage example: + * + * base = of_io_request_and_map(node, 0, "foo"); + * if (IS_ERR(base)) + * return PTR_ERR(base); + */ +void __iomem *of_io_request_and_map(struct device_node *np, int index, + const char *name) +{ + struct resource res; + void __iomem *mem; + + if (of_address_to_resource(np, index, &res)) + return IOMEM_ERR_PTR(-EINVAL); + + if (!name) + name = res.name; + if (!request_mem_region(res.start, resource_size(&res), name)) + return IOMEM_ERR_PTR(-EBUSY); + + if (res.flags & IORESOURCE_MEM_NONPOSTED) + mem = ioremap_np(res.start, resource_size(&res)); + else + mem = ioremap(res.start, resource_size(&res)); + + if (!mem) { + release_mem_region(res.start, resource_size(&res)); + return IOMEM_ERR_PTR(-ENOMEM); + } + + return mem; +} +EXPORT_SYMBOL(of_io_request_and_map); + +#ifdef CONFIG_HAS_DMA +/** + * of_dma_get_range - Get DMA range info and put it into a map array + * @np: device node to get DMA range info + * @map: dma range structure to return + * + * Look in bottom up direction for the first "dma-ranges" property + * and parse it. Put the information into a DMA offset map array. + * + * dma-ranges format: + * DMA addr (dma_addr) : naddr cells + * CPU addr (phys_addr_t) : pna cells + * size : nsize cells + * + * It returns -ENODEV if "dma-ranges" property was not found for this + * device in the DT. + */ +int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map) +{ + struct device_node *node = of_node_get(np); + const __be32 *ranges = NULL; + bool found_dma_ranges = false; + struct of_range_parser parser; + struct of_range range; + struct bus_dma_region *r; + int len, num_ranges = 0; + int ret = 0; + + while (node) { + ranges = of_get_property(node, "dma-ranges", &len); + + /* Ignore empty ranges, they imply no translation required */ + if (ranges && len > 0) + break; + + /* Once we find 'dma-ranges', then a missing one is an error */ + if (found_dma_ranges && !ranges) { + ret = -ENODEV; + goto out; + } + found_dma_ranges = true; + + node = of_get_next_dma_parent(node); + } + + if (!node || !ranges) { + pr_debug("no dma-ranges found for node(%pOF)\n", np); + ret = -ENODEV; + goto out; + } + + of_dma_range_parser_init(&parser, node); + for_each_of_range(&parser, &range) { + if (range.cpu_addr == OF_BAD_ADDR) { + pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n", + range.bus_addr, node); + continue; + } + num_ranges++; + } + + if (!num_ranges) { + ret = -EINVAL; + goto out; + } + + r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL); + if (!r) { + ret = -ENOMEM; + goto out; + } + + /* + * Record all info in the generic DMA ranges array for struct device, + * returning an error if we don't find any parsable ranges. + */ + *map = r; + of_dma_range_parser_init(&parser, node); + for_each_of_range(&parser, &range) { + pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n", + range.bus_addr, range.cpu_addr, range.size); + if (range.cpu_addr == OF_BAD_ADDR) + continue; + r->cpu_start = range.cpu_addr; + r->dma_start = range.bus_addr; + r->size = range.size; + r->offset = range.cpu_addr - range.bus_addr; + r++; + } +out: + of_node_put(node); + return ret; +} +#endif /* CONFIG_HAS_DMA */ + +/** + * of_dma_get_max_cpu_address - Gets highest CPU address suitable for DMA + * @np: The node to start searching from or NULL to start from the root + * + * Gets the highest CPU physical address that is addressable by all DMA masters + * in the sub-tree pointed by np, or the whole tree if NULL is passed. 
If no + * DMA constrained device is found, it returns PHYS_ADDR_MAX. + */ +phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np) +{ + phys_addr_t max_cpu_addr = PHYS_ADDR_MAX; + struct of_range_parser parser; + phys_addr_t subtree_max_addr; + struct device_node *child; + struct of_range range; + const __be32 *ranges; + u64 cpu_end = 0; + int len; + + if (!np) + np = of_root; + + ranges = of_get_property(np, "dma-ranges", &len); + if (ranges && len) { + of_dma_range_parser_init(&parser, np); + for_each_of_range(&parser, &range) + if (range.cpu_addr + range.size > cpu_end) + cpu_end = range.cpu_addr + range.size - 1; + + if (max_cpu_addr > cpu_end) + max_cpu_addr = cpu_end; + } + + for_each_available_child_of_node(np, child) { + subtree_max_addr = of_dma_get_max_cpu_address(child); + if (max_cpu_addr > subtree_max_addr) + max_cpu_addr = subtree_max_addr; + } + + return max_cpu_addr; +} + +/** + * of_dma_is_coherent - Check if device is coherent + * @np: device node + * + * It returns true if "dma-coherent" property was found + * for this device in the DT, or if DMA is coherent by + * default for OF devices on the current platform and no + * "dma-noncoherent" property was found for this device. + */ +bool of_dma_is_coherent(struct device_node *np) +{ + struct device_node *node; + bool is_coherent = IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT); + + node = of_node_get(np); + + while (node) { + if (of_property_read_bool(node, "dma-coherent")) { + is_coherent = true; + break; + } + if (of_property_read_bool(node, "dma-noncoherent")) { + is_coherent = false; + break; + } + node = of_get_next_dma_parent(node); + } + of_node_put(node); + return is_coherent; +} +EXPORT_SYMBOL_GPL(of_dma_is_coherent); + +/** + * of_mmio_is_nonposted - Check if device uses non-posted MMIO + * @np: device node + * + * Returns true if the "nonposted-mmio" property was found for + * the device's bus. + * + * This is currently only enabled on builds that support Apple ARM devices, as + * an optimization. + */ +static bool of_mmio_is_nonposted(struct device_node *np) +{ + struct device_node *parent; + bool nonposted; + + if (!IS_ENABLED(CONFIG_ARCH_APPLE)) + return false; + + parent = of_get_parent(np); + if (!parent) + return false; + + nonposted = of_property_read_bool(parent, "nonposted-mmio"); + + of_node_put(parent); + return nonposted; +} diff --git a/drivers/of/base.c b/drivers/of/base.c new file mode 100644 index 000000000..f849bbb9e --- /dev/null +++ b/drivers/of/base.c @@ -0,0 +1,2208 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Procedures for creating, accessing and interpreting the device tree. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net + * + * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and + * Grant Likely. 
+ */ + +#define pr_fmt(fmt) "OF: " fmt + +#include <linux/console.h> +#include <linux/ctype.h> +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/proc_fs.h> + +#include "of_private.h" + +LIST_HEAD(aliases_lookup); + +struct device_node *of_root; +EXPORT_SYMBOL(of_root); +struct device_node *of_chosen; +EXPORT_SYMBOL(of_chosen); +struct device_node *of_aliases; +struct device_node *of_stdout; +static const char *of_stdout_options; + +struct kset *of_kset; + +/* + * Used to protect the of_aliases, to hold off addition of nodes to sysfs. + * This mutex must be held whenever modifications are being made to the + * device tree. The of_{attach,detach}_node() and + * of_{add,remove,update}_property() helpers make sure this happens. + */ +DEFINE_MUTEX(of_mutex); + +/* use when traversing tree through the child, sibling, + * or parent members of struct device_node. + */ +DEFINE_RAW_SPINLOCK(devtree_lock); + +bool of_node_name_eq(const struct device_node *np, const char *name) +{ + const char *node_name; + size_t len; + + if (!np) + return false; + + node_name = kbasename(np->full_name); + len = strchrnul(node_name, '@') - node_name; + + return (strlen(name) == len) && (strncmp(node_name, name, len) == 0); +} +EXPORT_SYMBOL(of_node_name_eq); + +bool of_node_name_prefix(const struct device_node *np, const char *prefix) +{ + if (!np) + return false; + + return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0; +} +EXPORT_SYMBOL(of_node_name_prefix); + +static bool __of_node_is_type(const struct device_node *np, const char *type) +{ + const char *match = __of_get_property(np, "device_type", NULL); + + return np && match && type && !strcmp(match, type); +} + +int of_bus_n_addr_cells(struct device_node *np) +{ + u32 cells; + + for (; np; np = np->parent) + if (!of_property_read_u32(np, "#address-cells", &cells)) + return cells; + + /* No #address-cells property for the root node */ + return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; +} + +int of_n_addr_cells(struct device_node *np) +{ + if (np->parent) + np = np->parent; + + return of_bus_n_addr_cells(np); +} +EXPORT_SYMBOL(of_n_addr_cells); + +int of_bus_n_size_cells(struct device_node *np) +{ + u32 cells; + + for (; np; np = np->parent) + if (!of_property_read_u32(np, "#size-cells", &cells)) + return cells; + + /* No #size-cells property for the root node */ + return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; +} + +int of_n_size_cells(struct device_node *np) +{ + if (np->parent) + np = np->parent; + + return of_bus_n_size_cells(np); +} +EXPORT_SYMBOL(of_n_size_cells); + +#ifdef CONFIG_NUMA +int __weak of_node_to_nid(struct device_node *np) +{ + return NUMA_NO_NODE; +} +#endif + +#define OF_PHANDLE_CACHE_BITS 7 +#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS) + +static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ]; + +static u32 of_phandle_cache_hash(phandle handle) +{ + return hash_32(handle, OF_PHANDLE_CACHE_BITS); +} + +/* + * Caller must hold devtree_lock. 
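 *
 * A typical caller therefore looks like this (an illustrative sketch;
 * the real callers live in the dynamic-tree code and are not shown here):
 *
 *	raw_spin_lock_irqsave(&devtree_lock, flags);
 *	__of_phandle_cache_inv_entry(np->phandle);
 *	...
 *	raw_spin_unlock_irqrestore(&devtree_lock, flags);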
+ */ +void __of_phandle_cache_inv_entry(phandle handle) +{ + u32 handle_hash; + struct device_node *np; + + if (!handle) + return; + + handle_hash = of_phandle_cache_hash(handle); + + np = phandle_cache[handle_hash]; + if (np && handle == np->phandle) + phandle_cache[handle_hash] = NULL; +} + +void __init of_core_init(void) +{ + struct device_node *np; + + + /* Create the kset, and register existing nodes */ + mutex_lock(&of_mutex); + of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); + if (!of_kset) { + mutex_unlock(&of_mutex); + pr_err("failed to register existing nodes\n"); + return; + } + for_each_of_allnodes(np) { + __of_attach_node_sysfs(np); + if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)]) + phandle_cache[of_phandle_cache_hash(np->phandle)] = np; + } + mutex_unlock(&of_mutex); + + /* Symlink in /proc as required by userspace ABI */ + if (of_root) + proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); +} + +static struct property *__of_find_property(const struct device_node *np, + const char *name, int *lenp) +{ + struct property *pp; + + if (!np) + return NULL; + + for (pp = np->properties; pp; pp = pp->next) { + if (of_prop_cmp(pp->name, name) == 0) { + if (lenp) + *lenp = pp->length; + break; + } + } + + return pp; +} + +struct property *of_find_property(const struct device_node *np, + const char *name, + int *lenp) +{ + struct property *pp; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + pp = __of_find_property(np, name, lenp); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + return pp; +} +EXPORT_SYMBOL(of_find_property); + +struct device_node *__of_find_all_nodes(struct device_node *prev) +{ + struct device_node *np; + if (!prev) { + np = of_root; + } else if (prev->child) { + np = prev->child; + } else { + /* Walk back up looking for a sibling, or the end of the structure */ + np = prev; + while (np->parent && !np->sibling) + np = np->parent; + np = np->sibling; /* Might be null at the end of the tree */ + } + return np; +} + +/** + * of_find_all_nodes - Get next node in global list + * @prev: Previous node or NULL to start iteration + * of_node_put() will be called on it + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_find_all_nodes(struct device_node *prev) +{ + struct device_node *np; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + np = __of_find_all_nodes(prev); + of_node_get(np); + of_node_put(prev); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_all_nodes); + +/* + * Find a property with a given name for a given node + * and return the value. + */ +const void *__of_get_property(const struct device_node *np, + const char *name, int *lenp) +{ + struct property *pp = __of_find_property(np, name, lenp); + + return pp ? pp->value : NULL; +} + +/* + * Find a property with a given name for a given node + * and return the value. + */ +const void *of_get_property(const struct device_node *np, const char *name, + int *lenp) +{ + struct property *pp = of_find_property(np, name, lenp); + + return pp ? pp->value : NULL; +} +EXPORT_SYMBOL(of_get_property); + +/** + * of_get_cpu_hwid - Get the hardware ID from a CPU device node + * + * @cpun: CPU number(logical index) for which device node is required + * @thread: The local thread number to get the hardware ID for. + * + * Return: The hardware ID for the CPU node or ~0ULL if not found. 
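 *
 * Illustrative use (a sketch; "cpun" and "hwid" are hypothetical locals):
 *
 *	hwid = of_get_cpu_hwid(cpun, 0);
 *	if (hwid == ~0ULL)
 *		...no usable "reg" entry for thread 0...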
+ */ +u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread) +{ + const __be32 *cell; + int ac, len; + + ac = of_n_addr_cells(cpun); + cell = of_get_property(cpun, "reg", &len); + if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len)) + return ~0ULL; + + cell += ac * thread; + return of_read_number(cell, ac); +} + +/* + * arch_match_cpu_phys_id - Match the given logical CPU and physical id + * + * @cpu: logical cpu index of a core/thread + * @phys_id: physical identifier of a core/thread + * + * CPU logical to physical index mapping is architecture specific. + * However this __weak function provides a default match of physical + * id to logical cpu index. phys_id provided here is usually values read + * from the device tree which must match the hardware internal registers. + * + * Returns true if the physical identifier and the logical cpu index + * correspond to the same core/thread, false otherwise. + */ +bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id) +{ + return (u32)phys_id == cpu; +} + +/* + * Checks if the given "prop_name" property holds the physical id of the + * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not + * NULL, local thread number within the core is returned in it. + */ +static bool __of_find_n_match_cpu_property(struct device_node *cpun, + const char *prop_name, int cpu, unsigned int *thread) +{ + const __be32 *cell; + int ac, prop_len, tid; + u64 hwid; + + ac = of_n_addr_cells(cpun); + cell = of_get_property(cpun, prop_name, &prop_len); + if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0)) + return true; + if (!cell || !ac) + return false; + prop_len /= sizeof(*cell) * ac; + for (tid = 0; tid < prop_len; tid++) { + hwid = of_read_number(cell, ac); + if (arch_match_cpu_phys_id(cpu, hwid)) { + if (thread) + *thread = tid; + return true; + } + cell += ac; + } + return false; +} + +/* + * arch_find_n_match_cpu_physical_id - See if the given device node is + * for the cpu corresponding to logical cpu 'cpu'. Return true if so, + * else false. If 'thread' is non-NULL, the local thread number within the + * core is returned in it. + */ +bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun, + int cpu, unsigned int *thread) +{ + /* Check for non-standard "ibm,ppc-interrupt-server#s" property + * for thread ids on PowerPC. If it doesn't exist fallback to + * standard "reg" property. + */ + if (IS_ENABLED(CONFIG_PPC) && + __of_find_n_match_cpu_property(cpun, + "ibm,ppc-interrupt-server#s", + cpu, thread)) + return true; + + return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread); +} + +/** + * of_get_cpu_node - Get device node associated with the given logical CPU + * + * @cpu: CPU number(logical index) for which device node is required + * @thread: if not NULL, local thread number within the physical core is + * returned + * + * The main purpose of this function is to retrieve the device node for the + * given logical CPU index. It should be used to initialize the of_node in + * cpu device. Once of_node in cpu device is populated, all the further + * references can use that instead. + * + * CPU logical to physical index mapping is architecture specific and is built + * before booting secondary cores. This function uses arch_match_cpu_phys_id + * which can be overridden by architecture specific implementation. + * + * Return: A node pointer for the logical cpu with refcount incremented, use + * of_node_put() on it when done. Returns NULL if not found. 
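 *
 * Illustrative sketch ("cpu", "thread" and "cpun" are hypothetical
 * caller locals):
 *
 *	cpun = of_get_cpu_node(cpu, &thread);
 *	if (cpun)
 *		pr_info("cpu%d is thread %u of %pOF\n", cpu, thread, cpun);
 *	of_node_put(cpun);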
+ */ +struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) +{ + struct device_node *cpun; + + for_each_of_cpu_node(cpun) { + if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread)) + return cpun; + } + return NULL; +} +EXPORT_SYMBOL(of_get_cpu_node); + +/** + * of_cpu_node_to_id: Get the logical CPU number for a given device_node + * + * @cpu_node: Pointer to the device_node for CPU. + * + * Return: The logical CPU number of the given CPU device_node or -ENODEV if the + * CPU is not found. + */ +int of_cpu_node_to_id(struct device_node *cpu_node) +{ + int cpu; + bool found = false; + struct device_node *np; + + for_each_possible_cpu(cpu) { + np = of_cpu_device_node_get(cpu); + found = (cpu_node == np); + of_node_put(np); + if (found) + return cpu; + } + + return -ENODEV; +} +EXPORT_SYMBOL(of_cpu_node_to_id); + +/** + * of_get_cpu_state_node - Get CPU's idle state node at the given index + * + * @cpu_node: The device node for the CPU + * @index: The index in the list of the idle states + * + * Two generic methods can be used to describe a CPU's idle states, either via + * a flattened description through the "cpu-idle-states" binding or via the + * hierarchical layout, using the "power-domains" and the "domain-idle-states" + * bindings. This function check for both and returns the idle state node for + * the requested index. + * + * Return: An idle state node if found at @index. The refcount is incremented + * for it, so call of_node_put() on it when done. Returns NULL if not found. + */ +struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, + int index) +{ + struct of_phandle_args args; + int err; + + err = of_parse_phandle_with_args(cpu_node, "power-domains", + "#power-domain-cells", 0, &args); + if (!err) { + struct device_node *state_node = + of_parse_phandle(args.np, "domain-idle-states", index); + + of_node_put(args.np); + if (state_node) + return state_node; + } + + return of_parse_phandle(cpu_node, "cpu-idle-states", index); +} +EXPORT_SYMBOL(of_get_cpu_state_node); + +/** + * __of_device_is_compatible() - Check if the node matches given constraints + * @device: pointer to node + * @compat: required compatible string, NULL or "" for any match + * @type: required device_type value, NULL or "" for any match + * @name: required node name, NULL or "" for any match + * + * Checks if the given @compat, @type and @name strings match the + * properties of the given @device. A constraints can be skipped by + * passing NULL or an empty string as the constraint. + * + * Returns 0 for no match, and a positive integer on match. The return + * value is a relative score with larger values indicating better + * matches. The score is weighted for the most specific compatible value + * to get the highest score. Matching type is next, followed by matching + * name. Practically speaking, this results in the following priority + * order for matches: + * + * 1. specific compatible && type && name + * 2. specific compatible && type + * 3. specific compatible && name + * 4. specific compatible + * 5. general compatible && type && name + * 6. general compatible && type + * 7. general compatible && name + * 8. general compatible + * 9. type && name + * 10. type + * 11. 
name + */ +static int __of_device_is_compatible(const struct device_node *device, + const char *compat, const char *type, const char *name) +{ + struct property *prop; + const char *cp; + int index = 0, score = 0; + + /* Compatible match has highest priority */ + if (compat && compat[0]) { + prop = __of_find_property(device, "compatible", NULL); + for (cp = of_prop_next_string(prop, NULL); cp; + cp = of_prop_next_string(prop, cp), index++) { + if (of_compat_cmp(cp, compat, strlen(compat)) == 0) { + score = INT_MAX/2 - (index << 2); + break; + } + } + if (!score) + return 0; + } + + /* Matching type is better than matching name */ + if (type && type[0]) { + if (!__of_node_is_type(device, type)) + return 0; + score += 2; + } + + /* Matching name is a bit better than not */ + if (name && name[0]) { + if (!of_node_name_eq(device, name)) + return 0; + score++; + } + + return score; +} + +/** Checks if the given "compat" string matches one of the strings in + * the device's "compatible" property + */ +int of_device_is_compatible(const struct device_node *device, + const char *compat) +{ + unsigned long flags; + int res; + + raw_spin_lock_irqsave(&devtree_lock, flags); + res = __of_device_is_compatible(device, compat, NULL, NULL); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return res; +} +EXPORT_SYMBOL(of_device_is_compatible); + +/** Checks if the device is compatible with any of the entries in + * a NULL terminated array of strings. Returns the best match + * score or 0. + */ +int of_device_compatible_match(const struct device_node *device, + const char *const *compat) +{ + unsigned int tmp, score = 0; + + if (!compat) + return 0; + + while (*compat) { + tmp = of_device_is_compatible(device, *compat); + if (tmp > score) + score = tmp; + compat++; + } + + return score; +} +EXPORT_SYMBOL_GPL(of_device_compatible_match); + +/** + * of_machine_is_compatible - Test root of device tree for a given compatible value + * @compat: compatible string to look for in root node's compatible property. + * + * Return: A positive integer if the root node has the given value in its + * compatible property. 
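 *
 * Illustrative check (the compatible string "acme,board-x" is
 * hypothetical):
 *
 *	if (of_machine_is_compatible("acme,board-x"))
 *		...apply a board-specific quirk...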
+ */ +int of_machine_is_compatible(const char *compat) +{ + struct device_node *root; + int rc = 0; + + root = of_find_node_by_path("/"); + if (root) { + rc = of_device_is_compatible(root, compat); + of_node_put(root); + } + return rc; +} +EXPORT_SYMBOL(of_machine_is_compatible); + +/** + * __of_device_is_available - check if a device is available for use + * + * @device: Node to check for availability, with locks already held + * + * Return: True if the status property is absent or set to "okay" or "ok", + * false otherwise + */ +static bool __of_device_is_available(const struct device_node *device) +{ + const char *status; + int statlen; + + if (!device) + return false; + + status = __of_get_property(device, "status", &statlen); + if (status == NULL) + return true; + + if (statlen > 0) { + if (!strcmp(status, "okay") || !strcmp(status, "ok")) + return true; + } + + return false; +} + +/** + * of_device_is_available - check if a device is available for use + * + * @device: Node to check for availability + * + * Return: True if the status property is absent or set to "okay" or "ok", + * false otherwise + */ +bool of_device_is_available(const struct device_node *device) +{ + unsigned long flags; + bool res; + + raw_spin_lock_irqsave(&devtree_lock, flags); + res = __of_device_is_available(device); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return res; + +} +EXPORT_SYMBOL(of_device_is_available); + +/** + * __of_device_is_fail - check if a device has status "fail" or "fail-..." + * + * @device: Node to check status for, with locks already held + * + * Return: True if the status property is set to "fail" or "fail-..." (for any + * error code suffix), false otherwise + */ +static bool __of_device_is_fail(const struct device_node *device) +{ + const char *status; + + if (!device) + return false; + + status = __of_get_property(device, "status", NULL); + if (status == NULL) + return false; + + return !strcmp(status, "fail") || !strncmp(status, "fail-", 5); +} + +/** + * of_device_is_big_endian - check if a device has BE registers + * + * @device: Node to check for endianness + * + * Return: True if the device has a "big-endian" property, or if the kernel + * was compiled for BE *and* the device has a "native-endian" property. + * Returns false otherwise. + * + * Callers would nominally use ioread32be/iowrite32be if + * of_device_is_big_endian() == true, or readl/writel otherwise. + */ +bool of_device_is_big_endian(const struct device_node *device) +{ + if (of_property_read_bool(device, "big-endian")) + return true; + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) && + of_property_read_bool(device, "native-endian")) + return true; + return false; +} +EXPORT_SYMBOL(of_device_is_big_endian); + +/** + * of_get_parent - Get a node's parent if any + * @node: Node to get parent + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_get_parent(const struct device_node *node) +{ + struct device_node *np; + unsigned long flags; + + if (!node) + return NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); + np = of_node_get(node->parent); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_get_parent); + +/** + * of_get_next_parent - Iterate to a node's parent + * @node: Node to get parent of + * + * This is like of_get_parent() except that it drops the + * refcount on the passed node, making it suitable for iterating + * through a node's parents. 
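 *
 * Illustrative walk up the tree (a sketch; the "dma-coherent" stop
 * condition is just an example):
 *
 *	while (np) {
 *		if (of_property_read_bool(np, "dma-coherent"))
 *			break;
 *		np = of_get_next_parent(np);
 *	}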
+ * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_get_next_parent(struct device_node *node) +{ + struct device_node *parent; + unsigned long flags; + + if (!node) + return NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); + parent = of_node_get(node->parent); + of_node_put(node); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return parent; +} +EXPORT_SYMBOL(of_get_next_parent); + +static struct device_node *__of_get_next_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + + if (!node) + return NULL; + + next = prev ? prev->sibling : node->child; + of_node_get(next); + of_node_put(prev); + return next; +} +#define __for_each_child_of_node(parent, child) \ + for (child = __of_get_next_child(parent, NULL); child != NULL; \ + child = __of_get_next_child(parent, child)) + +/** + * of_get_next_child - Iterate a node childs + * @node: parent node + * @prev: previous child of the parent node, or NULL to get first + * + * Return: A node pointer with refcount incremented, use of_node_put() on + * it when done. Returns NULL when prev is the last child. Decrements the + * refcount of prev. + */ +struct device_node *of_get_next_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + next = __of_get_next_child(node, prev); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return next; +} +EXPORT_SYMBOL(of_get_next_child); + +/** + * of_get_next_available_child - Find the next available child node + * @node: parent node + * @prev: previous child of the parent node, or NULL to get first + * + * This function is like of_get_next_child(), except that it + * automatically skips any disabled nodes (i.e. status = "disabled"). + */ +struct device_node *of_get_next_available_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + unsigned long flags; + + if (!node) + return NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); + next = prev ? prev->sibling : node->child; + for (; next; next = next->sibling) { + if (!__of_device_is_available(next)) + continue; + if (of_node_get(next)) + break; + } + of_node_put(prev); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return next; +} +EXPORT_SYMBOL(of_get_next_available_child); + +/** + * of_get_next_cpu_node - Iterate on cpu nodes + * @prev: previous child of the /cpus node, or NULL to get first + * + * Unusable CPUs (those with the status property set to "fail" or "fail-...") + * will be skipped. + * + * Return: A cpu node pointer with refcount incremented, use of_node_put() + * on it when done. Returns NULL when prev is the last child. Decrements + * the refcount of prev. 
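 *
 * Most callers use the for_each_of_cpu_node() iterator built on top of
 * this function (see of_get_cpu_node() above); for example, counting the
 * cpu nodes (illustrative, "ncpus" is a hypothetical counter):
 *
 *	for_each_of_cpu_node(cpun)
 *		ncpus++;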
+ */ +struct device_node *of_get_next_cpu_node(struct device_node *prev) +{ + struct device_node *next = NULL; + unsigned long flags; + struct device_node *node; + + if (!prev) + node = of_find_node_by_path("/cpus"); + + raw_spin_lock_irqsave(&devtree_lock, flags); + if (prev) + next = prev->sibling; + else if (node) { + next = node->child; + of_node_put(node); + } + for (; next; next = next->sibling) { + if (__of_device_is_fail(next)) + continue; + if (!(of_node_name_eq(next, "cpu") || + __of_node_is_type(next, "cpu"))) + continue; + if (of_node_get(next)) + break; + } + of_node_put(prev); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return next; +} +EXPORT_SYMBOL(of_get_next_cpu_node); + +/** + * of_get_compatible_child - Find compatible child node + * @parent: parent node + * @compatible: compatible string + * + * Lookup child node whose compatible property contains the given compatible + * string. + * + * Return: a node pointer with refcount incremented, use of_node_put() on it + * when done; or NULL if not found. + */ +struct device_node *of_get_compatible_child(const struct device_node *parent, + const char *compatible) +{ + struct device_node *child; + + for_each_child_of_node(parent, child) { + if (of_device_is_compatible(child, compatible)) + break; + } + + return child; +} +EXPORT_SYMBOL(of_get_compatible_child); + +/** + * of_get_child_by_name - Find the child node by name for a given parent + * @node: parent node + * @name: child name to look for. + * + * This function looks for child node for given matching name + * + * Return: A node pointer if found, with refcount incremented, use + * of_node_put() on it when done. + * Returns NULL if node is not found. + */ +struct device_node *of_get_child_by_name(const struct device_node *node, + const char *name) +{ + struct device_node *child; + + for_each_child_of_node(node, child) + if (of_node_name_eq(child, name)) + break; + return child; +} +EXPORT_SYMBOL(of_get_child_by_name); + +struct device_node *__of_find_node_by_path(struct device_node *parent, + const char *path) +{ + struct device_node *child; + int len; + + len = strcspn(path, "/:"); + if (!len) + return NULL; + + __for_each_child_of_node(parent, child) { + const char *name = kbasename(child->full_name); + if (strncmp(path, name, len) == 0 && (strlen(name) == len)) + return child; + } + return NULL; +} + +struct device_node *__of_find_node_by_full_path(struct device_node *node, + const char *path) +{ + const char *separator = strchr(path, ':'); + + while (node && *path == '/') { + struct device_node *tmp = node; + + path++; /* Increment past '/' delimiter */ + node = __of_find_node_by_path(node, path); + of_node_put(tmp); + path = strchrnul(path, '/'); + if (separator && separator < path) + break; + } + return node; +} + +/** + * of_find_node_opts_by_path - Find a node matching a full OF path + * @path: Either the full path to match, or if the path does not + * start with '/', the name of a property of the /aliases + * node (an alias). In the case of an alias, the node + * matching the alias' value will be returned. + * @opts: Address of a pointer into which to store the start of + * an options string appended to the end of the path with + * a ':' separator. + * + * Valid paths: + * * /foo/bar Full path + * * foo Valid alias + * * foo/bar Valid alias + relative path + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. 
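 *
 * Illustrative call (assumes an alias "serial0" in /aliases; the option
 * string is hypothetical):
 *
 *	const char *opts;
 *
 *	np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *	...opts now points at "115200n8", or is NULL when no ':' is given...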
+ */ +struct device_node *of_find_node_opts_by_path(const char *path, const char **opts) +{ + struct device_node *np = NULL; + struct property *pp; + unsigned long flags; + const char *separator = strchr(path, ':'); + + if (opts) + *opts = separator ? separator + 1 : NULL; + + if (strcmp(path, "/") == 0) + return of_node_get(of_root); + + /* The path could begin with an alias */ + if (*path != '/') { + int len; + const char *p = separator; + + if (!p) + p = strchrnul(path, '/'); + len = p - path; + + /* of_aliases must not be NULL */ + if (!of_aliases) + return NULL; + + for_each_property_of_node(of_aliases, pp) { + if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) { + np = of_find_node_by_path(pp->value); + break; + } + } + if (!np) + return NULL; + path = p; + } + + /* Step down the tree matching path components */ + raw_spin_lock_irqsave(&devtree_lock, flags); + if (!np) + np = of_node_get(of_root); + np = __of_find_node_by_full_path(np, path); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_node_opts_by_path); + +/** + * of_find_node_by_name - Find a node by its "name" property + * @from: The node to start searching from or NULL; the node + * you pass will not be searched, only the next one + * will. Typically, you pass what the previous call + * returned. of_node_put() will be called on @from. + * @name: The name string to match against + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_find_node_by_name(struct device_node *from, + const char *name) +{ + struct device_node *np; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + for_each_of_allnodes_from(from, np) + if (of_node_name_eq(np, name) && of_node_get(np)) + break; + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_node_by_name); + +/** + * of_find_node_by_type - Find a node by its "device_type" property + * @from: The node to start searching from, or NULL to start searching + * the entire device tree. The node you pass will not be + * searched, only the next one will; typically, you pass + * what the previous call returned. of_node_put() will be + * called on from for you. + * @type: The type string to match against + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_find_node_by_type(struct device_node *from, + const char *type) +{ + struct device_node *np; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + for_each_of_allnodes_from(from, np) + if (__of_node_is_type(np, type) && of_node_get(np)) + break; + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_node_by_type); + +/** + * of_find_compatible_node - Find a node based on type and one of the + * tokens in its "compatible" property + * @from: The node to start searching from or NULL, the node + * you pass will not be searched, only the next one + * will; typically, you pass what the previous call + * returned. of_node_put() will be called on it + * @type: The type string to match "device_type" or NULL to ignore + * @compatible: The string to match to one of the tokens in the device + * "compatible" list. + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. 
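+ *
+ * A minimal usage sketch ("fixed-clock" is just an example compatible)::
+ *
+ *	struct device_node *np;
+ *
+ *	np = of_find_compatible_node(NULL, NULL, "fixed-clock");
+ *	if (np) {
+ *		... use np ...
+ *		of_node_put(np);
+ *	}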
+ */ +struct device_node *of_find_compatible_node(struct device_node *from, + const char *type, const char *compatible) +{ + struct device_node *np; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + for_each_of_allnodes_from(from, np) + if (__of_device_is_compatible(np, compatible, type, NULL) && + of_node_get(np)) + break; + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_compatible_node); + +/** + * of_find_node_with_property - Find a node which has a property with + * the given name. + * @from: The node to start searching from or NULL, the node + * you pass will not be searched, only the next one + * will; typically, you pass what the previous call + * returned. of_node_put() will be called on it + * @prop_name: The name of the property to look for. + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_find_node_with_property(struct device_node *from, + const char *prop_name) +{ + struct device_node *np; + struct property *pp; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + for_each_of_allnodes_from(from, np) { + for (pp = np->properties; pp; pp = pp->next) { + if (of_prop_cmp(pp->name, prop_name) == 0) { + of_node_get(np); + goto out; + } + } + } +out: + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_node_with_property); + +static +const struct of_device_id *__of_match_node(const struct of_device_id *matches, + const struct device_node *node) +{ + const struct of_device_id *best_match = NULL; + int score, best_score = 0; + + if (!matches) + return NULL; + + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) { + score = __of_device_is_compatible(node, matches->compatible, + matches->type, matches->name); + if (score > best_score) { + best_match = matches; + best_score = score; + } + } + + return best_match; +} + +/** + * of_match_node - Tell if a device_node has a matching of_match structure + * @matches: array of of device match structures to search in + * @node: the of device structure to match against + * + * Low level utility function used by device matching. + */ +const struct of_device_id *of_match_node(const struct of_device_id *matches, + const struct device_node *node) +{ + const struct of_device_id *match; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + match = __of_match_node(matches, node); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return match; +} +EXPORT_SYMBOL(of_match_node); + +/** + * of_find_matching_node_and_match - Find a node based on an of_device_id + * match table. + * @from: The node to start searching from or NULL, the node + * you pass will not be searched, only the next one + * will; typically, you pass what the previous call + * returned. of_node_put() will be called on it + * @matches: array of of device match structures to search in + * @match: Updated to point at the matches entry which matched + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. 
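+ *
+ * A minimal usage sketch (the match table contents are illustrative)::
+ *
+ *	static const struct of_device_id my_ids[] = {
+ *		{ .compatible = "vendor,foo" },
+ *		{ .compatible = "vendor,bar" },
+ *		{ }
+ *	};
+ *	const struct of_device_id *match;
+ *	struct device_node *np;
+ *
+ *	np = of_find_matching_node_and_match(NULL, my_ids, &match);
+ *
+ * The empty entry terminates the table; on success np must be released
+ * with of_node_put() and match points at the my_ids[] entry that matched.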
+ */ +struct device_node *of_find_matching_node_and_match(struct device_node *from, + const struct of_device_id *matches, + const struct of_device_id **match) +{ + struct device_node *np; + const struct of_device_id *m; + unsigned long flags; + + if (match) + *match = NULL; + + raw_spin_lock_irqsave(&devtree_lock, flags); + for_each_of_allnodes_from(from, np) { + m = __of_match_node(matches, np); + if (m && of_node_get(np)) { + if (match) + *match = m; + break; + } + } + of_node_put(from); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_matching_node_and_match); + +/** + * of_modalias_node - Lookup appropriate modalias for a device node + * @node: pointer to a device tree node + * @modalias: Pointer to buffer that modalias value will be copied into + * @len: Length of modalias value + * + * Based on the value of the compatible property, this routine will attempt + * to choose an appropriate modalias value for a particular device tree node. + * It does this by stripping the manufacturer prefix (as delimited by a ',') + * from the first entry in the compatible list property. + * + * Return: This routine returns 0 on success, <0 on failure. + */ +int of_modalias_node(struct device_node *node, char *modalias, int len) +{ + const char *compatible, *p; + int cplen; + + compatible = of_get_property(node, "compatible", &cplen); + if (!compatible || strlen(compatible) > cplen) + return -ENODEV; + p = strchr(compatible, ','); + strscpy(modalias, p ? p + 1 : compatible, len); + return 0; +} +EXPORT_SYMBOL_GPL(of_modalias_node); + +/** + * of_find_node_by_phandle - Find a node given a phandle + * @handle: phandle of the node to find + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. + */ +struct device_node *of_find_node_by_phandle(phandle handle) +{ + struct device_node *np = NULL; + unsigned long flags; + u32 handle_hash; + + if (!handle) + return NULL; + + handle_hash = of_phandle_cache_hash(handle); + + raw_spin_lock_irqsave(&devtree_lock, flags); + + if (phandle_cache[handle_hash] && + handle == phandle_cache[handle_hash]->phandle) + np = phandle_cache[handle_hash]; + + if (!np) { + for_each_of_allnodes(np) + if (np->phandle == handle && + !of_node_check_flag(np, OF_DETACHED)) { + phandle_cache[handle_hash] = np; + break; + } + } + + of_node_get(np); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + return np; +} +EXPORT_SYMBOL(of_find_node_by_phandle); + +void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) +{ + int i; + printk("%s %pOF", msg, args->np); + for (i = 0; i < args->args_count; i++) { + const char delim = i ? ',' : ':'; + + pr_cont("%c%08x", delim, args->args[i]); + } + pr_cont("\n"); +} + +int of_phandle_iterator_init(struct of_phandle_iterator *it, + const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count) +{ + const __be32 *list; + int size; + + memset(it, 0, sizeof(*it)); + + /* + * one of cell_count or cells_name must be provided to determine the + * argument length. 
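+	 * A negative cell_count together with a non-NULL cells_name means the
+	 * cells property of each phandle target is authoritative; a
+	 * cell_count >= 0 only serves as a fallback when that property is
+	 * missing (see of_phandle_iterator_next()).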
+ */ + if (cell_count < 0 && !cells_name) + return -EINVAL; + + list = of_get_property(np, list_name, &size); + if (!list) + return -ENOENT; + + it->cells_name = cells_name; + it->cell_count = cell_count; + it->parent = np; + it->list_end = list + size / sizeof(*list); + it->phandle_end = list; + it->cur = list; + + return 0; +} +EXPORT_SYMBOL_GPL(of_phandle_iterator_init); + +int of_phandle_iterator_next(struct of_phandle_iterator *it) +{ + uint32_t count = 0; + + if (it->node) { + of_node_put(it->node); + it->node = NULL; + } + + if (!it->cur || it->phandle_end >= it->list_end) + return -ENOENT; + + it->cur = it->phandle_end; + + /* If phandle is 0, then it is an empty entry with no arguments. */ + it->phandle = be32_to_cpup(it->cur++); + + if (it->phandle) { + + /* + * Find the provider node and parse the #*-cells property to + * determine the argument length. + */ + it->node = of_find_node_by_phandle(it->phandle); + + if (it->cells_name) { + if (!it->node) { + pr_err("%pOF: could not find phandle %d\n", + it->parent, it->phandle); + goto err; + } + + if (of_property_read_u32(it->node, it->cells_name, + &count)) { + /* + * If both cell_count and cells_name is given, + * fall back to cell_count in absence + * of the cells_name property + */ + if (it->cell_count >= 0) { + count = it->cell_count; + } else { + pr_err("%pOF: could not get %s for %pOF\n", + it->parent, + it->cells_name, + it->node); + goto err; + } + } + } else { + count = it->cell_count; + } + + /* + * Make sure that the arguments actually fit in the remaining + * property data length + */ + if (it->cur + count > it->list_end) { + if (it->cells_name) + pr_err("%pOF: %s = %d found %td\n", + it->parent, it->cells_name, + count, it->list_end - it->cur); + else + pr_err("%pOF: phandle %s needs %d, found %td\n", + it->parent, of_node_full_name(it->node), + count, it->list_end - it->cur); + goto err; + } + } + + it->phandle_end = it->cur + count; + it->cur_count = count; + + return 0; + +err: + if (it->node) { + of_node_put(it->node); + it->node = NULL; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(of_phandle_iterator_next); + +int of_phandle_iterator_args(struct of_phandle_iterator *it, + uint32_t *args, + int size) +{ + int i, count; + + count = it->cur_count; + + if (WARN_ON(size < count)) + count = size; + + for (i = 0; i < count; i++) + args[i] = be32_to_cpup(it->cur++); + + return count; +} + +int __of_parse_phandle_with_args(const struct device_node *np, + const char *list_name, + const char *cells_name, + int cell_count, int index, + struct of_phandle_args *out_args) +{ + struct of_phandle_iterator it; + int rc, cur_index = 0; + + if (index < 0) + return -EINVAL; + + /* Loop over the phandles until all the requested entry is found */ + of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) { + /* + * All of the error cases bail out of the loop, so at + * this point, the parsing is successful. If the requested + * index matches, then fill the out_args structure and return, + * or return -ENOENT for an empty entry. + */ + rc = -ENOENT; + if (cur_index == index) { + if (!it.phandle) + goto err; + + if (out_args) { + int c; + + c = of_phandle_iterator_args(&it, + out_args->args, + MAX_PHANDLE_ARGS); + out_args->np = it.node; + out_args->args_count = c; + } else { + of_node_put(it.node); + } + + /* Found it! 
return success */ + return 0; + } + + cur_index++; + } + + /* + * Unlock node before returning result; will be one of: + * -ENOENT : index is for empty phandle + * -EINVAL : parsing error on data + */ + + err: + of_node_put(it.node); + return rc; +} +EXPORT_SYMBOL(__of_parse_phandle_with_args); + +/** + * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it + * @np: pointer to a device tree node containing a list + * @list_name: property name that contains a list + * @stem_name: stem of property names that specify phandles' arguments count + * @index: index of a phandle to parse out + * @out_args: optional pointer to output arguments structure (will be filled) + * + * This function is useful to parse lists of phandles and their arguments. + * Returns 0 on success and fills out_args, on error returns appropriate errno + * value. The difference between this function and of_parse_phandle_with_args() + * is that this API remaps a phandle if the node the phandle points to has + * a <@stem_name>-map property. + * + * Caller is responsible to call of_node_put() on the returned out_args->np + * pointer. + * + * Example:: + * + * phandle1: node1 { + * #list-cells = <2>; + * }; + * + * phandle2: node2 { + * #list-cells = <1>; + * }; + * + * phandle3: node3 { + * #list-cells = <1>; + * list-map = <0 &phandle2 3>, + * <1 &phandle2 2>, + * <2 &phandle1 5 1>; + * list-map-mask = <0x3>; + * }; + * + * node4 { + * list = <&phandle1 1 2 &phandle3 0>; + * }; + * + * To get a device_node of the ``node2`` node you may call this: + * of_parse_phandle_with_args(node4, "list", "list", 1, &args); + */ +int of_parse_phandle_with_args_map(const struct device_node *np, + const char *list_name, + const char *stem_name, + int index, struct of_phandle_args *out_args) +{ + char *cells_name, *map_name = NULL, *mask_name = NULL; + char *pass_name = NULL; + struct device_node *cur, *new = NULL; + const __be32 *map, *mask, *pass; + static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 }; + static const __be32 dummy_pass[] = { [0 ... 
MAX_PHANDLE_ARGS] = 0 }; + __be32 initial_match_array[MAX_PHANDLE_ARGS]; + const __be32 *match_array = initial_match_array; + int i, ret, map_len, match; + u32 list_size, new_size; + + if (index < 0) + return -EINVAL; + + cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name); + if (!cells_name) + return -ENOMEM; + + ret = -ENOMEM; + map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name); + if (!map_name) + goto free; + + mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name); + if (!mask_name) + goto free; + + pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name); + if (!pass_name) + goto free; + + ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index, + out_args); + if (ret) + goto free; + + /* Get the #<list>-cells property */ + cur = out_args->np; + ret = of_property_read_u32(cur, cells_name, &list_size); + if (ret < 0) + goto put; + + /* Precalculate the match array - this simplifies match loop */ + for (i = 0; i < list_size; i++) + initial_match_array[i] = cpu_to_be32(out_args->args[i]); + + ret = -EINVAL; + while (cur) { + /* Get the <list>-map property */ + map = of_get_property(cur, map_name, &map_len); + if (!map) { + ret = 0; + goto free; + } + map_len /= sizeof(u32); + + /* Get the <list>-map-mask property (optional) */ + mask = of_get_property(cur, mask_name, NULL); + if (!mask) + mask = dummy_mask; + /* Iterate through <list>-map property */ + match = 0; + while (map_len > (list_size + 1) && !match) { + /* Compare specifiers */ + match = 1; + for (i = 0; i < list_size; i++, map_len--) + match &= !((match_array[i] ^ *map++) & mask[i]); + + of_node_put(new); + new = of_find_node_by_phandle(be32_to_cpup(map)); + map++; + map_len--; + + /* Check if not found */ + if (!new) + goto put; + + if (!of_device_is_available(new)) + match = 0; + + ret = of_property_read_u32(new, cells_name, &new_size); + if (ret) + goto put; + + /* Check for malformed properties */ + if (WARN_ON(new_size > MAX_PHANDLE_ARGS)) + goto put; + if (map_len < new_size) + goto put; + + /* Move forward by new node's #<list>-cells amount */ + map += new_size; + map_len -= new_size; + } + if (!match) + goto put; + + /* Get the <list>-map-pass-thru property (optional) */ + pass = of_get_property(cur, pass_name, NULL); + if (!pass) + pass = dummy_pass; + + /* + * Successfully parsed a <list>-map translation; copy new + * specifier into the out_args structure, keeping the + * bits specified in <list>-map-pass-thru. + */ + match_array = map - new_size; + for (i = 0; i < new_size; i++) { + __be32 val = *(map - new_size + i); + + if (i < list_size) { + val &= ~pass[i]; + val |= cpu_to_be32(out_args->args[i]) & pass[i]; + } + + out_args->args[i] = be32_to_cpu(val); + } + out_args->args_count = list_size = new_size; + /* Iterate again with new provider */ + out_args->np = new; + of_node_put(cur); + cur = new; + new = NULL; + } +put: + of_node_put(cur); + of_node_put(new); +free: + kfree(mask_name); + kfree(map_name); + kfree(cells_name); + kfree(pass_name); + + return ret; +} +EXPORT_SYMBOL(of_parse_phandle_with_args_map); + +/** + * of_count_phandle_with_args() - Find the number of phandles references in a property + * @np: pointer to a device tree node containing a list + * @list_name: property name that contains a list + * @cells_name: property name that specifies phandles' arguments count + * + * Return: The number of phandle + argument tuples within a property. It + * is a typical pattern to encode a list of phandle and variable + * arguments into a single property. 
The number of arguments is encoded + * by a property in the phandle-target node. For example, a gpios + * property would contain a list of GPIO specifies consisting of a + * phandle and 1 or more arguments. The number of arguments are + * determined by the #gpio-cells property in the node pointed to by the + * phandle. + */ +int of_count_phandle_with_args(const struct device_node *np, const char *list_name, + const char *cells_name) +{ + struct of_phandle_iterator it; + int rc, cur_index = 0; + + /* + * If cells_name is NULL we assume a cell count of 0. This makes + * counting the phandles trivial as each 32bit word in the list is a + * phandle and no arguments are to consider. So we don't iterate through + * the list but just use the length to determine the phandle count. + */ + if (!cells_name) { + const __be32 *list; + int size; + + list = of_get_property(np, list_name, &size); + if (!list) + return -ENOENT; + + return size / sizeof(*list); + } + + rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1); + if (rc) + return rc; + + while ((rc = of_phandle_iterator_next(&it)) == 0) + cur_index += 1; + + if (rc != -ENOENT) + return rc; + + return cur_index; +} +EXPORT_SYMBOL(of_count_phandle_with_args); + +/** + * __of_add_property - Add a property to a node without lock operations + * @np: Caller's Device Node + * @prop: Property to add + */ +int __of_add_property(struct device_node *np, struct property *prop) +{ + struct property **next; + + prop->next = NULL; + next = &np->properties; + while (*next) { + if (strcmp(prop->name, (*next)->name) == 0) + /* duplicate ! don't insert it */ + return -EEXIST; + + next = &(*next)->next; + } + *next = prop; + + return 0; +} + +/** + * of_add_property - Add a property to a node + * @np: Caller's Device Node + * @prop: Property to add + */ +int of_add_property(struct device_node *np, struct property *prop) +{ + unsigned long flags; + int rc; + + mutex_lock(&of_mutex); + + raw_spin_lock_irqsave(&devtree_lock, flags); + rc = __of_add_property(np, prop); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (!rc) + __of_add_property_sysfs(np, prop); + + mutex_unlock(&of_mutex); + + if (!rc) + of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL); + + return rc; +} +EXPORT_SYMBOL_GPL(of_add_property); + +int __of_remove_property(struct device_node *np, struct property *prop) +{ + struct property **next; + + for (next = &np->properties; *next; next = &(*next)->next) { + if (*next == prop) + break; + } + if (*next == NULL) + return -ENODEV; + + /* found the node */ + *next = prop->next; + prop->next = np->deadprops; + np->deadprops = prop; + + return 0; +} + +/** + * of_remove_property - Remove a property from a node. + * @np: Caller's Device Node + * @prop: Property to remove + * + * Note that we don't actually remove it, since we have given out + * who-knows-how-many pointers to the data using get-property. + * Instead we just move the property to the "dead properties" + * list, so it won't be found any more. 
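+ *
+ * A minimal usage sketch (np is a node already held by the caller)::
+ *
+ *	struct property *prop;
+ *
+ *	prop = of_find_property(np, "status", NULL);
+ *	if (prop)
+ *		of_remove_property(np, prop);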
+ */ +int of_remove_property(struct device_node *np, struct property *prop) +{ + unsigned long flags; + int rc; + + if (!prop) + return -ENODEV; + + mutex_lock(&of_mutex); + + raw_spin_lock_irqsave(&devtree_lock, flags); + rc = __of_remove_property(np, prop); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (!rc) + __of_remove_property_sysfs(np, prop); + + mutex_unlock(&of_mutex); + + if (!rc) + of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL); + + return rc; +} +EXPORT_SYMBOL_GPL(of_remove_property); + +int __of_update_property(struct device_node *np, struct property *newprop, + struct property **oldpropp) +{ + struct property **next, *oldprop; + + for (next = &np->properties; *next; next = &(*next)->next) { + if (of_prop_cmp((*next)->name, newprop->name) == 0) + break; + } + *oldpropp = oldprop = *next; + + if (oldprop) { + /* replace the node */ + newprop->next = oldprop->next; + *next = newprop; + oldprop->next = np->deadprops; + np->deadprops = oldprop; + } else { + /* new node */ + newprop->next = NULL; + *next = newprop; + } + + return 0; +} + +/* + * of_update_property - Update a property in a node, if the property does + * not exist, add it. + * + * Note that we don't actually remove it, since we have given out + * who-knows-how-many pointers to the data using get-property. + * Instead we just move the property to the "dead properties" list, + * and add the new property to the property list + */ +int of_update_property(struct device_node *np, struct property *newprop) +{ + struct property *oldprop; + unsigned long flags; + int rc; + + if (!newprop->name) + return -EINVAL; + + mutex_lock(&of_mutex); + + raw_spin_lock_irqsave(&devtree_lock, flags); + rc = __of_update_property(np, newprop, &oldprop); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (!rc) + __of_update_property_sysfs(np, newprop, oldprop); + + mutex_unlock(&of_mutex); + + if (!rc) + of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop); + + return rc; +} + +static void of_alias_add(struct alias_prop *ap, struct device_node *np, + int id, const char *stem, int stem_len) +{ + ap->np = np; + ap->id = id; + strncpy(ap->stem, stem, stem_len); + ap->stem[stem_len] = 0; + list_add_tail(&ap->link, &aliases_lookup); + pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n", + ap->alias, ap->stem, ap->id, np); +} + +/** + * of_alias_scan - Scan all properties of the 'aliases' node + * @dt_alloc: An allocator that provides a virtual address to memory + * for storing the resulting tree + * + * The function scans all the properties of the 'aliases' node and populates + * the global lookup table with the properties. It returns the + * number of alias properties found, or an error code in case of failure. 
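+ *
+ * For illustration (the paths below are made up), an aliases node such as::
+ *
+ *	aliases {
+ *		serial0 = "/soc/serial@12000";
+ *		i2c1 = "/soc/i2c@13000";
+ *	};
+ *
+ * produces lookup entries with stem "serial"/id 0 and stem "i2c"/id 1,
+ * which of_alias_get_id() can resolve later.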
+ */ +void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) +{ + struct property *pp; + + of_aliases = of_find_node_by_path("/aliases"); + of_chosen = of_find_node_by_path("/chosen"); + if (of_chosen == NULL) + of_chosen = of_find_node_by_path("/chosen@0"); + + if (of_chosen) { + /* linux,stdout-path and /aliases/stdout are for legacy compatibility */ + const char *name = NULL; + + if (of_property_read_string(of_chosen, "stdout-path", &name)) + of_property_read_string(of_chosen, "linux,stdout-path", + &name); + if (IS_ENABLED(CONFIG_PPC) && !name) + of_property_read_string(of_aliases, "stdout", &name); + if (name) + of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); + if (of_stdout) + of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT; + } + + if (!of_aliases) + return; + + for_each_property_of_node(of_aliases, pp) { + const char *start = pp->name; + const char *end = start + strlen(start); + struct device_node *np; + struct alias_prop *ap; + int id, len; + + /* Skip those we do not want to proceed */ + if (!strcmp(pp->name, "name") || + !strcmp(pp->name, "phandle") || + !strcmp(pp->name, "linux,phandle")) + continue; + + np = of_find_node_by_path(pp->value); + if (!np) + continue; + + /* walk the alias backwards to extract the id and work out + * the 'stem' string */ + while (isdigit(*(end-1)) && end > start) + end--; + len = end - start; + + if (kstrtoint(end, 10, &id) < 0) + continue; + + /* Allocate an alias_prop with enough space for the stem */ + ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap)); + if (!ap) + continue; + memset(ap, 0, sizeof(*ap) + len + 1); + ap->alias = start; + of_alias_add(ap, np, id, start, len); + } +} + +/** + * of_alias_get_id - Get alias id for the given device_node + * @np: Pointer to the given device_node + * @stem: Alias stem of the given device_node + * + * The function travels the lookup table to get the alias id for the given + * device_node and alias stem. + * + * Return: The alias id if found. + */ +int of_alias_get_id(struct device_node *np, const char *stem) +{ + struct alias_prop *app; + int id = -ENODEV; + + mutex_lock(&of_mutex); + list_for_each_entry(app, &aliases_lookup, link) { + if (strcmp(app->stem, stem) != 0) + continue; + + if (np == app->np) { + id = app->id; + break; + } + } + mutex_unlock(&of_mutex); + + return id; +} +EXPORT_SYMBOL_GPL(of_alias_get_id); + +/** + * of_alias_get_highest_id - Get highest alias id for the given stem + * @stem: Alias stem to be examined + * + * The function travels the lookup table to get the highest alias id for the + * given alias stem. It returns the alias id if found. + */ +int of_alias_get_highest_id(const char *stem) +{ + struct alias_prop *app; + int id = -ENODEV; + + mutex_lock(&of_mutex); + list_for_each_entry(app, &aliases_lookup, link) { + if (strcmp(app->stem, stem) != 0) + continue; + + if (app->id > id) + id = app->id; + } + mutex_unlock(&of_mutex); + + return id; +} +EXPORT_SYMBOL_GPL(of_alias_get_highest_id); + +/** + * of_console_check() - Test and setup console for DT setup + * @dn: Pointer to device node + * @name: Name to use for preferred console without index. ex. "ttyS" + * @index: Index to use for preferred console. + * + * Check if the given device node matches the stdout-path property in the + * /chosen node. If it does then register it as the preferred console. + * + * Return: TRUE if console successfully setup. Otherwise return FALSE. 
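+ *
+ * A usage sketch (dev and index are placeholders for the UART device and
+ * port number); a serial driver registering port index as "ttySindex"
+ * would typically do::
+ *
+ *	of_console_check(dev->of_node, "ttyS", index);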
+ */ +bool of_console_check(struct device_node *dn, char *name, int index) +{ + if (!dn || dn != of_stdout || console_set_on_cmdline) + return false; + + /* + * XXX: cast `options' to char pointer to suppress complication + * warnings: printk, UART and console drivers expect char pointer. + */ + return !add_preferred_console(name, index, (char *)of_stdout_options); +} +EXPORT_SYMBOL_GPL(of_console_check); + +/** + * of_find_next_cache_node - Find a node's subsidiary cache + * @np: node of type "cpu" or "cache" + * + * Return: A node pointer with refcount incremented, use + * of_node_put() on it when done. Caller should hold a reference + * to np. + */ +struct device_node *of_find_next_cache_node(const struct device_node *np) +{ + struct device_node *child, *cache_node; + + cache_node = of_parse_phandle(np, "l2-cache", 0); + if (!cache_node) + cache_node = of_parse_phandle(np, "next-level-cache", 0); + + if (cache_node) + return cache_node; + + /* OF on pmac has nodes instead of properties named "l2-cache" + * beneath CPU nodes. + */ + if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu")) + for_each_child_of_node(np, child) + if (of_node_is_type(child, "cache")) + return child; + + return NULL; +} + +/** + * of_find_last_cache_level - Find the level at which the last cache is + * present for the given logical cpu + * + * @cpu: cpu number(logical index) for which the last cache level is needed + * + * Return: The level at which the last cache is present. It is exactly + * same as the total number of cache levels for the given logical cpu. + */ +int of_find_last_cache_level(unsigned int cpu) +{ + u32 cache_level = 0; + struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu); + + while (np) { + of_node_put(prev); + prev = np; + np = of_find_next_cache_node(np); + } + + of_property_read_u32(prev, "cache-level", &cache_level); + of_node_put(prev); + + return cache_level; +} + +/** + * of_map_id - Translate an ID through a downstream mapping. + * @np: root complex device node. + * @id: device ID to map. + * @map_name: property name of the map to use. + * @map_mask_name: optional property name of the mask to use. + * @target: optional pointer to a target device node. + * @id_out: optional pointer to receive the translated ID. + * + * Given a device ID, look up the appropriate implementation-defined + * platform ID and/or the target device which receives transactions on that + * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or + * @id_out may be NULL if only the other is required. If @target points to + * a non-NULL device node pointer, only entries targeting that node will be + * matched; if it points to a NULL value, it will receive the device node of + * the first matching target phandle, with a reference held. + * + * Return: 0 on success or a standard error code on failure. + */ +int of_map_id(struct device_node *np, u32 id, + const char *map_name, const char *map_mask_name, + struct device_node **target, u32 *id_out) +{ + u32 map_mask, masked_id; + int map_len; + const __be32 *map = NULL; + + if (!np || !map_name || (!target && !id_out)) + return -EINVAL; + + map = of_get_property(np, map_name, &map_len); + if (!map) { + if (target) + return -ENODEV; + /* Otherwise, no map implies no translation */ + *id_out = id; + return 0; + } + + if (!map_len || map_len % (4 * sizeof(*map))) { + pr_err("%pOF: Error: Bad %s length: %d\n", np, + map_name, map_len); + return -EINVAL; + } + + /* The default is to select all bits. 
*/ + map_mask = 0xffffffff; + + /* + * Can be overridden by "{iommu,msi}-map-mask" property. + * If of_property_read_u32() fails, the default is used. + */ + if (map_mask_name) + of_property_read_u32(np, map_mask_name, &map_mask); + + masked_id = map_mask & id; + for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) { + struct device_node *phandle_node; + u32 id_base = be32_to_cpup(map + 0); + u32 phandle = be32_to_cpup(map + 1); + u32 out_base = be32_to_cpup(map + 2); + u32 id_len = be32_to_cpup(map + 3); + + if (id_base & ~map_mask) { + pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n", + np, map_name, map_name, + map_mask, id_base); + return -EFAULT; + } + + if (masked_id < id_base || masked_id >= id_base + id_len) + continue; + + phandle_node = of_find_node_by_phandle(phandle); + if (!phandle_node) + return -ENODEV; + + if (target) { + if (*target) + of_node_put(phandle_node); + else + *target = phandle_node; + + if (*target != phandle_node) + continue; + } + + if (id_out) + *id_out = masked_id - id_base + out_base; + + pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n", + np, map_name, map_mask, id_base, out_base, + id_len, id, masked_id - id_base + out_base); + return 0; + } + + pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name, + id, target && *target ? *target : NULL); + + /* Bypasses translation */ + if (id_out) + *id_out = id; + return 0; +} +EXPORT_SYMBOL_GPL(of_map_id); diff --git a/drivers/of/device.c b/drivers/of/device.c new file mode 100644 index 000000000..ce225d259 --- /dev/null +++ b/drivers/of/device.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_address.h> +#include <linux/of_iommu.h> +#include <linux/of_reserved_mem.h> +#include <linux/dma-direct.h> /* for bus_dma_region */ +#include <linux/dma-map-ops.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/slab.h> +#include <linux/platform_device.h> + +#include <asm/errno.h> +#include "of_private.h" + +/** + * of_match_device - Tell if a struct device matches an of_device_id list + * @matches: array of of device match structures to search in + * @dev: the of device structure to match against + * + * Used by a driver to check whether an platform_device present in the + * system is in its list of supported devices. + */ +const struct of_device_id *of_match_device(const struct of_device_id *matches, + const struct device *dev) +{ + if (!matches || !dev->of_node || dev->of_node_reused) + return NULL; + return of_match_node(matches, dev->of_node); +} +EXPORT_SYMBOL(of_match_device); + +int of_device_add(struct platform_device *ofdev) +{ + BUG_ON(ofdev->dev.of_node == NULL); + + /* name and id have to be set so that the platform bus doesn't get + * confused on matching */ + ofdev->name = dev_name(&ofdev->dev); + ofdev->id = PLATFORM_DEVID_NONE; + + /* + * If this device has not binding numa node in devicetree, that is + * of_node_to_nid returns NUMA_NO_NODE. device_add will assume that this + * device is on the same node as the parent. 
+ */ + set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node)); + + return device_add(&ofdev->dev); +} + +static void +of_dma_set_restricted_buffer(struct device *dev, struct device_node *np) +{ + struct device_node *node, *of_node = dev->of_node; + int count, i; + + if (!IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL)) + return; + + count = of_property_count_elems_of_size(of_node, "memory-region", + sizeof(u32)); + /* + * If dev->of_node doesn't exist or doesn't contain memory-region, try + * the OF node having DMA configuration. + */ + if (count <= 0) { + of_node = np; + count = of_property_count_elems_of_size( + of_node, "memory-region", sizeof(u32)); + } + + for (i = 0; i < count; i++) { + node = of_parse_phandle(of_node, "memory-region", i); + /* + * There might be multiple memory regions, but only one + * restricted-dma-pool region is allowed. + */ + if (of_device_is_compatible(node, "restricted-dma-pool") && + of_device_is_available(node)) { + of_node_put(node); + break; + } + of_node_put(node); + } + + /* + * Attempt to initialize a restricted-dma-pool region if one was found. + * Note that count can hold a negative error code. + */ + if (i < count && of_reserved_mem_device_init_by_idx(dev, of_node, i)) + dev_warn(dev, "failed to initialise \"restricted-dma-pool\" memory node\n"); +} + +/** + * of_dma_configure_id - Setup DMA configuration + * @dev: Device to apply DMA configuration + * @np: Pointer to OF node having DMA configuration + * @force_dma: Whether device is to be set up by of_dma_configure() even if + * DMA capability is not explicitly described by firmware. + * @id: Optional const pointer value input id + * + * Try to get devices's DMA configuration from DT and update it + * accordingly. + * + * If platform code needs to use its own special DMA configuration, it + * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events + * to fix up DMA configuration. + */ +int of_dma_configure_id(struct device *dev, struct device_node *np, + bool force_dma, const u32 *id) +{ + const struct iommu_ops *iommu; + const struct bus_dma_region *map = NULL; + struct device_node *bus_np; + u64 dma_start = 0; + u64 mask, end, size = 0; + bool coherent; + int ret; + + if (np == dev->of_node) + bus_np = __of_get_dma_parent(np); + else + bus_np = of_node_get(np); + + ret = of_dma_get_range(bus_np, &map); + of_node_put(bus_np); + if (ret < 0) { + /* + * For legacy reasons, we have to assume some devices need + * DMA configuration regardless of whether "dma-ranges" is + * correctly specified or not. + */ + if (!force_dma) + return ret == -ENODEV ? 0 : ret; + } else { + const struct bus_dma_region *r = map; + u64 dma_end = 0; + + /* Determine the overall bounds of all DMA regions */ + for (dma_start = ~0; r->size; r++) { + /* Take lower and upper limits */ + if (r->dma_start < dma_start) + dma_start = r->dma_start; + if (r->dma_start + r->size > dma_end) + dma_end = r->dma_start + r->size; + } + size = dma_end - dma_start; + + /* + * Add a work around to treat the size as mask + 1 in case + * it is defined in DT as a mask. + */ + if (size & 1) { + dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n", + size); + size = size + 1; + } + + if (!size) { + dev_err(dev, "Adjusted size 0x%llx invalid\n", size); + kfree(map); + return -EINVAL; + } + } + + /* + * If @dev is expected to be DMA-capable then the bus code that created + * it should have initialised its dma_mask pointer by this point. 
For + * now, we'll continue the legacy behaviour of coercing it to the + * coherent mask if not, but we'll no longer do so quietly. + */ + if (!dev->dma_mask) { + dev_warn(dev, "DMA mask not set\n"); + dev->dma_mask = &dev->coherent_dma_mask; + } + + if (!size && dev->coherent_dma_mask) + size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); + else if (!size) + size = 1ULL << 32; + + /* + * Limit coherent and dma mask based on size and default mask + * set by the driver. + */ + end = dma_start + size - 1; + mask = DMA_BIT_MASK(ilog2(end) + 1); + dev->coherent_dma_mask &= mask; + *dev->dma_mask &= mask; + /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ + if (!ret) { + dev->bus_dma_limit = end; + dev->dma_range_map = map; + } + + coherent = of_dma_is_coherent(np); + dev_dbg(dev, "device is%sdma coherent\n", + coherent ? " " : " not "); + + iommu = of_iommu_configure(dev, np, id); + if (PTR_ERR(iommu) == -EPROBE_DEFER) { + /* Don't touch range map if it wasn't set from a valid dma-ranges */ + if (!ret) + dev->dma_range_map = NULL; + kfree(map); + return -EPROBE_DEFER; + } + + dev_dbg(dev, "device is%sbehind an iommu\n", + iommu ? " " : " not "); + + arch_setup_dma_ops(dev, dma_start, size, iommu, coherent); + + if (!iommu) + of_dma_set_restricted_buffer(dev, np); + + return 0; +} +EXPORT_SYMBOL_GPL(of_dma_configure_id); + +int of_device_register(struct platform_device *pdev) +{ + device_initialize(&pdev->dev); + return of_device_add(pdev); +} +EXPORT_SYMBOL(of_device_register); + +void of_device_unregister(struct platform_device *ofdev) +{ + device_unregister(&ofdev->dev); +} +EXPORT_SYMBOL(of_device_unregister); + +const void *of_device_get_match_data(const struct device *dev) +{ + const struct of_device_id *match; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match) + return NULL; + + return match->data; +} +EXPORT_SYMBOL(of_device_get_match_data); + +static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) +{ + const char *compat; + char *c; + struct property *p; + ssize_t csize; + ssize_t tsize; + + if ((!dev) || (!dev->of_node)) + return -ENODEV; + + /* Name & Type */ + /* %p eats all alphanum characters, so %c must be used here */ + csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T', + of_node_get_device_type(dev->of_node)); + tsize = csize; + len -= csize; + if (str) + str += csize; + + of_property_for_each_string(dev->of_node, "compatible", p, compat) { + csize = strlen(compat) + 1; + tsize += csize; + if (csize > len) + continue; + + csize = snprintf(str, len, "C%s", compat); + for (c = str; c; ) { + c = strchr(c, ' '); + if (c) + *c++ = '_'; + } + len -= csize; + str += csize; + } + + return tsize; +} + +int of_device_request_module(struct device *dev) +{ + char *str; + ssize_t size; + int ret; + + size = of_device_get_modalias(dev, NULL, 0); + if (size < 0) + return size; + + /* Reserve an additional byte for the trailing '\0' */ + size++; + + str = kmalloc(size, GFP_KERNEL); + if (!str) + return -ENOMEM; + + of_device_get_modalias(dev, str, size); + str[size - 1] = '\0'; + ret = request_module(str); + kfree(str); + + return ret; +} +EXPORT_SYMBOL_GPL(of_device_request_module); + +/** + * of_device_modalias - Fill buffer with newline terminated modalias string + * @dev: Calling device + * @str: Modalias string + * @len: Size of @str + */ +ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len) +{ + ssize_t sl = of_device_get_modalias(dev, str, len - 2); + if (sl < 
0) + return sl; + if (sl > len - 2) + return -ENOMEM; + + str[sl++] = '\n'; + str[sl] = 0; + return sl; +} +EXPORT_SYMBOL_GPL(of_device_modalias); + +/** + * of_device_uevent - Display OF related uevent information + * @dev: Device to apply DMA configuration + * @env: Kernel object's userspace event reference + */ +void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + const char *compat, *type; + struct alias_prop *app; + struct property *p; + int seen = 0; + + if ((!dev) || (!dev->of_node)) + return; + + add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node); + add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node); + type = of_node_get_device_type(dev->of_node); + if (type) + add_uevent_var(env, "OF_TYPE=%s", type); + + /* Since the compatible field can contain pretty much anything + * it's not really legal to split it out with commas. We split it + * up using a number of environment variables instead. */ + of_property_for_each_string(dev->of_node, "compatible", p, compat) { + add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat); + seen++; + } + add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen); + + seen = 0; + mutex_lock(&of_mutex); + list_for_each_entry(app, &aliases_lookup, link) { + if (dev->of_node == app->np) { + add_uevent_var(env, "OF_ALIAS_%d=%s", seen, + app->alias); + seen++; + } + } + mutex_unlock(&of_mutex); +} + +int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) +{ + int sl; + + if ((!dev) || (!dev->of_node)) + return -ENODEV; + + /* Devicetree modalias is tricky, we add it in 2 steps */ + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + + sl = of_device_get_modalias(dev, &env->buf[env->buflen-1], + sizeof(env->buf) - env->buflen); + if (sl >= (sizeof(env->buf) - env->buflen)) + return -ENOMEM; + env->buflen += sl; + + return 0; +} +EXPORT_SYMBOL_GPL(of_device_uevent_modalias); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c new file mode 100644 index 000000000..9bb9fe0fa --- /dev/null +++ b/drivers/of/dynamic.c @@ -0,0 +1,925 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for dynamic device trees. + * + * On some platforms, the device tree can be manipulated at runtime. + * The routines in this section support adding, removing and changing + * device tree nodes. + */ + +#define pr_fmt(fmt) "OF: " fmt + +#include <linux/of.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/proc_fs.h> + +#include "of_private.h" + +static struct device_node *kobj_to_device_node(struct kobject *kobj) +{ + return container_of(kobj, struct device_node, kobj); +} + +/** + * of_node_get() - Increment refcount of a node + * @node: Node to inc refcount, NULL is supported to simplify writing of + * callers + * + * Return: The node with refcount incremented. 
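+ *
+ * of_node_get(NULL) simply returns NULL, so callers need not NULL-check
+ * before taking a reference; every successful get must be balanced by an
+ * of_node_put().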
+ */ +struct device_node *of_node_get(struct device_node *node) +{ + if (node) + kobject_get(&node->kobj); + return node; +} +EXPORT_SYMBOL(of_node_get); + +/** + * of_node_put() - Decrement refcount of a node + * @node: Node to dec refcount, NULL is supported to simplify writing of + * callers + */ +void of_node_put(struct device_node *node) +{ + if (node) + kobject_put(&node->kobj); +} +EXPORT_SYMBOL(of_node_put); + +static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain); + +int of_reconfig_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&of_reconfig_chain, nb); +} +EXPORT_SYMBOL_GPL(of_reconfig_notifier_register); + +int of_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&of_reconfig_chain, nb); +} +EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); + +static const char *action_names[] = { + [0] = "INVALID", + [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE", + [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE", + [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY", + [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY", + [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY", +}; + +int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) +{ + int rc; +#ifdef DEBUG + struct of_reconfig_data *pr = p; + + switch (action) { + case OF_RECONFIG_ATTACH_NODE: + case OF_RECONFIG_DETACH_NODE: + pr_debug("notify %-15s %pOF\n", action_names[action], + pr->dn); + break; + case OF_RECONFIG_ADD_PROPERTY: + case OF_RECONFIG_REMOVE_PROPERTY: + case OF_RECONFIG_UPDATE_PROPERTY: + pr_debug("notify %-15s %pOF:%s\n", action_names[action], + pr->dn, pr->prop->name); + break; + + } +#endif + rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p); + return notifier_to_errno(rc); +} + +/* + * of_reconfig_get_state_change() - Returns new state of device + * @action - action of the of notifier + * @arg - argument of the of notifier + * + * Returns the new state of a device based on the notifier used. + * + * Return: OF_RECONFIG_CHANGE_REMOVE on device going from enabled to + * disabled, OF_RECONFIG_CHANGE_ADD on device going from disabled to + * enabled and OF_RECONFIG_NO_CHANGE on no change. 
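+ *
+ * A typical caller, sketched here for illustration (my_reconfig_notify is
+ * a placeholder name), is an OF reconfig notifier::
+ *
+ *	static int my_reconfig_notify(struct notifier_block *nb,
+ *				      unsigned long action, void *arg)
+ *	{
+ *		struct of_reconfig_data *rd = arg;
+ *
+ *		switch (of_reconfig_get_state_change(action, rd)) {
+ *		case OF_RECONFIG_CHANGE_ADD:
+ *			break;
+ *		case OF_RECONFIG_CHANGE_REMOVE:
+ *			break;
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ * where the ADD case would create a device for rd->dn, the REMOVE case
+ * would tear it down, and the notifier block is registered with
+ * of_reconfig_notifier_register().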
+ */ +int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr) +{ + struct property *prop, *old_prop = NULL; + int is_status, status_state, old_status_state, prev_state, new_state; + + /* figure out if a device should be created or destroyed */ + switch (action) { + case OF_RECONFIG_ATTACH_NODE: + case OF_RECONFIG_DETACH_NODE: + prop = of_find_property(pr->dn, "status", NULL); + break; + case OF_RECONFIG_ADD_PROPERTY: + case OF_RECONFIG_REMOVE_PROPERTY: + prop = pr->prop; + break; + case OF_RECONFIG_UPDATE_PROPERTY: + prop = pr->prop; + old_prop = pr->old_prop; + break; + default: + return OF_RECONFIG_NO_CHANGE; + } + + is_status = 0; + status_state = -1; + old_status_state = -1; + prev_state = -1; + new_state = -1; + + if (prop && !strcmp(prop->name, "status")) { + is_status = 1; + status_state = !strcmp(prop->value, "okay") || + !strcmp(prop->value, "ok"); + if (old_prop) + old_status_state = !strcmp(old_prop->value, "okay") || + !strcmp(old_prop->value, "ok"); + } + + switch (action) { + case OF_RECONFIG_ATTACH_NODE: + prev_state = 0; + /* -1 & 0 status either missing or okay */ + new_state = status_state != 0; + break; + case OF_RECONFIG_DETACH_NODE: + /* -1 & 0 status either missing or okay */ + prev_state = status_state != 0; + new_state = 0; + break; + case OF_RECONFIG_ADD_PROPERTY: + if (is_status) { + /* no status property -> enabled (legacy) */ + prev_state = 1; + new_state = status_state; + } + break; + case OF_RECONFIG_REMOVE_PROPERTY: + if (is_status) { + prev_state = status_state; + /* no status property -> enabled (legacy) */ + new_state = 1; + } + break; + case OF_RECONFIG_UPDATE_PROPERTY: + if (is_status) { + prev_state = old_status_state != 0; + new_state = status_state != 0; + } + break; + } + + if (prev_state == new_state) + return OF_RECONFIG_NO_CHANGE; + + return new_state ? OF_RECONFIG_CHANGE_ADD : OF_RECONFIG_CHANGE_REMOVE; +} +EXPORT_SYMBOL_GPL(of_reconfig_get_state_change); + +int of_property_notify(int action, struct device_node *np, + struct property *prop, struct property *oldprop) +{ + struct of_reconfig_data pr; + + /* only call notifiers if the node is attached */ + if (!of_node_is_attached(np)) + return 0; + + pr.dn = np; + pr.prop = prop; + pr.old_prop = oldprop; + return of_reconfig_notify(action, &pr); +} + +static void __of_attach_node(struct device_node *np) +{ + const __be32 *phandle; + int sz; + + if (!of_node_check_flag(np, OF_OVERLAY)) { + np->name = __of_get_property(np, "name", NULL); + if (!np->name) + np->name = "<NULL>"; + + phandle = __of_get_property(np, "phandle", &sz); + if (!phandle) + phandle = __of_get_property(np, "linux,phandle", &sz); + if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle) + phandle = __of_get_property(np, "ibm,phandle", &sz); + if (phandle && (sz >= 4)) + np->phandle = be32_to_cpup(phandle); + else + np->phandle = 0; + } + + np->child = NULL; + np->sibling = np->parent->child; + np->parent->child = np; + of_node_clear_flag(np, OF_DETACHED); + np->fwnode.flags |= FWNODE_FLAG_NOT_DEVICE; +} + +/** + * of_attach_node() - Plug a device node into the tree and global list. 
+ * @np: Pointer to the caller's Device Node + */ +int of_attach_node(struct device_node *np) +{ + struct of_reconfig_data rd; + unsigned long flags; + + memset(&rd, 0, sizeof(rd)); + rd.dn = np; + + mutex_lock(&of_mutex); + raw_spin_lock_irqsave(&devtree_lock, flags); + __of_attach_node(np); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + __of_attach_node_sysfs(np); + mutex_unlock(&of_mutex); + + of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, &rd); + + return 0; +} + +void __of_detach_node(struct device_node *np) +{ + struct device_node *parent; + + if (WARN_ON(of_node_check_flag(np, OF_DETACHED))) + return; + + parent = np->parent; + if (WARN_ON(!parent)) + return; + + if (parent->child == np) + parent->child = np->sibling; + else { + struct device_node *prevsib; + for (prevsib = np->parent->child; + prevsib->sibling != np; + prevsib = prevsib->sibling) + ; + prevsib->sibling = np->sibling; + } + + of_node_set_flag(np, OF_DETACHED); + + /* race with of_find_node_by_phandle() prevented by devtree_lock */ + __of_phandle_cache_inv_entry(np->phandle); +} + +/** + * of_detach_node() - "Unplug" a node from the device tree. + * @np: Pointer to the caller's Device Node + */ +int of_detach_node(struct device_node *np) +{ + struct of_reconfig_data rd; + unsigned long flags; + + memset(&rd, 0, sizeof(rd)); + rd.dn = np; + + mutex_lock(&of_mutex); + raw_spin_lock_irqsave(&devtree_lock, flags); + __of_detach_node(np); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + __of_detach_node_sysfs(np); + mutex_unlock(&of_mutex); + + of_reconfig_notify(OF_RECONFIG_DETACH_NODE, &rd); + + return 0; +} +EXPORT_SYMBOL_GPL(of_detach_node); + +static void property_list_free(struct property *prop_list) +{ + struct property *prop, *next; + + for (prop = prop_list; prop != NULL; prop = next) { + next = prop->next; + kfree(prop->name); + kfree(prop->value); + kfree(prop); + } +} + +/** + * of_node_release() - release a dynamically allocated node + * @kobj: kernel object of the node to be released + * + * In of_node_put() this function is passed to kref_put() as the destructor. + */ +void of_node_release(struct kobject *kobj) +{ + struct device_node *node = kobj_to_device_node(kobj); + + /* We should never be releasing nodes that haven't been detached. */ + if (!of_node_check_flag(node, OF_DETACHED)) { + pr_err("ERROR: Bad of_node_put() on %pOF\n", node); + dump_stack(); + return; + } + if (!of_node_check_flag(node, OF_DYNAMIC)) + return; + + if (of_node_check_flag(node, OF_OVERLAY)) { + + if (!of_node_check_flag(node, OF_OVERLAY_FREE_CSET)) { + /* premature refcount of zero, do not free memory */ + pr_err("ERROR: memory leak before free overlay changeset, %pOF\n", + node); + return; + } + + /* + * If node->properties non-empty then properties were added + * to this node either by different overlay that has not + * yet been removed, or by a non-overlay mechanism. + */ + if (node->properties) + pr_err("ERROR: %s(), unexpected properties in %pOF\n", + __func__, node); + } + + property_list_free(node->properties); + property_list_free(node->deadprops); + fwnode_links_purge(of_fwnode_handle(node)); + + kfree(node->full_name); + kfree(node->data); + kfree(node); +} + +/** + * __of_prop_dup - Copy a property dynamically. + * @prop: Property to copy + * @allocflags: Allocation flags (typically pass GFP_KERNEL) + * + * Copy a property by dynamically allocating the memory of both the + * property structure and the property name & contents. 
The property's + * flags have the OF_DYNAMIC bit set so that we can differentiate between + * dynamically allocated properties and not. + * + * Return: The newly allocated property or NULL on out of memory error. + */ +struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags) +{ + struct property *new; + + new = kzalloc(sizeof(*new), allocflags); + if (!new) + return NULL; + + /* + * NOTE: There is no check for zero length value. + * In case of a boolean property, this will allocate a value + * of zero bytes. We do this to work around the use + * of of_get_property() calls on boolean values. + */ + new->name = kstrdup(prop->name, allocflags); + new->value = kmemdup(prop->value, prop->length, allocflags); + new->length = prop->length; + if (!new->name || !new->value) + goto err_free; + + /* mark the property as dynamic */ + of_property_set_flag(new, OF_DYNAMIC); + + return new; + + err_free: + kfree(new->name); + kfree(new->value); + kfree(new); + return NULL; +} + +/** + * __of_node_dup() - Duplicate or create an empty device node dynamically. + * @np: if not NULL, contains properties to be duplicated in new node + * @full_name: string value to be duplicated into new node's full_name field + * + * Create a device tree node, optionally duplicating the properties of + * another node. The node data are dynamically allocated and all the node + * flags have the OF_DYNAMIC & OF_DETACHED bits set. + * + * Return: The newly allocated node or NULL on out of memory error. + */ +struct device_node *__of_node_dup(const struct device_node *np, + const char *full_name) +{ + struct device_node *node; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return NULL; + node->full_name = kstrdup(full_name, GFP_KERNEL); + if (!node->full_name) { + kfree(node); + return NULL; + } + + of_node_set_flag(node, OF_DYNAMIC); + of_node_set_flag(node, OF_DETACHED); + of_node_init(node); + + /* Iterate over and duplicate all properties */ + if (np) { + struct property *pp, *new_pp; + for_each_property_of_node(np, pp) { + new_pp = __of_prop_dup(pp, GFP_KERNEL); + if (!new_pp) + goto err_prop; + if (__of_add_property(node, new_pp)) { + kfree(new_pp->name); + kfree(new_pp->value); + kfree(new_pp); + goto err_prop; + } + } + } + return node; + + err_prop: + of_node_put(node); /* Frees the node and properties */ + return NULL; +} + +static void __of_changeset_entry_destroy(struct of_changeset_entry *ce) +{ + if (ce->action == OF_RECONFIG_ATTACH_NODE && + of_node_check_flag(ce->np, OF_OVERLAY)) { + if (kref_read(&ce->np->kobj.kref) > 1) { + pr_err("ERROR: memory leak, expected refcount 1 instead of %d, of_node_get()/of_node_put() unbalanced - destroy cset entry: attach overlay node %pOF\n", + kref_read(&ce->np->kobj.kref), ce->np); + } else { + of_node_set_flag(ce->np, OF_OVERLAY_FREE_CSET); + } + } + + of_node_put(ce->np); + list_del(&ce->node); + kfree(ce); +} + +#ifdef DEBUG +static void __of_changeset_entry_dump(struct of_changeset_entry *ce) +{ + switch (ce->action) { + case OF_RECONFIG_ADD_PROPERTY: + case OF_RECONFIG_REMOVE_PROPERTY: + case OF_RECONFIG_UPDATE_PROPERTY: + pr_debug("cset<%p> %-15s %pOF/%s\n", ce, action_names[ce->action], + ce->np, ce->prop->name); + break; + case OF_RECONFIG_ATTACH_NODE: + case OF_RECONFIG_DETACH_NODE: + pr_debug("cset<%p> %-15s %pOF\n", ce, action_names[ce->action], + ce->np); + break; + } +} +#else +static inline void __of_changeset_entry_dump(struct of_changeset_entry *ce) +{ + /* empty */ +} +#endif + +static void __of_changeset_entry_invert(struct 
of_changeset_entry *ce, + struct of_changeset_entry *rce) +{ + memcpy(rce, ce, sizeof(*rce)); + + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + rce->action = OF_RECONFIG_DETACH_NODE; + break; + case OF_RECONFIG_DETACH_NODE: + rce->action = OF_RECONFIG_ATTACH_NODE; + break; + case OF_RECONFIG_ADD_PROPERTY: + rce->action = OF_RECONFIG_REMOVE_PROPERTY; + break; + case OF_RECONFIG_REMOVE_PROPERTY: + rce->action = OF_RECONFIG_ADD_PROPERTY; + break; + case OF_RECONFIG_UPDATE_PROPERTY: + rce->old_prop = ce->prop; + rce->prop = ce->old_prop; + /* update was used but original property did not exist */ + if (!rce->prop) { + rce->action = OF_RECONFIG_REMOVE_PROPERTY; + rce->prop = ce->prop; + } + break; + } +} + +static int __of_changeset_entry_notify(struct of_changeset_entry *ce, + bool revert) +{ + struct of_reconfig_data rd; + struct of_changeset_entry ce_inverted; + int ret = 0; + + if (revert) { + __of_changeset_entry_invert(ce, &ce_inverted); + ce = &ce_inverted; + } + + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + case OF_RECONFIG_DETACH_NODE: + memset(&rd, 0, sizeof(rd)); + rd.dn = ce->np; + ret = of_reconfig_notify(ce->action, &rd); + break; + case OF_RECONFIG_ADD_PROPERTY: + case OF_RECONFIG_REMOVE_PROPERTY: + case OF_RECONFIG_UPDATE_PROPERTY: + ret = of_property_notify(ce->action, ce->np, ce->prop, ce->old_prop); + break; + default: + pr_err("invalid devicetree changeset action: %i\n", + (int)ce->action); + ret = -EINVAL; + } + + if (ret) + pr_err("changeset notifier error @%pOF\n", ce->np); + return ret; +} + +static int __of_changeset_entry_apply(struct of_changeset_entry *ce) +{ + struct property *old_prop, **propp; + unsigned long flags; + int ret = 0; + + __of_changeset_entry_dump(ce); + + raw_spin_lock_irqsave(&devtree_lock, flags); + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + __of_attach_node(ce->np); + break; + case OF_RECONFIG_DETACH_NODE: + __of_detach_node(ce->np); + break; + case OF_RECONFIG_ADD_PROPERTY: + /* If the property is in deadprops then it must be removed */ + for (propp = &ce->np->deadprops; *propp; propp = &(*propp)->next) { + if (*propp == ce->prop) { + *propp = ce->prop->next; + ce->prop->next = NULL; + break; + } + } + + ret = __of_add_property(ce->np, ce->prop); + break; + case OF_RECONFIG_REMOVE_PROPERTY: + ret = __of_remove_property(ce->np, ce->prop); + break; + + case OF_RECONFIG_UPDATE_PROPERTY: + /* If the property is in deadprops then it must be removed */ + for (propp = &ce->np->deadprops; *propp; propp = &(*propp)->next) { + if (*propp == ce->prop) { + *propp = ce->prop->next; + ce->prop->next = NULL; + break; + } + } + + ret = __of_update_property(ce->np, ce->prop, &old_prop); + break; + default: + ret = -EINVAL; + } + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + if (ret) { + pr_err("changeset: apply failed: %-15s %pOF:%s\n", + action_names[ce->action], ce->np, ce->prop->name); + return ret; + } + + switch (ce->action) { + case OF_RECONFIG_ATTACH_NODE: + __of_attach_node_sysfs(ce->np); + break; + case OF_RECONFIG_DETACH_NODE: + __of_detach_node_sysfs(ce->np); + break; + case OF_RECONFIG_ADD_PROPERTY: + /* ignore duplicate names */ + __of_add_property_sysfs(ce->np, ce->prop); + break; + case OF_RECONFIG_REMOVE_PROPERTY: + __of_remove_property_sysfs(ce->np, ce->prop); + break; + case OF_RECONFIG_UPDATE_PROPERTY: + __of_update_property_sysfs(ce->np, ce->prop, ce->old_prop); + break; + } + + return 0; +} + +static inline int __of_changeset_entry_revert(struct of_changeset_entry *ce) +{ + struct of_changeset_entry 
ce_inverted; + + __of_changeset_entry_invert(ce, &ce_inverted); + return __of_changeset_entry_apply(&ce_inverted); +} + +/** + * of_changeset_init - Initialize a changeset for use + * + * @ocs: changeset pointer + * + * Initialize a changeset structure + */ +void of_changeset_init(struct of_changeset *ocs) +{ + memset(ocs, 0, sizeof(*ocs)); + INIT_LIST_HEAD(&ocs->entries); +} +EXPORT_SYMBOL_GPL(of_changeset_init); + +/** + * of_changeset_destroy - Destroy a changeset + * + * @ocs: changeset pointer + * + * Destroys a changeset. Note that if a changeset is applied, + * its changes to the tree cannot be reverted. + */ +void of_changeset_destroy(struct of_changeset *ocs) +{ + struct of_changeset_entry *ce, *cen; + + list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node) + __of_changeset_entry_destroy(ce); +} +EXPORT_SYMBOL_GPL(of_changeset_destroy); + +/* + * Apply the changeset entries in @ocs. + * If apply fails, an attempt is made to revert the entries that were + * successfully applied. + * + * If multiple revert errors occur then only the final revert error is reported. + * + * Returns 0 on success, a negative error value in case of an error. + * If a revert error occurs, it is returned in *ret_revert. + */ +int __of_changeset_apply_entries(struct of_changeset *ocs, int *ret_revert) +{ + struct of_changeset_entry *ce; + int ret, ret_tmp; + + pr_debug("changeset: applying...\n"); + list_for_each_entry(ce, &ocs->entries, node) { + ret = __of_changeset_entry_apply(ce); + if (ret) { + pr_err("Error applying changeset (%d)\n", ret); + list_for_each_entry_continue_reverse(ce, &ocs->entries, + node) { + ret_tmp = __of_changeset_entry_revert(ce); + if (ret_tmp) + *ret_revert = ret_tmp; + } + return ret; + } + } + + return 0; +} + +/* + * Returns 0 on success, a negative error value in case of an error. + * + * If multiple changeset entry notification errors occur then only the + * final notification error is reported. + */ +int __of_changeset_apply_notify(struct of_changeset *ocs) +{ + struct of_changeset_entry *ce; + int ret = 0, ret_tmp; + + pr_debug("changeset: emitting notifiers.\n"); + + /* drop the global lock while emitting notifiers */ + mutex_unlock(&of_mutex); + list_for_each_entry(ce, &ocs->entries, node) { + ret_tmp = __of_changeset_entry_notify(ce, 0); + if (ret_tmp) + ret = ret_tmp; + } + mutex_lock(&of_mutex); + pr_debug("changeset: notifiers sent.\n"); + + return ret; +} + +/* + * Returns 0 on success, a negative error value in case of an error. + * + * If a changeset entry apply fails, an attempt is made to revert any + * previous entries in the changeset. If any of the reverts fails, + * that failure is not reported. Thus the state of the device tree + * is unknown if an apply error occurs. + */ +static int __of_changeset_apply(struct of_changeset *ocs) +{ + int ret, ret_revert = 0; + + ret = __of_changeset_apply_entries(ocs, &ret_revert); + if (!ret) + ret = __of_changeset_apply_notify(ocs); + + return ret; +} + +/** + * of_changeset_apply - Applies a changeset + * + * @ocs: changeset pointer + * + * Applies a changeset to the live tree. + * Any side-effects of live tree state changes are applied here on + * success, like creation/destruction of devices and side-effects + * like creation of sysfs properties and directories. + * + * Return: 0 on success, a negative error value in case of an error. + * On error the partially applied effects are reverted. 
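The apply semantics described above are easiest to see with a short sketch. This is not part of the patch: example_enable_node and the static example_status property are illustrative names, and the property storage is assumed to outlive the changeset.

#include <linux/of.h>

static struct property example_status = {
	.name   = "status",
	.value  = "okay",
	.length = sizeof("okay"),
};

static int example_enable_node(struct device_node *np)
{
	struct of_changeset ocs;
	int ret;

	of_changeset_init(&ocs);

	/* queue the change; nothing touches the live tree yet */
	ret = of_changeset_action(&ocs, OF_RECONFIG_ADD_PROPERTY, np,
				  &example_status);
	if (!ret)
		ret = of_changeset_apply(&ocs);	/* takes of_mutex internally */

	/* frees the queued entries; changes already applied stay in the tree */
	of_changeset_destroy(&ocs);
	return ret;
}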
+ */ +int of_changeset_apply(struct of_changeset *ocs) +{ + int ret; + + mutex_lock(&of_mutex); + ret = __of_changeset_apply(ocs); + mutex_unlock(&of_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(of_changeset_apply); + +/* + * Revert the changeset entries in @ocs. + * If revert fails, an attempt is made to re-apply the entries that were + * successfully removed. + * + * If multiple re-apply errors occur then only the final apply error is + * reported. + * + * Returns 0 on success, a negative error value in case of an error. + * If an apply error occurs, it is returned in *ret_apply. + */ +int __of_changeset_revert_entries(struct of_changeset *ocs, int *ret_apply) +{ + struct of_changeset_entry *ce; + int ret, ret_tmp; + + pr_debug("changeset: reverting...\n"); + list_for_each_entry_reverse(ce, &ocs->entries, node) { + ret = __of_changeset_entry_revert(ce); + if (ret) { + pr_err("Error reverting changeset (%d)\n", ret); + list_for_each_entry_continue(ce, &ocs->entries, node) { + ret_tmp = __of_changeset_entry_apply(ce); + if (ret_tmp) + *ret_apply = ret_tmp; + } + return ret; + } + } + + return 0; +} + +/* + * If multiple changeset entry notification errors occur then only the + * final notification error is reported. + */ +int __of_changeset_revert_notify(struct of_changeset *ocs) +{ + struct of_changeset_entry *ce; + int ret = 0, ret_tmp; + + pr_debug("changeset: emitting notifiers.\n"); + + /* drop the global lock while emitting notifiers */ + mutex_unlock(&of_mutex); + list_for_each_entry_reverse(ce, &ocs->entries, node) { + ret_tmp = __of_changeset_entry_notify(ce, 1); + if (ret_tmp) + ret = ret_tmp; + } + mutex_lock(&of_mutex); + pr_debug("changeset: notifiers sent.\n"); + + return ret; +} + +static int __of_changeset_revert(struct of_changeset *ocs) +{ + int ret, ret_reply; + + ret_reply = 0; + ret = __of_changeset_revert_entries(ocs, &ret_reply); + + if (!ret) + ret = __of_changeset_revert_notify(ocs); + + return ret; +} + +/** + * of_changeset_revert - Reverts an applied changeset + * + * @ocs: changeset pointer + * + * Reverts a changeset returning the state of the tree to what it + * was before the application. + * Any side-effects like creation/destruction of devices and + * removal of sysfs properties and directories are applied. + * + * Return: 0 on success, a negative error value in case of an error. + */ +int of_changeset_revert(struct of_changeset *ocs) +{ + int ret; + + mutex_lock(&of_mutex); + ret = __of_changeset_revert(ocs); + mutex_unlock(&of_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(of_changeset_revert); + +/** + * of_changeset_action - Add an action to the tail of the changeset list + * + * @ocs: changeset pointer + * @action: action to perform + * @np: Pointer to device node + * @prop: Pointer to property + * + * On action being one of: + * + OF_RECONFIG_ATTACH_NODE + * + OF_RECONFIG_DETACH_NODE, + * + OF_RECONFIG_ADD_PROPERTY + * + OF_RECONFIG_REMOVE_PROPERTY, + * + OF_RECONFIG_UPDATE_PROPERTY + * + * Return: 0 on success, a negative error value in case of an error. 
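Because of_changeset_action() records the node's current property for OF_RECONFIG_UPDATE_PROPERTY, an applied update can later be undone with of_changeset_revert(). A minimal sketch, assuming np is a live node; the example_disabled property and the back-to-back apply/revert are purely illustrative.

#include <linux/of.h>

static struct property example_disabled = {
	.name   = "status",
	.value  = "disabled",
	.length = sizeof("disabled"),
};

static int example_toggle_and_restore(struct device_node *np)
{
	struct of_changeset ocs;
	int ret;

	of_changeset_init(&ocs);
	ret = of_changeset_action(&ocs, OF_RECONFIG_UPDATE_PROPERTY, np,
				  &example_disabled);
	if (!ret)
		ret = of_changeset_apply(&ocs);
	if (!ret)
		ret = of_changeset_revert(&ocs);	/* restores the previous state */
	of_changeset_destroy(&ocs);
	return ret;
}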
+ */ +int of_changeset_action(struct of_changeset *ocs, unsigned long action, + struct device_node *np, struct property *prop) +{ + struct of_changeset_entry *ce; + + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) + return -ENOMEM; + + /* get a reference to the node */ + ce->action = action; + ce->np = of_node_get(np); + ce->prop = prop; + + if (action == OF_RECONFIG_UPDATE_PROPERTY && prop) + ce->old_prop = of_find_property(np, prop->name, NULL); + + /* add it to the list */ + list_add_tail(&ce->node, &ocs->entries); + return 0; +} +EXPORT_SYMBOL_GPL(of_changeset_action); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c new file mode 100644 index 000000000..d1a68b6d0 --- /dev/null +++ b/drivers/of/fdt.c @@ -0,0 +1,1398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Functions for working with the Flattened Device Tree data format + * + * Copyright 2009 Benjamin Herrenschmidt, IBM Corp + * benh@kernel.crashing.org + */ + +#define pr_fmt(fmt) "OF: fdt: " fmt + +#include <linux/crash_dump.h> +#include <linux/crc32.h> +#include <linux/kernel.h> +#include <linux/initrd.h> +#include <linux/memblock.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_reserved_mem.h> +#include <linux/sizes.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/libfdt.h> +#include <linux/debugfs.h> +#include <linux/serial_core.h> +#include <linux/sysfs.h> +#include <linux/random.h> + +#include <asm/setup.h> /* for COMMAND_LINE_SIZE */ +#include <asm/page.h> + +#include "of_private.h" + +/* + * of_fdt_limit_memory - limit the number of regions in the /memory node + * @limit: maximum entries + * + * Adjust the flattened device tree to have at most 'limit' number of + * memory entries in the /memory node. This function may be called + * any time after initial_boot_param is set. 
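A usage sketch for the function documented above; the call site and the limit of 4 are hypothetical, not something this file provides.

#include <linux/init.h>
#include <linux/of_fdt.h>

/* Hypothetical early-boot hook: cap /memory to at most four reg entries
 * in the flattened tree before it is copied or re-exported.
 */
static void __init example_trim_fdt_memory(void)
{
	if (initial_boot_params)
		of_fdt_limit_memory(4);
}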
+ */ +void __init of_fdt_limit_memory(int limit) +{ + int memory; + int len; + const void *val; + int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT; + int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT; + const __be32 *addr_prop; + const __be32 *size_prop; + int root_offset; + int cell_size; + + root_offset = fdt_path_offset(initial_boot_params, "/"); + if (root_offset < 0) + return; + + addr_prop = fdt_getprop(initial_boot_params, root_offset, + "#address-cells", NULL); + if (addr_prop) + nr_address_cells = fdt32_to_cpu(*addr_prop); + + size_prop = fdt_getprop(initial_boot_params, root_offset, + "#size-cells", NULL); + if (size_prop) + nr_size_cells = fdt32_to_cpu(*size_prop); + + cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells); + + memory = fdt_path_offset(initial_boot_params, "/memory"); + if (memory > 0) { + val = fdt_getprop(initial_boot_params, memory, "reg", &len); + if (len > limit*cell_size) { + len = limit*cell_size; + pr_debug("Limiting number of entries to %d\n", limit); + fdt_setprop(initial_boot_params, memory, "reg", val, + len); + } + } +} + +static bool of_fdt_device_is_available(const void *blob, unsigned long node) +{ + const char *status = fdt_getprop(blob, node, "status", NULL); + + if (!status) + return true; + + if (!strcmp(status, "ok") || !strcmp(status, "okay")) + return true; + + return false; +} + +static void *unflatten_dt_alloc(void **mem, unsigned long size, + unsigned long align) +{ + void *res; + + *mem = PTR_ALIGN(*mem, align); + res = *mem; + *mem += size; + + return res; +} + +static void populate_properties(const void *blob, + int offset, + void **mem, + struct device_node *np, + const char *nodename, + bool dryrun) +{ + struct property *pp, **pprev = NULL; + int cur; + bool has_name = false; + + pprev = &np->properties; + for (cur = fdt_first_property_offset(blob, offset); + cur >= 0; + cur = fdt_next_property_offset(blob, cur)) { + const __be32 *val; + const char *pname; + u32 sz; + + val = fdt_getprop_by_offset(blob, cur, &pname, &sz); + if (!val) { + pr_warn("Cannot locate property at 0x%x\n", cur); + continue; + } + + if (!pname) { + pr_warn("Cannot find property name at 0x%x\n", cur); + continue; + } + + if (!strcmp(pname, "name")) + has_name = true; + + pp = unflatten_dt_alloc(mem, sizeof(struct property), + __alignof__(struct property)); + if (dryrun) + continue; + + /* We accept flattened tree phandles either in + * ePAPR-style "phandle" properties, or the + * legacy "linux,phandle" properties. If both + * appear and have different values, things + * will get weird. Don't do that. 
+ */ + if (!strcmp(pname, "phandle") || + !strcmp(pname, "linux,phandle")) { + if (!np->phandle) + np->phandle = be32_to_cpup(val); + } + + /* And we process the "ibm,phandle" property + * used in pSeries dynamic device tree + * stuff + */ + if (!strcmp(pname, "ibm,phandle")) + np->phandle = be32_to_cpup(val); + + pp->name = (char *)pname; + pp->length = sz; + pp->value = (__be32 *)val; + *pprev = pp; + pprev = &pp->next; + } + + /* With version 0x10 we may not have the name property, + * recreate it here from the unit name if absent + */ + if (!has_name) { + const char *p = nodename, *ps = p, *pa = NULL; + int len; + + while (*p) { + if ((*p) == '@') + pa = p; + else if ((*p) == '/') + ps = p + 1; + p++; + } + + if (pa < ps) + pa = p; + len = (pa - ps) + 1; + pp = unflatten_dt_alloc(mem, sizeof(struct property) + len, + __alignof__(struct property)); + if (!dryrun) { + pp->name = "name"; + pp->length = len; + pp->value = pp + 1; + *pprev = pp; + memcpy(pp->value, ps, len - 1); + ((char *)pp->value)[len - 1] = 0; + pr_debug("fixed up name for %s -> %s\n", + nodename, (char *)pp->value); + } + } +} + +static int populate_node(const void *blob, + int offset, + void **mem, + struct device_node *dad, + struct device_node **pnp, + bool dryrun) +{ + struct device_node *np; + const char *pathp; + int len; + + pathp = fdt_get_name(blob, offset, &len); + if (!pathp) { + *pnp = NULL; + return len; + } + + len++; + + np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len, + __alignof__(struct device_node)); + if (!dryrun) { + char *fn; + of_node_init(np); + np->full_name = fn = ((char *)np) + sizeof(*np); + + memcpy(fn, pathp, len); + + if (dad != NULL) { + np->parent = dad; + np->sibling = dad->child; + dad->child = np; + } + } + + populate_properties(blob, offset, mem, np, pathp, dryrun); + if (!dryrun) { + np->name = of_get_property(np, "name", NULL); + if (!np->name) + np->name = "<NULL>"; + } + + *pnp = np; + return 0; +} + +static void reverse_nodes(struct device_node *parent) +{ + struct device_node *child, *next; + + /* In-depth first */ + child = parent->child; + while (child) { + reverse_nodes(child); + + child = child->sibling; + } + + /* Reverse the nodes in the child list */ + child = parent->child; + parent->child = NULL; + while (child) { + next = child->sibling; + + child->sibling = parent->child; + parent->child = child; + child = next; + } +} + +/** + * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree + * @blob: The parent device tree blob + * @mem: Memory chunk to use for allocating device nodes and properties + * @dad: Parent struct device_node + * @nodepp: The device_node tree created by the call + * + * Return: The size of unflattened device tree or error code + */ +static int unflatten_dt_nodes(const void *blob, + void *mem, + struct device_node *dad, + struct device_node **nodepp) +{ + struct device_node *root; + int offset = 0, depth = 0, initial_depth = 0; +#define FDT_MAX_DEPTH 64 + struct device_node *nps[FDT_MAX_DEPTH]; + void *base = mem; + bool dryrun = !base; + int ret; + + if (nodepp) + *nodepp = NULL; + + /* + * We're unflattening device sub-tree if @dad is valid. There are + * possibly multiple nodes in the first level of depth. We need + * set @depth to 1 to make fdt_next_node() happy as it bails + * immediately when negative @depth is found. Otherwise, the device + * nodes except the first one won't be unflattened successfully. 
+ */ + if (dad) + depth = initial_depth = 1; + + root = dad; + nps[depth] = dad; + + for (offset = 0; + offset >= 0 && depth >= initial_depth; + offset = fdt_next_node(blob, offset, &depth)) { + if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1)) + continue; + + if (!IS_ENABLED(CONFIG_OF_KOBJ) && + !of_fdt_device_is_available(blob, offset)) + continue; + + ret = populate_node(blob, offset, &mem, nps[depth], + &nps[depth+1], dryrun); + if (ret < 0) + return ret; + + if (!dryrun && nodepp && !*nodepp) + *nodepp = nps[depth+1]; + if (!dryrun && !root) + root = nps[depth+1]; + } + + if (offset < 0 && offset != -FDT_ERR_NOTFOUND) { + pr_err("Error %d processing FDT\n", offset); + return -EINVAL; + } + + /* + * Reverse the child list. Some drivers assumes node order matches .dts + * node order + */ + if (!dryrun) + reverse_nodes(root); + + return mem - base; +} + +/** + * __unflatten_device_tree - create tree of device_nodes from flat blob + * @blob: The blob to expand + * @dad: Parent device node + * @mynodes: The device_node tree created by the call + * @dt_alloc: An allocator that provides a virtual address to memory + * for the resulting tree + * @detached: if true set OF_DETACHED on @mynodes + * + * unflattens a device-tree, creating the tree of struct device_node. It also + * fills the "name" and "type" pointers of the nodes so the normal device-tree + * walking functions can be used. + * + * Return: NULL on failure or the memory chunk containing the unflattened + * device tree on success. + */ +void *__unflatten_device_tree(const void *blob, + struct device_node *dad, + struct device_node **mynodes, + void *(*dt_alloc)(u64 size, u64 align), + bool detached) +{ + int size; + void *mem; + int ret; + + if (mynodes) + *mynodes = NULL; + + pr_debug(" -> unflatten_device_tree()\n"); + + if (!blob) { + pr_debug("No device tree pointer\n"); + return NULL; + } + + pr_debug("Unflattening device tree:\n"); + pr_debug("magic: %08x\n", fdt_magic(blob)); + pr_debug("size: %08x\n", fdt_totalsize(blob)); + pr_debug("version: %08x\n", fdt_version(blob)); + + if (fdt_check_header(blob)) { + pr_err("Invalid device tree blob header\n"); + return NULL; + } + + /* First pass, scan for size */ + size = unflatten_dt_nodes(blob, NULL, dad, NULL); + if (size <= 0) + return NULL; + + size = ALIGN(size, 4); + pr_debug(" size is %d, allocating...\n", size); + + /* Allocate memory for the expanded device tree */ + mem = dt_alloc(size + 4, __alignof__(struct device_node)); + if (!mem) + return NULL; + + memset(mem, 0, size); + + *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); + + pr_debug(" unflattening %p...\n", mem); + + /* Second pass, do actual unflattening */ + ret = unflatten_dt_nodes(blob, mem, dad, mynodes); + + if (be32_to_cpup(mem + size) != 0xdeadbeef) + pr_warn("End of tree marker overwritten: %08x\n", + be32_to_cpup(mem + size)); + + if (ret <= 0) + return NULL; + + if (detached && mynodes && *mynodes) { + of_node_set_flag(*mynodes, OF_DETACHED); + pr_debug("unflattened tree is detached\n"); + } + + pr_debug(" <- unflatten_device_tree()\n"); + return mem; +} + +static void *kernel_tree_alloc(u64 size, u64 align) +{ + return kzalloc(size, GFP_KERNEL); +} + +static DEFINE_MUTEX(of_fdt_unflatten_mutex); + +/** + * of_fdt_unflatten_tree - create tree of device_nodes from flat blob + * @blob: Flat device tree blob + * @dad: Parent device node + * @mynodes: The device tree created by the call + * + * unflattens the device-tree passed by the firmware, creating the + * tree of struct device_node. 
It also fills the "name" and "type" + * pointers of the nodes so the normal device-tree walking functions + * can be used. + * + * Return: NULL on failure or the memory chunk containing the unflattened + * device tree on success. + */ +void *of_fdt_unflatten_tree(const unsigned long *blob, + struct device_node *dad, + struct device_node **mynodes) +{ + void *mem; + + mutex_lock(&of_fdt_unflatten_mutex); + mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc, + true); + mutex_unlock(&of_fdt_unflatten_mutex); + + return mem; +} +EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree); + +/* Everything below here references initial_boot_params directly. */ +int __initdata dt_root_addr_cells; +int __initdata dt_root_size_cells; + +void *initial_boot_params __ro_after_init; + +#ifdef CONFIG_OF_EARLY_FLATTREE + +static u32 of_fdt_crc32; + +static int __init early_init_dt_reserve_memory(phys_addr_t base, + phys_addr_t size, bool nomap) +{ + if (nomap) { + /* + * If the memory is already reserved (by another region), we + * should not allow it to be marked nomap, but don't worry + * if the region isn't memory as it won't be mapped. + */ + if (memblock_overlaps_region(&memblock.memory, base, size) && + memblock_is_region_reserved(base, size)) + return -EBUSY; + + return memblock_mark_nomap(base, size); + } + return memblock_reserve(base, size); +} + +/* + * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property + */ +static int __init __reserved_mem_reserve_reg(unsigned long node, + const char *uname) +{ + int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); + phys_addr_t base, size; + int len; + const __be32 *prop; + int first = 1; + bool nomap; + + prop = of_get_flat_dt_prop(node, "reg", &len); + if (!prop) + return -ENOENT; + + if (len && len % t_len != 0) { + pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n", + uname); + return -EINVAL; + } + + nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + + while (len >= t_len) { + base = dt_mem_next_cell(dt_root_addr_cells, &prop); + size = dt_mem_next_cell(dt_root_size_cells, &prop); + + if (size && + early_init_dt_reserve_memory(base, size, nomap) == 0) + pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n", + uname, &base, (unsigned long)(size / SZ_1M)); + else + pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n", + uname, &base, (unsigned long)(size / SZ_1M)); + + len -= t_len; + if (first) { + fdt_reserved_mem_save_node(node, uname, base, size); + first = 0; + } + } + return 0; +} + +/* + * __reserved_mem_check_root() - check if #size-cells, #address-cells provided + * in /reserved-memory matches the values supported by the current implementation, + * also check if ranges property has been provided + */ +static int __init __reserved_mem_check_root(unsigned long node) +{ + const __be32 *prop; + + prop = of_get_flat_dt_prop(node, "#size-cells", NULL); + if (!prop || be32_to_cpup(prop) != dt_root_size_cells) + return -EINVAL; + + prop = of_get_flat_dt_prop(node, "#address-cells", NULL); + if (!prop || be32_to_cpup(prop) != dt_root_addr_cells) + return -EINVAL; + + prop = of_get_flat_dt_prop(node, "ranges", NULL); + if (!prop) + return -EINVAL; + return 0; +} + +/* + * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory + */ +static int __init fdt_scan_reserved_mem(void) +{ + int node, child; + const void *fdt = initial_boot_params; + + node = fdt_path_offset(fdt, "/reserved-memory"); + if 
(node < 0) + return -ENODEV; + + if (__reserved_mem_check_root(node) != 0) { + pr_err("Reserved memory: unsupported node format, ignoring\n"); + return -EINVAL; + } + + fdt_for_each_subnode(child, fdt, node) { + const char *uname; + int err; + + if (!of_fdt_device_is_available(fdt, child)) + continue; + + uname = fdt_get_name(fdt, child, NULL); + + err = __reserved_mem_reserve_reg(child, uname); + if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL)) + fdt_reserved_mem_save_node(child, uname, 0, 0); + } + return 0; +} + +/* + * fdt_reserve_elfcorehdr() - reserves memory for elf core header + * + * This function reserves the memory occupied by an elf core header + * described in the device tree. This region contains all the + * information about primary kernel's core image and is used by a dump + * capture kernel to access the system memory on primary kernel. + */ +static void __init fdt_reserve_elfcorehdr(void) +{ + if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size) + return; + + if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) { + pr_warn("elfcorehdr is overlapped\n"); + return; + } + + memblock_reserve(elfcorehdr_addr, elfcorehdr_size); + + pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n", + elfcorehdr_size >> 10, elfcorehdr_addr); +} + +/** + * early_init_fdt_scan_reserved_mem() - create reserved memory regions + * + * This function grabs memory from early allocator for device exclusive use + * defined in device tree structures. It should be called by arch specific code + * once the early allocator (i.e. memblock) has been fully activated. + */ +void __init early_init_fdt_scan_reserved_mem(void) +{ + int n; + u64 base, size; + + if (!initial_boot_params) + return; + + /* Process header /memreserve/ fields */ + for (n = 0; ; n++) { + fdt_get_mem_rsv(initial_boot_params, n, &base, &size); + if (!size) + break; + memblock_reserve(base, size); + } + + fdt_scan_reserved_mem(); + fdt_reserve_elfcorehdr(); + fdt_init_reserved_mem(); +} + +/** + * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob + */ +void __init early_init_fdt_reserve_self(void) +{ + if (!initial_boot_params) + return; + + /* Reserve the dtb region */ + memblock_reserve(__pa(initial_boot_params), + fdt_totalsize(initial_boot_params)); +} + +/** + * of_scan_flat_dt - scan flattened tree blob and call callback on each. + * @it: callback function + * @data: context data pointer + * + * This function is used to scan the flattened device-tree, it is + * used to extract the memory information at boot before we can + * unflatten the tree + */ +int __init of_scan_flat_dt(int (*it)(unsigned long node, + const char *uname, int depth, + void *data), + void *data) +{ + const void *blob = initial_boot_params; + const char *pathp; + int offset, rc = 0, depth = -1; + + if (!blob) + return 0; + + for (offset = fdt_next_node(blob, -1, &depth); + offset >= 0 && depth >= 0 && !rc; + offset = fdt_next_node(blob, offset, &depth)) { + + pathp = fdt_get_name(blob, offset, NULL); + rc = it(offset, pathp, depth, data); + } + return rc; +} + +/** + * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each. + * @parent: parent node + * @it: callback function + * @data: context data pointer + * + * This function is used to scan sub-nodes of a node. 
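A minimal sketch of driving the sub-node scan, assuming it runs early enough that only the flattened tree exists; early_count_cpus, early_cpu_count and the /cpus lookup are illustrative, not taken from this file.

#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/string.h>

static int __init early_count_cpus(unsigned long node, const char *uname,
				   void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	int *ncpus = data;

	if (type && !strcmp(type, "cpu"))
		(*ncpus)++;

	return 0;	/* a non-zero return would stop the scan */
}

static int __init early_cpu_count(void)
{
	int cpus_offset, ncpus = 0;

	cpus_offset = fdt_path_offset(initial_boot_params, "/cpus");
	if (cpus_offset < 0)
		return 0;

	of_scan_flat_dt_subnodes(cpus_offset, early_count_cpus, &ncpus);
	return ncpus;
}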
+ */ +int __init of_scan_flat_dt_subnodes(unsigned long parent, + int (*it)(unsigned long node, + const char *uname, + void *data), + void *data) +{ + const void *blob = initial_boot_params; + int node; + + fdt_for_each_subnode(node, blob, parent) { + const char *pathp; + int rc; + + pathp = fdt_get_name(blob, node, NULL); + rc = it(node, pathp, data); + if (rc) + return rc; + } + return 0; +} + +/** + * of_get_flat_dt_subnode_by_name - get the subnode by given name + * + * @node: the parent node + * @uname: the name of subnode + * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none + */ + +int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname) +{ + return fdt_subnode_offset(initial_boot_params, node, uname); +} + +/* + * of_get_flat_dt_root - find the root node in the flat blob + */ +unsigned long __init of_get_flat_dt_root(void) +{ + return 0; +} + +/* + * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr + * + * This function can be used within scan_flattened_dt callback to get + * access to properties + */ +const void *__init of_get_flat_dt_prop(unsigned long node, const char *name, + int *size) +{ + return fdt_getprop(initial_boot_params, node, name, size); +} + +/** + * of_fdt_is_compatible - Return true if given node from the given blob has + * compat in its compatible list + * @blob: A device tree blob + * @node: node to test + * @compat: compatible string to compare with compatible list. + * + * Return: a non-zero value on match with smaller values returned for more + * specific compatible values. + */ +static int of_fdt_is_compatible(const void *blob, + unsigned long node, const char *compat) +{ + const char *cp; + int cplen; + unsigned long l, score = 0; + + cp = fdt_getprop(blob, node, "compatible", &cplen); + if (cp == NULL) + return 0; + while (cplen > 0) { + score++; + if (of_compat_cmp(cp, compat, strlen(compat)) == 0) + return score; + l = strlen(cp) + 1; + cp += l; + cplen -= l; + } + + return 0; +} + +/** + * of_flat_dt_is_compatible - Return true if given node has compat in compatible list + * @node: node to test + * @compat: compatible string to compare with compatible list. + */ +int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) +{ + return of_fdt_is_compatible(initial_boot_params, node, compat); +} + +/* + * of_flat_dt_match - Return true if node matches a list of compatible values + */ +static int __init of_flat_dt_match(unsigned long node, const char *const *compat) +{ + unsigned int tmp, score = 0; + + if (!compat) + return 0; + + while (*compat) { + tmp = of_fdt_is_compatible(initial_boot_params, node, *compat); + if (tmp && (score == 0 || (tmp < score))) + score = tmp; + compat++; + } + + return score; +} + +/* + * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle + */ +uint32_t __init of_get_flat_dt_phandle(unsigned long node) +{ + return fdt_get_phandle(initial_boot_params, node); +} + +const char * __init of_flat_dt_get_machine_name(void) +{ + const char *name; + unsigned long dt_root = of_get_flat_dt_root(); + + name = of_get_flat_dt_prop(dt_root, "model", NULL); + if (!name) + name = of_get_flat_dt_prop(dt_root, "compatible", NULL); + return name; +} + +/** + * of_flat_dt_match_machine - Iterate match tables to find matching machine. + * + * @default_match: A machine specific ptr to return in case of no match. + * @get_next_compat: callback function to return next compatible match table. 
+ * + * Iterate through machine match tables to find the best match for the machine + * compatible string in the FDT. + */ +const void * __init of_flat_dt_match_machine(const void *default_match, + const void * (*get_next_compat)(const char * const**)) +{ + const void *data = NULL; + const void *best_data = default_match; + const char *const *compat; + unsigned long dt_root; + unsigned int best_score = ~1, score = 0; + + dt_root = of_get_flat_dt_root(); + while ((data = get_next_compat(&compat))) { + score = of_flat_dt_match(dt_root, compat); + if (score > 0 && score < best_score) { + best_data = data; + best_score = score; + } + } + if (!best_data) { + const char *prop; + int size; + + pr_err("\n unrecognized device tree list:\n[ "); + + prop = of_get_flat_dt_prop(dt_root, "compatible", &size); + if (prop) { + while (size > 0) { + printk("'%s' ", prop); + size -= strlen(prop) + 1; + prop += strlen(prop) + 1; + } + } + printk("]\n\n"); + return NULL; + } + + pr_info("Machine model: %s\n", of_flat_dt_get_machine_name()); + + return best_data; +} + +static void __early_init_dt_declare_initrd(unsigned long start, + unsigned long end) +{ + /* ARM64 would cause a BUG to occur here when CONFIG_DEBUG_VM is + * enabled since __va() is called too early. ARM64 does make use + * of phys_initrd_start/phys_initrd_size so we can skip this + * conversion. + */ + if (!IS_ENABLED(CONFIG_ARM64)) { + initrd_start = (unsigned long)__va(start); + initrd_end = (unsigned long)__va(end); + initrd_below_start_ok = 1; + } +} + +/** + * early_init_dt_check_for_initrd - Decode initrd location from flat tree + * @node: reference to node containing initrd location ('chosen') + */ +static void __init early_init_dt_check_for_initrd(unsigned long node) +{ + u64 start, end; + int len; + const __be32 *prop; + + if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD)) + return; + + pr_debug("Looking for initrd properties... "); + + prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len); + if (!prop) + return; + start = of_read_number(prop, len/4); + + prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len); + if (!prop) + return; + end = of_read_number(prop, len/4); + if (start > end) + return; + + __early_init_dt_declare_initrd(start, end); + phys_initrd_start = start; + phys_initrd_size = end - start; + + pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end); +} + +/** + * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat + * tree + * @node: reference to node containing elfcorehdr location ('chosen') + */ +static void __init early_init_dt_check_for_elfcorehdr(unsigned long node) +{ + const __be32 *prop; + int len; + + if (!IS_ENABLED(CONFIG_CRASH_DUMP)) + return; + + pr_debug("Looking for elfcorehdr property... "); + + prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len); + if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells))) + return; + + elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop); + elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop); + + pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n", + elfcorehdr_addr, elfcorehdr_size); +} + +static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND; + +/* + * The main usage of linux,usable-memory-range is for crash dump kernel. + * Originally, the number of usable-memory regions is one. Now there may + * be two regions, low region and high region. + * To make compatibility with existing user-space and older kdump, the low + * region is always the last range of linux,usable-memory-range if exist. 
+ */ +#define MAX_USABLE_RANGES 2 + +/** + * early_init_dt_check_for_usable_mem_range - Decode usable memory range + * location from flat tree + */ +void __init early_init_dt_check_for_usable_mem_range(void) +{ + struct memblock_region rgn[MAX_USABLE_RANGES] = {0}; + const __be32 *prop, *endp; + int len, i; + unsigned long node = chosen_node_offset; + + if ((long)node < 0) + return; + + pr_debug("Looking for usable-memory-range property... "); + + prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len); + if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells))) + return; + + endp = prop + (len / sizeof(__be32)); + for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) { + rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop); + rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop); + + pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n", + i, &rgn[i].base, &rgn[i].size); + } + + memblock_cap_memory_range(rgn[0].base, rgn[0].size); + for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++) + memblock_add(rgn[i].base, rgn[i].size); +} + +#ifdef CONFIG_SERIAL_EARLYCON + +int __init early_init_dt_scan_chosen_stdout(void) +{ + int offset; + const char *p, *q, *options = NULL; + int l; + const struct earlycon_id *match; + const void *fdt = initial_boot_params; + int ret; + + offset = fdt_path_offset(fdt, "/chosen"); + if (offset < 0) + offset = fdt_path_offset(fdt, "/chosen@0"); + if (offset < 0) + return -ENOENT; + + p = fdt_getprop(fdt, offset, "stdout-path", &l); + if (!p) + p = fdt_getprop(fdt, offset, "linux,stdout-path", &l); + if (!p || !l) + return -ENOENT; + + q = strchrnul(p, ':'); + if (*q != '\0') + options = q + 1; + l = q - p; + + /* Get the node specified by stdout-path */ + offset = fdt_path_offset_namelen(fdt, p, l); + if (offset < 0) { + pr_warn("earlycon: stdout-path %.*s not found\n", l, p); + return 0; + } + + for (match = __earlycon_table; match < __earlycon_table_end; match++) { + if (!match->compatible[0]) + continue; + + if (fdt_node_check_compatible(fdt, offset, match->compatible)) + continue; + + ret = of_setup_earlycon(match, offset, options); + if (!ret || ret == -EALREADY) + return 0; + } + return -ENODEV; +} +#endif + +/* + * early_init_dt_scan_root - fetch the top level address and size cells + */ +int __init early_init_dt_scan_root(void) +{ + const __be32 *prop; + const void *fdt = initial_boot_params; + int node = fdt_path_offset(fdt, "/"); + + if (node < 0) + return -ENODEV; + + dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT; + dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT; + + prop = of_get_flat_dt_prop(node, "#size-cells", NULL); + if (prop) + dt_root_size_cells = be32_to_cpup(prop); + pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells); + + prop = of_get_flat_dt_prop(node, "#address-cells", NULL); + if (prop) + dt_root_addr_cells = be32_to_cpup(prop); + pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells); + + return 0; +} + +u64 __init dt_mem_next_cell(int s, const __be32 **cellp) +{ + const __be32 *p = *cellp; + + *cellp = p + s; + return of_read_number(p, s); +} + +/* + * early_init_dt_scan_memory - Look for and parse memory nodes + */ +int __init early_init_dt_scan_memory(void) +{ + int node, found_memory = 0; + const void *fdt = initial_boot_params; + + fdt_for_each_subnode(node, fdt, 0) { + const char *type = of_get_flat_dt_prop(node, "device_type", NULL); + const __be32 *reg, *endp; + int l; + bool hotpluggable; + + /* We are scanning "memory" nodes only */ + if (type == NULL || strcmp(type, "memory") 
!= 0) + continue; + + if (!of_fdt_device_is_available(fdt, node)) + continue; + + reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l); + if (reg == NULL) + reg = of_get_flat_dt_prop(node, "reg", &l); + if (reg == NULL) + continue; + + endp = reg + (l / sizeof(__be32)); + hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL); + + pr_debug("memory scan node %s, reg size %d,\n", + fdt_get_name(fdt, node, NULL), l); + + while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) { + u64 base, size; + + base = dt_mem_next_cell(dt_root_addr_cells, ®); + size = dt_mem_next_cell(dt_root_size_cells, ®); + + if (size == 0) + continue; + pr_debug(" - %llx, %llx\n", base, size); + + early_init_dt_add_memory_arch(base, size); + + found_memory = 1; + + if (!hotpluggable) + continue; + + if (memblock_mark_hotplug(base, size)) + pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n", + base, base + size); + } + } + return found_memory; +} + +int __init early_init_dt_scan_chosen(char *cmdline) +{ + int l, node; + const char *p; + const void *rng_seed; + const void *fdt = initial_boot_params; + + node = fdt_path_offset(fdt, "/chosen"); + if (node < 0) + node = fdt_path_offset(fdt, "/chosen@0"); + if (node < 0) + /* Handle the cmdline config options even if no /chosen node */ + goto handle_cmdline; + + chosen_node_offset = node; + + early_init_dt_check_for_initrd(node); + early_init_dt_check_for_elfcorehdr(node); + + rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); + if (rng_seed && l > 0) { + add_bootloader_randomness(rng_seed, l); + + /* try to clear seed so it won't be found. */ + fdt_nop_property(initial_boot_params, node, "rng-seed"); + + /* update CRC check value */ + of_fdt_crc32 = crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params)); + } + + /* Retrieve command line */ + p = of_get_flat_dt_prop(node, "bootargs", &l); + if (p != NULL && l > 0) + strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE)); + +handle_cmdline: + /* + * CONFIG_CMDLINE is meant to be a default in case nothing else + * managed to set the command line, unless CONFIG_CMDLINE_FORCE + * is set in which case we override whatever was found earlier. 
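For illustration (values hypothetical): if the bootloader supplied bootargs = "console=ttyS0,115200" and the kernel was built with CONFIG_CMDLINE="root=/dev/mmcblk0p2 rw", the code below yields "console=ttyS0,115200 root=/dev/mmcblk0p2 rw" under CONFIG_CMDLINE_EXTEND, just "root=/dev/mmcblk0p2 rw" under CONFIG_CMDLINE_FORCE, and plain "console=ttyS0,115200" otherwise, since in the default case the built-in string is only used when the bootloader passed nothing.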
+ */ +#ifdef CONFIG_CMDLINE +#if defined(CONFIG_CMDLINE_EXTEND) + strlcat(cmdline, " ", COMMAND_LINE_SIZE); + strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#elif defined(CONFIG_CMDLINE_FORCE) + strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#else + /* No arguments from boot loader, use kernel's cmdl*/ + if (!((char *)cmdline)[0]) + strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE); +#endif +#endif /* CONFIG_CMDLINE */ + + pr_debug("Command line is: %s\n", (char *)cmdline); + + return 0; +} + +#ifndef MIN_MEMBLOCK_ADDR +#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) +#endif +#ifndef MAX_MEMBLOCK_ADDR +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) +#endif + +void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) +{ + const u64 phys_offset = MIN_MEMBLOCK_ADDR; + + if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (!PAGE_ALIGNED(base)) { + size -= PAGE_SIZE - (base & ~PAGE_MASK); + base = PAGE_ALIGN(base); + } + size &= PAGE_MASK; + + if (base > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); + size = MAX_MEMBLOCK_ADDR - base + 1; + } + + if (base + size < phys_offset) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + if (base < phys_offset) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + base, phys_offset); + size -= phys_offset - base; + base = phys_offset; + } + memblock_add(base, size); +} + +static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + void *ptr = memblock_alloc(size, align); + + if (!ptr) + panic("%s: Failed to allocate %llu bytes align=0x%llx\n", + __func__, size, align); + + return ptr; +} + +bool __init early_init_dt_verify(void *params) +{ + if (!params) + return false; + + /* check device tree validity */ + if (fdt_check_header(params)) + return false; + + /* Setup flat device-tree pointer */ + initial_boot_params = params; + of_fdt_crc32 = crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params)); + return true; +} + + +void __init early_init_dt_scan_nodes(void) +{ + int rc; + + /* Initialize {size,address}-cells info */ + early_init_dt_scan_root(); + + /* Retrieve various information from the /chosen node */ + rc = early_init_dt_scan_chosen(boot_command_line); + if (rc) + pr_warn("No chosen node found, continuing without\n"); + + /* Setup memory, calling early_init_dt_add_memory_arch */ + early_init_dt_scan_memory(); + + /* Handle linux,usable-memory-range property */ + early_init_dt_check_for_usable_mem_range(); +} + +bool __init early_init_dt_scan(void *params) +{ + bool status; + + status = early_init_dt_verify(params); + if (!status) + return false; + + early_init_dt_scan_nodes(); + return true; +} + +/** + * unflatten_device_tree - create tree of device_nodes from flat blob + * + * unflattens the device-tree passed by the firmware, creating the + * tree of struct device_node. It also fills the "name" and "type" + * pointers of the nodes so the normal device-tree walking functions + * can be used. 
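A sketch of how these entry points are typically sequenced by architecture setup code; the function name, the panic message and the exact ordering relative to memblock bring-up are illustrative rather than any particular architecture's code.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_fdt.h>

/* Hypothetical arch hook, called with a virtual mapping of the DTB. */
static void __init example_setup_devicetree(void *fdt_va)
{
	if (!early_init_dt_scan(fdt_va))	/* verify header, scan /chosen, /memory */
		panic("Invalid flattened device tree");

	early_init_fdt_reserve_self();		/* keep the blob away from the allocator */
	early_init_fdt_scan_reserved_mem();	/* /memreserve/ and /reserved-memory */

	unflatten_device_tree();		/* build the struct device_node tree */
}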
+ */ +void __init unflatten_device_tree(void) +{ + __unflatten_device_tree(initial_boot_params, NULL, &of_root, + early_init_dt_alloc_memory_arch, false); + + /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ + of_alias_scan(early_init_dt_alloc_memory_arch); + + unittest_unflatten_overlay_base(); +} + +/** + * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob + * + * Copies and unflattens the device-tree passed by the firmware, creating the + * tree of struct device_node. It also fills the "name" and "type" + * pointers of the nodes so the normal device-tree walking functions + * can be used. This should only be used when the FDT memory has not been + * reserved such is the case when the FDT is built-in to the kernel init + * section. If the FDT memory is reserved already then unflatten_device_tree + * should be used instead. + */ +void __init unflatten_and_copy_device_tree(void) +{ + int size; + void *dt; + + if (!initial_boot_params) { + pr_warn("No valid device tree found, continuing without\n"); + return; + } + + size = fdt_totalsize(initial_boot_params); + dt = early_init_dt_alloc_memory_arch(size, + roundup_pow_of_two(FDT_V17_SIZE)); + + if (dt) { + memcpy(dt, initial_boot_params, size); + initial_boot_params = dt; + } + unflatten_device_tree(); +} + +#ifdef CONFIG_SYSFS +static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + memcpy(buf, initial_boot_params + off, count); + return count; +} + +static int __init of_fdt_raw_init(void) +{ + static struct bin_attribute of_fdt_raw_attr = + __BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0); + + if (!initial_boot_params) + return 0; + + if (of_fdt_crc32 != crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params))) { + pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n"); + return 0; + } + of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params); + return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr); +} +late_initcall(of_fdt_raw_init); +#endif + +#endif /* CONFIG_OF_EARLY_FLATTREE */ diff --git a/drivers/of/fdt_address.c b/drivers/of/fdt_address.c new file mode 100644 index 000000000..1dc15ab78 --- /dev/null +++ b/drivers/of/fdt_address.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * FDT Address translation based on u-boot fdt_support.c which in turn was + * based on the kernel unflattened DT address translation code. + * + * (C) Copyright 2007 + * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com + * + * Copyright 2010-2011 Freescale Semiconductor, Inc. 
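The translator implemented below is exposed as of_flat_dt_translate_address(); a minimal sketch of using it from an of_scan_flat_dt() callback follows, with an illustrative compatible string and helper name that are not part of this file.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_fdt.h>

/* Callback for of_scan_flat_dt(): record the CPU physical address of the
 * first node matching the (illustrative) "ns16550a" compatible.
 */
static int __init early_find_uart(unsigned long node, const char *uname,
				  int depth, void *data)
{
	u64 *paddr = data;

	if (!of_flat_dt_is_compatible(node, "ns16550a"))
		return 0;

	*paddr = of_flat_dt_translate_address(node);
	return *paddr != OF_BAD_ADDR;	/* non-zero stops the scan */
}

It would be driven with something like of_scan_flat_dt(early_find_uart, &uart_phys).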
+ */ + +#define pr_fmt(fmt) "OF: fdt: " fmt + +#include <linux/kernel.h> +#include <linux/libfdt.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/sizes.h> + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 +#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ + (ns) > 0) + +/* Debug utility */ +#ifdef DEBUG +static void __init of_dump_addr(const char *s, const __be32 *addr, int na) +{ + pr_debug("%s", s); + while(na--) + pr_cont(" %08x", *(addr++)); + pr_cont("\n"); +} +#else +static void __init of_dump_addr(const char *s, const __be32 *addr, int na) { } +#endif + +/* Callbacks for bus specific translators */ +struct of_bus { + void (*count_cells)(const void *blob, int parentoffset, + int *addrc, int *sizec); + u64 (*map)(__be32 *addr, const __be32 *range, + int na, int ns, int pna); + int (*translate)(__be32 *addr, u64 offset, int na); +}; + +/* Default translator (generic bus) */ +static void __init fdt_bus_default_count_cells(const void *blob, int parentoffset, + int *addrc, int *sizec) +{ + const __be32 *prop; + + if (addrc) { + prop = fdt_getprop(blob, parentoffset, "#address-cells", NULL); + if (prop) + *addrc = be32_to_cpup(prop); + else + *addrc = dt_root_addr_cells; + } + + if (sizec) { + prop = fdt_getprop(blob, parentoffset, "#size-cells", NULL); + if (prop) + *sizec = be32_to_cpup(prop); + else + *sizec = dt_root_size_cells; + } +} + +static u64 __init fdt_bus_default_map(__be32 *addr, const __be32 *range, + int na, int ns, int pna) +{ + u64 cp, s, da; + + cp = of_read_number(range, na); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr, na); + + pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", + cp, s, da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int __init fdt_bus_default_translate(__be32 *addr, u64 offset, int na) +{ + u64 a = of_read_number(addr, na); + memset(addr, 0, na * 4); + a += offset; + if (na > 1) + addr[na - 2] = cpu_to_fdt32(a >> 32); + addr[na - 1] = cpu_to_fdt32(a & 0xffffffffu); + + return 0; +} + +/* Array of bus specific translators */ +static const struct of_bus of_busses[] __initconst = { + /* Default */ + { + .count_cells = fdt_bus_default_count_cells, + .map = fdt_bus_default_map, + .translate = fdt_bus_default_translate, + }, +}; + +static int __init fdt_translate_one(const void *blob, int parent, + const struct of_bus *bus, + const struct of_bus *pbus, __be32 *addr, + int na, int ns, int pna, const char *rprop) +{ + const __be32 *ranges; + int rlen; + int rone; + u64 offset = OF_BAD_ADDR; + + ranges = fdt_getprop(blob, parent, rprop, &rlen); + if (!ranges) + return 1; + if (rlen == 0) { + offset = of_read_number(addr, na); + memset(addr, 0, pna * 4); + pr_debug("empty ranges, 1:1 translation\n"); + goto finish; + } + + pr_debug("walking ranges...\n"); + + /* Now walk through the ranges */ + rlen /= 4; + rone = na + pna + ns; + for (; rlen >= rone; rlen -= rone, ranges += rone) { + offset = bus->map(addr, ranges, na, ns, pna); + if (offset != OF_BAD_ADDR) + break; + } + if (offset == OF_BAD_ADDR) { + pr_debug("not found !\n"); + return 1; + } + memcpy(addr, ranges + na, 4 * pna); + + finish: + of_dump_addr("parent translation for:", addr, pna); + pr_debug("with offset: %llx\n", offset); + + /* Translate it into parent bus space */ + return pbus->translate(addr, offset, pna); +} + +/* + * Translate an address from the device-tree into a CPU physical address, + * this walks up the tree and applies the various bus mappings on 
the + * way. + * + * Note: We consider that crossing any level with #size-cells == 0 to mean + * that translation is impossible (that is we are not dealing with a value + * that can be mapped to a cpu physical address). This is not really specified + * that way, but this is traditionally the way IBM at least do things + */ +static u64 __init fdt_translate_address(const void *blob, int node_offset) +{ + int parent, len; + const struct of_bus *bus, *pbus; + const __be32 *reg; + __be32 addr[OF_MAX_ADDR_CELLS]; + int na, ns, pna, pns; + u64 result = OF_BAD_ADDR; + + pr_debug("** translation for device %s **\n", + fdt_get_name(blob, node_offset, NULL)); + + reg = fdt_getprop(blob, node_offset, "reg", &len); + if (!reg) { + pr_err("warning: device tree node '%s' has no address.\n", + fdt_get_name(blob, node_offset, NULL)); + goto bail; + } + + /* Get parent & match bus type */ + parent = fdt_parent_offset(blob, node_offset); + if (parent < 0) + goto bail; + bus = &of_busses[0]; + + /* Cound address cells & copy address locally */ + bus->count_cells(blob, parent, &na, &ns); + if (!OF_CHECK_COUNTS(na, ns)) { + pr_err("Bad cell count for %s\n", + fdt_get_name(blob, node_offset, NULL)); + goto bail; + } + memcpy(addr, reg, na * 4); + + pr_debug("bus (na=%d, ns=%d) on %s\n", + na, ns, fdt_get_name(blob, parent, NULL)); + of_dump_addr("translating address:", addr, na); + + /* Translate */ + for (;;) { + /* Switch to parent bus */ + node_offset = parent; + parent = fdt_parent_offset(blob, node_offset); + + /* If root, we have finished */ + if (parent < 0) { + pr_debug("reached root node\n"); + result = of_read_number(addr, na); + break; + } + + /* Get new parent bus and counts */ + pbus = &of_busses[0]; + pbus->count_cells(blob, parent, &pna, &pns); + if (!OF_CHECK_COUNTS(pna, pns)) { + pr_err("Bad cell count for %s\n", + fdt_get_name(blob, node_offset, NULL)); + break; + } + + pr_debug("parent bus (na=%d, ns=%d) on %s\n", + pna, pns, fdt_get_name(blob, parent, NULL)); + + /* Apply bus translation */ + if (fdt_translate_one(blob, node_offset, bus, pbus, + addr, na, ns, pna, "ranges")) + break; + + /* Complete the move up one level */ + na = pna; + ns = pns; + bus = pbus; + + of_dump_addr("one level translation:", addr, na); + } + bail: + return result; +} + +/** + * of_flat_dt_translate_address - translate DT addr into CPU phys addr + * @node: node in the flat blob + */ +u64 __init of_flat_dt_translate_address(unsigned long node) +{ + return fdt_translate_address(initial_boot_params, node); +} diff --git a/drivers/of/irq.c b/drivers/of/irq.c new file mode 100644 index 000000000..2bac44f09 --- /dev/null +++ b/drivers/of/irq.c @@ -0,0 +1,744 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Derived from arch/i386/kernel/irq.c + * Copyright (C) 1992 Linus Torvalds + * Adapted from arch/i386 by Gary Thomas + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * Updated and modified by Cort Dougan <cort@fsmlabs.com> + * Copyright (C) 1996-2001 Cort Dougan + * Adapted for Power Macintosh by Paul Mackerras + * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) + * + * This file contains the code used to make IRQ descriptions in the + * device tree to actual irq numbers on an interrupt controller + * driver. 
+ */ + +#define pr_fmt(fmt) "OF: " fmt + +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/string.h> +#include <linux/slab.h> + +/** + * irq_of_parse_and_map - Parse and map an interrupt into linux virq space + * @dev: Device node of the device whose interrupt is to be mapped + * @index: Index of the interrupt to map + * + * This function is a wrapper that chains of_irq_parse_one() and + * irq_create_of_mapping() to make things easier to callers + */ +unsigned int irq_of_parse_and_map(struct device_node *dev, int index) +{ + struct of_phandle_args oirq; + + if (of_irq_parse_one(dev, index, &oirq)) + return 0; + + return irq_create_of_mapping(&oirq); +} +EXPORT_SYMBOL_GPL(irq_of_parse_and_map); + +/** + * of_irq_find_parent - Given a device node, find its interrupt parent node + * @child: pointer to device node + * + * Return: A pointer to the interrupt parent node, or NULL if the interrupt + * parent could not be determined. + */ +struct device_node *of_irq_find_parent(struct device_node *child) +{ + struct device_node *p; + phandle parent; + + if (!of_node_get(child)) + return NULL; + + do { + if (of_property_read_u32(child, "interrupt-parent", &parent)) { + p = of_get_parent(child); + } else { + if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) + p = of_node_get(of_irq_dflt_pic); + else + p = of_find_node_by_phandle(parent); + } + of_node_put(child); + child = p; + } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); + + return p; +} +EXPORT_SYMBOL_GPL(of_irq_find_parent); + +/* + * These interrupt controllers abuse interrupt-map for unspeakable + * reasons and rely on the core code to *ignore* it (the drivers do + * their own parsing of the property). + * + * If you think of adding to the list for something *new*, think + * again. There is a high chance that you will be sent back to the + * drawing board. + */ +static const char * const of_irq_imap_abusers[] = { + "CBEA,platform-spider-pic", + "sti,platform-spider-pic", + "realtek,rtl-intc", + "fsl,ls1021a-extirq", + "fsl,ls1043a-extirq", + "fsl,ls1088a-extirq", + "renesas,rza1-irqc", + NULL, +}; + +/** + * of_irq_parse_raw - Low level interrupt tree parsing + * @addr: address specifier (start of "reg" property of the device) in be32 format + * @out_irq: structure of_phandle_args updated by this function + * + * This function is a low-level interrupt tree walking function. It + * can be used to do a partial walk with synthetized reg and interrupts + * properties, for example when resolving PCI interrupts when no device + * node exist for the parent. It takes an interrupt specifier structure as + * input, walks the tree looking for any interrupt-map properties, translates + * the specifier for each map, and then returns the translated map. + * + * Return: 0 on success and a negative number on error + */ +int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) +{ + struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; + __be32 initial_match_array[MAX_PHANDLE_ARGS]; + const __be32 *match_array = initial_match_array; + const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... 
MAX_PHANDLE_ARGS] = cpu_to_be32(~0) }; + u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; + int imaplen, match, i, rc = -EINVAL; + +#ifdef DEBUG + of_print_phandle_args("of_irq_parse_raw: ", out_irq); +#endif + + ipar = of_node_get(out_irq->np); + + /* First get the #interrupt-cells property of the current cursor + * that tells us how to interpret the passed-in intspec. If there + * is none, we are nice and just walk up the tree + */ + do { + if (!of_property_read_u32(ipar, "#interrupt-cells", &intsize)) + break; + tnode = ipar; + ipar = of_irq_find_parent(ipar); + of_node_put(tnode); + } while (ipar); + if (ipar == NULL) { + pr_debug(" -> no parent found !\n"); + goto fail; + } + + pr_debug("of_irq_parse_raw: ipar=%pOF, size=%d\n", ipar, intsize); + + if (out_irq->args_count != intsize) + goto fail; + + /* Look for this #address-cells. We have to implement the old linux + * trick of looking for the parent here as some device-trees rely on it + */ + old = of_node_get(ipar); + do { + tmp = of_get_property(old, "#address-cells", NULL); + tnode = of_get_parent(old); + of_node_put(old); + old = tnode; + } while (old && tmp == NULL); + of_node_put(old); + old = NULL; + addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp); + + pr_debug(" -> addrsize=%d\n", addrsize); + + /* Range check so that the temporary buffer doesn't overflow */ + if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS)) { + rc = -EFAULT; + goto fail; + } + + /* Precalculate the match array - this simplifies match loop */ + for (i = 0; i < addrsize; i++) + initial_match_array[i] = addr ? addr[i] : 0; + for (i = 0; i < intsize; i++) + initial_match_array[addrsize + i] = cpu_to_be32(out_irq->args[i]); + + /* Now start the actual "proper" walk of the interrupt tree */ + while (ipar != NULL) { + /* + * Now check if cursor is an interrupt-controller and + * if it is then we are done, unless there is an + * interrupt-map which takes precedence except on one + * of these broken platforms that want to parse + * interrupt-map themselves for $reason. 
+ */ + bool intc = of_property_read_bool(ipar, "interrupt-controller"); + + imap = of_get_property(ipar, "interrupt-map", &imaplen); + if (intc && + (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) { + pr_debug(" -> got it !\n"); + return 0; + } + + /* + * interrupt-map parsing does not work without a reg + * property when #address-cells != 0 + */ + if (addrsize && !addr) { + pr_debug(" -> no reg passed in when needed !\n"); + goto fail; + } + + /* No interrupt map, check for an interrupt parent */ + if (imap == NULL) { + pr_debug(" -> no map, getting parent\n"); + newpar = of_irq_find_parent(ipar); + goto skiplevel; + } + imaplen /= sizeof(u32); + + /* Look for a mask */ + imask = of_get_property(ipar, "interrupt-map-mask", NULL); + if (!imask) + imask = dummy_imask; + + /* Parse interrupt-map */ + match = 0; + while (imaplen > (addrsize + intsize + 1) && !match) { + /* Compare specifiers */ + match = 1; + for (i = 0; i < (addrsize + intsize); i++, imaplen--) + match &= !((match_array[i] ^ *imap++) & imask[i]); + + pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); + + /* Get the interrupt parent */ + if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) + newpar = of_node_get(of_irq_dflt_pic); + else + newpar = of_find_node_by_phandle(be32_to_cpup(imap)); + imap++; + --imaplen; + + /* Check if not found */ + if (newpar == NULL) { + pr_debug(" -> imap parent not found !\n"); + goto fail; + } + + if (!of_device_is_available(newpar)) + match = 0; + + /* Get #interrupt-cells and #address-cells of new + * parent + */ + if (of_property_read_u32(newpar, "#interrupt-cells", + &newintsize)) { + pr_debug(" -> parent lacks #interrupt-cells!\n"); + goto fail; + } + if (of_property_read_u32(newpar, "#address-cells", + &newaddrsize)) + newaddrsize = 0; + + pr_debug(" -> newintsize=%d, newaddrsize=%d\n", + newintsize, newaddrsize); + + /* Check for malformed properties */ + if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS) + || (imaplen < (newaddrsize + newintsize))) { + rc = -EFAULT; + goto fail; + } + + imap += newaddrsize + newintsize; + imaplen -= newaddrsize + newintsize; + + pr_debug(" -> imaplen=%d\n", imaplen); + } + if (!match) { + if (intc) { + /* + * The PASEMI Nemo is a known offender, so + * let's only warn for anyone else. 
+ */ + WARN(!IS_ENABLED(CONFIG_PPC_PASEMI), + "%pOF interrupt-map failed, using interrupt-controller\n", + ipar); + return 0; + } + + goto fail; + } + + /* + * Successfully parsed an interrupt-map translation; copy new + * interrupt specifier into the out_irq structure + */ + match_array = imap - newaddrsize - newintsize; + for (i = 0; i < newintsize; i++) + out_irq->args[i] = be32_to_cpup(imap - newintsize + i); + out_irq->args_count = intsize = newintsize; + addrsize = newaddrsize; + + if (ipar == newpar) { + pr_debug("%pOF interrupt-map entry to self\n", ipar); + return 0; + } + + skiplevel: + /* Iterate again with new parent */ + out_irq->np = newpar; + pr_debug(" -> new parent: %pOF\n", newpar); + of_node_put(ipar); + ipar = newpar; + newpar = NULL; + } + rc = -ENOENT; /* No interrupt-map found */ + + fail: + of_node_put(ipar); + of_node_put(newpar); + + return rc; +} +EXPORT_SYMBOL_GPL(of_irq_parse_raw); + +/** + * of_irq_parse_one - Resolve an interrupt for a device + * @device: the device whose interrupt is to be resolved + * @index: index of the interrupt to resolve + * @out_irq: structure of_phandle_args filled by this function + * + * This function resolves an interrupt for a node by walking the interrupt tree, + * finding which interrupt controller node it is attached to, and returning the + * interrupt specifier that can be used to retrieve a Linux IRQ number. + */ +int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq) +{ + struct device_node *p; + const __be32 *addr; + u32 intsize; + int i, res; + + pr_debug("of_irq_parse_one: dev=%pOF, index=%d\n", device, index); + + /* OldWorld mac stuff is "special", handle out of line */ + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) + return of_irq_parse_oldworld(device, index, out_irq); + + /* Get the reg property (if any) */ + addr = of_get_property(device, "reg", NULL); + + /* Try the new-style interrupts-extended first */ + res = of_parse_phandle_with_args(device, "interrupts-extended", + "#interrupt-cells", index, out_irq); + if (!res) + return of_irq_parse_raw(addr, out_irq); + + /* Look for the interrupt parent. */ + p = of_irq_find_parent(device); + if (p == NULL) + return -EINVAL; + + /* Get size of interrupt specifier */ + if (of_property_read_u32(p, "#interrupt-cells", &intsize)) { + res = -EINVAL; + goto out; + } + + pr_debug(" parent=%pOF, intsize=%d\n", p, intsize); + + /* Copy intspec into irq structure */ + out_irq->np = p; + out_irq->args_count = intsize; + for (i = 0; i < intsize; i++) { + res = of_property_read_u32_index(device, "interrupts", + (index * intsize) + i, + out_irq->args + i); + if (res) + goto out; + } + + pr_debug(" intspec=%d\n", *out_irq->args); + + + /* Check if there are any interrupt-map translations to process */ + res = of_irq_parse_raw(addr, out_irq); + out: + of_node_put(p); + return res; +} +EXPORT_SYMBOL_GPL(of_irq_parse_one); + +/** + * of_irq_to_resource - Decode a node's IRQ and return it as a resource + * @dev: pointer to device tree node + * @index: zero-based index of the irq + * @r: pointer to resource structure to return result into. + */ +int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) +{ + int irq = of_irq_get(dev, index); + + if (irq < 0) + return irq; + + /* Only dereference the resource if both the + * resource and the irq are valid. */ + if (r && irq) { + const char *name = NULL; + + memset(r, 0, sizeof(*r)); + /* + * Get optional "interrupt-names" property to add a name + * to the resource. 
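+	 *
+	 * For example (hypothetical node, not part of this file), given
+	 *
+	 *	interrupts = <0 42 4>, <0 43 4>;
+	 *	interrupt-names = "tx", "rx";
+	 *
+	 * a call such as of_irq_to_resource(np, 1, &r) is expected to fill
+	 * @r with the second ("rx") interrupt and use "rx" as the resource
+	 * name.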
+ */ + of_property_read_string_index(dev, "interrupt-names", index, + &name); + + r->start = r->end = irq; + r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq)); + r->name = name ? name : of_node_full_name(dev); + } + + return irq; +} +EXPORT_SYMBOL_GPL(of_irq_to_resource); + +/** + * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number + * @dev: pointer to device tree node + * @index: zero-based index of the IRQ + * + * Return: Linux IRQ number on success, or 0 on the IRQ mapping failure, or + * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case + * of any other failure. + */ +int of_irq_get(struct device_node *dev, int index) +{ + int rc; + struct of_phandle_args oirq; + struct irq_domain *domain; + + rc = of_irq_parse_one(dev, index, &oirq); + if (rc) + return rc; + + domain = irq_find_host(oirq.np); + if (!domain) + return -EPROBE_DEFER; + + return irq_create_of_mapping(&oirq); +} +EXPORT_SYMBOL_GPL(of_irq_get); + +/** + * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number + * @dev: pointer to device tree node + * @name: IRQ name + * + * Return: Linux IRQ number on success, or 0 on the IRQ mapping failure, or + * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case + * of any other failure. + */ +int of_irq_get_byname(struct device_node *dev, const char *name) +{ + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(dev, "interrupt-names", name); + if (index < 0) + return index; + + return of_irq_get(dev, index); +} +EXPORT_SYMBOL_GPL(of_irq_get_byname); + +/** + * of_irq_count - Count the number of IRQs a node uses + * @dev: pointer to device tree node + */ +int of_irq_count(struct device_node *dev) +{ + struct of_phandle_args irq; + int nr = 0; + + while (of_irq_parse_one(dev, nr, &irq) == 0) + nr++; + + return nr; +} + +/** + * of_irq_to_resource_table - Fill in resource table with node's IRQ info + * @dev: pointer to device tree node + * @res: array of resources to fill in + * @nr_irqs: the number of IRQs (and upper bound for num of @res elements) + * + * Return: The size of the filled in table (up to @nr_irqs). + */ +int of_irq_to_resource_table(struct device_node *dev, struct resource *res, + int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++, res++) + if (of_irq_to_resource(dev, i, res) <= 0) + break; + + return i; +} +EXPORT_SYMBOL_GPL(of_irq_to_resource_table); + +struct of_intc_desc { + struct list_head list; + of_irq_init_cb_t irq_init_cb; + struct device_node *dev; + struct device_node *interrupt_parent; +}; + +/** + * of_irq_init - Scan and init matching interrupt controllers in DT + * @matches: 0 terminated array of nodes to match and init function to call + * + * This function scans the device tree for matching interrupt controller nodes, + * and calls their initialization functions in order with parents first. 
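+ *
+ * A minimal usage sketch (hypothetical controller and callback names,
+ * not taken from this file):
+ *
+ *	static const struct of_device_id acme_irq_match[] __initconst = {
+ *		{ .compatible = "acme,intc", .data = acme_intc_init },
+ *		{ }
+ *	};
+ *
+ *	of_irq_init(acme_irq_match);
+ *
+ * where acme_intc_init() has the of_irq_init_cb_t signature and registers
+ * an irq_domain for the matched node, and the call is typically made from
+ * the architecture's init_IRQ() path.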
+ */ +void __init of_irq_init(const struct of_device_id *matches) +{ + const struct of_device_id *match; + struct device_node *np, *parent = NULL; + struct of_intc_desc *desc, *temp_desc; + struct list_head intc_desc_list, intc_parent_list; + + INIT_LIST_HEAD(&intc_desc_list); + INIT_LIST_HEAD(&intc_parent_list); + + for_each_matching_node_and_match(np, matches, &match) { + if (!of_property_read_bool(np, "interrupt-controller") || + !of_device_is_available(np)) + continue; + + if (WARN(!match->data, "of_irq_init: no init function for %s\n", + match->compatible)) + continue; + + /* + * Here, we allocate and populate an of_intc_desc with the node + * pointer, interrupt-parent device_node etc. + */ + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) { + of_node_put(np); + goto err; + } + + desc->irq_init_cb = match->data; + desc->dev = of_node_get(np); + /* + * interrupts-extended can reference multiple parent domains. + * Arbitrarily pick the first one; assume any other parents + * are the same distance away from the root irq controller. + */ + desc->interrupt_parent = of_parse_phandle(np, "interrupts-extended", 0); + if (!desc->interrupt_parent) + desc->interrupt_parent = of_irq_find_parent(np); + if (desc->interrupt_parent == np) { + of_node_put(desc->interrupt_parent); + desc->interrupt_parent = NULL; + } + list_add_tail(&desc->list, &intc_desc_list); + } + + /* + * The root irq controller is the one without an interrupt-parent. + * That one goes first, followed by the controllers that reference it, + * followed by the ones that reference the 2nd level controllers, etc. + */ + while (!list_empty(&intc_desc_list)) { + /* + * Process all controllers with the current 'parent'. + * First pass will be looking for NULL as the parent. + * The assumption is that NULL parent means a root controller. + */ + list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) { + int ret; + + if (desc->interrupt_parent != parent) + continue; + + list_del(&desc->list); + + of_node_set_flag(desc->dev, OF_POPULATED); + + pr_debug("of_irq_init: init %pOF (%p), parent %p\n", + desc->dev, + desc->dev, desc->interrupt_parent); + ret = desc->irq_init_cb(desc->dev, + desc->interrupt_parent); + if (ret) { + pr_err("%s: Failed to init %pOF (%p), parent %p\n", + __func__, desc->dev, desc->dev, + desc->interrupt_parent); + of_node_clear_flag(desc->dev, OF_POPULATED); + kfree(desc); + continue; + } + + /* + * This one is now set up; add it to the parent list so + * its children can get processed in a subsequent pass. + */ + list_add_tail(&desc->list, &intc_parent_list); + } + + /* Get the next pending parent that might have children */ + desc = list_first_entry_or_null(&intc_parent_list, + typeof(*desc), list); + if (!desc) { + pr_err("of_irq_init: children remain, but no parents\n"); + break; + } + list_del(&desc->list); + parent = desc->dev; + kfree(desc); + } + + list_for_each_entry_safe(desc, temp_desc, &intc_parent_list, list) { + list_del(&desc->list); + kfree(desc); + } +err: + list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) { + list_del(&desc->list); + of_node_put(desc->dev); + kfree(desc); + } +} + +static u32 __of_msi_map_id(struct device *dev, struct device_node **np, + u32 id_in) +{ + struct device *parent_dev; + u32 id_out = id_in; + + /* + * Walk up the device parent links looking for one with a + * "msi-map" property. 
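+	 *
+	 * For illustration (hypothetical values), a parent bus node carrying
+	 *
+	 *	msi-map = <0x0 &its 0x10000 0x10000>;
+	 *	msi-map-mask = <0xffff>;
+	 *
+	 * maps requester ID 0x1234 to MSI specifier 0x11234 at the &its
+	 * controller.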
+ */ + for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) + if (!of_map_id(parent_dev->of_node, id_in, "msi-map", + "msi-map-mask", np, &id_out)) + break; + return id_out; +} + +/** + * of_msi_map_id - Map a MSI ID for a device. + * @dev: device for which the mapping is to be done. + * @msi_np: device node of the expected msi controller. + * @id_in: unmapped MSI ID for the device. + * + * Walk up the device hierarchy looking for devices with a "msi-map" + * property. If found, apply the mapping to @id_in. + * + * Return: The mapped MSI ID. + */ +u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in) +{ + return __of_msi_map_id(dev, &msi_np, id_in); +} + +/** + * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain + * @dev: device for which the mapping is to be done. + * @id: Device ID. + * @bus_token: Bus token + * + * Walk up the device hierarchy looking for devices with a "msi-map" + * property. + * + * Returns: the MSI domain for this device (or NULL on failure) + */ +struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, + u32 bus_token) +{ + struct device_node *np = NULL; + + __of_msi_map_id(dev, &np, id); + return irq_find_matching_host(np, bus_token); +} + +/** + * of_msi_get_domain - Use msi-parent to find the relevant MSI domain + * @dev: device for which the domain is requested + * @np: device node for @dev + * @token: bus type for this domain + * + * Parse the msi-parent property (both the simple and the complex + * versions), and returns the corresponding MSI domain. + * + * Returns: the MSI domain for this device (or NULL on failure). + */ +struct irq_domain *of_msi_get_domain(struct device *dev, + struct device_node *np, + enum irq_domain_bus_token token) +{ + struct device_node *msi_np; + struct irq_domain *d; + + /* Check for a single msi-parent property */ + msi_np = of_parse_phandle(np, "msi-parent", 0); + if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) { + d = irq_find_matching_host(msi_np, token); + if (!d) + of_node_put(msi_np); + return d; + } + + if (token == DOMAIN_BUS_PLATFORM_MSI) { + /* Check for the complex msi-parent version */ + struct of_phandle_args args; + int index = 0; + + while (!of_parse_phandle_with_args(np, "msi-parent", + "#msi-cells", + index, &args)) { + d = irq_find_matching_host(args.np, token); + if (d) + return d; + + of_node_put(args.np); + index++; + } + } + + return NULL; +} + +/** + * of_msi_configure - Set the msi_domain field of a device + * @dev: device structure to associate with an MSI irq domain + * @np: device node for that device + */ +void of_msi_configure(struct device *dev, struct device_node *np) +{ + dev_set_msi_domain(dev, + of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI)); +} +EXPORT_SYMBOL_GPL(of_msi_configure); diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c new file mode 100644 index 000000000..68278340c --- /dev/null +++ b/drivers/of/kexec.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Arm Limited + * + * Based on arch/arm64/kernel/machine_kexec_file.c: + * Copyright (C) 2018 Linaro Limited + * + * And arch/powerpc/kexec/file_load.c: + * Copyright (C) 2016 IBM Corporation + */ + +#include <linux/ima.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/memblock.h> +#include <linux/libfdt.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/types.h> + +#define RNG_SEED_SIZE 128 + +/* + * 
Additional space needed for the FDT buffer so that we can add initrd, + * bootargs, kaslr-seed, rng-seed, useable-memory-range and elfcorehdr. + */ +#define FDT_EXTRA_SPACE 0x1000 + +/** + * fdt_find_and_del_mem_rsv - delete memory reservation with given address and size + * + * @fdt: Flattened device tree for the current kernel. + * @start: Starting address of the reserved memory. + * @size: Size of the reserved memory. + * + * Return: 0 on success, or negative errno on error. + */ +static int fdt_find_and_del_mem_rsv(void *fdt, unsigned long start, unsigned long size) +{ + int i, ret, num_rsvs = fdt_num_mem_rsv(fdt); + + for (i = 0; i < num_rsvs; i++) { + u64 rsv_start, rsv_size; + + ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size); + if (ret) { + pr_err("Malformed device tree.\n"); + return -EINVAL; + } + + if (rsv_start == start && rsv_size == size) { + ret = fdt_del_mem_rsv(fdt, i); + if (ret) { + pr_err("Error deleting device tree reservation.\n"); + return -EINVAL; + } + + return 0; + } + } + + return -ENOENT; +} + +/** + * get_addr_size_cells - Get address and size of root node + * + * @addr_cells: Return address of the root node + * @size_cells: Return size of the root node + * + * Return: 0 on success, or negative errno on error. + */ +static int get_addr_size_cells(int *addr_cells, int *size_cells) +{ + struct device_node *root; + + root = of_find_node_by_path("/"); + if (!root) + return -EINVAL; + + *addr_cells = of_n_addr_cells(root); + *size_cells = of_n_size_cells(root); + + of_node_put(root); + + return 0; +} + +/** + * do_get_kexec_buffer - Get address and size of device tree property + * + * @prop: Device tree property + * @len: Size of @prop + * @addr: Return address of the node + * @size: Return size of the node + * + * Return: 0 on success, or negative errno on error. + */ +static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr, + size_t *size) +{ + int ret, addr_cells, size_cells; + + ret = get_addr_size_cells(&addr_cells, &size_cells); + if (ret) + return ret; + + if (len < 4 * (addr_cells + size_cells)) + return -ENOENT; + + *addr = of_read_number(prop, addr_cells); + *size = of_read_number(prop + 4 * addr_cells, size_cells); + + return 0; +} + +#ifdef CONFIG_HAVE_IMA_KEXEC +/** + * ima_get_kexec_buffer - get IMA buffer from the previous kernel + * @addr: On successful return, set to point to the buffer contents. + * @size: On successful return, set to the buffer size. + * + * Return: 0 on success, negative errno on error. + */ +int __init ima_get_kexec_buffer(void **addr, size_t *size) +{ + int ret, len; + unsigned long tmp_addr; + unsigned long start_pfn, end_pfn; + size_t tmp_size; + const void *prop; + + prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len); + if (!prop) + return -ENOENT; + + ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size); + if (ret) + return ret; + + /* Do some sanity on the returned size for the ima-kexec buffer */ + if (!tmp_size) + return -ENOENT; + + /* + * Calculate the PFNs for the buffer and ensure + * they are with in addressable memory. 
+ */ + start_pfn = PHYS_PFN(tmp_addr); + end_pfn = PHYS_PFN(tmp_addr + tmp_size - 1); + if (!page_is_ram(start_pfn) || !page_is_ram(end_pfn)) { + pr_warn("IMA buffer at 0x%lx, size = 0x%zx beyond memory\n", + tmp_addr, tmp_size); + return -EINVAL; + } + + *addr = __va(tmp_addr); + *size = tmp_size; + + return 0; +} + +/** + * ima_free_kexec_buffer - free memory used by the IMA buffer + */ +int __init ima_free_kexec_buffer(void) +{ + int ret; + unsigned long addr; + size_t size; + struct property *prop; + + prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL); + if (!prop) + return -ENOENT; + + ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size); + if (ret) + return ret; + + ret = of_remove_property(of_chosen, prop); + if (ret) + return ret; + + memblock_free_late(addr, size); + return 0; +} +#endif + +/** + * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt + * + * @fdt: Flattened Device Tree to update + * @chosen_node: Offset to the chosen node in the device tree + * + * The IMA measurement buffer is of no use to a subsequent kernel, so we always + * remove it from the device tree. + */ +static void remove_ima_buffer(void *fdt, int chosen_node) +{ + int ret, len; + unsigned long addr; + size_t size; + const void *prop; + + if (!IS_ENABLED(CONFIG_HAVE_IMA_KEXEC)) + return; + + prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len); + if (!prop) + return; + + ret = do_get_kexec_buffer(prop, len, &addr, &size); + fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer"); + if (ret) + return; + + ret = fdt_find_and_del_mem_rsv(fdt, addr, size); + if (!ret) + pr_debug("Removed old IMA buffer reservation.\n"); +} + +#ifdef CONFIG_IMA_KEXEC +/** + * setup_ima_buffer - add IMA buffer information to the fdt + * @image: kexec image being loaded. + * @fdt: Flattened device tree for the next kernel. + * @chosen_node: Offset to the chosen node. + * + * Return: 0 on success, or negative errno on error. + */ +static int setup_ima_buffer(const struct kimage *image, void *fdt, + int chosen_node) +{ + int ret; + + if (!image->ima_buffer_size) + return 0; + + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, + "linux,ima-kexec-buffer", + image->ima_buffer_addr, + image->ima_buffer_size); + if (ret < 0) + return -EINVAL; + + ret = fdt_add_mem_rsv(fdt, image->ima_buffer_addr, + image->ima_buffer_size); + if (ret) + return -EINVAL; + + pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n", + image->ima_buffer_addr, image->ima_buffer_size); + + return 0; +} +#else /* CONFIG_IMA_KEXEC */ +static inline int setup_ima_buffer(const struct kimage *image, void *fdt, + int chosen_node) +{ + return 0; +} +#endif /* CONFIG_IMA_KEXEC */ + +/* + * of_kexec_alloc_and_setup_fdt - Alloc and setup a new Flattened Device Tree + * + * @image: kexec image being loaded. + * @initrd_load_addr: Address where the next initrd will be loaded. + * @initrd_len: Size of the next initrd, or 0 if there will be none. + * @cmdline: Command line for the next kernel, or NULL if there will + * be none. + * @extra_fdt_size: Additional size for the new FDT buffer. + * + * Return: fdt on success, or NULL errno on error. + */ +void *of_kexec_alloc_and_setup_fdt(const struct kimage *image, + unsigned long initrd_load_addr, + unsigned long initrd_len, + const char *cmdline, size_t extra_fdt_size) +{ + void *fdt; + int ret, chosen_node, len; + const void *prop; + size_t fdt_size; + + fdt_size = fdt_totalsize(initial_boot_params) + + (cmdline ? 
strlen(cmdline) : 0) + + FDT_EXTRA_SPACE + + extra_fdt_size; + fdt = kvmalloc(fdt_size, GFP_KERNEL); + if (!fdt) + return NULL; + + ret = fdt_open_into(initial_boot_params, fdt, fdt_size); + if (ret < 0) { + pr_err("Error %d setting up the new device tree.\n", ret); + goto out; + } + + /* Remove memory reservation for the current device tree. */ + ret = fdt_find_and_del_mem_rsv(fdt, __pa(initial_boot_params), + fdt_totalsize(initial_boot_params)); + if (ret == -EINVAL) { + pr_err("Error removing memory reservation.\n"); + goto out; + } + + chosen_node = fdt_path_offset(fdt, "/chosen"); + if (chosen_node == -FDT_ERR_NOTFOUND) + chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), + "chosen"); + if (chosen_node < 0) { + ret = chosen_node; + goto out; + } + + ret = fdt_delprop(fdt, chosen_node, "linux,elfcorehdr"); + if (ret && ret != -FDT_ERR_NOTFOUND) + goto out; + ret = fdt_delprop(fdt, chosen_node, "linux,usable-memory-range"); + if (ret && ret != -FDT_ERR_NOTFOUND) + goto out; + + /* Did we boot using an initrd? */ + prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", &len); + if (prop) { + u64 tmp_start, tmp_end, tmp_size; + + tmp_start = of_read_number(prop, len / 4); + + prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", &len); + if (!prop) { + ret = -EINVAL; + goto out; + } + + tmp_end = of_read_number(prop, len / 4); + + /* + * kexec reserves exact initrd size, while firmware may + * reserve a multiple of PAGE_SIZE, so check for both. + */ + tmp_size = tmp_end - tmp_start; + ret = fdt_find_and_del_mem_rsv(fdt, tmp_start, tmp_size); + if (ret == -ENOENT) + ret = fdt_find_and_del_mem_rsv(fdt, tmp_start, + round_up(tmp_size, PAGE_SIZE)); + if (ret == -EINVAL) + goto out; + } + + /* add initrd-* */ + if (initrd_load_addr) { + ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-start", + initrd_load_addr); + if (ret) + goto out; + + ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end", + initrd_load_addr + initrd_len); + if (ret) + goto out; + + ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len); + if (ret) + goto out; + + } else { + ret = fdt_delprop(fdt, chosen_node, "linux,initrd-start"); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + + ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end"); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + } + + if (image->type == KEXEC_TYPE_CRASH) { + /* add linux,elfcorehdr */ + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, + "linux,elfcorehdr", image->elf_load_addr, + image->elf_headers_sz); + if (ret) + goto out; + + /* + * Avoid elfcorehdr from being stomped on in kdump kernel by + * setting up memory reserve map. 
+ */ + ret = fdt_add_mem_rsv(fdt, image->elf_load_addr, + image->elf_headers_sz); + if (ret) + goto out; + + /* add linux,usable-memory-range */ + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, + "linux,usable-memory-range", crashk_res.start, + crashk_res.end - crashk_res.start + 1); + if (ret) + goto out; + + if (crashk_low_res.end) { + ret = fdt_appendprop_addrrange(fdt, 0, chosen_node, + "linux,usable-memory-range", + crashk_low_res.start, + crashk_low_res.end - crashk_low_res.start + 1); + if (ret) + goto out; + } + } + + /* add bootargs */ + if (cmdline) { + ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline); + if (ret) + goto out; + } else { + ret = fdt_delprop(fdt, chosen_node, "bootargs"); + if (ret && (ret != -FDT_ERR_NOTFOUND)) + goto out; + } + + /* add kaslr-seed */ + ret = fdt_delprop(fdt, chosen_node, "kaslr-seed"); + if (ret == -FDT_ERR_NOTFOUND) + ret = 0; + else if (ret) + goto out; + + if (rng_is_initialized()) { + u64 seed = get_random_u64(); + + ret = fdt_setprop_u64(fdt, chosen_node, "kaslr-seed", seed); + if (ret) + goto out; + } else { + pr_notice("RNG is not initialised: omitting \"%s\" property\n", + "kaslr-seed"); + } + + /* add rng-seed */ + if (rng_is_initialized()) { + void *rng_seed; + + ret = fdt_setprop_placeholder(fdt, chosen_node, "rng-seed", + RNG_SEED_SIZE, &rng_seed); + if (ret) + goto out; + get_random_bytes(rng_seed, RNG_SEED_SIZE); + } else { + pr_notice("RNG is not initialised: omitting \"%s\" property\n", + "rng-seed"); + } + + ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0); + if (ret) + goto out; + + remove_ima_buffer(fdt, chosen_node); + ret = setup_ima_buffer(image, fdt, fdt_path_offset(fdt, "/chosen")); + +out: + if (ret) { + kvfree(fdt); + fdt = NULL; + } + + return fdt; +} diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c new file mode 100644 index 000000000..7d3853a5a --- /dev/null +++ b/drivers/of/kobj.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/of.h> +#include <linux/slab.h> + +#include "of_private.h" + +/* true when node is initialized */ +static int of_node_is_initialized(const struct device_node *node) +{ + return node && node->kobj.state_initialized; +} + +/* true when node is attached (i.e. present on sysfs) */ +int of_node_is_attached(const struct device_node *node) +{ + return node && node->kobj.state_in_sysfs; +} + + +#ifndef CONFIG_OF_DYNAMIC +static void of_node_release(struct kobject *kobj) +{ + /* Without CONFIG_OF_DYNAMIC, no nodes gets freed */ +} +#endif /* CONFIG_OF_DYNAMIC */ + +struct kobj_type of_node_ktype = { + .release = of_node_release, +}; + +static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t offset, size_t count) +{ + struct property *pp = container_of(bin_attr, struct property, attr); + return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length); +} + +/* always return newly allocated name, caller must free after use */ +static const char *safe_name(struct kobject *kobj, const char *orig_name) +{ + const char *name = orig_name; + struct kernfs_node *kn; + int i = 0; + + /* don't be a hero. 
After 16 tries give up */ + while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, name))) { + sysfs_put(kn); + if (name != orig_name) + kfree(name); + name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i); + } + + if (name == orig_name) { + name = kstrdup(orig_name, GFP_KERNEL); + } else { + pr_warn("Duplicate name in %s, renamed to \"%s\"\n", + kobject_name(kobj), name); + } + return name; +} + +int __of_add_property_sysfs(struct device_node *np, struct property *pp) +{ + int rc; + + /* Important: Don't leak passwords */ + bool secure = strncmp(pp->name, "security-", 9) == 0; + + if (!IS_ENABLED(CONFIG_SYSFS)) + return 0; + + if (!of_kset || !of_node_is_attached(np)) + return 0; + + sysfs_bin_attr_init(&pp->attr); + pp->attr.attr.name = safe_name(&np->kobj, pp->name); + pp->attr.attr.mode = secure ? 0400 : 0444; + pp->attr.size = secure ? 0 : pp->length; + pp->attr.read = of_node_property_read; + + rc = sysfs_create_bin_file(&np->kobj, &pp->attr); + WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np); + return rc; +} + +void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop) +{ + if (!IS_ENABLED(CONFIG_SYSFS)) + return; + + sysfs_remove_bin_file(&np->kobj, &prop->attr); + kfree(prop->attr.attr.name); +} + +void __of_remove_property_sysfs(struct device_node *np, struct property *prop) +{ + /* at early boot, bail here and defer setup to of_init() */ + if (of_kset && of_node_is_attached(np)) + __of_sysfs_remove_bin_file(np, prop); +} + +void __of_update_property_sysfs(struct device_node *np, struct property *newprop, + struct property *oldprop) +{ + /* At early boot, bail out and defer setup to of_init() */ + if (!of_kset) + return; + + if (oldprop) + __of_sysfs_remove_bin_file(np, oldprop); + __of_add_property_sysfs(np, newprop); +} + +int __of_attach_node_sysfs(struct device_node *np) +{ + const char *name; + struct kobject *parent; + struct property *pp; + int rc; + + if (!IS_ENABLED(CONFIG_SYSFS) || !of_kset) + return 0; + + np->kobj.kset = of_kset; + if (!np->parent) { + /* Nodes without parents are new top level trees */ + name = safe_name(&of_kset->kobj, "base"); + parent = NULL; + } else { + name = safe_name(&np->parent->kobj, kbasename(np->full_name)); + parent = &np->parent->kobj; + } + if (!name) + return -ENOMEM; + + rc = kobject_add(&np->kobj, parent, "%s", name); + kfree(name); + if (rc) + return rc; + + for_each_property_of_node(np, pp) + __of_add_property_sysfs(np, pp); + + of_node_get(np); + return 0; +} + +void __of_detach_node_sysfs(struct device_node *np) +{ + struct property *pp; + + BUG_ON(!of_node_is_initialized(np)); + if (!of_kset) + return; + + /* only remove properties if on sysfs */ + if (of_node_is_attached(np)) { + for_each_property_of_node(np, pp) + __of_sysfs_remove_bin_file(np, pp); + kobject_del(&np->kobj); + } + + of_node_put(np); +} diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c new file mode 100644 index 000000000..5949829a1 --- /dev/null +++ b/drivers/of/of_numa.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OF NUMA Parsing support. + * + * Copyright (C) 2015 - 2016 Cavium Inc. + */ + +#define pr_fmt(fmt) "OF: NUMA: " fmt + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/nodemask.h> + +#include <asm/numa.h> + +/* define default numa node to 0 */ +#define DEFAULT_NODE 0 + +/* + * Even though we connect cpus to numa domains later in SMP + * init, we need to know the node ids now for all cpus. 
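+ *
+ * For example (hypothetical), a cpu node carrying
+ *
+ *	numa-node-id = <1>;
+ *
+ * causes node 1 to be recorded in numa_nodes_parsed below.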
+*/ +static void __init of_numa_parse_cpu_nodes(void) +{ + u32 nid; + int r; + struct device_node *np; + + for_each_of_cpu_node(np) { + r = of_property_read_u32(np, "numa-node-id", &nid); + if (r) + continue; + + pr_debug("CPU on %u\n", nid); + if (nid >= MAX_NUMNODES) + pr_warn("Node id %u exceeds maximum value\n", nid); + else + node_set(nid, numa_nodes_parsed); + } +} + +static int __init of_numa_parse_memory_nodes(void) +{ + struct device_node *np = NULL; + struct resource rsrc; + u32 nid; + int i, r; + + for_each_node_by_type(np, "memory") { + r = of_property_read_u32(np, "numa-node-id", &nid); + if (r == -EINVAL) + /* + * property doesn't exist if -EINVAL, continue + * looking for more memory nodes with + * "numa-node-id" property + */ + continue; + + if (nid >= MAX_NUMNODES) { + pr_warn("Node id %u exceeds maximum value\n", nid); + r = -EINVAL; + } + + for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++) + r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1); + + if (!i || r) { + of_node_put(np); + pr_err("bad property in memory node\n"); + return r ? : -EINVAL; + } + } + + return 0; +} + +static int __init of_numa_parse_distance_map_v1(struct device_node *map) +{ + const __be32 *matrix; + int entry_count; + int i; + + pr_info("parsing numa-distance-map-v1\n"); + + matrix = of_get_property(map, "distance-matrix", NULL); + if (!matrix) { + pr_err("No distance-matrix property in distance-map\n"); + return -EINVAL; + } + + entry_count = of_property_count_u32_elems(map, "distance-matrix"); + if (entry_count <= 0) { + pr_err("Invalid distance-matrix\n"); + return -EINVAL; + } + + for (i = 0; i + 2 < entry_count; i += 3) { + u32 nodea, nodeb, distance; + + nodea = of_read_number(matrix, 1); + matrix++; + nodeb = of_read_number(matrix, 1); + matrix++; + distance = of_read_number(matrix, 1); + matrix++; + + if ((nodea == nodeb && distance != LOCAL_DISTANCE) || + (nodea != nodeb && distance <= LOCAL_DISTANCE)) { + pr_err("Invalid distance[node%d -> node%d] = %d\n", + nodea, nodeb, distance); + return -EINVAL; + } + + node_set(nodea, numa_nodes_parsed); + + numa_set_distance(nodea, nodeb, distance); + + /* Set default distance of node B->A same as A->B */ + if (nodeb > nodea) + numa_set_distance(nodeb, nodea, distance); + } + + return 0; +} + +static int __init of_numa_parse_distance_map(void) +{ + int ret = 0; + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, + "numa-distance-map-v1"); + if (np) + ret = of_numa_parse_distance_map_v1(np); + + of_node_put(np); + return ret; +} + +int of_node_to_nid(struct device_node *device) +{ + struct device_node *np; + u32 nid; + int r = -ENODATA; + + np = of_node_get(device); + + while (np) { + r = of_property_read_u32(np, "numa-node-id", &nid); + /* + * -EINVAL indicates the property was not found, and + * we walk up the tree trying to find a parent with a + * "numa-node-id". Any other type of error indicates + * a bad device tree and we give up. + */ + if (r != -EINVAL) + break; + + np = of_get_next_parent(np); + } + if (np && r) + pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n", + np); + of_node_put(np); + + /* + * If numa=off passed on command line, or with a defective + * device tree, the nid may not be in the set of possible + * nodes. Check for this case and return NUMA_NO_NODE. 
+ */ + if (!r && nid < MAX_NUMNODES && node_possible(nid)) + return nid; + + return NUMA_NO_NODE; +} + +int __init of_numa_init(void) +{ + int r; + + of_numa_parse_cpu_nodes(); + r = of_numa_parse_memory_nodes(); + if (r) + return r; + return of_numa_parse_distance_map(); +} diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h new file mode 100644 index 000000000..fb6792d38 --- /dev/null +++ b/drivers/of/of_private.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_OF_PRIVATE_H +#define _LINUX_OF_PRIVATE_H +/* + * Private symbols used by OF support code + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + */ + +#define FDT_ALIGN_SIZE 8 + +/** + * struct alias_prop - Alias property in 'aliases' node + * @link: List node to link the structure in aliases_lookup list + * @alias: Alias property name + * @np: Pointer to device_node that the alias stands for + * @id: Index value from end of alias name + * @stem: Alias string without the index + * + * The structure represents one alias property of 'aliases' node as + * an entry in aliases_lookup list. + */ +struct alias_prop { + struct list_head link; + const char *alias; + struct device_node *np; + int id; + char stem[]; +}; + +#if defined(CONFIG_SPARC) +#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2 +#else +#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1 +#endif + +#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 + +extern struct mutex of_mutex; +extern struct list_head aliases_lookup; +extern struct kset *of_kset; + +#if defined(CONFIG_OF_DYNAMIC) +extern int of_property_notify(int action, struct device_node *np, + struct property *prop, struct property *old_prop); +extern void of_node_release(struct kobject *kobj); +extern int __of_changeset_apply_entries(struct of_changeset *ocs, + int *ret_revert); +extern int __of_changeset_apply_notify(struct of_changeset *ocs); +extern int __of_changeset_revert_entries(struct of_changeset *ocs, + int *ret_apply); +extern int __of_changeset_revert_notify(struct of_changeset *ocs); +#else /* CONFIG_OF_DYNAMIC */ +static inline int of_property_notify(int action, struct device_node *np, + struct property *prop, struct property *old_prop) +{ + return 0; +} +#endif /* CONFIG_OF_DYNAMIC */ + +#if defined(CONFIG_OF_KOBJ) +int of_node_is_attached(const struct device_node *node); +int __of_add_property_sysfs(struct device_node *np, struct property *pp); +void __of_remove_property_sysfs(struct device_node *np, struct property *prop); +void __of_update_property_sysfs(struct device_node *np, struct property *newprop, + struct property *oldprop); +int __of_attach_node_sysfs(struct device_node *np); +void __of_detach_node_sysfs(struct device_node *np); +#else +static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp) +{ + return 0; +} +static inline void __of_remove_property_sysfs(struct device_node *np, struct property *prop) {} +static inline void __of_update_property_sysfs(struct device_node *np, + struct property *newprop, struct property *oldprop) {} +static inline int __of_attach_node_sysfs(struct device_node *np) +{ + return 0; +} +static inline void __of_detach_node_sysfs(struct device_node *np) {} +#endif + +#if defined(CONFIG_OF_RESOLVE) +int of_resolve_phandles(struct device_node *tree); +#endif + +void __of_phandle_cache_inv_entry(phandle handle); + +#if defined(CONFIG_OF_OVERLAY) +void of_overlay_mutex_lock(void); +void of_overlay_mutex_unlock(void); +#else +static inline void of_overlay_mutex_lock(void) {}; +static inline void 
of_overlay_mutex_unlock(void) {}; +#endif + +#if defined(CONFIG_OF_UNITTEST) && defined(CONFIG_OF_OVERLAY) +extern void __init unittest_unflatten_overlay_base(void); +#else +static inline void unittest_unflatten_overlay_base(void) {}; +#endif + +extern void *__unflatten_device_tree(const void *blob, + struct device_node *dad, + struct device_node **mynodes, + void *(*dt_alloc)(u64 size, u64 align), + bool detached); + +/** + * General utilities for working with live trees. + * + * All functions with two leading underscores operate + * without taking node references, so you either have to + * own the devtree lock or work on detached trees only. + */ +struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags); +struct device_node *__of_node_dup(const struct device_node *np, + const char *full_name); + +struct device_node *__of_find_node_by_path(struct device_node *parent, + const char *path); +struct device_node *__of_find_node_by_full_path(struct device_node *node, + const char *path); + +extern const void *__of_get_property(const struct device_node *np, + const char *name, int *lenp); +extern int __of_add_property(struct device_node *np, struct property *prop); +extern int __of_remove_property(struct device_node *np, struct property *prop); +extern int __of_update_property(struct device_node *np, + struct property *newprop, struct property **oldprop); + +extern void __of_detach_node(struct device_node *np); + +extern void __of_sysfs_remove_bin_file(struct device_node *np, + struct property *prop); + +/* illegal phandle value (set when unresolved) */ +#define OF_PHANDLE_ILLEGAL 0xdeadbeef + +/* iterators for transactions, used for overlays */ +/* forward iterator */ +#define for_each_transaction_entry(_oft, _te) \ + list_for_each_entry(_te, &(_oft)->te_list, node) + +/* reverse iterator */ +#define for_each_transaction_entry_reverse(_oft, _te) \ + list_for_each_entry_reverse(_te, &(_oft)->te_list, node) + +extern int of_bus_n_addr_cells(struct device_node *np); +extern int of_bus_n_size_cells(struct device_node *np); + +struct bus_dma_region; +#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_HAS_DMA) +int of_dma_get_range(struct device_node *np, + const struct bus_dma_region **map); +struct device_node *__of_get_dma_parent(const struct device_node *np); +#else +static inline int of_dma_get_range(struct device_node *np, + const struct bus_dma_region **map) +{ + return -ENODEV; +} +static inline struct device_node *__of_get_dma_parent(const struct device_node *np) +{ + return of_get_parent(np); +} +#endif + +void fdt_init_reserved_mem(void); +void fdt_reserved_mem_save_node(unsigned long node, const char *uname, + phys_addr_t base, phys_addr_t size); + +#endif /* _LINUX_OF_PRIVATE_H */ diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c new file mode 100644 index 000000000..f90975e00 --- /dev/null +++ b/drivers/of/of_reserved_mem.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Device tree based initialization code for reserved memory. + * + * Copyright (c) 2013, 2015 The Linux Foundation. All Rights Reserved. + * Copyright (c) 2013,2014 Samsung Electronics Co., Ltd. 
+ * http://www.samsung.com + * Author: Marek Szyprowski <m.szyprowski@samsung.com> + * Author: Josh Cartwright <joshc@codeaurora.org> + */ + +#define pr_fmt(fmt) "OF: reserved mem: " fmt + +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_platform.h> +#include <linux/mm.h> +#include <linux/sizes.h> +#include <linux/of_reserved_mem.h> +#include <linux/sort.h> +#include <linux/slab.h> +#include <linux/memblock.h> +#include <linux/kmemleak.h> +#include <linux/cma.h> + +#include "of_private.h" + +#define MAX_RESERVED_REGIONS 64 +static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; +static int reserved_mem_count; + +static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, + phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, + phys_addr_t *res_base) +{ + phys_addr_t base; + int err = 0; + + end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end; + align = !align ? SMP_CACHE_BYTES : align; + base = memblock_phys_alloc_range(size, align, start, end); + if (!base) + return -ENOMEM; + + *res_base = base; + if (nomap) { + err = memblock_mark_nomap(base, size); + if (err) + memblock_phys_free(base, size); + } + + kmemleak_ignore_phys(base); + + return err; +} + +/* + * fdt_reserved_mem_save_node() - save fdt node for second pass initialization + */ +void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname, + phys_addr_t base, phys_addr_t size) +{ + struct reserved_mem *rmem = &reserved_mem[reserved_mem_count]; + + if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) { + pr_err("not enough space for all defined regions.\n"); + return; + } + + rmem->fdt_node = node; + rmem->name = uname; + rmem->base = base; + rmem->size = size; + + reserved_mem_count++; + return; +} + +/* + * __reserved_mem_alloc_size() - allocate reserved memory described by + * 'size', 'alignment' and 'alloc-ranges' properties. 
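+ *
+ * For example, a (hypothetical) dynamically placed region such as
+ *
+ *	mm_pool: mm-pool {
+ *		compatible = "shared-dma-pool";
+ *		reusable;
+ *		size = <0x4000000>;
+ *		alignment = <0x400000>;
+ *		alloc-ranges = <0x40000000 0x20000000>;
+ *	};
+ *
+ * is carved out below from the given range with the given alignment
+ * (raised to the CMA minimum for reusable shared-dma-pool regions),
+ * assuming one address cell and one size cell in the root node.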
+ */ +static int __init __reserved_mem_alloc_size(unsigned long node, + const char *uname, phys_addr_t *res_base, phys_addr_t *res_size) +{ + int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32); + phys_addr_t start = 0, end = 0; + phys_addr_t base = 0, align = 0, size; + int len; + const __be32 *prop; + bool nomap; + int ret; + + prop = of_get_flat_dt_prop(node, "size", &len); + if (!prop) + return -EINVAL; + + if (len != dt_root_size_cells * sizeof(__be32)) { + pr_err("invalid size property in '%s' node.\n", uname); + return -EINVAL; + } + size = dt_mem_next_cell(dt_root_size_cells, &prop); + + prop = of_get_flat_dt_prop(node, "alignment", &len); + if (prop) { + if (len != dt_root_addr_cells * sizeof(__be32)) { + pr_err("invalid alignment property in '%s' node.\n", + uname); + return -EINVAL; + } + align = dt_mem_next_cell(dt_root_addr_cells, &prop); + } + + nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + + /* Need adjust the alignment to satisfy the CMA requirement */ + if (IS_ENABLED(CONFIG_CMA) + && of_flat_dt_is_compatible(node, "shared-dma-pool") + && of_get_flat_dt_prop(node, "reusable", NULL) + && !nomap) + align = max_t(phys_addr_t, align, CMA_MIN_ALIGNMENT_BYTES); + + prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); + if (prop) { + + if (len % t_len != 0) { + pr_err("invalid alloc-ranges property in '%s', skipping node.\n", + uname); + return -EINVAL; + } + + base = 0; + + while (len > 0) { + start = dt_mem_next_cell(dt_root_addr_cells, &prop); + end = start + dt_mem_next_cell(dt_root_size_cells, + &prop); + + ret = early_init_dt_alloc_reserved_memory_arch(size, + align, start, end, nomap, &base); + if (ret == 0) { + pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n", + uname, &base, + (unsigned long)(size / SZ_1M)); + break; + } + len -= t_len; + } + + } else { + ret = early_init_dt_alloc_reserved_memory_arch(size, align, + 0, 0, nomap, &base); + if (ret == 0) + pr_debug("allocated memory for '%s' node: base %pa, size %lu MiB\n", + uname, &base, (unsigned long)(size / SZ_1M)); + } + + if (base == 0) { + pr_err("failed to allocate memory for node '%s': size %lu MiB\n", + uname, (unsigned long)(size / SZ_1M)); + return -ENOMEM; + } + + *res_base = base; + *res_size = size; + + return 0; +} + +static const struct of_device_id __rmem_of_table_sentinel + __used __section("__reservedmem_of_table_end"); + +/* + * __reserved_mem_init_node() - call region specific reserved memory init code + */ +static int __init __reserved_mem_init_node(struct reserved_mem *rmem) +{ + extern const struct of_device_id __reservedmem_of_table[]; + const struct of_device_id *i; + int ret = -ENOENT; + + for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) { + reservedmem_of_init_fn initfn = i->data; + const char *compat = i->compatible; + + if (!of_flat_dt_is_compatible(rmem->fdt_node, compat)) + continue; + + ret = initfn(rmem); + if (ret == 0) { + pr_info("initialized node %s, compatible id %s\n", + rmem->name, compat); + break; + } + } + return ret; +} + +static int __init __rmem_cmp(const void *a, const void *b) +{ + const struct reserved_mem *ra = a, *rb = b; + + if (ra->base < rb->base) + return -1; + + if (ra->base > rb->base) + return 1; + + /* + * Put the dynamic allocations (address == 0, size == 0) before static + * allocations at address 0x0 so that overlap detection works + * correctly. 
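+	 *
+	 * For example, a dynamic entry (base 0, size 0) sorts ahead of a
+	 * static region at <0x0 0x1000>, so the overlap check below does
+	 * not report a spurious overlap between the two.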
+ */ + if (ra->size < rb->size) + return -1; + if (ra->size > rb->size) + return 1; + + return 0; +} + +static void __init __rmem_check_for_overlap(void) +{ + int i; + + if (reserved_mem_count < 2) + return; + + sort(reserved_mem, reserved_mem_count, sizeof(reserved_mem[0]), + __rmem_cmp, NULL); + for (i = 0; i < reserved_mem_count - 1; i++) { + struct reserved_mem *this, *next; + + this = &reserved_mem[i]; + next = &reserved_mem[i + 1]; + + if (this->base + this->size > next->base) { + phys_addr_t this_end, next_end; + + this_end = this->base + this->size; + next_end = next->base + next->size; + pr_err("OVERLAP DETECTED!\n%s (%pa--%pa) overlaps with %s (%pa--%pa)\n", + this->name, &this->base, &this_end, + next->name, &next->base, &next_end); + } + } +} + +/** + * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions + */ +void __init fdt_init_reserved_mem(void) +{ + int i; + + /* check for overlapping reserved regions */ + __rmem_check_for_overlap(); + + for (i = 0; i < reserved_mem_count; i++) { + struct reserved_mem *rmem = &reserved_mem[i]; + unsigned long node = rmem->fdt_node; + int len; + const __be32 *prop; + int err = 0; + bool nomap; + + nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + prop = of_get_flat_dt_prop(node, "phandle", &len); + if (!prop) + prop = of_get_flat_dt_prop(node, "linux,phandle", &len); + if (prop) + rmem->phandle = of_read_number(prop, len/4); + + if (rmem->size == 0) + err = __reserved_mem_alloc_size(node, rmem->name, + &rmem->base, &rmem->size); + if (err == 0) { + err = __reserved_mem_init_node(rmem); + if (err != 0 && err != -ENOENT) { + pr_info("node %s compatible matching fail\n", + rmem->name); + if (nomap) + memblock_clear_nomap(rmem->base, rmem->size); + else + memblock_phys_free(rmem->base, + rmem->size); + } + } + } +} + +static inline struct reserved_mem *__find_rmem(struct device_node *node) +{ + unsigned int i; + + if (!node->phandle) + return NULL; + + for (i = 0; i < reserved_mem_count; i++) + if (reserved_mem[i].phandle == node->phandle) + return &reserved_mem[i]; + return NULL; +} + +struct rmem_assigned_device { + struct device *dev; + struct reserved_mem *rmem; + struct list_head list; +}; + +static LIST_HEAD(of_rmem_assigned_device_list); +static DEFINE_MUTEX(of_rmem_assigned_device_mutex); + +/** + * of_reserved_mem_device_init_by_idx() - assign reserved memory region to + * given device + * @dev: Pointer to the device to configure + * @np: Pointer to the device_node with 'reserved-memory' property + * @idx: Index of selected region + * + * This function assigns respective DMA-mapping operations based on reserved + * memory region specified by 'memory-region' property in @np node to the @dev + * device. When driver needs to use more than one reserved memory region, it + * should allocate child devices and initialize regions by name for each of + * child device. + * + * Returns error code or zero on success. 
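+ *
+ * A minimal usage sketch from a (hypothetical) driver probe:
+ *
+ *	ret = of_reserved_mem_device_init_by_idx(&pdev->dev,
+ *						 pdev->dev.of_node, 0);
+ *	if (ret && ret != -ENODEV)
+ *		return ret;
+ *
+ * where -ENODEV only means that no "memory-region" entry exists at the
+ * requested index.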
+ */ +int of_reserved_mem_device_init_by_idx(struct device *dev, + struct device_node *np, int idx) +{ + struct rmem_assigned_device *rd; + struct device_node *target; + struct reserved_mem *rmem; + int ret; + + if (!np || !dev) + return -EINVAL; + + target = of_parse_phandle(np, "memory-region", idx); + if (!target) + return -ENODEV; + + if (!of_device_is_available(target)) { + of_node_put(target); + return 0; + } + + rmem = __find_rmem(target); + of_node_put(target); + + if (!rmem || !rmem->ops || !rmem->ops->device_init) + return -EINVAL; + + rd = kmalloc(sizeof(struct rmem_assigned_device), GFP_KERNEL); + if (!rd) + return -ENOMEM; + + ret = rmem->ops->device_init(rmem, dev); + if (ret == 0) { + rd->dev = dev; + rd->rmem = rmem; + + mutex_lock(&of_rmem_assigned_device_mutex); + list_add(&rd->list, &of_rmem_assigned_device_list); + mutex_unlock(&of_rmem_assigned_device_mutex); + + dev_info(dev, "assigned reserved memory node %s\n", rmem->name); + } else { + kfree(rd); + } + + return ret; +} +EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx); + +/** + * of_reserved_mem_device_init_by_name() - assign named reserved memory region + * to given device + * @dev: pointer to the device to configure + * @np: pointer to the device node with 'memory-region' property + * @name: name of the selected memory region + * + * Returns: 0 on success or a negative error-code on failure. + */ +int of_reserved_mem_device_init_by_name(struct device *dev, + struct device_node *np, + const char *name) +{ + int idx = of_property_match_string(np, "memory-region-names", name); + + return of_reserved_mem_device_init_by_idx(dev, np, idx); +} +EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name); + +/** + * of_reserved_mem_device_release() - release reserved memory device structures + * @dev: Pointer to the device to deconfigure + * + * This function releases structures allocated for memory region handling for + * the given device. + */ +void of_reserved_mem_device_release(struct device *dev) +{ + struct rmem_assigned_device *rd, *tmp; + LIST_HEAD(release_list); + + mutex_lock(&of_rmem_assigned_device_mutex); + list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) { + if (rd->dev == dev) + list_move_tail(&rd->list, &release_list); + } + mutex_unlock(&of_rmem_assigned_device_mutex); + + list_for_each_entry_safe(rd, tmp, &release_list, list) { + if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release) + rd->rmem->ops->device_release(rd->rmem, dev); + + kfree(rd); + } +} +EXPORT_SYMBOL_GPL(of_reserved_mem_device_release); + +/** + * of_reserved_mem_lookup() - acquire reserved_mem from a device node + * @np: node pointer of the desired reserved-memory region + * + * This function allows drivers to acquire a reference to the reserved_mem + * struct based on a device node handle. + * + * Returns a reserved_mem reference, or NULL on error. 
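+ *
+ * A minimal usage sketch (hypothetical consumer code):
+ *
+ *	np = of_parse_phandle(dev->of_node, "memory-region", 0);
+ *	rmem = np ? of_reserved_mem_lookup(np) : NULL;
+ *	of_node_put(np);
+ *	if (rmem)
+ *		dev_info(dev, "using %s at %pa\n", rmem->name, &rmem->base);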
+ */ +struct reserved_mem *of_reserved_mem_lookup(struct device_node *np) +{ + const char *name; + int i; + + if (!np->full_name) + return NULL; + + name = kbasename(np->full_name); + for (i = 0; i < reserved_mem_count; i++) + if (!strcmp(reserved_mem[i].name, name)) + return &reserved_mem[i]; + + return NULL; +} +EXPORT_SYMBOL_GPL(of_reserved_mem_lookup); diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c new file mode 100644 index 000000000..4402871b5 --- /dev/null +++ b/drivers/of/overlay.c @@ -0,0 +1,1266 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Functions for working with device tree overlays + * + * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com> + * Copyright (C) 2012 Texas Instruments Inc. + */ + +#define pr_fmt(fmt) "OF: overlay: " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_fdt.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/libfdt.h> +#include <linux/err.h> +#include <linux/idr.h> + +#include "of_private.h" + +/** + * struct target - info about current target node as recursing through overlay + * @np: node where current level of overlay will be applied + * @in_livetree: @np is a node in the live devicetree + * + * Used in the algorithm to create the portion of a changeset that describes + * an overlay fragment, which is a devicetree subtree. Initially @np is a node + * in the live devicetree where the overlay subtree is targeted to be grafted + * into. When recursing to the next level of the overlay subtree, the target + * also recurses to the next level of the live devicetree, as long as overlay + * subtree node also exists in the live devicetree. When a node in the overlay + * subtree does not exist at the same level in the live devicetree, target->np + * points to a newly allocated node, and all subsequent targets in the subtree + * will be newly allocated nodes. + */ +struct target { + struct device_node *np; + bool in_livetree; +}; + +/** + * struct fragment - info about fragment nodes in overlay expanded device tree + * @target: target of the overlay operation + * @overlay: pointer to the __overlay__ node + */ +struct fragment { + struct device_node *overlay; + struct device_node *target; +}; + +/** + * struct overlay_changeset + * @id: changeset identifier + * @ovcs_list: list on which we are located + * @new_fdt: Memory allocated to hold unflattened aligned FDT + * @overlay_mem: the memory chunk that contains @overlay_root + * @overlay_root: expanded device tree that contains the fragment nodes + * @notify_state: most recent notify action used on overlay + * @count: count of fragment structures + * @fragments: fragment nodes in the overlay expanded device tree + * @symbols_fragment: last element of @fragments[] is the __symbols__ node + * @cset: changeset to apply fragments to live device tree + */ +struct overlay_changeset { + int id; + struct list_head ovcs_list; + const void *new_fdt; + const void *overlay_mem; + struct device_node *overlay_root; + enum of_overlay_notify_action notify_state; + int count; + struct fragment *fragments; + bool symbols_fragment; + struct of_changeset cset; +}; + +/* flags are sticky - once set, do not reset */ +static int devicetree_state_flags; +#define DTSF_APPLY_FAIL 0x01 +#define DTSF_REVERT_FAIL 0x02 + +/* + * If a changeset apply or revert encounters an error, an attempt will + * be made to undo partial changes, but may fail. 
If the undo fails + * we do not know the state of the devicetree. + */ +static int devicetree_corrupt(void) +{ + return devicetree_state_flags & + (DTSF_APPLY_FAIL | DTSF_REVERT_FAIL); +} + +static int build_changeset_next_level(struct overlay_changeset *ovcs, + struct target *target, const struct device_node *overlay_node); + +/* + * of_resolve_phandles() finds the largest phandle in the live tree. + * of_overlay_apply() may add a larger phandle to the live tree. + * Do not allow race between two overlays being applied simultaneously: + * mutex_lock(&of_overlay_phandle_mutex) + * of_resolve_phandles() + * of_overlay_apply() + * mutex_unlock(&of_overlay_phandle_mutex) + */ +static DEFINE_MUTEX(of_overlay_phandle_mutex); + +void of_overlay_mutex_lock(void) +{ + mutex_lock(&of_overlay_phandle_mutex); +} + +void of_overlay_mutex_unlock(void) +{ + mutex_unlock(&of_overlay_phandle_mutex); +} + +static LIST_HEAD(ovcs_list); +static DEFINE_IDR(ovcs_idr); + +static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain); + +/** + * of_overlay_notifier_register() - Register notifier for overlay operations + * @nb: Notifier block to register + * + * Register for notification on overlay operations on device tree nodes. The + * reported actions definied by @of_reconfig_change. The notifier callback + * furthermore receives a pointer to the affected device tree node. + * + * Note that a notifier callback is not supposed to store pointers to a device + * tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the + * respective node it received. + */ +int of_overlay_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&overlay_notify_chain, nb); +} +EXPORT_SYMBOL_GPL(of_overlay_notifier_register); + +/** + * of_overlay_notifier_unregister() - Unregister notifier for overlay operations + * @nb: Notifier block to unregister + */ +int of_overlay_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&overlay_notify_chain, nb); +} +EXPORT_SYMBOL_GPL(of_overlay_notifier_unregister); + +static int overlay_notify(struct overlay_changeset *ovcs, + enum of_overlay_notify_action action) +{ + struct of_overlay_notify_data nd; + int i, ret; + + ovcs->notify_state = action; + + for (i = 0; i < ovcs->count; i++) { + struct fragment *fragment = &ovcs->fragments[i]; + + nd.target = fragment->target; + nd.overlay = fragment->overlay; + + ret = blocking_notifier_call_chain(&overlay_notify_chain, + action, &nd); + if (notifier_to_errno(ret)) { + ret = notifier_to_errno(ret); + pr_err("overlay changeset %s notifier error %d, target: %pOF\n", + of_overlay_action_name(action), ret, nd.target); + return ret; + } + } + + return 0; +} + +/* + * The values of properties in the "/__symbols__" node are paths in + * the ovcs->overlay_root. When duplicating the properties, the paths + * need to be adjusted to be the correct path for the live device tree. + * + * The paths refer to a node in the subtree of a fragment node's "__overlay__" + * node, for example "/fragment@0/__overlay__/symbol_path_tail", + * where symbol_path_tail can be a single node or it may be a multi-node path. + * + * The duplicated property value will be modified by replacing the + * "/fragment_name/__overlay/" portion of the value with the target + * path from the fragment node. 
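+ *
+ * For example (hypothetical names), a __symbols__ value of
+ *
+ *	"/fragment@0/__overlay__/led0"
+ *
+ * for a fragment whose target is "/leds" is rewritten here to
+ *
+ *	"/leds/led0"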
+ */ +static struct property *dup_and_fixup_symbol_prop( + struct overlay_changeset *ovcs, const struct property *prop) +{ + struct fragment *fragment; + struct property *new_prop; + struct device_node *fragment_node; + struct device_node *overlay_node; + const char *path; + const char *path_tail; + const char *target_path; + int k; + int overlay_name_len; + int path_len; + int path_tail_len; + int target_path_len; + + if (!prop->value) + return NULL; + if (strnlen(prop->value, prop->length) >= prop->length) + return NULL; + path = prop->value; + path_len = strlen(path); + + if (path_len < 1) + return NULL; + fragment_node = __of_find_node_by_path(ovcs->overlay_root, path + 1); + overlay_node = __of_find_node_by_path(fragment_node, "__overlay__/"); + of_node_put(fragment_node); + of_node_put(overlay_node); + + for (k = 0; k < ovcs->count; k++) { + fragment = &ovcs->fragments[k]; + if (fragment->overlay == overlay_node) + break; + } + if (k >= ovcs->count) + return NULL; + + overlay_name_len = snprintf(NULL, 0, "%pOF", fragment->overlay); + + if (overlay_name_len > path_len) + return NULL; + path_tail = path + overlay_name_len; + path_tail_len = strlen(path_tail); + + target_path = kasprintf(GFP_KERNEL, "%pOF", fragment->target); + if (!target_path) + return NULL; + target_path_len = strlen(target_path); + + new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); + if (!new_prop) + goto err_free_target_path; + + new_prop->name = kstrdup(prop->name, GFP_KERNEL); + new_prop->length = target_path_len + path_tail_len + 1; + new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); + if (!new_prop->name || !new_prop->value) + goto err_free_new_prop; + + strcpy(new_prop->value, target_path); + strcpy(new_prop->value + target_path_len, path_tail); + + of_property_set_flag(new_prop, OF_DYNAMIC); + + kfree(target_path); + + return new_prop; + +err_free_new_prop: + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); +err_free_target_path: + kfree(target_path); + + return NULL; +} + +/** + * add_changeset_property() - add @overlay_prop to overlay changeset + * @ovcs: overlay changeset + * @target: where @overlay_prop will be placed + * @overlay_prop: property to add or update, from overlay tree + * @is_symbols_prop: 1 if @overlay_prop is from node "/__symbols__" + * + * If @overlay_prop does not already exist in live devicetree, add changeset + * entry to add @overlay_prop in @target, else add changeset entry to update + * value of @overlay_prop. + * + * @target may be either in the live devicetree or in a new subtree that + * is contained in the changeset. + * + * Some special properties are not added or updated (no error returned): + * "name", "phandle", "linux,phandle". + * + * Properties "#address-cells" and "#size-cells" are not updated if they + * are already in the live tree, but if present in the live tree, the values + * in the overlay must match the values in the live tree. + * + * Update of property in symbols node is not allowed. + * + * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if + * invalid @overlay. 
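+ *
+ * As a hedged illustration of the #address-cells / #size-cells rule above
+ * (cell values chosen arbitrarily): an overlay that carries
+ * #address-cells = <2> for a target whose live node already has
+ * #address-cells = <1> fails with -EINVAL, while identical values are
+ * accepted silently and no update entry is created.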
+ */ +static int add_changeset_property(struct overlay_changeset *ovcs, + struct target *target, struct property *overlay_prop, + bool is_symbols_prop) +{ + struct property *new_prop = NULL, *prop; + int ret = 0; + + if (target->in_livetree) + if (!of_prop_cmp(overlay_prop->name, "name") || + !of_prop_cmp(overlay_prop->name, "phandle") || + !of_prop_cmp(overlay_prop->name, "linux,phandle")) + return 0; + + if (target->in_livetree) + prop = of_find_property(target->np, overlay_prop->name, NULL); + else + prop = NULL; + + if (prop) { + if (!of_prop_cmp(prop->name, "#address-cells")) { + if (!of_prop_val_eq(prop, overlay_prop)) { + pr_err("ERROR: changing value of #address-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + return ret; + + } else if (!of_prop_cmp(prop->name, "#size-cells")) { + if (!of_prop_val_eq(prop, overlay_prop)) { + pr_err("ERROR: changing value of #size-cells is not allowed in %pOF\n", + target->np); + ret = -EINVAL; + } + return ret; + } + } + + if (is_symbols_prop) { + if (prop) + return -EINVAL; + new_prop = dup_and_fixup_symbol_prop(ovcs, overlay_prop); + } else { + new_prop = __of_prop_dup(overlay_prop, GFP_KERNEL); + } + + if (!new_prop) + return -ENOMEM; + + if (!prop) { + if (!target->in_livetree) { + new_prop->next = target->np->deadprops; + target->np->deadprops = new_prop; + } + ret = of_changeset_add_property(&ovcs->cset, target->np, + new_prop); + } else { + ret = of_changeset_update_property(&ovcs->cset, target->np, + new_prop); + } + + if (!of_node_check_flag(target->np, OF_OVERLAY)) + pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n", + target->np, new_prop->name); + + if (ret) { + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); + } + return ret; +} + +/** + * add_changeset_node() - add @node (and children) to overlay changeset + * @ovcs: overlay changeset + * @target: where @node will be placed in live tree or changeset + * @node: node from within overlay device tree fragment + * + * If @node does not already exist in @target, add changeset entry + * to add @node in @target. + * + * If @node already exists in @target, and the existing node has + * a phandle, the overlay node is not allowed to have a phandle. + * + * If @node has child nodes, add the children recursively via + * build_changeset_next_level(). + * + * NOTE_1: A live devicetree created from a flattened device tree (FDT) will + * not contain the full path in node->full_name. Thus an overlay + * created from an FDT also will not contain the full path in + * node->full_name. However, a live devicetree created from Open + * Firmware may have the full path in node->full_name. + * + * add_changeset_node() follows the FDT convention and does not include + * the full path in node->full_name. Even though it expects the overlay + * to not contain the full path, it uses kbasename() to remove the + * full path should it exist. It also uses kbasename() in comparisons + * to nodes in the live devicetree so that it can apply an overlay to + * a live devicetree created from Open Firmware. + * + * NOTE_2: Multiple mods of created nodes not supported. + * + * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if + * invalid @overlay. 
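+ *
+ * A small illustration (node names are hypothetical): an overlay node
+ * "spi@30000" is matched against the children of @target by unit name,
+ * so it merges into an existing live child "spi@30000" if there is one
+ * and is grafted in as a new node otherwise. If both the overlay node
+ * and the matching live node carry a phandle, the overlay is rejected
+ * with -EINVAL.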
+ */ +static int add_changeset_node(struct overlay_changeset *ovcs, + struct target *target, struct device_node *node) +{ + const char *node_kbasename; + const __be32 *phandle; + struct device_node *tchild; + struct target target_child; + int ret = 0, size; + + node_kbasename = kbasename(node->full_name); + + for_each_child_of_node(target->np, tchild) + if (!of_node_cmp(node_kbasename, kbasename(tchild->full_name))) + break; + + if (!tchild) { + tchild = __of_node_dup(NULL, node_kbasename); + if (!tchild) + return -ENOMEM; + + tchild->parent = target->np; + tchild->name = __of_get_property(node, "name", NULL); + + if (!tchild->name) + tchild->name = "<NULL>"; + + /* ignore obsolete "linux,phandle" */ + phandle = __of_get_property(node, "phandle", &size); + if (phandle && (size == 4)) + tchild->phandle = be32_to_cpup(phandle); + + of_node_set_flag(tchild, OF_OVERLAY); + + ret = of_changeset_attach_node(&ovcs->cset, tchild); + if (ret) + return ret; + + target_child.np = tchild; + target_child.in_livetree = false; + + ret = build_changeset_next_level(ovcs, &target_child, node); + of_node_put(tchild); + return ret; + } + + if (node->phandle && tchild->phandle) { + ret = -EINVAL; + } else { + target_child.np = tchild; + target_child.in_livetree = target->in_livetree; + ret = build_changeset_next_level(ovcs, &target_child, node); + } + of_node_put(tchild); + + return ret; +} + +/** + * build_changeset_next_level() - add level of overlay changeset + * @ovcs: overlay changeset + * @target: where to place @overlay_node in live tree + * @overlay_node: node from within an overlay device tree fragment + * + * Add the properties (if any) and nodes (if any) from @overlay_node to the + * @ovcs->cset changeset. If an added node has child nodes, they will + * be added recursively. + * + * Do not allow symbols node to have any children. + * + * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if + * invalid @overlay_node. + */ +static int build_changeset_next_level(struct overlay_changeset *ovcs, + struct target *target, const struct device_node *overlay_node) +{ + struct device_node *child; + struct property *prop; + int ret; + + for_each_property_of_node(overlay_node, prop) { + ret = add_changeset_property(ovcs, target, prop, 0); + if (ret) { + pr_debug("Failed to apply prop @%pOF/%s, err=%d\n", + target->np, prop->name, ret); + return ret; + } + } + + for_each_child_of_node(overlay_node, child) { + ret = add_changeset_node(ovcs, target, child); + if (ret) { + pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n", + target->np, child, ret); + of_node_put(child); + return ret; + } + } + + return 0; +} + +/* + * Add the properties from __overlay__ node to the @ovcs->cset changeset. 
+ */ +static int build_changeset_symbols_node(struct overlay_changeset *ovcs, + struct target *target, + const struct device_node *overlay_symbols_node) +{ + struct property *prop; + int ret; + + for_each_property_of_node(overlay_symbols_node, prop) { + ret = add_changeset_property(ovcs, target, prop, 1); + if (ret) { + pr_debug("Failed to apply symbols prop @%pOF/%s, err=%d\n", + target->np, prop->name, ret); + return ret; + } + } + + return 0; +} + +static int find_dup_cset_node_entry(struct overlay_changeset *ovcs, + struct of_changeset_entry *ce_1) +{ + struct of_changeset_entry *ce_2; + char *fn_1, *fn_2; + int node_path_match; + + if (ce_1->action != OF_RECONFIG_ATTACH_NODE && + ce_1->action != OF_RECONFIG_DETACH_NODE) + return 0; + + ce_2 = ce_1; + list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) { + if ((ce_2->action != OF_RECONFIG_ATTACH_NODE && + ce_2->action != OF_RECONFIG_DETACH_NODE) || + of_node_cmp(ce_1->np->full_name, ce_2->np->full_name)) + continue; + + fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); + fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); + node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); + kfree(fn_1); + kfree(fn_2); + if (node_path_match) { + pr_err("ERROR: multiple fragments add and/or delete node %pOF\n", + ce_1->np); + return -EINVAL; + } + } + + return 0; +} + +static int find_dup_cset_prop(struct overlay_changeset *ovcs, + struct of_changeset_entry *ce_1) +{ + struct of_changeset_entry *ce_2; + char *fn_1, *fn_2; + int node_path_match; + + if (ce_1->action != OF_RECONFIG_ADD_PROPERTY && + ce_1->action != OF_RECONFIG_REMOVE_PROPERTY && + ce_1->action != OF_RECONFIG_UPDATE_PROPERTY) + return 0; + + ce_2 = ce_1; + list_for_each_entry_continue(ce_2, &ovcs->cset.entries, node) { + if ((ce_2->action != OF_RECONFIG_ADD_PROPERTY && + ce_2->action != OF_RECONFIG_REMOVE_PROPERTY && + ce_2->action != OF_RECONFIG_UPDATE_PROPERTY) || + of_node_cmp(ce_1->np->full_name, ce_2->np->full_name)) + continue; + + fn_1 = kasprintf(GFP_KERNEL, "%pOF", ce_1->np); + fn_2 = kasprintf(GFP_KERNEL, "%pOF", ce_2->np); + node_path_match = !fn_1 || !fn_2 || !strcmp(fn_1, fn_2); + kfree(fn_1); + kfree(fn_2); + if (node_path_match && + !of_prop_cmp(ce_1->prop->name, ce_2->prop->name)) { + pr_err("ERROR: multiple fragments add, update, and/or delete property %pOF/%s\n", + ce_1->np, ce_1->prop->name); + return -EINVAL; + } + } + + return 0; +} + +/** + * changeset_dup_entry_check() - check for duplicate entries + * @ovcs: Overlay changeset + * + * Check changeset @ovcs->cset for multiple {add or delete} node entries for + * the same node or duplicate {add, delete, or update} properties entries + * for the same property. + * + * Return: 0 on success, or -EINVAL if duplicate changeset entry found. + */ +static int changeset_dup_entry_check(struct overlay_changeset *ovcs) +{ + struct of_changeset_entry *ce_1; + int dup_entry = 0; + + list_for_each_entry(ce_1, &ovcs->cset.entries, node) { + dup_entry |= find_dup_cset_node_entry(ovcs, ce_1); + dup_entry |= find_dup_cset_prop(ovcs, ce_1); + } + + return dup_entry ? -EINVAL : 0; +} + +/** + * build_changeset() - populate overlay changeset in @ovcs from @ovcs->fragments + * @ovcs: Overlay changeset + * + * Create changeset @ovcs->cset to contain the nodes and properties of the + * overlay device tree fragments in @ovcs->fragments[]. If an error occurs, + * any portions of the changeset that were successfully created will remain + * in @ovcs->cset. 
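+ *
+ * For instance (an illustrative layout): with
+ * @ovcs->fragments[] = { fragment@0, fragment@1, __symbols__ } and
+ * @ovcs->symbols_fragment set, the first two entries are expanded with
+ * build_changeset_next_level() and only the final entry is handed to
+ * build_changeset_symbols_node().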
+ * + * Return: 0 on success, -ENOMEM if memory allocation failure, or -EINVAL if + * invalid overlay in @ovcs->fragments[]. + */ +static int build_changeset(struct overlay_changeset *ovcs) +{ + struct fragment *fragment; + struct target target; + int fragments_count, i, ret; + + /* + * if there is a symbols fragment in ovcs->fragments[i] it is + * the final element in the array + */ + if (ovcs->symbols_fragment) + fragments_count = ovcs->count - 1; + else + fragments_count = ovcs->count; + + for (i = 0; i < fragments_count; i++) { + fragment = &ovcs->fragments[i]; + + target.np = fragment->target; + target.in_livetree = true; + ret = build_changeset_next_level(ovcs, &target, + fragment->overlay); + if (ret) { + pr_debug("fragment apply failed '%pOF'\n", + fragment->target); + return ret; + } + } + + if (ovcs->symbols_fragment) { + fragment = &ovcs->fragments[ovcs->count - 1]; + + target.np = fragment->target; + target.in_livetree = true; + ret = build_changeset_symbols_node(ovcs, &target, + fragment->overlay); + if (ret) { + pr_debug("symbols fragment apply failed '%pOF'\n", + fragment->target); + return ret; + } + } + + return changeset_dup_entry_check(ovcs); +} + +/* + * Find the target node using a number of different strategies + * in order of preference: + * + * 1) "target" property containing the phandle of the target + * 2) "target-path" property containing the path of the target + */ +static struct device_node *find_target(struct device_node *info_node) +{ + struct device_node *node; + const char *path; + u32 val; + int ret; + + ret = of_property_read_u32(info_node, "target", &val); + if (!ret) { + node = of_find_node_by_phandle(val); + if (!node) + pr_err("find target, node: %pOF, phandle 0x%x not found\n", + info_node, val); + return node; + } + + ret = of_property_read_string(info_node, "target-path", &path); + if (!ret) { + node = of_find_node_by_path(path); + if (!node) + pr_err("find target, node: %pOF, path '%s' not found\n", + info_node, path); + return node; + } + + pr_err("find target, node: %pOF, no target property\n", info_node); + + return NULL; +} + +/** + * init_overlay_changeset() - initialize overlay changeset from overlay tree + * @ovcs: Overlay changeset to build + * + * Initialize @ovcs. Populate @ovcs->fragments with node information from + * the top level of @overlay_root. The relevant top level nodes are the + * fragment nodes and the __symbols__ node. Any other top level node will + * be ignored. Populate other @ovcs fields. + * + * Return: 0 on success, -ENOMEM if memory allocation failure, -EINVAL if error + * detected in @overlay_root. On error return, the caller of + * init_overlay_changeset() must call free_overlay_changeset(). + */ +static int init_overlay_changeset(struct overlay_changeset *ovcs) +{ + struct device_node *node, *overlay_node; + struct fragment *fragment; + struct fragment *fragments; + int cnt, ret; + + /* + * None of the resources allocated by this function will be freed in + * the error paths. Instead the caller of this function is required + * to call free_overlay_changeset() (which will free the resources) + * if error return. + */ + + /* + * Warn for some issues. Can not return -EINVAL for these until + * of_unittest_apply_overlay() is fixed to pass these checks. 
+ */ + if (!of_node_check_flag(ovcs->overlay_root, OF_DYNAMIC)) + pr_debug("%s() ovcs->overlay_root is not dynamic\n", __func__); + + if (!of_node_check_flag(ovcs->overlay_root, OF_DETACHED)) + pr_debug("%s() ovcs->overlay_root is not detached\n", __func__); + + if (!of_node_is_root(ovcs->overlay_root)) + pr_debug("%s() ovcs->overlay_root is not root\n", __func__); + + cnt = 0; + + /* fragment nodes */ + for_each_child_of_node(ovcs->overlay_root, node) { + overlay_node = of_get_child_by_name(node, "__overlay__"); + if (overlay_node) { + cnt++; + of_node_put(overlay_node); + } + } + + node = of_get_child_by_name(ovcs->overlay_root, "__symbols__"); + if (node) { + cnt++; + of_node_put(node); + } + + fragments = kcalloc(cnt, sizeof(*fragments), GFP_KERNEL); + if (!fragments) { + ret = -ENOMEM; + goto err_out; + } + ovcs->fragments = fragments; + + cnt = 0; + for_each_child_of_node(ovcs->overlay_root, node) { + overlay_node = of_get_child_by_name(node, "__overlay__"); + if (!overlay_node) + continue; + + fragment = &fragments[cnt]; + fragment->overlay = overlay_node; + fragment->target = find_target(node); + if (!fragment->target) { + of_node_put(fragment->overlay); + ret = -EINVAL; + of_node_put(node); + goto err_out; + } + + cnt++; + } + + /* + * if there is a symbols fragment in ovcs->fragments[i] it is + * the final element in the array + */ + node = of_get_child_by_name(ovcs->overlay_root, "__symbols__"); + if (node) { + ovcs->symbols_fragment = 1; + fragment = &fragments[cnt]; + fragment->overlay = node; + fragment->target = of_find_node_by_path("/__symbols__"); + + if (!fragment->target) { + pr_err("symbols in overlay, but not in live tree\n"); + ret = -EINVAL; + of_node_put(node); + goto err_out; + } + + cnt++; + } + + if (!cnt) { + pr_err("no fragments or symbols in overlay\n"); + ret = -EINVAL; + goto err_out; + } + + ovcs->count = cnt; + + return 0; + +err_out: + pr_err("%s() failed, ret = %d\n", __func__, ret); + + return ret; +} + +static void free_overlay_changeset(struct overlay_changeset *ovcs) +{ + int i; + + if (ovcs->cset.entries.next) + of_changeset_destroy(&ovcs->cset); + + if (ovcs->id) { + idr_remove(&ovcs_idr, ovcs->id); + list_del(&ovcs->ovcs_list); + ovcs->id = 0; + } + + + for (i = 0; i < ovcs->count; i++) { + of_node_put(ovcs->fragments[i].target); + of_node_put(ovcs->fragments[i].overlay); + } + kfree(ovcs->fragments); + + /* + * There should be no live pointers into ovcs->overlay_mem and + * ovcs->new_fdt due to the policy that overlay notifiers are not + * allowed to retain pointers into the overlay devicetree other + * than during the window from OF_OVERLAY_PRE_APPLY overlay + * notifiers until the OF_OVERLAY_POST_REMOVE overlay notifiers. + * + * A memory leak will occur here if within the window. + */ + + if (ovcs->notify_state == OF_OVERLAY_INIT || + ovcs->notify_state == OF_OVERLAY_POST_REMOVE) { + kfree(ovcs->overlay_mem); + kfree(ovcs->new_fdt); + } + kfree(ovcs); +} + +/* + * internal documentation + * + * of_overlay_apply() - Create and apply an overlay changeset + * @ovcs: overlay changeset + * + * Creates and applies an overlay changeset. + * + * If an error is returned by an overlay changeset pre-apply notifier + * then no further overlay changeset pre-apply notifier will be called. + * + * If an error is returned by an overlay changeset post-apply notifier + * then no further overlay changeset post-apply notifier will be called. + * + * If more than one notifier returns an error, then the last notifier + * error to occur is returned. 
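+ *
+ * (In outline, the body below resolves phandles, initializes the overlay
+ * changeset, issues the OF_OVERLAY_PRE_APPLY notification, builds and
+ * applies the changeset entries, sends the per-entry apply notifications,
+ * and finishes with OF_OVERLAY_POST_APPLY.)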
+ * + * If an error occurred while applying the overlay changeset, then an + * attempt is made to revert any changes that were made to the + * device tree. If there were any errors during the revert attempt + * then the state of the device tree can not be determined, and any + * following attempt to apply or remove an overlay changeset will be + * refused. + * + * Returns 0 on success, or a negative error number. On error return, + * the caller of of_overlay_apply() must call free_overlay_changeset(). + */ + +static int of_overlay_apply(struct overlay_changeset *ovcs) +{ + int ret = 0, ret_revert, ret_tmp; + + ret = of_resolve_phandles(ovcs->overlay_root); + if (ret) + goto out; + + ret = init_overlay_changeset(ovcs); + if (ret) + goto out; + + ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY); + if (ret) + goto out; + + ret = build_changeset(ovcs); + if (ret) + goto out; + + ret_revert = 0; + ret = __of_changeset_apply_entries(&ovcs->cset, &ret_revert); + if (ret) { + if (ret_revert) { + pr_debug("overlay changeset revert error %d\n", + ret_revert); + devicetree_state_flags |= DTSF_APPLY_FAIL; + } + goto out; + } + + ret = __of_changeset_apply_notify(&ovcs->cset); + if (ret) + pr_err("overlay apply changeset entry notify error %d\n", ret); + /* notify failure is not fatal, continue */ + + ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_APPLY); + if (ret_tmp) + if (!ret) + ret = ret_tmp; + +out: + pr_debug("%s() err=%d\n", __func__, ret); + + return ret; +} + +/* + * of_overlay_fdt_apply() - Create and apply an overlay changeset + * @overlay_fdt: pointer to overlay FDT + * @overlay_fdt_size: number of bytes in @overlay_fdt + * @ret_ovcs_id: pointer for returning created changeset id + * + * Creates and applies an overlay changeset. + * + * See of_overlay_apply() for important behavior information. + * + * Return: 0 on success, or a negative error number. *@ret_ovcs_id is set to + * the value of overlay changeset id, which can be passed to of_overlay_remove() + * to remove the overlay. + * + * On error return, the changeset may be partially applied. This is especially + * likely if an OF_OVERLAY_POST_APPLY notifier returns an error. In this case + * the caller should call of_overlay_remove() with the value in *@ret_ovcs_id. + */ + +int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, + int *ret_ovcs_id) +{ + void *new_fdt; + void *new_fdt_align; + void *overlay_mem; + int ret; + u32 size; + struct overlay_changeset *ovcs; + + *ret_ovcs_id = 0; + + if (devicetree_corrupt()) { + pr_err("devicetree state suspect, refuse to apply overlay\n"); + return -EBUSY; + } + + if (overlay_fdt_size < sizeof(struct fdt_header) || + fdt_check_header(overlay_fdt)) { + pr_err("Invalid overlay_fdt header\n"); + return -EINVAL; + } + + size = fdt_totalsize(overlay_fdt); + if (overlay_fdt_size < size) + return -EINVAL; + + ovcs = kzalloc(sizeof(*ovcs), GFP_KERNEL); + if (!ovcs) + return -ENOMEM; + + of_overlay_mutex_lock(); + mutex_lock(&of_mutex); + + /* + * ovcs->notify_state must be set to OF_OVERLAY_INIT before allocating + * ovcs resources, implicitly set by kzalloc() of ovcs + */ + + ovcs->id = idr_alloc(&ovcs_idr, ovcs, 1, 0, GFP_KERNEL); + if (ovcs->id <= 0) { + ret = ovcs->id; + goto err_free_ovcs; + } + + INIT_LIST_HEAD(&ovcs->ovcs_list); + list_add_tail(&ovcs->ovcs_list, &ovcs_list); + of_changeset_init(&ovcs->cset); + + /* + * Must create permanent copy of FDT because of_fdt_unflatten_tree() + * will create pointers to the passed in FDT in the unflattened tree. 
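+ *
+ * The copy is over-allocated by FDT_ALIGN_SIZE bytes so that PTR_ALIGN()
+ * below can hand an aligned buffer to of_fdt_unflatten_tree() without
+ * the memcpy() running past the end of the allocation.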
+ */ + new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL); + if (!new_fdt) { + ret = -ENOMEM; + goto err_free_ovcs; + } + ovcs->new_fdt = new_fdt; + + new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE); + memcpy(new_fdt_align, overlay_fdt, size); + + overlay_mem = of_fdt_unflatten_tree(new_fdt_align, NULL, + &ovcs->overlay_root); + if (!overlay_mem) { + pr_err("unable to unflatten overlay_fdt\n"); + ret = -EINVAL; + goto err_free_ovcs; + } + ovcs->overlay_mem = overlay_mem; + + ret = of_overlay_apply(ovcs); + /* + * If of_overlay_apply() error, calling free_overlay_changeset() may + * result in a memory leak if the apply partly succeeded, so do NOT + * goto err_free_ovcs. Instead, the caller of of_overlay_fdt_apply() + * can call of_overlay_remove(); + */ + *ret_ovcs_id = ovcs->id; + goto out_unlock; + +err_free_ovcs: + free_overlay_changeset(ovcs); + +out_unlock: + mutex_unlock(&of_mutex); + of_overlay_mutex_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(of_overlay_fdt_apply); + +/* + * Find @np in @tree. + * + * Returns 1 if @np is @tree or is contained in @tree, else 0 + */ +static int find_node(struct device_node *tree, struct device_node *np) +{ + struct device_node *child; + + if (tree == np) + return 1; + + for_each_child_of_node(tree, child) { + if (find_node(child, np)) { + of_node_put(child); + return 1; + } + } + + return 0; +} + +/* + * Is @remove_ce_node a child of, a parent of, or the same as any + * node in an overlay changeset more topmost than @remove_ovcs? + * + * Returns 1 if found, else 0 + */ +static int node_overlaps_later_cs(struct overlay_changeset *remove_ovcs, + struct device_node *remove_ce_node) +{ + struct overlay_changeset *ovcs; + struct of_changeset_entry *ce; + + list_for_each_entry_reverse(ovcs, &ovcs_list, ovcs_list) { + if (ovcs == remove_ovcs) + break; + + list_for_each_entry(ce, &ovcs->cset.entries, node) { + if (find_node(ce->np, remove_ce_node)) { + pr_err("%s: #%d overlaps with #%d @%pOF\n", + __func__, remove_ovcs->id, ovcs->id, + remove_ce_node); + return 1; + } + if (find_node(remove_ce_node, ce->np)) { + pr_err("%s: #%d overlaps with #%d @%pOF\n", + __func__, remove_ovcs->id, ovcs->id, + remove_ce_node); + return 1; + } + } + } + + return 0; +} + +/* + * We can safely remove the overlay only if it's the top-most one. + * Newly applied overlays are inserted at the tail of the overlay list, + * so a top most overlay is the one that is closest to the tail. + * + * The topmost check is done by exploiting this property. For each + * affected device node in the log list we check if this overlay is + * the one closest to the tail. If another overlay has affected this + * device node and is closest to the tail, then removal is not permited. + */ +static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs) +{ + struct of_changeset_entry *remove_ce; + + list_for_each_entry(remove_ce, &remove_ovcs->cset.entries, node) { + if (node_overlaps_later_cs(remove_ovcs, remove_ce->np)) { + pr_err("overlay #%d is not topmost\n", remove_ovcs->id); + return 0; + } + } + + return 1; +} + +/** + * of_overlay_remove() - Revert and free an overlay changeset + * @ovcs_id: Pointer to overlay changeset id + * + * Removes an overlay if it is permissible. @ovcs_id was previously returned + * by of_overlay_fdt_apply(). + * + * If an error occurred while attempting to revert the overlay changeset, + * then an attempt is made to re-apply any changeset entry that was + * reverted. 
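+ * (A typical caller pairs this with of_overlay_fdt_apply(); a minimal
+ * sketch, where fdt and fdt_size are hypothetical variables holding the
+ * overlay blob and its size:
+ *
+ *      int ovcs_id, err;
+ *
+ *      err = of_overlay_fdt_apply(fdt, fdt_size, &ovcs_id);
+ *      ...
+ *      err = of_overlay_remove(&ovcs_id);
+ *
+ * This sketch is illustrative only.)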
If an error occurs on re-apply then the state of the device + * tree can not be determined, and any following attempt to apply or remove + * an overlay changeset will be refused. + * + * A non-zero return value will not revert the changeset if error is from: + * - parameter checks + * - overlay changeset pre-remove notifier + * - overlay changeset entry revert + * + * If an error is returned by an overlay changeset pre-remove notifier + * then no further overlay changeset pre-remove notifier will be called. + * + * If more than one notifier returns an error, then the last notifier + * error to occur is returned. + * + * A non-zero return value will revert the changeset if error is from: + * - overlay changeset entry notifier + * - overlay changeset post-remove notifier + * + * If an error is returned by an overlay changeset post-remove notifier + * then no further overlay changeset post-remove notifier will be called. + * + * Return: 0 on success, or a negative error number. *@ovcs_id is set to + * zero after reverting the changeset, even if a subsequent error occurs. + */ +int of_overlay_remove(int *ovcs_id) +{ + struct overlay_changeset *ovcs; + int ret, ret_apply, ret_tmp; + + if (devicetree_corrupt()) { + pr_err("suspect devicetree state, refuse to remove overlay\n"); + ret = -EBUSY; + goto out; + } + + mutex_lock(&of_mutex); + + ovcs = idr_find(&ovcs_idr, *ovcs_id); + if (!ovcs) { + ret = -ENODEV; + pr_err("remove: Could not find overlay #%d\n", *ovcs_id); + goto err_unlock; + } + + if (!overlay_removal_is_ok(ovcs)) { + ret = -EBUSY; + goto err_unlock; + } + + ret = overlay_notify(ovcs, OF_OVERLAY_PRE_REMOVE); + if (ret) + goto err_unlock; + + ret_apply = 0; + ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply); + if (ret) { + if (ret_apply) + devicetree_state_flags |= DTSF_REVERT_FAIL; + goto err_unlock; + } + + ret = __of_changeset_revert_notify(&ovcs->cset); + if (ret) + pr_err("overlay remove changeset entry notify error %d\n", ret); + /* notify failure is not fatal, continue */ + + *ovcs_id = 0; + + /* + * Note that the overlay memory will be kfree()ed by + * free_overlay_changeset() even if the notifier for + * OF_OVERLAY_POST_REMOVE returns an error. + */ + ret_tmp = overlay_notify(ovcs, OF_OVERLAY_POST_REMOVE); + if (ret_tmp) + if (!ret) + ret = ret_tmp; + + free_overlay_changeset(ovcs); + +err_unlock: + /* + * If jumped over free_overlay_changeset(), then did not kfree() + * overlay related memory. This is a memory leak unless a subsequent + * of_overlay_remove() of this overlay is successful. + */ + mutex_unlock(&of_mutex); + +out: + pr_debug("%s() err=%d\n", __func__, ret); + + return ret; +} +EXPORT_SYMBOL_GPL(of_overlay_remove); + +/** + * of_overlay_remove_all() - Reverts and frees all overlay changesets + * + * Removes all overlays from the system in the correct order. + * + * Return: 0 on success, or a negative error number + */ +int of_overlay_remove_all(void) +{ + struct overlay_changeset *ovcs, *ovcs_n; + int ret; + + /* the tail of list is guaranteed to be safe to remove */ + list_for_each_entry_safe_reverse(ovcs, ovcs_n, &ovcs_list, ovcs_list) { + ret = of_overlay_remove(&ovcs->id); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(of_overlay_remove_all); diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c new file mode 100644 index 000000000..7eda43c66 --- /dev/null +++ b/drivers/of/pdt.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* pdt.c: OF PROM device tree support code. + * + * Paul Mackerras August 1996. 
+ * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc by David S. Miller davem@davemloft.net + * Adapted for multiple architectures by Andres Salomon <dilinger@queued.net> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_pdt.h> + +static struct of_pdt_ops *of_pdt_prom_ops __initdata; + +#if defined(CONFIG_SPARC) +unsigned int of_pdt_unique_id __initdata; + +#define of_pdt_incr_unique_id(p) do { \ + (p)->unique_id = of_pdt_unique_id++; \ +} while (0) + +static char * __init of_pdt_build_full_name(struct device_node *dp) +{ + return build_path_component(dp); +} + +#else /* CONFIG_SPARC */ + +static inline void of_pdt_incr_unique_id(void *p) { } +static inline void irq_trans_init(struct device_node *dp) { } + +static char * __init of_pdt_build_full_name(struct device_node *dp) +{ + static int failsafe_id = 0; /* for generating unique names on failure */ + const char *name; + char path[256]; + char *buf; + int len; + + if (!of_pdt_prom_ops->pkg2path(dp->phandle, path, sizeof(path), &len)) { + name = kbasename(path); + buf = prom_early_alloc(strlen(name) + 1); + strcpy(buf, name); + return buf; + } + + name = of_get_property(dp, "name", &len); + buf = prom_early_alloc(len + 16); + sprintf(buf, "%s@unknown%i", name, failsafe_id++); + pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf); + return buf; +} + +#endif /* !CONFIG_SPARC */ + +static struct property * __init of_pdt_build_one_prop(phandle node, char *prev, + char *special_name, + void *special_val, + int special_len) +{ + static struct property *tmp = NULL; + struct property *p; + int err; + + if (tmp) { + p = tmp; + memset(p, 0, sizeof(*p) + 32); + tmp = NULL; + } else { + p = prom_early_alloc(sizeof(struct property) + 32); + of_pdt_incr_unique_id(p); + } + + p->name = (char *) (p + 1); + if (special_name) { + strcpy(p->name, special_name); + p->length = special_len; + p->value = prom_early_alloc(special_len); + memcpy(p->value, special_val, special_len); + } else { + err = of_pdt_prom_ops->nextprop(node, prev, p->name); + if (err) { + tmp = p; + return NULL; + } + p->length = of_pdt_prom_ops->getproplen(node, p->name); + if (p->length <= 0) { + p->length = 0; + } else { + int len; + + p->value = prom_early_alloc(p->length + 1); + len = of_pdt_prom_ops->getproperty(node, p->name, + p->value, p->length); + if (len <= 0) + p->length = 0; + ((unsigned char *)p->value)[p->length] = '\0'; + } + } + return p; +} + +static struct property * __init of_pdt_build_prop_list(phandle node) +{ + struct property *head, *tail; + + head = tail = of_pdt_build_one_prop(node, NULL, + ".node", &node, sizeof(node)); + + tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0); + tail = tail->next; + while(tail) { + tail->next = of_pdt_build_one_prop(node, tail->name, + NULL, NULL, 0); + tail = tail->next; + } + + return head; +} + +static char * __init of_pdt_get_one_property(phandle node, const char *name) +{ + char *buf = "<NULL>"; + int len; + + len = of_pdt_prom_ops->getproplen(node, name); + if (len > 0) { + buf = prom_early_alloc(len); + len = of_pdt_prom_ops->getproperty(node, name, buf, len); + } + + return buf; +} + +static struct device_node * __init of_pdt_create_node(phandle node, + struct device_node *parent) +{ + struct device_node *dp; + + if (!node) + return NULL; + + dp = 
prom_early_alloc(sizeof(*dp)); + of_node_init(dp); + of_pdt_incr_unique_id(dp); + dp->parent = parent; + + dp->name = of_pdt_get_one_property(node, "name"); + dp->phandle = node; + + dp->properties = of_pdt_build_prop_list(node); + + dp->full_name = of_pdt_build_full_name(dp); + + irq_trans_init(dp); + + return dp; +} + +static struct device_node * __init of_pdt_build_tree(struct device_node *parent, + phandle node) +{ + struct device_node *ret = NULL, *prev_sibling = NULL; + struct device_node *dp; + + while (1) { + dp = of_pdt_create_node(node, parent); + if (!dp) + break; + + if (prev_sibling) + prev_sibling->sibling = dp; + + if (!ret) + ret = dp; + prev_sibling = dp; + + dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node)); + + node = of_pdt_prom_ops->getsibling(node); + } + + return ret; +} + +static void * __init kernel_tree_alloc(u64 size, u64 align) +{ + return prom_early_alloc(size); +} + +void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops) +{ + BUG_ON(!ops); + of_pdt_prom_ops = ops; + + of_root = of_pdt_create_node(root_node, NULL); + of_root->full_name = "/"; + + of_root->child = of_pdt_build_tree(of_root, + of_pdt_prom_ops->getchild(of_root->phandle)); + + /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ + of_alias_scan(kernel_tree_alloc); +} diff --git a/drivers/of/platform.c b/drivers/of/platform.c new file mode 100644 index 000000000..bf96862cb --- /dev/null +++ b/drivers/of/platform.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * and Arnd Bergmann, IBM Corp. + * Merged from powerpc/kernel/of_platform.c and + * sparc{,64}/kernel/of_device.c by Stephen Rothwell + */ + +#define pr_fmt(fmt) "OF: " fmt + +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/amba/bus.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +const struct of_device_id of_default_bus_match_table[] = { + { .compatible = "simple-bus", }, + { .compatible = "simple-mfd", }, + { .compatible = "isa", }, +#ifdef CONFIG_ARM_AMBA + { .compatible = "arm,amba-bus", }, +#endif /* CONFIG_ARM_AMBA */ + {} /* Empty terminated list */ +}; + +static const struct of_device_id of_skipped_node_table[] = { + { .compatible = "operating-points-v2", }, + {} /* Empty terminated list */ +}; + +/** + * of_find_device_by_node - Find the platform_device associated with a node + * @np: Pointer to device tree node + * + * Takes a reference to the embedded struct device which needs to be dropped + * after use. + * + * Return: platform_device pointer, or NULL if not found + */ +struct platform_device *of_find_device_by_node(struct device_node *np) +{ + struct device *dev; + + dev = bus_find_device_by_of_node(&platform_bus_type, np); + return dev ? to_platform_device(dev) : NULL; +} +EXPORT_SYMBOL(of_find_device_by_node); + +#ifdef CONFIG_OF_ADDRESS +/* + * The following routines scan a subtree and registers a device for + * each applicable node. + * + * Note: sparc doesn't use these routines because it has a different + * mechanism for creating devices from device tree nodes. 
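+ *
+ * As a naming illustration (addresses and node names are hypothetical):
+ * a node serial@40013000 whose "reg" entry translates to 0x40013000 is
+ * registered as platform device "40013000.serial", while a node without
+ * a translatable address gets a name built from its own and its parents'
+ * node names.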
+ */ + +/** + * of_device_make_bus_id - Use the device node data to assign a unique name + * @dev: pointer to device structure that is linked to a device tree node + * + * This routine will first try using the translated bus address to + * derive a unique name. If it cannot, then it will prepend names from + * parent nodes until a unique name can be derived. + */ +static void of_device_make_bus_id(struct device *dev) +{ + struct device_node *node = dev->of_node; + const __be32 *reg; + u64 addr; + u32 mask; + + /* Construct the name, using parent nodes if necessary to ensure uniqueness */ + while (node->parent) { + /* + * If the address can be translated, then that is as much + * uniqueness as we need. Make it the first component and return + */ + reg = of_get_property(node, "reg", NULL); + if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { + if (!of_property_read_u32(node, "mask", &mask)) + dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", + addr, ffs(mask) - 1, node, dev_name(dev)); + + else + dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", + addr, node, dev_name(dev)); + return; + } + + /* format arguments only used if dev_name() resolves to NULL */ + dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", + kbasename(node->full_name), dev_name(dev)); + node = node->parent; + } +} + +/** + * of_device_alloc - Allocate and initialize an of_device + * @np: device node to assign to device + * @bus_id: Name to assign to the device. May be null to use default name. + * @parent: Parent device. + */ +struct platform_device *of_device_alloc(struct device_node *np, + const char *bus_id, + struct device *parent) +{ + struct platform_device *dev; + int rc, i, num_reg = 0; + struct resource *res, temp_res; + + dev = platform_device_alloc("", PLATFORM_DEVID_NONE); + if (!dev) + return NULL; + + /* count the io resources */ + while (of_address_to_resource(np, num_reg, &temp_res) == 0) + num_reg++; + + /* Populate the resource table */ + if (num_reg) { + res = kcalloc(num_reg, sizeof(*res), GFP_KERNEL); + if (!res) { + platform_device_put(dev); + return NULL; + } + + dev->num_resources = num_reg; + dev->resource = res; + for (i = 0; i < num_reg; i++, res++) { + rc = of_address_to_resource(np, i, res); + WARN_ON(rc); + } + } + + dev->dev.of_node = of_node_get(np); + dev->dev.fwnode = &np->fwnode; + dev->dev.parent = parent ? : &platform_bus; + + if (bus_id) + dev_set_name(&dev->dev, "%s", bus_id); + else + of_device_make_bus_id(&dev->dev); + + return dev; +} +EXPORT_SYMBOL(of_device_alloc); + +/** + * of_platform_device_create_pdata - Alloc, initialize and register an of_device + * @np: pointer to node to create device for + * @bus_id: name to assign device + * @platform_data: pointer to populate platform_data pointer with + * @parent: Linux device model parent device. + * + * Return: Pointer to created platform device, or NULL if a device was not + * registered. Unavailable devices will not get registered. 
+ */ +static struct platform_device *of_platform_device_create_pdata( + struct device_node *np, + const char *bus_id, + void *platform_data, + struct device *parent) +{ + struct platform_device *dev; + + if (!of_device_is_available(np) || + of_node_test_and_set_flag(np, OF_POPULATED)) + return NULL; + + dev = of_device_alloc(np, bus_id, parent); + if (!dev) + goto err_clear_flag; + + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + if (!dev->dev.dma_mask) + dev->dev.dma_mask = &dev->dev.coherent_dma_mask; + dev->dev.bus = &platform_bus_type; + dev->dev.platform_data = platform_data; + of_msi_configure(&dev->dev, dev->dev.of_node); + + if (of_device_add(dev) != 0) { + platform_device_put(dev); + goto err_clear_flag; + } + + return dev; + +err_clear_flag: + of_node_clear_flag(np, OF_POPULATED); + return NULL; +} + +/** + * of_platform_device_create - Alloc, initialize and register an of_device + * @np: pointer to node to create device for + * @bus_id: name to assign device + * @parent: Linux device model parent device. + * + * Return: Pointer to created platform device, or NULL if a device was not + * registered. Unavailable devices will not get registered. + */ +struct platform_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent) +{ + return of_platform_device_create_pdata(np, bus_id, NULL, parent); +} +EXPORT_SYMBOL(of_platform_device_create); + +#ifdef CONFIG_ARM_AMBA +static struct amba_device *of_amba_device_create(struct device_node *node, + const char *bus_id, + void *platform_data, + struct device *parent) +{ + struct amba_device *dev; + const void *prop; + int ret; + + pr_debug("Creating amba device %pOF\n", node); + + if (!of_device_is_available(node) || + of_node_test_and_set_flag(node, OF_POPULATED)) + return NULL; + + dev = amba_device_alloc(NULL, 0, 0); + if (!dev) + goto err_clear_flag; + + /* AMBA devices only support a single DMA mask */ + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + dev->dev.dma_mask = &dev->dev.coherent_dma_mask; + + /* setup generic device info */ + dev->dev.of_node = of_node_get(node); + dev->dev.fwnode = &node->fwnode; + dev->dev.parent = parent ? 
: &platform_bus; + dev->dev.platform_data = platform_data; + if (bus_id) + dev_set_name(&dev->dev, "%s", bus_id); + else + of_device_make_bus_id(&dev->dev); + + /* Allow the HW Peripheral ID to be overridden */ + prop = of_get_property(node, "arm,primecell-periphid", NULL); + if (prop) + dev->periphid = of_read_ulong(prop, 1); + + ret = of_address_to_resource(node, 0, &dev->res); + if (ret) { + pr_err("amba: of_address_to_resource() failed (%d) for %pOF\n", + ret, node); + goto err_free; + } + + ret = amba_device_add(dev, &iomem_resource); + if (ret) { + pr_err("amba_device_add() failed (%d) for %pOF\n", + ret, node); + goto err_free; + } + + return dev; + +err_free: + amba_device_put(dev); +err_clear_flag: + of_node_clear_flag(node, OF_POPULATED); + return NULL; +} +#else /* CONFIG_ARM_AMBA */ +static struct amba_device *of_amba_device_create(struct device_node *node, + const char *bus_id, + void *platform_data, + struct device *parent) +{ + return NULL; +} +#endif /* CONFIG_ARM_AMBA */ + +/* + * of_dev_lookup() - Given a device node, lookup the preferred Linux name + */ +static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup, + struct device_node *np) +{ + const struct of_dev_auxdata *auxdata; + struct resource res; + int compatible = 0; + + if (!lookup) + return NULL; + + auxdata = lookup; + for (; auxdata->compatible; auxdata++) { + if (!of_device_is_compatible(np, auxdata->compatible)) + continue; + compatible++; + if (!of_address_to_resource(np, 0, &res)) + if (res.start != auxdata->phys_addr) + continue; + pr_debug("%pOF: devname=%s\n", np, auxdata->name); + return auxdata; + } + + if (!compatible) + return NULL; + + /* Try compatible match if no phys_addr and name are specified */ + auxdata = lookup; + for (; auxdata->compatible; auxdata++) { + if (!of_device_is_compatible(np, auxdata->compatible)) + continue; + if (!auxdata->phys_addr && !auxdata->name) { + pr_debug("%pOF: compatible match\n", np); + return auxdata; + } + } + + return NULL; +} + +/** + * of_platform_bus_create() - Create a device for a node and its children. + * @bus: device node of the bus to instantiate + * @matches: match table for bus nodes + * @lookup: auxdata table for matching id and platform_data with device nodes + * @parent: parent for new device, or NULL for top level. + * @strict: require compatible property + * + * Creates a platform_device for the provided device_node, and optionally + * recursively create devices for all the child nodes. 
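+ *
+ * A board might feed @lookup, by way of of_platform_populate(), with a
+ * table along these lines (compatible string, address and device name
+ * are hypothetical):
+ *
+ *      static struct of_dev_auxdata board_auxdata[] __initdata = {
+ *              OF_DEV_AUXDATA("acme,uart", 0x40013000, "acme-uart.0", NULL),
+ *              { }
+ *      };
+ *
+ *      of_platform_populate(NULL, of_default_bus_match_table,
+ *                           board_auxdata, NULL);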
+ */ +static int of_platform_bus_create(struct device_node *bus, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent, bool strict) +{ + const struct of_dev_auxdata *auxdata; + struct device_node *child; + struct platform_device *dev; + const char *bus_id = NULL; + void *platform_data = NULL; + int rc = 0; + + /* Make sure it has a compatible property */ + if (strict && (!of_get_property(bus, "compatible", NULL))) { + pr_debug("%s() - skipping %pOF, no compatible prop\n", + __func__, bus); + return 0; + } + + /* Skip nodes for which we don't want to create devices */ + if (unlikely(of_match_node(of_skipped_node_table, bus))) { + pr_debug("%s() - skipping %pOF node\n", __func__, bus); + return 0; + } + + if (of_node_check_flag(bus, OF_POPULATED_BUS)) { + pr_debug("%s() - skipping %pOF, already populated\n", + __func__, bus); + return 0; + } + + auxdata = of_dev_lookup(lookup, bus); + if (auxdata) { + bus_id = auxdata->name; + platform_data = auxdata->platform_data; + } + + if (of_device_is_compatible(bus, "arm,primecell")) { + /* + * Don't return an error here to keep compatibility with older + * device tree files. + */ + of_amba_device_create(bus, bus_id, platform_data, parent); + return 0; + } + + dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent); + if (!dev || !of_match_node(matches, bus)) + return 0; + + for_each_child_of_node(bus, child) { + pr_debug(" create child: %pOF\n", child); + rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict); + if (rc) { + of_node_put(child); + break; + } + } + of_node_set_flag(bus, OF_POPULATED_BUS); + return rc; +} + +/** + * of_platform_bus_probe() - Probe the device-tree for platform buses + * @root: parent of the first level to probe or NULL for the root of the tree + * @matches: match table for bus nodes + * @parent: parent to hook devices from, NULL for toplevel + * + * Note that children of the provided root are not instantiated as devices + * unless the specified root itself matches the bus list and is not NULL. + */ +int of_platform_bus_probe(struct device_node *root, + const struct of_device_id *matches, + struct device *parent) +{ + struct device_node *child; + int rc = 0; + + root = root ? of_node_get(root) : of_find_node_by_path("/"); + if (!root) + return -EINVAL; + + pr_debug("%s()\n", __func__); + pr_debug(" starting at: %pOF\n", root); + + /* Do a self check of bus type, if there's a match, create children */ + if (of_match_node(matches, root)) { + rc = of_platform_bus_create(root, matches, NULL, parent, false); + } else for_each_child_of_node(root, child) { + if (!of_match_node(matches, child)) + continue; + rc = of_platform_bus_create(child, matches, NULL, parent, false); + if (rc) { + of_node_put(child); + break; + } + } + + of_node_put(root); + return rc; +} +EXPORT_SYMBOL(of_platform_bus_probe); + +/** + * of_platform_populate() - Populate platform_devices from device tree data + * @root: parent of the first level to probe or NULL for the root of the tree + * @matches: match table, NULL to use the default + * @lookup: auxdata table for matching id and platform_data with device nodes + * @parent: parent to hook devices from, NULL for toplevel + * + * Similar to of_platform_bus_probe(), this function walks the device tree + * and creates devices from nodes. 
It differs in that it follows the modern + * convention of requiring all device nodes to have a 'compatible' property, + * and it is suitable for creating devices which are children of the root + * node (of_platform_bus_probe will only create children of the root which + * are selected by the @matches argument). + * + * New board support should be using this function instead of + * of_platform_bus_probe(). + * + * Return: 0 on success, < 0 on failure. + */ +int of_platform_populate(struct device_node *root, + const struct of_device_id *matches, + const struct of_dev_auxdata *lookup, + struct device *parent) +{ + struct device_node *child; + int rc = 0; + + root = root ? of_node_get(root) : of_find_node_by_path("/"); + if (!root) + return -EINVAL; + + pr_debug("%s()\n", __func__); + pr_debug(" starting at: %pOF\n", root); + + device_links_supplier_sync_state_pause(); + for_each_child_of_node(root, child) { + rc = of_platform_bus_create(child, matches, lookup, parent, true); + if (rc) { + of_node_put(child); + break; + } + } + device_links_supplier_sync_state_resume(); + + of_node_set_flag(root, OF_POPULATED_BUS); + + of_node_put(root); + return rc; +} +EXPORT_SYMBOL_GPL(of_platform_populate); + +int of_platform_default_populate(struct device_node *root, + const struct of_dev_auxdata *lookup, + struct device *parent) +{ + return of_platform_populate(root, of_default_bus_match_table, lookup, + parent); +} +EXPORT_SYMBOL_GPL(of_platform_default_populate); + +static const struct of_device_id reserved_mem_matches[] = { + { .compatible = "phram" }, + { .compatible = "qcom,rmtfs-mem" }, + { .compatible = "qcom,cmd-db" }, + { .compatible = "qcom,smem" }, + { .compatible = "ramoops" }, + { .compatible = "nvmem-rmem" }, + { .compatible = "google,open-dice" }, + {} +}; + +static int __init of_platform_default_populate_init(void) +{ + struct device_node *node; + + device_links_supplier_sync_state_pause(); + + if (!of_have_populated_dt()) + return -ENODEV; + + if (IS_ENABLED(CONFIG_PPC)) { + struct device_node *boot_display = NULL; + struct platform_device *dev; + int display_number = 0; + int ret; + + /* Check if we have a MacOS display without a node spec */ + if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL)) { + /* + * The old code tried to work out which node was the MacOS + * display based on the address. I'm dropping that since the + * lack of a node spec only happens with old BootX versions + * (users can update) and with this code, they'll still get + * a display (just not the palette hacks). + */ + dev = platform_device_alloc("bootx-noscreen", 0); + if (WARN_ON(!dev)) + return -ENOMEM; + ret = platform_device_add(dev); + if (WARN_ON(ret)) { + platform_device_put(dev); + return ret; + } + } + + /* + * For OF framebuffers, first create the device for the boot display, + * then for the other framebuffers. Only fail for the boot display; + * ignore errors for the rest. 
+ */ + for_each_node_by_type(node, "display") { + if (!of_get_property(node, "linux,opened", NULL) || + !of_get_property(node, "linux,boot-display", NULL)) + continue; + dev = of_platform_device_create(node, "of-display", NULL); + of_node_put(node); + if (WARN_ON(!dev)) + return -ENOMEM; + boot_display = node; + display_number++; + break; + } + for_each_node_by_type(node, "display") { + char buf[14]; + const char *of_display_format = "of-display.%d"; + + if (!of_get_property(node, "linux,opened", NULL) || node == boot_display) + continue; + ret = snprintf(buf, sizeof(buf), of_display_format, display_number++); + if (ret < sizeof(buf)) + of_platform_device_create(node, buf, NULL); + } + + } else { + /* + * Handle certain compatibles explicitly, since we don't want to create + * platform_devices for every node in /reserved-memory with a + * "compatible", + */ + for_each_matching_node(node, reserved_mem_matches) + of_platform_device_create(node, NULL, NULL); + + node = of_find_node_by_path("/firmware"); + if (node) { + of_platform_populate(node, NULL, NULL, NULL); + of_node_put(node); + } + + node = of_get_compatible_child(of_chosen, "simple-framebuffer"); + of_platform_device_create(node, NULL, NULL); + of_node_put(node); + + /* Populate everything else. */ + of_platform_default_populate(NULL, NULL, NULL); + } + + return 0; +} +arch_initcall_sync(of_platform_default_populate_init); + +static int __init of_platform_sync_state_init(void) +{ + device_links_supplier_sync_state_resume(); + return 0; +} +late_initcall_sync(of_platform_sync_state_init); + +int of_platform_device_destroy(struct device *dev, void *data) +{ + /* Do not touch devices not populated from the device tree */ + if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) + return 0; + + /* Recurse for any nodes that were treated as busses */ + if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS)) + device_for_each_child(dev, NULL, of_platform_device_destroy); + + of_node_clear_flag(dev->of_node, OF_POPULATED); + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); + + if (dev->bus == &platform_bus_type) + platform_device_unregister(to_platform_device(dev)); +#ifdef CONFIG_ARM_AMBA + else if (dev->bus == &amba_bustype) + amba_device_unregister(to_amba_device(dev)); +#endif + + return 0; +} +EXPORT_SYMBOL_GPL(of_platform_device_destroy); + +/** + * of_platform_depopulate() - Remove devices populated from device tree + * @parent: device which children will be removed + * + * Complementary to of_platform_populate(), this function removes children + * of the given device (and, recurrently, their children) that have been + * created from their respective device tree nodes (and only those, + * leaving others - eg. manually created - unharmed). + */ +void of_platform_depopulate(struct device *parent) +{ + if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) { + device_for_each_child_reverse(parent, NULL, of_platform_device_destroy); + of_node_clear_flag(parent->of_node, OF_POPULATED_BUS); + } +} +EXPORT_SYMBOL_GPL(of_platform_depopulate); + +static void devm_of_platform_populate_release(struct device *dev, void *res) +{ + of_platform_depopulate(*(struct device **)res); +} + +/** + * devm_of_platform_populate() - Populate platform_devices from device tree data + * @dev: device that requested to populate from device tree data + * + * Similar to of_platform_populate(), but will automatically call + * of_platform_depopulate() when the device is unbound from the bus. 
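+ *
+ * A typical use, sketched for a hypothetical driver probe routine:
+ *
+ *      static int acme_mfd_probe(struct platform_device *pdev)
+ *      {
+ *              ...
+ *              return devm_of_platform_populate(&pdev->dev);
+ *      }
+ *
+ * so that the children created from pdev's device tree node are removed
+ * automatically when the driver unbinds.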
+ * + * Return: 0 on success, < 0 on failure. + */ +int devm_of_platform_populate(struct device *dev) +{ + struct device **ptr; + int ret; + + if (!dev) + return -EINVAL; + + ptr = devres_alloc(devm_of_platform_populate_release, + sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = of_platform_populate(dev->of_node, NULL, NULL, dev); + if (ret) { + devres_free(ptr); + } else { + *ptr = dev; + devres_add(dev, ptr); + } + + return ret; +} +EXPORT_SYMBOL_GPL(devm_of_platform_populate); + +static int devm_of_platform_match(struct device *dev, void *res, void *data) +{ + struct device **ptr = res; + + if (!ptr) { + WARN_ON(!ptr); + return 0; + } + + return *ptr == data; +} + +/** + * devm_of_platform_depopulate() - Remove devices populated from device tree + * @dev: device that requested to depopulate from device tree data + * + * Complementary to devm_of_platform_populate(), this function removes children + * of the given device (and, recurrently, their children) that have been + * created from their respective device tree nodes (and only those, + * leaving others - eg. manually created - unharmed). + */ +void devm_of_platform_depopulate(struct device *dev) +{ + int ret; + + ret = devres_release(dev, devm_of_platform_populate_release, + devm_of_platform_match, dev); + + WARN_ON(ret); +} +EXPORT_SYMBOL_GPL(devm_of_platform_depopulate); + +#ifdef CONFIG_OF_DYNAMIC +static int of_platform_notify(struct notifier_block *nb, + unsigned long action, void *arg) +{ + struct of_reconfig_data *rd = arg; + struct platform_device *pdev_parent, *pdev; + bool children_left; + + switch (of_reconfig_get_state_change(action, rd)) { + case OF_RECONFIG_CHANGE_ADD: + /* verify that the parent is a bus */ + if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) + return NOTIFY_OK; /* not for us */ + + /* already populated? (driver using of_populate manually) */ + if (of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + + /* + * Clear the flag before adding the device so that fw_devlink + * doesn't skip adding consumers to this device. + */ + rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; + /* pdev_parent may be NULL when no bus platform device */ + pdev_parent = of_find_device_by_node(rd->dn->parent); + pdev = of_platform_device_create(rd->dn, NULL, + pdev_parent ? &pdev_parent->dev : NULL); + platform_device_put(pdev_parent); + + if (pdev == NULL) { + pr_err("%s: failed to create for '%pOF'\n", + __func__, rd->dn); + /* of_platform_device_create tosses the error code */ + return notifier_from_errno(-EINVAL); + } + break; + + case OF_RECONFIG_CHANGE_REMOVE: + + /* already depopulated? */ + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + + /* find our device by node */ + pdev = of_find_device_by_node(rd->dn); + if (pdev == NULL) + return NOTIFY_OK; /* no? 
not meant for us */ + + /* unregister takes one ref away */ + of_platform_device_destroy(&pdev->dev, &children_left); + + /* and put the reference of the find */ + platform_device_put(pdev); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block platform_of_notifier = { + .notifier_call = of_platform_notify, +}; + +void of_platform_register_reconfig_notifier(void) +{ + WARN_ON(of_reconfig_notifier_register(&platform_of_notifier)); +} +#endif /* CONFIG_OF_DYNAMIC */ + +#endif /* CONFIG_OF_ADDRESS */ diff --git a/drivers/of/property.c b/drivers/of/property.c new file mode 100644 index 000000000..b636777e6 --- /dev/null +++ b/drivers/of/property.c @@ -0,0 +1,1473 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * drivers/of/property.c - Procedures for accessing and interpreting + * Devicetree properties and graphs. + * + * Initially created by copying procedures from drivers/of/base.c. This + * file contains the OF property as well as the OF graph interface + * functions. + * + * Paul Mackerras August 1996. + * Copyright (C) 1996-2005 Paul Mackerras. + * + * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. + * {engebret|bergner}@us.ibm.com + * + * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net + * + * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and + * Grant Likely. + */ + +#define pr_fmt(fmt) "OF: " fmt + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/of_irq.h> +#include <linux/string.h> +#include <linux/moduleparam.h> + +#include "of_private.h" + +/** + * of_graph_is_present() - check graph's presence + * @node: pointer to device_node containing graph port + * + * Return: True if @node has a port or ports (with a port) sub-node, + * false otherwise. + */ +bool of_graph_is_present(const struct device_node *node) +{ + struct device_node *ports, *port; + + ports = of_get_child_by_name(node, "ports"); + if (ports) + node = ports; + + port = of_get_child_by_name(node, "port"); + of_node_put(ports); + of_node_put(port); + + return !!port; +} +EXPORT_SYMBOL(of_graph_is_present); + +/** + * of_property_count_elems_of_size - Count the number of elements in a property + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @elem_size: size of the individual element + * + * Search for a property in a device node and count the number of elements of + * size elem_size in it. + * + * Return: The number of elements on success, -EINVAL if the property does not + * exist or its length does not match a multiple of elem_size and -ENODATA if + * the property does not have a value. + */ +int of_property_count_elems_of_size(const struct device_node *np, + const char *propname, int elem_size) +{ + struct property *prop = of_find_property(np, propname, NULL); + + if (!prop) + return -EINVAL; + if (!prop->value) + return -ENODATA; + + if (prop->length % elem_size != 0) { + pr_err("size of %s in node %pOF is not a multiple of %d\n", + propname, np, elem_size); + return -EINVAL; + } + + return prop->length / elem_size; +} +EXPORT_SYMBOL_GPL(of_property_count_elems_of_size); + +/** + * of_find_property_value_of_size + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched.
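An illustrative call for of_property_count_elems_of_size() above; the node pointer np and the property name are hypothetical:

	/* For a devicetree property such as:  foo,timings = <10 20 30>;  */
	static int foo_count_timings(const struct device_node *np)
	{
		/* Returns 3 here, or a negative errno as documented above. */
		return of_property_count_elems_of_size(np, "foo,timings",
						       sizeof(u32));
	}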
+ * @min: minimum allowed length of property value + * @max: maximum allowed length of property value (0 means unlimited) + * @len: if !=NULL, actual length is written to here + * + * Search for a property in a device node and validate the requested size. + * + * Return: The property value on success, -EINVAL if the property does not + * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data is too small or too large. + * + */ +static void *of_find_property_value_of_size(const struct device_node *np, + const char *propname, u32 min, u32 max, size_t *len) +{ + struct property *prop = of_find_property(np, propname, NULL); + + if (!prop) + return ERR_PTR(-EINVAL); + if (!prop->value) + return ERR_PTR(-ENODATA); + if (prop->length < min) + return ERR_PTR(-EOVERFLOW); + if (max && prop->length > max) + return ERR_PTR(-EOVERFLOW); + + if (len) + *len = prop->length; + + return prop->value; +} + +/** + * of_property_read_u32_index - Find and read a u32 from a multi-value property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @index: index of the u32 in the list of values + * @out_value: pointer to return value, modified only if no error. + * + * Search for a property in a device node and read nth 32-bit value from + * it. + * + * Return: 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_value is modified only if a valid u32 value can be decoded. + */ +int of_property_read_u32_index(const struct device_node *np, + const char *propname, + u32 index, u32 *out_value) +{ + const u32 *val = of_find_property_value_of_size(np, propname, + ((index + 1) * sizeof(*out_value)), + 0, + NULL); + + if (IS_ERR(val)) + return PTR_ERR(val); + + *out_value = be32_to_cpup(((__be32 *)val) + index); + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u32_index); + +/** + * of_property_read_u64_index - Find and read a u64 from a multi-value property. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @index: index of the u64 in the list of values + * @out_value: pointer to return value, modified only if no error. + * + * Search for a property in a device node and read nth 64-bit value from + * it. + * + * Return: 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_value is modified only if a valid u64 value can be decoded. + */ +int of_property_read_u64_index(const struct device_node *np, + const char *propname, + u32 index, u64 *out_value) +{ + const u64 *val = of_find_property_value_of_size(np, propname, + ((index + 1) * sizeof(*out_value)), + 0, NULL); + + if (IS_ERR(val)) + return PTR_ERR(val); + + *out_value = be64_to_cpup(((__be64 *)val) + index); + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u64_index); + +/** + * of_property_read_variable_u8_array - Find and read an array of u8 from a + * property, with bounds on the minimum and maximum array size. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to found values.
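A hedged sketch of the indexed readers documented above, again with made-up node and property names:

	static int foo_read_third_cell(const struct device_node *np, u32 *val)
	{
		/* For  foo,cfg = <10 20 30 40>;  this stores 30 in *val. */
		return of_property_read_u32_index(np, "foo,cfg", 2, val);
	}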
+ * @sz_min: minimum number of array elements to read + * @sz_max: maximum number of array elements to read, if zero there is no + * upper limit on the number of elements in the dts entry but only + * sz_min will be read. + * + * Search for a property in a device node and read 8-bit value(s) from + * it. + * + * dts entry of array should be like: + * ``property = /bits/ 8 <0x50 0x60 0x70>;`` + * + * Return: The number of elements read on success, -EINVAL if the property + * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW + * if the property data is smaller than sz_min or longer than sz_max. + * + * The out_values is modified only if a valid u8 value can be decoded. + */ +int of_property_read_variable_u8_array(const struct device_node *np, + const char *propname, u8 *out_values, + size_t sz_min, size_t sz_max) +{ + size_t sz, count; + const u8 *val = of_find_property_value_of_size(np, propname, + (sz_min * sizeof(*out_values)), + (sz_max * sizeof(*out_values)), + &sz); + + if (IS_ERR(val)) + return PTR_ERR(val); + + if (!sz_max) + sz = sz_min; + else + sz /= sizeof(*out_values); + + count = sz; + while (count--) + *out_values++ = *val++; + + return sz; +} +EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array); + +/** + * of_property_read_variable_u16_array - Find and read an array of u16 from a + * property, with bounds on the minimum and maximum array size. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to found values. + * @sz_min: minimum number of array elements to read + * @sz_max: maximum number of array elements to read, if zero there is no + * upper limit on the number of elements in the dts entry but only + * sz_min will be read. + * + * Search for a property in a device node and read 16-bit value(s) from + * it. + * + * dts entry of array should be like: + * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;`` + * + * Return: The number of elements read on success, -EINVAL if the property + * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW + * if the property data is smaller than sz_min or longer than sz_max. + * + * The out_values is modified only if a valid u16 value can be decoded. + */ +int of_property_read_variable_u16_array(const struct device_node *np, + const char *propname, u16 *out_values, + size_t sz_min, size_t sz_max) +{ + size_t sz, count; + const __be16 *val = of_find_property_value_of_size(np, propname, + (sz_min * sizeof(*out_values)), + (sz_max * sizeof(*out_values)), + &sz); + + if (IS_ERR(val)) + return PTR_ERR(val); + + if (!sz_max) + sz = sz_min; + else + sz /= sizeof(*out_values); + + count = sz; + while (count--) + *out_values++ = be16_to_cpup(val++); + + return sz; +} +EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array); + +/** + * of_property_read_variable_u32_array - Find and read an array of 32 bit + * integers from a property, with bounds on the minimum and maximum array size. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to return found values. + * @sz_min: minimum number of array elements to read + * @sz_max: maximum number of array elements to read, if zero there is no + * upper limit on the number of elements in the dts entry but only + * sz_min will be read. + * + * Search for a property in a device node and read 32-bit value(s) from + * it. 
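An illustrative caller for the variable-length array readers, with hypothetical names; the output buffer must have room for sz_max elements:

	static int foo_read_timings(const struct device_node *np, u32 *vals)
	{
		/*
		 * Accepts between 1 and 4 cells; for a property such as
		 * foo,timings = <100 200 300>;  this fills three entries
		 * of vals[] and returns 3.
		 */
		return of_property_read_variable_u32_array(np, "foo,timings",
							   vals, 1, 4);
	}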
+ * + * Return: The number of elements read on success, -EINVAL if the property + * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW + * if the property data is smaller than sz_min or longer than sz_max. + * + * The out_values is modified only if a valid u32 value can be decoded. + */ +int of_property_read_variable_u32_array(const struct device_node *np, + const char *propname, u32 *out_values, + size_t sz_min, size_t sz_max) +{ + size_t sz, count; + const __be32 *val = of_find_property_value_of_size(np, propname, + (sz_min * sizeof(*out_values)), + (sz_max * sizeof(*out_values)), + &sz); + + if (IS_ERR(val)) + return PTR_ERR(val); + + if (!sz_max) + sz = sz_min; + else + sz /= sizeof(*out_values); + + count = sz; + while (count--) + *out_values++ = be32_to_cpup(val++); + + return sz; +} +EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array); + +/** + * of_property_read_u64 - Find and read a 64 bit integer from a property + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_value: pointer to return value, modified only if return value is 0. + * + * Search for a property in a device node and read a 64-bit value from + * it. + * + * Return: 0 on success, -EINVAL if the property does not exist, + * -ENODATA if property does not have a value, and -EOVERFLOW if the + * property data isn't large enough. + * + * The out_value is modified only if a valid u64 value can be decoded. + */ +int of_property_read_u64(const struct device_node *np, const char *propname, + u64 *out_value) +{ + const __be32 *val = of_find_property_value_of_size(np, propname, + sizeof(*out_value), + 0, + NULL); + + if (IS_ERR(val)) + return PTR_ERR(val); + + *out_value = of_read_number(val, 2); + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_u64); + +/** + * of_property_read_variable_u64_array - Find and read an array of 64 bit + * integers from a property, with bounds on the minimum and maximum array size. + * + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_values: pointer to found values. + * @sz_min: minimum number of array elements to read + * @sz_max: maximum number of array elements to read, if zero there is no + * upper limit on the number of elements in the dts entry but only + * sz_min will be read. + * + * Search for a property in a device node and read 64-bit value(s) from + * it. + * + * Return: The number of elements read on success, -EINVAL if the property + * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW + * if the property data is smaller than sz_min or longer than sz_max. + * + * The out_values is modified only if a valid u64 value can be decoded. + */ +int of_property_read_variable_u64_array(const struct device_node *np, + const char *propname, u64 *out_values, + size_t sz_min, size_t sz_max) +{ + size_t sz, count; + const __be32 *val = of_find_property_value_of_size(np, propname, + (sz_min * sizeof(*out_values)), + (sz_max * sizeof(*out_values)), + &sz); + + if (IS_ERR(val)) + return PTR_ERR(val); + + if (!sz_max) + sz = sz_min; + else + sz /= sizeof(*out_values); + + count = sz; + while (count--) { + *out_values++ = of_read_number(val, 2); + val += 2; + } + + return sz; +} +EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array); + +/** + * of_property_read_string - Find and read a string from a property + * @np: device node from which the property value is to be read. 
+ * @propname: name of the property to be searched. + * @out_string: pointer to null terminated return string, modified only if + * return value is 0. + * + * Search for a property in a device tree node and retrieve a null + * terminated string value (pointer to data, not a copy). + * + * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if + * property does not have a value, and -EILSEQ if the string is not + * null-terminated within the length of the property data. + * + * Note that the empty string "" has length of 1, thus -ENODATA cannot + * be interpreted as an empty string. + * + * The out_string pointer is modified only if a valid string can be decoded. + */ +int of_property_read_string(const struct device_node *np, const char *propname, + const char **out_string) +{ + const struct property *prop = of_find_property(np, propname, NULL); + if (!prop) + return -EINVAL; + if (!prop->length) + return -ENODATA; + if (strnlen(prop->value, prop->length) >= prop->length) + return -EILSEQ; + *out_string = prop->value; + return 0; +} +EXPORT_SYMBOL_GPL(of_property_read_string); + +/** + * of_property_match_string() - Find string in a list and return index + * @np: pointer to node containing string list property + * @propname: string list property name + * @string: pointer to string to search for in string list + * + * This function searches a string list property and returns the index + * of a specific string value. + */ +int of_property_match_string(const struct device_node *np, const char *propname, + const char *string) +{ + const struct property *prop = of_find_property(np, propname, NULL); + size_t l; + int i; + const char *p, *end; + + if (!prop) + return -EINVAL; + if (!prop->value) + return -ENODATA; + + p = prop->value; + end = p + prop->length; + + for (i = 0; p < end; i++, p += l) { + l = strnlen(p, end - p) + 1; + if (p + l > end) + return -EILSEQ; + pr_debug("comparing %s with %s\n", string, p); + if (strcmp(string, p) == 0) + return i; /* Found it; return index */ + } + return -ENODATA; +} +EXPORT_SYMBOL_GPL(of_property_match_string); + +/** + * of_property_read_string_helper() - Utility helper for parsing string properties + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. + * @out_strs: output array of string pointers. + * @sz: number of array elements to read. + * @skip: Number of strings to skip over at beginning of list. + * + * Don't call this function directly. It is a utility helper for the + * of_property_read_string*() family of functions. + */ +int of_property_read_string_helper(const struct device_node *np, + const char *propname, const char **out_strs, + size_t sz, int skip) +{ + const struct property *prop = of_find_property(np, propname, NULL); + int l = 0, i = 0; + const char *p, *end; + + if (!prop) + return -EINVAL; + if (!prop->value) + return -ENODATA; + p = prop->value; + end = p + prop->length; + + for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) { + l = strnlen(p, end - p) + 1; + if (p + l > end) + return -EILSEQ; + if (out_strs && i >= skip) + *out_strs++ = p; + } + i -= skip; + return i <= 0 ? 
-ENODATA : i; +} +EXPORT_SYMBOL_GPL(of_property_read_string_helper); + +const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur, + u32 *pu) +{ + const void *curv = cur; + + if (!prop) + return NULL; + + if (!cur) { + curv = prop->value; + goto out_val; + } + + curv += sizeof(*cur); + if (curv >= prop->value + prop->length) + return NULL; + +out_val: + *pu = be32_to_cpup(curv); + return curv; +} +EXPORT_SYMBOL_GPL(of_prop_next_u32); + +const char *of_prop_next_string(struct property *prop, const char *cur) +{ + const void *curv = cur; + + if (!prop) + return NULL; + + if (!cur) + return prop->value; + + curv += strlen(cur) + 1; + if (curv >= prop->value + prop->length) + return NULL; + + return curv; +} +EXPORT_SYMBOL_GPL(of_prop_next_string); + +/** + * of_graph_parse_endpoint() - parse common endpoint node properties + * @node: pointer to endpoint device_node + * @endpoint: pointer to the OF endpoint data structure + * + * The caller should hold a reference to @node. + */ +int of_graph_parse_endpoint(const struct device_node *node, + struct of_endpoint *endpoint) +{ + struct device_node *port_node = of_get_parent(node); + + WARN_ONCE(!port_node, "%s(): endpoint %pOF has no parent node\n", + __func__, node); + + memset(endpoint, 0, sizeof(*endpoint)); + + endpoint->local_node = node; + /* + * It doesn't matter whether the two calls below succeed. + * If they don't then the default value 0 is used. + */ + of_property_read_u32(port_node, "reg", &endpoint->port); + of_property_read_u32(node, "reg", &endpoint->id); + + of_node_put(port_node); + + return 0; +} +EXPORT_SYMBOL(of_graph_parse_endpoint); + +/** + * of_graph_get_port_by_id() - get the port matching a given id + * @parent: pointer to the parent device node + * @id: id of the port + * + * Return: A 'port' node pointer with refcount incremented. The caller + * has to use of_node_put() on it when done. + */ +struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id) +{ + struct device_node *node, *port; + + node = of_get_child_by_name(parent, "ports"); + if (node) + parent = node; + + for_each_child_of_node(parent, port) { + u32 port_id = 0; + + if (!of_node_name_eq(port, "port")) + continue; + of_property_read_u32(port, "reg", &port_id); + if (id == port_id) + break; + } + + of_node_put(node); + + return port; +} +EXPORT_SYMBOL(of_graph_get_port_by_id); + +/** + * of_graph_get_next_endpoint() - get next endpoint node + * @parent: pointer to the parent device node + * @prev: previous endpoint node, or NULL to get first + * + * Return: An 'endpoint' node pointer with refcount incremented. Refcount + * of the passed @prev node is decremented. + */ +struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, + struct device_node *prev) +{ + struct device_node *endpoint; + struct device_node *port; + + if (!parent) + return NULL; + + /* + * Start by locating the port node. If no previous endpoint is specified + * search for the first port node, otherwise get the previous endpoint + * parent port node. 
+ */ + if (!prev) { + struct device_node *node; + + node = of_get_child_by_name(parent, "ports"); + if (node) + parent = node; + + port = of_get_child_by_name(parent, "port"); + of_node_put(node); + + if (!port) { + pr_err("graph: no port node found in %pOF\n", parent); + return NULL; + } + } else { + port = of_get_parent(prev); + if (WARN_ONCE(!port, "%s(): endpoint %pOF has no parent node\n", + __func__, prev)) + return NULL; + } + + while (1) { + /* + * Now that we have a port node, get the next endpoint by + * getting the next child. If the previous endpoint is NULL this + * will return the first child. + */ + endpoint = of_get_next_child(port, prev); + if (endpoint) { + of_node_put(port); + return endpoint; + } + + /* No more endpoints under this port, try the next one. */ + prev = NULL; + + do { + port = of_get_next_child(parent, port); + if (!port) + return NULL; + } while (!of_node_name_eq(port, "port")); + } +} +EXPORT_SYMBOL(of_graph_get_next_endpoint); + +/** + * of_graph_get_endpoint_by_regs() - get endpoint node of specific identifiers + * @parent: pointer to the parent device node + * @port_reg: identifier (value of reg property) of the parent port node + * @reg: identifier (value of reg property) of the endpoint node + * + * Return: An 'endpoint' node pointer which is identified by reg and at the same + * is the child of a port node identified by port_reg. reg and port_reg are + * ignored when they are -1. Use of_node_put() on the pointer when done. + */ +struct device_node *of_graph_get_endpoint_by_regs( + const struct device_node *parent, int port_reg, int reg) +{ + struct of_endpoint endpoint; + struct device_node *node = NULL; + + for_each_endpoint_of_node(parent, node) { + of_graph_parse_endpoint(node, &endpoint); + if (((port_reg == -1) || (endpoint.port == port_reg)) && + ((reg == -1) || (endpoint.id == reg))) + return node; + } + + return NULL; +} +EXPORT_SYMBOL(of_graph_get_endpoint_by_regs); + +/** + * of_graph_get_remote_endpoint() - get remote endpoint node + * @node: pointer to a local endpoint device_node + * + * Return: Remote endpoint node associated with remote endpoint node linked + * to @node. Use of_node_put() on it when done. + */ +struct device_node *of_graph_get_remote_endpoint(const struct device_node *node) +{ + /* Get remote endpoint node. */ + return of_parse_phandle(node, "remote-endpoint", 0); +} +EXPORT_SYMBOL(of_graph_get_remote_endpoint); + +/** + * of_graph_get_port_parent() - get port's parent node + * @node: pointer to a local endpoint device_node + * + * Return: device node associated with endpoint node linked + * to @node. Use of_node_put() on it when done. + */ +struct device_node *of_graph_get_port_parent(struct device_node *node) +{ + unsigned int depth; + + if (!node) + return NULL; + + /* + * Preserve usecount for passed in node as of_get_next_parent() + * will do of_node_put() on it. + */ + of_node_get(node); + + /* Walk 3 levels up only if there is 'ports' node. */ + for (depth = 3; depth && node; depth--) { + node = of_get_next_parent(node); + if (depth == 2 && !of_node_name_eq(node, "ports")) + break; + } + return node; +} +EXPORT_SYMBOL(of_graph_get_port_parent); + +/** + * of_graph_get_remote_port_parent() - get remote port's parent node + * @node: pointer to a local endpoint device_node + * + * Return: Remote device node associated with remote endpoint node linked + * to @node. Use of_node_put() on it when done. 
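A short traversal sketch tying the graph helpers above together; np stands for a hypothetical device node that carries an OF graph:

	static void foo_walk_graph(const struct device_node *np)
	{
		struct device_node *ep, *remote;
		struct of_endpoint endpoint;

		for_each_endpoint_of_node(np, ep) {
			of_graph_parse_endpoint(ep, &endpoint);
			remote = of_graph_get_remote_port_parent(ep);
			pr_debug("port %u endpoint %u -> %pOF\n",
				 endpoint.port, endpoint.id, remote);
			of_node_put(remote);	/* NULL-safe */
		}
	}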
+ */ +struct device_node *of_graph_get_remote_port_parent( + const struct device_node *node) +{ + struct device_node *np, *pp; + + /* Get remote endpoint node. */ + np = of_graph_get_remote_endpoint(node); + + pp = of_graph_get_port_parent(np); + + of_node_put(np); + + return pp; +} +EXPORT_SYMBOL(of_graph_get_remote_port_parent); + +/** + * of_graph_get_remote_port() - get remote port node + * @node: pointer to a local endpoint device_node + * + * Return: Remote port node associated with remote endpoint node linked + * to @node. Use of_node_put() on it when done. + */ +struct device_node *of_graph_get_remote_port(const struct device_node *node) +{ + struct device_node *np; + + /* Get remote endpoint node. */ + np = of_graph_get_remote_endpoint(node); + if (!np) + return NULL; + return of_get_next_parent(np); +} +EXPORT_SYMBOL(of_graph_get_remote_port); + +int of_graph_get_endpoint_count(const struct device_node *np) +{ + struct device_node *endpoint; + int num = 0; + + for_each_endpoint_of_node(np, endpoint) + num++; + + return num; +} +EXPORT_SYMBOL(of_graph_get_endpoint_count); + +/** + * of_graph_get_remote_node() - get remote parent device_node for given port/endpoint + * @node: pointer to parent device_node containing graph port/endpoint + * @port: identifier (value of reg property) of the parent port node + * @endpoint: identifier (value of reg property) of the endpoint node + * + * Return: Remote device node associated with remote endpoint node linked + * to @node. Use of_node_put() on it when done. + */ +struct device_node *of_graph_get_remote_node(const struct device_node *node, + u32 port, u32 endpoint) +{ + struct device_node *endpoint_node, *remote; + + endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint); + if (!endpoint_node) { + pr_debug("no valid endpoint (%d, %d) for node %pOF\n", + port, endpoint, node); + return NULL; + } + + remote = of_graph_get_remote_port_parent(endpoint_node); + of_node_put(endpoint_node); + if (!remote) { + pr_debug("no valid remote node\n"); + return NULL; + } + + if (!of_device_is_available(remote)) { + pr_debug("not available for remote node\n"); + of_node_put(remote); + return NULL; + } + + return remote; +} +EXPORT_SYMBOL(of_graph_get_remote_node); + +static struct fwnode_handle *of_fwnode_get(struct fwnode_handle *fwnode) +{ + return of_fwnode_handle(of_node_get(to_of_node(fwnode))); +} + +static void of_fwnode_put(struct fwnode_handle *fwnode) +{ + of_node_put(to_of_node(fwnode)); +} + +static bool of_fwnode_device_is_available(const struct fwnode_handle *fwnode) +{ + return of_device_is_available(to_of_node(fwnode)); +} + +static bool of_fwnode_device_dma_supported(const struct fwnode_handle *fwnode) +{ + return true; +} + +static enum dev_dma_attr +of_fwnode_device_get_dma_attr(const struct fwnode_handle *fwnode) +{ + if (of_dma_is_coherent(to_of_node(fwnode))) + return DEV_DMA_COHERENT; + else + return DEV_DMA_NON_COHERENT; +} + +static bool of_fwnode_property_present(const struct fwnode_handle *fwnode, + const char *propname) +{ + return of_property_read_bool(to_of_node(fwnode), propname); +} + +static int of_fwnode_property_read_int_array(const struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + const struct device_node *node = to_of_node(fwnode); + + if (!val) + return of_property_count_elems_of_size(node, propname, + elem_size); + + switch (elem_size) { + case sizeof(u8): + return of_property_read_u8_array(node, propname, val, nval); + case sizeof(u16): + 
return of_property_read_u16_array(node, propname, val, nval); + case sizeof(u32): + return of_property_read_u32_array(node, propname, val, nval); + case sizeof(u64): + return of_property_read_u64_array(node, propname, val, nval); + } + + return -ENXIO; +} + +static int +of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode, + const char *propname, const char **val, + size_t nval) +{ + const struct device_node *node = to_of_node(fwnode); + + return val ? + of_property_read_string_array(node, propname, val, nval) : + of_property_count_strings(node, propname); +} + +static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode) +{ + return kbasename(to_of_node(fwnode)->full_name); +} + +static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode) +{ + /* Root needs no prefix here (its name is "/"). */ + if (!to_of_node(fwnode)->parent) + return ""; + + return "/"; +} + +static struct fwnode_handle * +of_fwnode_get_parent(const struct fwnode_handle *fwnode) +{ + return of_fwnode_handle(of_get_parent(to_of_node(fwnode))); +} + +static struct fwnode_handle * +of_fwnode_get_next_child_node(const struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + return of_fwnode_handle(of_get_next_available_child(to_of_node(fwnode), + to_of_node(child))); +} + +static struct fwnode_handle * +of_fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) +{ + const struct device_node *node = to_of_node(fwnode); + struct device_node *child; + + for_each_available_child_of_node(node, child) + if (of_node_name_eq(child, childname)) + return of_fwnode_handle(child); + + return NULL; +} + +static int +of_fwnode_get_reference_args(const struct fwnode_handle *fwnode, + const char *prop, const char *nargs_prop, + unsigned int nargs, unsigned int index, + struct fwnode_reference_args *args) +{ + struct of_phandle_args of_args; + unsigned int i; + int ret; + + if (nargs_prop) + ret = of_parse_phandle_with_args(to_of_node(fwnode), prop, + nargs_prop, index, &of_args); + else + ret = of_parse_phandle_with_fixed_args(to_of_node(fwnode), prop, + nargs, index, &of_args); + if (ret < 0) + return ret; + if (!args) { + of_node_put(of_args.np); + return 0; + } + + args->nargs = of_args.args_count; + args->fwnode = of_fwnode_handle(of_args.np); + + for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++) + args->args[i] = i < of_args.args_count ? of_args.args[i] : 0; + + return 0; +} + +static struct fwnode_handle * +of_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + return of_fwnode_handle(of_graph_get_next_endpoint(to_of_node(fwnode), + to_of_node(prev))); +} + +static struct fwnode_handle * +of_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) +{ + return of_fwnode_handle( + of_graph_get_remote_endpoint(to_of_node(fwnode))); +} + +static struct fwnode_handle * +of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) +{ + struct device_node *np; + + /* Get the parent of the port */ + np = of_get_parent(to_of_node(fwnode)); + if (!np) + return NULL; + + /* Is this the "ports" node? If not, it's the port parent. 
*/ + if (!of_node_name_eq(np, "ports")) + return of_fwnode_handle(np); + + return of_fwnode_handle(of_get_next_parent(np)); +} + +static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint) +{ + const struct device_node *node = to_of_node(fwnode); + struct device_node *port_node = of_get_parent(node); + + endpoint->local_fwnode = fwnode; + + of_property_read_u32(port_node, "reg", &endpoint->port); + of_property_read_u32(node, "reg", &endpoint->id); + + of_node_put(port_node); + + return 0; +} + +static const void * +of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode, + const struct device *dev) +{ + return of_device_get_match_data(dev); +} + +static struct device_node *of_get_compat_node(struct device_node *np) +{ + of_node_get(np); + + while (np) { + if (!of_device_is_available(np)) { + of_node_put(np); + np = NULL; + } + + if (of_find_property(np, "compatible", NULL)) + break; + + np = of_get_next_parent(np); + } + + return np; +} + +static struct device_node *of_get_compat_node_parent(struct device_node *np) +{ + struct device_node *parent, *node; + + parent = of_get_parent(np); + node = of_get_compat_node(parent); + of_node_put(parent); + + return node; +} + +static void of_link_to_phandle(struct device_node *con_np, + struct device_node *sup_np) +{ + struct device_node *tmp_np = of_node_get(sup_np); + + /* Check that sup_np and its ancestors are available. */ + while (tmp_np) { + if (of_fwnode_handle(tmp_np)->dev) { + of_node_put(tmp_np); + break; + } + + if (!of_device_is_available(tmp_np)) { + of_node_put(tmp_np); + return; + } + + tmp_np = of_get_next_parent(tmp_np); + } + + fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np)); +} + +/** + * parse_prop_cells - Property parsing function for suppliers + * + * @np: Pointer to device tree node containing a list + * @prop_name: Name of property to be parsed. Expected to hold phandle values + * @index: For properties holding a list of phandles, this is the index + * into the list. + * @list_name: Property name that is known to contain list of phandle(s) to + * supplier(s) + * @cells_name: property name that specifies phandles' arguments count + * + * This is a helper function to parse properties that have a known fixed name + * and are a list of phandles and phandle arguments. + * + * Returns: + * - phandle node pointer with refcount incremented. Caller must of_node_put() + * on it when done. + * - NULL if no phandle found at index + */ +static struct device_node *parse_prop_cells(struct device_node *np, + const char *prop_name, int index, + const char *list_name, + const char *cells_name) +{ + struct of_phandle_args sup_args; + + if (strcmp(prop_name, list_name)) + return NULL; + + if (of_parse_phandle_with_args(np, list_name, cells_name, index, + &sup_args)) + return NULL; + + return sup_args.np; +} + +#define DEFINE_SIMPLE_PROP(fname, name, cells) \ +static struct device_node *parse_##fname(struct device_node *np, \ + const char *prop_name, int index) \ +{ \ + return parse_prop_cells(np, prop_name, index, name, cells); \ +} + +static int strcmp_suffix(const char *str, const char *suffix) +{ + unsigned int len, suffix_len; + + len = strlen(str); + suffix_len = strlen(suffix); + if (len <= suffix_len) + return -1; + return strcmp(str + len - suffix_len, suffix); +} + +/** + * parse_suffix_prop_cells - Suffix property parsing function for suppliers + * + * @np: Pointer to device tree node containing a list + * @prop_name: Name of property to be parsed. 
Expected to hold phandle values + * @index: For properties holding a list of phandles, this is the index + * into the list. + * @suffix: Property suffix that is known to contain list of phandle(s) to + * supplier(s) + * @cells_name: property name that specifies phandles' arguments count + * + * This is a helper function to parse properties that have a known fixed suffix + * and are a list of phandles and phandle arguments. + * + * Returns: + * - phandle node pointer with refcount incremented. Caller must of_node_put() + * on it when done. + * - NULL if no phandle found at index + */ +static struct device_node *parse_suffix_prop_cells(struct device_node *np, + const char *prop_name, int index, + const char *suffix, + const char *cells_name) +{ + struct of_phandle_args sup_args; + + if (strcmp_suffix(prop_name, suffix)) + return NULL; + + if (of_parse_phandle_with_args(np, prop_name, cells_name, index, + &sup_args)) + return NULL; + + return sup_args.np; +} + +#define DEFINE_SUFFIX_PROP(fname, suffix, cells) \ +static struct device_node *parse_##fname(struct device_node *np, \ + const char *prop_name, int index) \ +{ \ + return parse_suffix_prop_cells(np, prop_name, index, suffix, cells); \ +} + +/** + * struct supplier_bindings - Property parsing functions for suppliers + * + * @parse_prop: function name + * parse_prop() finds the node corresponding to a supplier phandle + * @parse_prop.np: Pointer to device node holding supplier phandle property + * @parse_prop.prop_name: Name of property holding a phandle value + * @parse_prop.index: For properties holding a list of phandles, this is the + * index into the list + * @optional: Describes whether a supplier is mandatory or not + * @node_not_dev: The consumer node containing the property is never converted + * to a struct device. Instead, parse ancestor nodes for the + * compatible property to find a node corresponding to a device. + * + * Returns: + * parse_prop() return values are + * - phandle node pointer with refcount incremented. Caller must of_node_put() + * on it when done. 
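For orientation, an invocation of the DEFINE_SIMPLE_PROP() macro above, such as the DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") used further below, expands to roughly:

	static struct device_node *parse_clocks(struct device_node *np,
						const char *prop_name, int index)
	{
		return parse_prop_cells(np, prop_name, index, "clocks",
					"#clock-cells");
	}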
+ * - NULL if no phandle found at index + */ +struct supplier_bindings { + struct device_node *(*parse_prop)(struct device_node *np, + const char *prop_name, int index); + bool optional; + bool node_not_dev; +}; + +DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") +DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells") +DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells") +DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells") +DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells") +DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL) +DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells") +DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells") +DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells") +DEFINE_SIMPLE_PROP(extcon, "extcon", NULL) +DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", NULL) +DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells") +DEFINE_SIMPLE_PROP(wakeup_parent, "wakeup-parent", NULL) +DEFINE_SIMPLE_PROP(pinctrl0, "pinctrl-0", NULL) +DEFINE_SIMPLE_PROP(pinctrl1, "pinctrl-1", NULL) +DEFINE_SIMPLE_PROP(pinctrl2, "pinctrl-2", NULL) +DEFINE_SIMPLE_PROP(pinctrl3, "pinctrl-3", NULL) +DEFINE_SIMPLE_PROP(pinctrl4, "pinctrl-4", NULL) +DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL) +DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL) +DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) +DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) +DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL) +DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") +DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") +DEFINE_SIMPLE_PROP(leds, "leds", NULL) +DEFINE_SIMPLE_PROP(backlight, "backlight", NULL) +DEFINE_SIMPLE_PROP(panel, "panel", NULL) +DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) +DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") + +static struct device_node *parse_gpios(struct device_node *np, + const char *prop_name, int index) +{ + if (!strcmp_suffix(prop_name, ",nr-gpios")) + return NULL; + + return parse_suffix_prop_cells(np, prop_name, index, "-gpios", + "#gpio-cells"); +} + +static struct device_node *parse_iommu_maps(struct device_node *np, + const char *prop_name, int index) +{ + if (strcmp(prop_name, "iommu-map")) + return NULL; + + return of_parse_phandle(np, prop_name, (index * 4) + 1); +} + +static struct device_node *parse_gpio_compat(struct device_node *np, + const char *prop_name, int index) +{ + struct of_phandle_args sup_args; + + if (strcmp(prop_name, "gpio") && strcmp(prop_name, "gpios")) + return NULL; + + /* + * Ignore node with gpio-hog property since its gpios are all provided + * by its parent. + */ + if (of_find_property(np, "gpio-hog", NULL)) + return NULL; + + if (of_parse_phandle_with_args(np, prop_name, "#gpio-cells", index, + &sup_args)) + return NULL; + + return sup_args.np; +} + +static struct device_node *parse_interrupts(struct device_node *np, + const char *prop_name, int index) +{ + struct of_phandle_args sup_args; + + if (!IS_ENABLED(CONFIG_OF_IRQ) || IS_ENABLED(CONFIG_PPC)) + return NULL; + + if (strcmp(prop_name, "interrupts") && + strcmp(prop_name, "interrupts-extended")) + return NULL; + + return of_irq_parse_one(np, index, &sup_args) ? 
NULL : sup_args.np; +} + +static const struct supplier_bindings of_supplier_bindings[] = { + { .parse_prop = parse_clocks, }, + { .parse_prop = parse_interconnects, }, + { .parse_prop = parse_iommus, .optional = true, }, + { .parse_prop = parse_iommu_maps, .optional = true, }, + { .parse_prop = parse_mboxes, }, + { .parse_prop = parse_io_channels, }, + { .parse_prop = parse_interrupt_parent, }, + { .parse_prop = parse_dmas, .optional = true, }, + { .parse_prop = parse_power_domains, }, + { .parse_prop = parse_hwlocks, }, + { .parse_prop = parse_extcon, }, + { .parse_prop = parse_nvmem_cells, }, + { .parse_prop = parse_phys, }, + { .parse_prop = parse_wakeup_parent, }, + { .parse_prop = parse_pinctrl0, }, + { .parse_prop = parse_pinctrl1, }, + { .parse_prop = parse_pinctrl2, }, + { .parse_prop = parse_pinctrl3, }, + { .parse_prop = parse_pinctrl4, }, + { .parse_prop = parse_pinctrl5, }, + { .parse_prop = parse_pinctrl6, }, + { .parse_prop = parse_pinctrl7, }, + { .parse_prop = parse_pinctrl8, }, + { .parse_prop = parse_remote_endpoint, .node_not_dev = true, }, + { .parse_prop = parse_pwms, }, + { .parse_prop = parse_resets, }, + { .parse_prop = parse_leds, }, + { .parse_prop = parse_backlight, }, + { .parse_prop = parse_panel, }, + { .parse_prop = parse_gpio_compat, }, + { .parse_prop = parse_interrupts, }, + { .parse_prop = parse_regulators, }, + { .parse_prop = parse_gpio, }, + { .parse_prop = parse_gpios, }, + {} +}; + +/** + * of_link_property - Create device links to suppliers listed in a property + * @con_np: The consumer device tree node which contains the property + * @prop_name: Name of property to be parsed + * + * This function checks if the property @prop_name that is present in the + * @con_np device tree node is one of the known common device tree bindings + * that list phandles to suppliers. If @prop_name isn't one, this function + * doesn't do anything. + * + * If @prop_name is one, this function attempts to create fwnode links from the + * consumer device tree node @con_np to all the suppliers device tree nodes + * listed in @prop_name. + * + * Any failed attempt to create a fwnode link will NOT result in an immediate + * return. of_link_property() must create links to all the available supplier + * device tree nodes even when attempts to create a link to one or more + * suppliers fail. + */ +static int of_link_property(struct device_node *con_np, const char *prop_name) +{ + struct device_node *phandle; + const struct supplier_bindings *s = of_supplier_bindings; + unsigned int i = 0; + bool matched = false; + + /* Do not stop at first failed link, link all available suppliers. */ + while (!matched && s->parse_prop) { + if (s->optional && !fw_devlink_is_strict()) { + s++; + continue; + } + + while ((phandle = s->parse_prop(con_np, prop_name, i))) { + struct device_node *con_dev_np; + + con_dev_np = s->node_not_dev + ? 
of_get_compat_node_parent(con_np) + : of_node_get(con_np); + matched = true; + i++; + of_link_to_phandle(con_dev_np, phandle); + of_node_put(phandle); + of_node_put(con_dev_np); + } + s++; + } + return 0; +} + +static void __iomem *of_fwnode_iomap(struct fwnode_handle *fwnode, int index) +{ +#ifdef CONFIG_OF_ADDRESS + return of_iomap(to_of_node(fwnode), index); +#else + return NULL; +#endif +} + +static int of_fwnode_irq_get(const struct fwnode_handle *fwnode, + unsigned int index) +{ + return of_irq_get(to_of_node(fwnode), index); +} + +static int of_fwnode_add_links(struct fwnode_handle *fwnode) +{ + struct property *p; + struct device_node *con_np = to_of_node(fwnode); + + if (IS_ENABLED(CONFIG_X86)) + return 0; + + if (!con_np) + return -EINVAL; + + for_each_property_of_node(con_np, p) + of_link_property(con_np, p->name); + + return 0; +} + +const struct fwnode_operations of_fwnode_ops = { + .get = of_fwnode_get, + .put = of_fwnode_put, + .device_is_available = of_fwnode_device_is_available, + .device_get_match_data = of_fwnode_device_get_match_data, + .device_dma_supported = of_fwnode_device_dma_supported, + .device_get_dma_attr = of_fwnode_device_get_dma_attr, + .property_present = of_fwnode_property_present, + .property_read_int_array = of_fwnode_property_read_int_array, + .property_read_string_array = of_fwnode_property_read_string_array, + .get_name = of_fwnode_get_name, + .get_name_prefix = of_fwnode_get_name_prefix, + .get_parent = of_fwnode_get_parent, + .get_next_child_node = of_fwnode_get_next_child_node, + .get_named_child_node = of_fwnode_get_named_child_node, + .get_reference_args = of_fwnode_get_reference_args, + .graph_get_next_endpoint = of_fwnode_graph_get_next_endpoint, + .graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint, + .graph_get_port_parent = of_fwnode_graph_get_port_parent, + .graph_parse_endpoint = of_fwnode_graph_parse_endpoint, + .iomap = of_fwnode_iomap, + .irq_get = of_fwnode_irq_get, + .add_links = of_fwnode_add_links, +}; +EXPORT_SYMBOL_GPL(of_fwnode_ops); diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c new file mode 100644 index 000000000..b278ab433 --- /dev/null +++ b/drivers/of/resolver.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Functions for dealing with DT resolution + * + * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com> + * Copyright (C) 2012 Texas Instruments Inc. 
+ */ + +#define pr_fmt(fmt) "OF: resolver: " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/string.h> +#include <linux/ctype.h> +#include <linux/errno.h> +#include <linux/slab.h> + +#include "of_private.h" + +static phandle live_tree_max_phandle(void) +{ + struct device_node *node; + phandle phandle; + unsigned long flags; + + raw_spin_lock_irqsave(&devtree_lock, flags); + phandle = 0; + for_each_of_allnodes(node) { + if (node->phandle != OF_PHANDLE_ILLEGAL && + node->phandle > phandle) + phandle = node->phandle; + } + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + return phandle; +} + +static void adjust_overlay_phandles(struct device_node *overlay, + int phandle_delta) +{ + struct device_node *child; + struct property *prop; + phandle phandle; + + /* adjust node's phandle in node */ + if (overlay->phandle != 0 && overlay->phandle != OF_PHANDLE_ILLEGAL) + overlay->phandle += phandle_delta; + + /* copy adjusted phandle into *phandle properties */ + for_each_property_of_node(overlay, prop) { + + if (of_prop_cmp(prop->name, "phandle") && + of_prop_cmp(prop->name, "linux,phandle")) + continue; + + if (prop->length < 4) + continue; + + phandle = be32_to_cpup(prop->value); + if (phandle == OF_PHANDLE_ILLEGAL) + continue; + + *(__be32 *)prop->value = cpu_to_be32(overlay->phandle); + } + + for_each_child_of_node(overlay, child) + adjust_overlay_phandles(child, phandle_delta); +} + +static int update_usages_of_a_phandle_reference(struct device_node *overlay, + struct property *prop_fixup, phandle phandle) +{ + struct device_node *refnode; + struct property *prop; + char *value, *cur, *end, *node_path, *prop_name, *s; + int offset, len; + int err = 0; + + value = kmemdup(prop_fixup->value, prop_fixup->length, GFP_KERNEL); + if (!value) + return -ENOMEM; + + /* prop_fixup contains a list of tuples of path:property_name:offset */ + end = value + prop_fixup->length; + for (cur = value; cur < end; cur += len + 1) { + len = strlen(cur); + + node_path = cur; + s = strchr(cur, ':'); + if (!s) { + err = -EINVAL; + goto err_fail; + } + *s++ = '\0'; + + prop_name = s; + s = strchr(s, ':'); + if (!s) { + err = -EINVAL; + goto err_fail; + } + *s++ = '\0'; + + err = kstrtoint(s, 10, &offset); + if (err) + goto err_fail; + + refnode = __of_find_node_by_full_path(of_node_get(overlay), node_path); + if (!refnode) + continue; + + for_each_property_of_node(refnode, prop) { + if (!of_prop_cmp(prop->name, prop_name)) + break; + } + of_node_put(refnode); + + if (!prop) { + err = -ENOENT; + goto err_fail; + } + + if (offset < 0 || offset + sizeof(__be32) > prop->length) { + err = -EINVAL; + goto err_fail; + } + + *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle); + } + +err_fail: + kfree(value); + return err; +} + +/* compare nodes taking into account that 'name' strips out the @ part */ +static int node_name_cmp(const struct device_node *dn1, + const struct device_node *dn2) +{ + const char *n1 = kbasename(dn1->full_name); + const char *n2 = kbasename(dn2->full_name); + + return of_node_cmp(n1, n2); +} + +/* + * Adjust the local phandle references by the given phandle delta. + * + * Subtree @local_fixups, which is overlay node __local_fixups__, + * mirrors the fragment node structure at the root of the overlay. 
+ * + * For each property in the fragments that contains a phandle reference, + * @local_fixups has a property of the same name that contains a list + * of offsets of the phandle reference(s) within the respective property + * value(s). The values at these offsets will be fixed up. + */ +static int adjust_local_phandle_references(struct device_node *local_fixups, + struct device_node *overlay, int phandle_delta) +{ + struct device_node *child, *overlay_child; + struct property *prop_fix, *prop; + int err, i, count; + unsigned int off; + + if (!local_fixups) + return 0; + + for_each_property_of_node(local_fixups, prop_fix) { + + /* skip properties added automatically */ + if (!of_prop_cmp(prop_fix->name, "name") || + !of_prop_cmp(prop_fix->name, "phandle") || + !of_prop_cmp(prop_fix->name, "linux,phandle")) + continue; + + if ((prop_fix->length % 4) != 0 || prop_fix->length == 0) + return -EINVAL; + count = prop_fix->length / sizeof(__be32); + + for_each_property_of_node(overlay, prop) { + if (!of_prop_cmp(prop->name, prop_fix->name)) + break; + } + + if (!prop) + return -EINVAL; + + for (i = 0; i < count; i++) { + off = be32_to_cpu(((__be32 *)prop_fix->value)[i]); + if ((off + 4) > prop->length) + return -EINVAL; + + be32_add_cpu(prop->value + off, phandle_delta); + } + } + + /* + * These nested loops recurse down two subtrees in parallel, where the + * node names in the two subtrees match. + * + * The roots of the subtrees are the overlay's __local_fixups__ node + * and the overlay's root node. + */ + for_each_child_of_node(local_fixups, child) { + + for_each_child_of_node(overlay, overlay_child) + if (!node_name_cmp(child, overlay_child)) { + of_node_put(overlay_child); + break; + } + + if (!overlay_child) { + of_node_put(child); + return -EINVAL; + } + + err = adjust_local_phandle_references(child, overlay_child, + phandle_delta); + if (err) { + of_node_put(child); + return err; + } + } + + return 0; +} + +/** + * of_resolve_phandles - Relocate and resolve overlay against live tree + * + * @overlay: Pointer to devicetree overlay to relocate and resolve + * + * Modify (relocate) values of local phandles in @overlay to a range that + * does not conflict with the live expanded devicetree. Update references + * to the local phandles in @overlay. Update (resolve) phandle references + * in @overlay that refer to the live expanded devicetree. + * + * Phandle values in the live tree are in the range of + * 1 .. live_tree_max_phandle(). The range of phandle values in the overlay + * also begins at 1. Adjust the phandle values in the overlay to begin + * at live_tree_max_phandle() + 1. Update references to the phandles to + * the adjusted phandle values. + * + * The name of each property in the "__fixups__" node in the overlay matches + * the name of a symbol (a label) in the live tree. The value of each + * property in the "__fixups__" node is a list of the property values in the + * overlay that need to be updated to contain the phandle reference + * corresponding to that symbol in the live tree. Update the references in + * the overlay with the phandle values in the live tree. + * + * @overlay must be detached. + * + * Resolving and applying @overlay to the live expanded devicetree must be + * protected by a mechanism to ensure that multiple overlays are processed + * in a single threaded manner so that multiple overlays will not relocate + * phandles to overlapping ranges. The mechanism to enforce this is not + * yet implemented.
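For illustration, each entry in a "__fixups__" property has the path:property:offset form parsed by update_usages_of_a_phandle_reference() above; the label, path and property below are made up:

	/*
	 * The property name is the live-tree label (e.g. "ocp"); each
	 * NUL-terminated string in its value names an overlay path, a
	 * property and a byte offset at which the phandle is patched in.
	 */
	static const char example_fixup[] =
		"/fragment@0/__overlay__/node0:interrupt-parent:0";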
+ * + * Return: %0 on success or a negative error value on error. + */ +int of_resolve_phandles(struct device_node *overlay) +{ + struct device_node *child, *local_fixups, *refnode; + struct device_node *tree_symbols, *overlay_fixups; + struct property *prop; + const char *refpath; + phandle phandle, phandle_delta; + int err; + + tree_symbols = NULL; + + if (!overlay) { + pr_err("null overlay\n"); + err = -EINVAL; + goto out; + } + + if (!of_node_check_flag(overlay, OF_DETACHED)) { + pr_err("overlay not detached\n"); + err = -EINVAL; + goto out; + } + + phandle_delta = live_tree_max_phandle() + 1; + adjust_overlay_phandles(overlay, phandle_delta); + + for_each_child_of_node(overlay, local_fixups) + if (of_node_name_eq(local_fixups, "__local_fixups__")) + break; + + err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta); + if (err) + goto out; + + overlay_fixups = NULL; + + for_each_child_of_node(overlay, child) { + if (of_node_name_eq(child, "__fixups__")) + overlay_fixups = child; + } + + if (!overlay_fixups) { + err = 0; + goto out; + } + + tree_symbols = of_find_node_by_path("/__symbols__"); + if (!tree_symbols) { + pr_err("no symbols in root of device tree.\n"); + err = -EINVAL; + goto out; + } + + for_each_property_of_node(overlay_fixups, prop) { + + /* skip properties added automatically */ + if (!of_prop_cmp(prop->name, "name")) + continue; + + err = of_property_read_string(tree_symbols, + prop->name, &refpath); + if (err) { + pr_err("node label '%s' not found in live devicetree symbols table\n", + prop->name); + goto out; + } + + refnode = of_find_node_by_path(refpath); + if (!refnode) { + err = -ENOENT; + goto out; + } + + phandle = refnode->phandle; + of_node_put(refnode); + + err = update_usages_of_a_phandle_reference(overlay, prop, phandle); + if (err) + break; + } + +out: + if (err) + pr_err("overlay phandle fixup failed: %d\n", err); + of_node_put(tree_symbols); + + return err; +} +EXPORT_SYMBOL_GPL(of_resolve_phandles); diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile new file mode 100644 index 000000000..d072f3ba3 --- /dev/null +++ b/drivers/of/unittest-data/Makefile @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += testcases.dtb.o + +obj-$(CONFIG_OF_OVERLAY) += overlay.dtb.o \ + overlay_0.dtb.o \ + overlay_1.dtb.o \ + overlay_2.dtb.o \ + overlay_3.dtb.o \ + overlay_4.dtb.o \ + overlay_5.dtb.o \ + overlay_6.dtb.o \ + overlay_7.dtb.o \ + overlay_8.dtb.o \ + overlay_9.dtb.o \ + overlay_10.dtb.o \ + overlay_11.dtb.o \ + overlay_12.dtb.o \ + overlay_13.dtb.o \ + overlay_15.dtb.o \ + overlay_16.dtb.o \ + overlay_17.dtb.o \ + overlay_18.dtb.o \ + overlay_19.dtb.o \ + overlay_20.dtb.o \ + overlay_bad_add_dup_node.dtb.o \ + overlay_bad_add_dup_prop.dtb.o \ + overlay_bad_phandle.dtb.o \ + overlay_bad_symbol.dtb.o \ + overlay_base.dtb.o \ + overlay_gpio_01.dtb.o \ + overlay_gpio_02a.dtb.o \ + overlay_gpio_02b.dtb.o \ + overlay_gpio_03.dtb.o \ + overlay_gpio_04a.dtb.o \ + overlay_gpio_04b.dtb.o + +# enable creation of __symbols__ node +DTC_FLAGS_overlay += -@ +DTC_FLAGS_overlay_bad_phandle += -@ +DTC_FLAGS_overlay_bad_symbol += -@ +DTC_FLAGS_overlay_base += -@ +DTC_FLAGS_testcases += -@ + +# suppress warnings about intentional errors +DTC_FLAGS_testcases += -Wno-interrupts_property \ + -Wno-node_name_vs_property_name \ + -Wno-interrupt_map + +# Apply overlays statically with fdtoverlay. This is a build time test that +# the overlays can be applied successfully by fdtoverlay. 
This does not +# guarantee that the overlays can be applied successfully at run time by +# unittest, but it provides a bit of build time test coverage for those +# who do not execute unittest. +# +# The overlays are applied on top of static_base_1.dtb and static_base_2.dtb to +# create static_test_1.dtb and static_test_2.dtb. If fdtoverlay detects an +# error than the kernel build will fail. static_test_1.dtb and +# static_test_2.dtb are not consumed by unittest. +# +# Some unittest overlays deliberately contain errors that unittest checks for. +# These overlays will cause fdtoverlay to fail, and are thus not included +# in the static test: +# overlay_bad_add_dup_node.dtbo \ +# overlay_bad_add_dup_prop.dtbo \ +# overlay_bad_phandle.dtbo \ +# overlay_bad_symbol.dtbo \ + +apply_static_overlay_1 := overlay_0.dtbo \ + overlay_1.dtbo \ + overlay_2.dtbo \ + overlay_3.dtbo \ + overlay_4.dtbo \ + overlay_5.dtbo \ + overlay_6.dtbo \ + overlay_7.dtbo \ + overlay_8.dtbo \ + overlay_9.dtbo \ + overlay_10.dtbo \ + overlay_11.dtbo \ + overlay_12.dtbo \ + overlay_13.dtbo \ + overlay_15.dtbo \ + overlay_16.dtbo \ + overlay_17.dtbo \ + overlay_18.dtbo \ + overlay_19.dtbo \ + overlay_20.dtbo \ + overlay_gpio_01.dtbo \ + overlay_gpio_02a.dtbo \ + overlay_gpio_02b.dtbo \ + overlay_gpio_03.dtbo \ + overlay_gpio_04a.dtbo \ + overlay_gpio_04b.dtbo + +apply_static_overlay_2 := overlay.dtbo + +DTC_FLAGS_static_base_1 += -Wno-interrupts_property \ + -Wno-node_name_vs_property_name \ + -Wno-interrupt_map + +static_test_1-dtbs := static_base_1.dtb $(apply_static_overlay_1) +static_test_2-dtbs := static_base_2.dtb $(apply_static_overlay_2) + +dtb-$(CONFIG_OF_OVERLAY) += static_test_1.dtb static_test_2.dtb diff --git a/drivers/of/unittest-data/overlay.dts b/drivers/of/unittest-data/overlay.dts new file mode 100644 index 000000000..3bbc59e92 --- /dev/null +++ b/drivers/of/unittest-data/overlay.dts @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&electric_1 { + + status = "okay"; + + hvac_2: hvac-large-1 { + compatible = "ot,hvac-large"; + heat-range = < 40 75 >; + cool-range = < 65 80 >; + }; +}; + +&rides_1 { + + #address-cells = <1>; + #size-cells = <1>; + status = "okay"; + + ride@100 { + #address-cells = <1>; + #size-cells = <1>; + + track@30 { + incline-up = < 48 32 16 >; + }; + + track@40 { + incline-up = < 47 31 15 >; + }; + }; + + ride_200: ride@200 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ot,ferris-wheel"; + reg = < 0x00000200 0x100 >; + hvac-provider = < &hvac_2 >; + hvac-thermostat = < 27 32 > ; + hvac-zones = < 12 5 >; + hvac-zone-names = "operator", "snack-bar"; + spin-controller = < &spin_ctrl_1 3 >; + spin-rph = < 30 >; + gondolas = < 16 >; + gondola-capacity = < 6 >; + + ride_200_left: track@10 { + reg = < 0x00000010 0x10 >; + }; + + ride_200_right: track@20 { + reg = < 0x00000020 0x10 >; + }; + }; +}; + +&lights_2 { + + status = "okay"; + color = "purple", "white", "red", "green"; + rate = < 3 256 >; +}; diff --git a/drivers/of/unittest-data/overlay_0.dts b/drivers/of/unittest-data/overlay_0.dts new file mode 100644 index 000000000..ac0f9e0fe --- /dev/null +++ b/drivers/of/unittest-data/overlay_0.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/ { + /* overlay_0 - enable using absolute target path */ + + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus/test-unittest0"; + __overlay__ { + status = "okay"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_1.dts 
b/drivers/of/unittest-data/overlay_1.dts new file mode 100644 index 000000000..e92a626e2 --- /dev/null +++ b/drivers/of/unittest-data/overlay_1.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/ { + /* overlay_1 - disable using absolute target path */ + + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus/test-unittest1"; + __overlay__ { + status = "disabled"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_10.dts b/drivers/of/unittest-data/overlay_10.dts new file mode 100644 index 000000000..73993bf23 --- /dev/null +++ b/drivers/of/unittest-data/overlay_10.dts @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_10 */ +/* overlays 8, 9, 10, 11 application and removal in bad sequence */ + +&unittest_test_bus { + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-unittest10 { + compatible = "unittest"; + status = "okay"; + reg = <10>; + + #address-cells = <1>; + #size-cells = <0>; + + test-unittest101 { + compatible = "unittest"; + status = "okay"; + reg = <1>; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_11.dts b/drivers/of/unittest-data/overlay_11.dts new file mode 100644 index 000000000..9a79b253a --- /dev/null +++ b/drivers/of/unittest-data/overlay_11.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_11 */ +/* overlays 8, 9, 10, 11 application and removal in bad sequence */ + +&unittest_test_bus { + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-unittest11 { + compatible = "unittest"; + status = "okay"; + reg = <11>; + + #address-cells = <1>; + #size-cells = <0>; + + test-unittest111 { + compatible = "unittest"; + status = "okay"; + reg = <1>; + }; + + }; +}; diff --git a/drivers/of/unittest-data/overlay_12.dts b/drivers/of/unittest-data/overlay_12.dts new file mode 100644 index 000000000..ca3441e2c --- /dev/null +++ b/drivers/of/unittest-data/overlay_12.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/ { + /* overlay_12 - enable using absolute target path (i2c) */ + + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12"; + __overlay__ { + status = "okay"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_13.dts b/drivers/of/unittest-data/overlay_13.dts new file mode 100644 index 000000000..3c30dec63 --- /dev/null +++ b/drivers/of/unittest-data/overlay_13.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/ { + /* overlay_13 - disable using absolute target path (i2c) */ + + fragment@0 { + target-path = "/testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13"; + __overlay__ { + status = "disabled"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_15.dts b/drivers/of/unittest-data/overlay_15.dts new file mode 100644 index 000000000..572849047 --- /dev/null +++ b/drivers/of/unittest-data/overlay_15.dts @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_15 - mux overlay */ + +&unittest_i2c_test_bus { + #address-cells = <1>; + #size-cells = <0>; + test-unittest15 { + reg = <11>; + compatible = "unittest-i2c-mux"; + status = "okay"; + + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + test-mux-dev@20 { + reg = <0x20>; + compatible = "unittest-i2c-dev"; + status = "okay"; + }; + }; + }; +}; diff --git 
a/drivers/of/unittest-data/overlay_16.dts b/drivers/of/unittest-data/overlay_16.dts new file mode 100644 index 000000000..80a46dc02 --- /dev/null +++ b/drivers/of/unittest-data/overlay_16.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_16 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest16 { + compatible = "unittest"; + reg = <16>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_17.dts b/drivers/of/unittest-data/overlay_17.dts new file mode 100644 index 000000000..5b8a22091 --- /dev/null +++ b/drivers/of/unittest-data/overlay_17.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_17 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest17 { + compatible = "unittest"; + reg = <17>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_18.dts b/drivers/of/unittest-data/overlay_18.dts new file mode 100644 index 000000000..dcddd5f6d --- /dev/null +++ b/drivers/of/unittest-data/overlay_18.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_18 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest18 { + compatible = "unittest"; + reg = <18>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_19.dts b/drivers/of/unittest-data/overlay_19.dts new file mode 100644 index 000000000..32b3ba0b6 --- /dev/null +++ b/drivers/of/unittest-data/overlay_19.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_19 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest19 { + compatible = "unittest"; + reg = <19>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_2.dts b/drivers/of/unittest-data/overlay_2.dts new file mode 100644 index 000000000..db8684ba8 --- /dev/null +++ b/drivers/of/unittest-data/overlay_2.dts @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_2 - enable using label */ + +&unittest2 { + status = "okay"; +}; diff --git a/drivers/of/unittest-data/overlay_20.dts b/drivers/of/unittest-data/overlay_20.dts new file mode 100644 index 000000000..d4a4f2f32 --- /dev/null +++ b/drivers/of/unittest-data/overlay_20.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_20 - notify test */ + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + + test-unittest20 { + compatible = "unittest"; + reg = <20>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_3.dts b/drivers/of/unittest-data/overlay_3.dts new file mode 100644 index 000000000..40f289e7c --- /dev/null +++ b/drivers/of/unittest-data/overlay_3.dts @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_3 - disable using label */ + +&unittest3 { + status = "disabled"; +}; diff --git a/drivers/of/unittest-data/overlay_4.dts b/drivers/of/unittest-data/overlay_4.dts new file mode 100644 index 000000000..a8a77ddf9 --- /dev/null +++ b/drivers/of/unittest-data/overlay_4.dts @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_4 - test insertion of a full node */ + +&unittest_test_bus { + + /* suppress DTC warning */ + #address-cells = <1>; + #size-cells = <0>; + + test-unittest4 { + compatible = "unittest"; + status = "okay"; + reg = <4>; + }; +}; diff --git a/drivers/of/unittest-data/overlay_5.dts 
b/drivers/of/unittest-data/overlay_5.dts new file mode 100644 index 000000000..706f5f1b7 --- /dev/null +++ b/drivers/of/unittest-data/overlay_5.dts @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_5 - test overlay apply revert */ + +&unittest5 { + status = "okay"; +}; diff --git a/drivers/of/unittest-data/overlay_6.dts b/drivers/of/unittest-data/overlay_6.dts new file mode 100644 index 000000000..21a7fa4ca --- /dev/null +++ b/drivers/of/unittest-data/overlay_6.dts @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_6 */ +/* overlays 6, 7 application and removal in sequence */ + +&unittest6 { + status = "okay"; +}; diff --git a/drivers/of/unittest-data/overlay_7.dts b/drivers/of/unittest-data/overlay_7.dts new file mode 100644 index 000000000..58ba1bb51 --- /dev/null +++ b/drivers/of/unittest-data/overlay_7.dts @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_7 */ +/* overlays 6, 7 application and removal in sequence */ + +&unittest7 { + status = "okay"; +}; diff --git a/drivers/of/unittest-data/overlay_8.dts b/drivers/of/unittest-data/overlay_8.dts new file mode 100644 index 000000000..e9718d118 --- /dev/null +++ b/drivers/of/unittest-data/overlay_8.dts @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_8 */ +/* overlays 8, 9, 10, 11 application and removal in bad sequence */ + +&unittest8 { + status = "okay"; +}; diff --git a/drivers/of/unittest-data/overlay_9.dts b/drivers/of/unittest-data/overlay_9.dts new file mode 100644 index 000000000..b35e23eda --- /dev/null +++ b/drivers/of/unittest-data/overlay_9.dts @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* overlay_9 */ +/* overlays 8, 9, 10, 11 application and removal in bad sequence */ + +&unittest8 { + property-foo = "bar"; +}; diff --git a/drivers/of/unittest-data/overlay_bad_add_dup_node.dts b/drivers/of/unittest-data/overlay_bad_add_dup_node.dts new file mode 100644 index 000000000..145dfc3b1 --- /dev/null +++ b/drivers/of/unittest-data/overlay_bad_add_dup_node.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* + * &electric_1/motor-1 and &spin_ctrl_1 are the same node: + * /testcase-data-2/substation@100/motor-1 + * + * Thus the new node "controller" in each fragment will + * result in an attempt to add the same node twice. + * This will result in an error and the overlay apply + * will fail. + */ + +&electric_1 { + + motor-1 { + controller { + power_bus = < 0x1 0x2 >; + }; + }; +}; + +&spin_ctrl_1 { + controller { + power_bus_emergency = < 0x101 0x102 >; + }; +}; diff --git a/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts b/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts new file mode 100644 index 000000000..6327d1ffb --- /dev/null +++ b/drivers/of/unittest-data/overlay_bad_add_dup_prop.dts @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +/* + * &electric_1/motor-1/electric and &spin_ctrl_1/electric are the same node: + * /testcase-data-2/substation@100/motor-1/electric + * + * Thus the property "rpm_avail" in each fragment will + * result in an attempt to update the same property twice. + * This will result in an error and the overlay apply + * will fail. + * + * The previous version of this test did not include the extra + * level of node 'electric'. That resulted in the 'rpm_avail' + * property being located in the pre-existing node 'motor-1'. 
+ * Modifying a property results in a WARNING that a memory leak + * will occur if the overlay is removed. Since the overlay apply + * fails, the memory leak does actually occur, and kmemleak will + * further report the memory leak if CONFIG_DEBUG_KMEMLEAK is + * enabled. Adding the overlay node 'electric' avoids the + * memory leak and thus people who use kmemleak will not + * have to debug this non-problem again. + */ + +&electric_1 { + + motor-1 { + electric { + rpm_avail = < 100 >; + }; + }; +}; + +&spin_ctrl_1 { + electric { + rpm_avail = < 100 200 >; + }; +}; diff --git a/drivers/of/unittest-data/overlay_bad_phandle.dts b/drivers/of/unittest-data/overlay_bad_phandle.dts new file mode 100644 index 000000000..83b797360 --- /dev/null +++ b/drivers/of/unittest-data/overlay_bad_phandle.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&electric_1 { + + // This label should cause an error when the overlay + // is applied. There is already a phandle value + // in the base tree for motor-1. + spin_ctrl_1_conflict: motor-1 { + accelerate = < 3 >; + decelerate = < 5 >; + }; +}; diff --git a/drivers/of/unittest-data/overlay_bad_symbol.dts b/drivers/of/unittest-data/overlay_bad_symbol.dts new file mode 100644 index 000000000..98c6d1de1 --- /dev/null +++ b/drivers/of/unittest-data/overlay_bad_symbol.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&electric_1 { + + // This label should cause an error when the overlay + // is applied. There is already a symbol hvac_1 + // in the base tree + hvac_1: hvac-medium-2 { + compatible = "ot,hvac-medium"; + heat-range = < 50 75 >; + cool-range = < 60 80 >; + }; + +}; diff --git a/drivers/of/unittest-data/overlay_base.dts b/drivers/of/unittest-data/overlay_base.dts new file mode 100644 index 000000000..ab9014589 --- /dev/null +++ b/drivers/of/unittest-data/overlay_base.dts @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "overlay_common.dtsi" diff --git a/drivers/of/unittest-data/overlay_common.dtsi b/drivers/of/unittest-data/overlay_common.dtsi new file mode 100644 index 000000000..08874a725 --- /dev/null +++ b/drivers/of/unittest-data/overlay_common.dtsi @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Base device tree that overlays will be applied against. + * + * Do not add any properties in node "/". + * Do not add any nodes other than "/testcase-data-2" in node "/". + * Do not add anything that would result in dtc creating node "/__fixups__". + * dtc will create nodes "/__symbols__" and "/__local_fixups__". 
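+ * (The "-@" flag set in unittest-data/Makefile for the overlay sources,
+ * including overlay_base, is what enables creation of the "/__symbols__" node.)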
+ */ + +/ { + testcase-data-2 { + #address-cells = <1>; + #size-cells = <1>; + + electric_1: substation@100 { + compatible = "ot,big-volts-control"; + reg = < 0x00000100 0x100 >; + status = "disabled"; + + hvac_1: hvac-medium-1 { + compatible = "ot,hvac-medium"; + heat-range = < 50 75 >; + cool-range = < 60 80 >; + }; + + spin_ctrl_1: motor-1 { + compatible = "ot,ferris-wheel-motor"; + spin = "clockwise"; + rpm_avail = < 50 >; + }; + + spin_ctrl_2: motor-8 { + compatible = "ot,roller-coaster-motor"; + }; + }; + + rides_1: fairway-1 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ot,rides"; + status = "disabled"; + orientation = < 127 >; + + ride@100 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ot,roller-coaster"; + reg = < 0x00000100 0x100 >; + hvac-provider = < &hvac_1 >; + hvac-thermostat = < 29 > ; + hvac-zones = < 14 >; + hvac-zone-names = "operator"; + spin-controller = < &spin_ctrl_2 5 &spin_ctrl_2 7 >; + spin-controller-names = "track_1", "track_2"; + queues = < 2 >; + + track@30 { + reg = < 0x00000030 0x10 >; + }; + + track@40 { + reg = < 0x00000040 0x10 >; + }; + + }; + }; + + lights_1: lights@30000 { + compatible = "ot,work-lights"; + reg = < 0x00030000 0x1000 >; + status = "disabled"; + }; + + lights_2: lights@40000 { + compatible = "ot,show-lights"; + reg = < 0x00040000 0x1000 >; + status = "disabled"; + rate = < 13 138 >; + }; + + retail_1: vending@50000 { + reg = < 0x00050000 0x1000 >; + compatible = "ot,tickets"; + status = "disabled"; + }; + + }; +}; + diff --git a/drivers/of/unittest-data/overlay_gpio_01.dts b/drivers/of/unittest-data/overlay_gpio_01.dts new file mode 100644 index 000000000..699ff104a --- /dev/null +++ b/drivers/of/unittest-data/overlay_gpio_01.dts @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + gpio@0 { + compatible = "unittest-gpio"; + reg = <0>; + gpio-controller; + #gpio-cells = <2>; + ngpios = <2>; + gpio-line-names = "line-A", "line-B"; + + line-b { + gpio-hog; + gpios = <2 0>; + input; + line-name = "line-B-input"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_gpio_02a.dts b/drivers/of/unittest-data/overlay_gpio_02a.dts new file mode 100644 index 000000000..ec59aff6e --- /dev/null +++ b/drivers/of/unittest-data/overlay_gpio_02a.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + gpio@2 { + compatible = "unittest-gpio"; + reg = <2>; + gpio-controller; + #gpio-cells = <2>; + ngpios = <2>; + gpio-line-names = "line-A", "line-B"; + }; +}; diff --git a/drivers/of/unittest-data/overlay_gpio_02b.dts b/drivers/of/unittest-data/overlay_gpio_02b.dts new file mode 100644 index 000000000..43ce111d4 --- /dev/null +++ b/drivers/of/unittest-data/overlay_gpio_02b.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + gpio@2 { + line-a { + gpio-hog; + gpios = <1 0>; + input; + line-name = "line-A-input"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_gpio_03.dts b/drivers/of/unittest-data/overlay_gpio_03.dts new file mode 100644 index 000000000..6e0312340 --- /dev/null +++ b/drivers/of/unittest-data/overlay_gpio_03.dts @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + gpio@3 { + compatible = "unittest-gpio"; + reg = <3>; + 
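+		/* the controller properties and the line-d hog below are added by this
+		 * single overlay, whereas gpio@4 gets its controller from overlay_gpio_04a
+		 * and its line-c hog from overlay_gpio_04b */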
gpio-controller; + #gpio-cells = <2>; + ngpios = <2>; + gpio-line-names = "line-A", "line-B", "line-C", "line-D"; + + line-d { + gpio-hog; + gpios = <4 0>; + input; + line-name = "line-D-input"; + }; + }; +}; diff --git a/drivers/of/unittest-data/overlay_gpio_04a.dts b/drivers/of/unittest-data/overlay_gpio_04a.dts new file mode 100644 index 000000000..7b1e04ebf --- /dev/null +++ b/drivers/of/unittest-data/overlay_gpio_04a.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + gpio@4 { + compatible = "unittest-gpio"; + reg = <4>; + gpio-controller; + #gpio-cells = <2>; + ngpios = <2>; + gpio-line-names = "line-A", "line-B", "line-C", "line-D"; + }; +}; diff --git a/drivers/of/unittest-data/overlay_gpio_04b.dts b/drivers/of/unittest-data/overlay_gpio_04b.dts new file mode 100644 index 000000000..a14e95c66 --- /dev/null +++ b/drivers/of/unittest-data/overlay_gpio_04b.dts @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +&unittest_test_bus { + #address-cells = <1>; + #size-cells = <0>; + gpio@4 { + line-c { + gpio-hog; + gpios = <3 0>; + input; + line-name = "line-C-input"; + }; + }; +}; diff --git a/drivers/of/unittest-data/static_base_1.dts b/drivers/of/unittest-data/static_base_1.dts new file mode 100644 index 000000000..10556cb3f --- /dev/null +++ b/drivers/of/unittest-data/static_base_1.dts @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; + +#include "testcases_common.dtsi" diff --git a/drivers/of/unittest-data/static_base_2.dts b/drivers/of/unittest-data/static_base_2.dts new file mode 100644 index 000000000..b0ea9504d --- /dev/null +++ b/drivers/of/unittest-data/static_base_2.dts @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; + +#include "overlay_common.dtsi" diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts new file mode 100644 index 000000000..61cdd3d5f --- /dev/null +++ b/drivers/of/unittest-data/testcases.dts @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +/plugin/; + +#include "testcases_common.dtsi" + +/ { + /* + * testcase data that intentionally results in an error is located here + * instead of in testcases_common.dtsi so that the static overlay apply + * tests will not include the error. 
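+	 * (test_intc2 in tests-interrupts.dtsi declares #interrupt-cells = <2>,
+	 * so the single-cell "interrupts" value below is deliberately too short.)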
+ */ + testcase-data { + testcase-device2 { + compatible = "testcase-device"; + interrupt-parent = <&test_intc2>; + interrupts = <1>; /* invalid specifier - too short */ + }; + }; + +}; diff --git a/drivers/of/unittest-data/testcases_common.dtsi b/drivers/of/unittest-data/testcases_common.dtsi new file mode 100644 index 000000000..19292bbb4 --- /dev/null +++ b/drivers/of/unittest-data/testcases_common.dtsi @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + testcase-data { + changeset { + prop-update = "hello"; + prop-remove = "world"; + node-remove { + }; + }; + }; +}; + +#include "tests-phandle.dtsi" +#include "tests-interrupts.dtsi" +#include "tests-match.dtsi" +#include "tests-address.dtsi" +#include "tests-platform.dtsi" +#include "tests-overlay.dtsi" diff --git a/drivers/of/unittest-data/tests-address.dtsi b/drivers/of/unittest-data/tests-address.dtsi new file mode 100644 index 000000000..6604a52bf --- /dev/null +++ b/drivers/of/unittest-data/tests-address.dtsi @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + #address-cells = <1>; + #size-cells = <1>; + + testcase-data { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + address-tests { + #address-cells = <1>; + #size-cells = <1>; + /* ranges here is to make sure we don't use it for + * dma-ranges translation */ + ranges = <0x70000000 0x70000000 0x40000000>, + <0x00000000 0xd0000000 0x20000000>; + dma-ranges = <0x0 0x20000000 0x40000000>; + + device@70000000 { + reg = <0x70000000 0x1000>; + }; + + bus@80000000 { + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x0 0x0 0x80000000 0x0 0x100000>; + dma-ranges = <0x1 0x0 0x0 0x20 0x0>; + + device@1000 { + reg = <0x0 0x1000 0x0 0x1000>; + }; + }; + + pci@90000000 { + device_type = "pci"; + #address-cells = <3>; + #size-cells = <2>; + reg = <0x90000000 0x1000>; + ranges = <0x42000000 0x0 0x40000000 0x40000000 0x0 0x10000000>; + dma-ranges = <0x42000000 0x0 0x80000000 0x00000000 0x0 0x10000000>, + <0x42000000 0x0 0xc0000000 0x20000000 0x0 0x10000000>; + }; + + }; + }; +}; diff --git a/drivers/of/unittest-data/tests-interrupts.dtsi b/drivers/of/unittest-data/tests-interrupts.dtsi new file mode 100644 index 000000000..ecc74dbcc --- /dev/null +++ b/drivers/of/unittest-data/tests-interrupts.dtsi @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + testcase-data { + interrupts { + #address-cells = <1>; + #size-cells = <1>; + test_intc0: intc0 { + interrupt-controller; + #interrupt-cells = <1>; + }; + + test_intc1: intc1 { + interrupt-controller; + #interrupt-cells = <3>; + }; + + test_intc2: intc2 { + interrupt-controller; + #interrupt-cells = <2>; + }; + + test_intmap0: intmap0 { + #interrupt-cells = <1>; + #address-cells = <0>; + interrupt-map = <1 &test_intc0 9>, + <2 &test_intc1 10 11 12>, + <3 &test_intc2 13 14>, + <4 &test_intc2 15 16>; + }; + + test_intmap1: intmap1 { + #interrupt-cells = <2>; + /* + * #address-cells is required + * + * The property is not provided in this node to + * test that the code will properly handle + * this case for legacy .dts files. 
+ * + * Not having #address-cells will result in a + * warning from dtc starting with + * version v1.6.1-19-g0a3a9d3449c8 + * The warning is suppressed by adding + * -Wno-interrupt_map to the Makefile for all + * .dts files this include this .dtsi + #address-cells = <1>; + */ + interrupt-map = <0x5000 1 2 &test_intc0 15>; + }; + + interrupts0 { + interrupt-parent = <&test_intc0>; + interrupts = <1>, <2>, <3>, <4>; + }; + + interrupts1 { + interrupt-parent = <&test_intmap0>; + interrupts = <1>, <2>, <3>, <4>; + }; + + interrupts-extended0 { + reg = <0x5000 0x100>; + /* + * Do not remove &test_intmap1 from this + * property - see comment in node intmap1 + */ + interrupts-extended = <&test_intc0 1>, + <&test_intc1 2 3 4>, + <&test_intc2 5 6>, + <&test_intmap0 1>, + <&test_intmap0 2>, + <&test_intmap0 3>, + <&test_intmap1 1 2>; + }; + }; + + testcase-device1 { + compatible = "testcase-device"; + interrupt-parent = <&test_intc0>; + interrupts = <1>; + }; + + /* + * testcase data that intentionally results in an error is + * located in testcases.dts instead of in this file so that the + * static overlay apply tests will not include the error. + */ + }; +}; diff --git a/drivers/of/unittest-data/tests-match.dtsi b/drivers/of/unittest-data/tests-match.dtsi new file mode 100644 index 000000000..1fd3b2131 --- /dev/null +++ b/drivers/of/unittest-data/tests-match.dtsi @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + testcase-data { + match-node { + name0 { }; + name1 { device_type = "type1"; }; + a { name2 { device_type = "type1"; }; }; + b { name2 { }; }; + c { name2 { device_type = "type2"; }; }; + name3 { compatible = "compat3"; }; + name4 { compatible = "compat2", "compat3"; }; + name5 { compatible = "compat2", "compat3"; }; + name6 { compatible = "compat1", "compat2", "compat3"; }; + name7 { compatible = "compat2"; device_type = "type1"; }; + name8 { compatible = "compat2"; device_type = "type1"; }; + name9 { compatible = "compat2"; }; + }; + }; +}; diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi new file mode 100644 index 000000000..4ea024d90 --- /dev/null +++ b/drivers/of/unittest-data/tests-overlay.dtsi @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + testcase-data { + overlay-node { + + /* test bus */ + unittest_test_bus: test-bus { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <0>; + + unittest100: test-unittest100 { + compatible = "unittest"; + status = "okay"; + reg = <100>; + }; + + unittest101: test-unittest101 { + compatible = "unittest"; + status = "disabled"; + reg = <101>; + }; + + unittest0: test-unittest0 { + compatible = "unittest"; + status = "disabled"; + reg = <0>; + }; + + unittest1: test-unittest1 { + compatible = "unittest"; + status = "okay"; + reg = <1>; + }; + + unittest2: test-unittest2 { + compatible = "unittest"; + status = "disabled"; + reg = <2>; + }; + + unittest3: test-unittest3 { + compatible = "unittest"; + status = "okay"; + reg = <3>; + }; + + unittest5: test-unittest5 { + compatible = "unittest"; + status = "disabled"; + reg = <5>; + }; + + unittest6: test-unittest6 { + compatible = "unittest"; + status = "disabled"; + reg = <6>; + }; + + unittest7: test-unittest7 { + compatible = "unittest"; + status = "disabled"; + reg = <7>; + }; + + unittest8: test-unittest8 { + compatible = "unittest"; + status = "disabled"; + reg = <8>; + }; + + unittest_i2c_test_bus: i2c-test-bus { + compatible = "unittest-i2c-bus"; + status = "okay"; + reg = <50>; + + 
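+				/* test-unittest12 and test-unittest13 below are toggled by
+				 * overlay_12 and overlay_13 (by absolute path); overlay_15
+				 * adds a mux under this bus via the unittest_i2c_test_bus label */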
#address-cells = <1>; + #size-cells = <0>; + + test-unittest12 { + reg = <8>; + compatible = "unittest-i2c-dev"; + status = "disabled"; + }; + + test-unittest13 { + reg = <9>; + compatible = "unittest-i2c-dev"; + status = "okay"; + }; + + test-unittest14 { + reg = <10>; + compatible = "unittest-i2c-mux"; + status = "okay"; + + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + test-mux-dev@20 { + reg = <0x20>; + compatible = "unittest-i2c-dev"; + status = "okay"; + }; + }; + }; + }; + }; + }; + }; +}; diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi new file mode 100644 index 000000000..aa0d7027f --- /dev/null +++ b/drivers/of/unittest-data/tests-phandle.dtsi @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + aliases { + testcase-alias = &testcase; + }; + + testcase: testcase-data { + security-password = "password"; + duplicate-name = "duplicate"; + duplicate-name { }; + phandle-tests { + provider0: provider0 { + #phandle-cells = <0>; + }; + + provider1: provider1 { + #phandle-cells = <1>; + }; + + provider2: provider2 { + #phandle-cells = <2>; + }; + + provider3: provider3 { + #phandle-cells = <3>; + }; + + provider4: provider4 { + #phandle-cells = <2>; + phandle-map = <0 1 &provider1 3>, + <4 0 &provider0>, + <16 5 &provider3 3 5 0>, + <200 8 &provider2 23 6>, + <19 0 &provider2 15 0>, + <2 3 &provider3 2 5 3>; + phandle-map-mask = <0xff 0xf>; + phandle-map-pass-thru = <0x0 0xf0>; + }; + + provider5: provider5 { + #phandle-cells = <2>; + phandle-map = <2 7 &provider4 2 3>; + phandle-map-mask = <0xff 0xf>; + phandle-map-pass-thru = <0x0 0xf0>; + }; + + consumer-a { + phandle-list = <&provider1 1>, + <&provider2 2 0>, + <0>, + <&provider3 4 4 3>, + <&provider2 5 100>, + <&provider0>, + <&provider1 7>; + phandle-list-names = "first", "second", "third"; + + phandle-list-bad-phandle = <12345678 0 0>; + phandle-list-bad-args = <&provider2 1 0>, + <&provider3 0>; + empty-property; + string-property = "foobar"; + unterminated-string = [40 41 42 43]; + unterminated-string-list = "first", "second", [40 41 42 43]; + }; + + consumer-b { + phandle-list = <&provider1 1>, + <&provider4 2 3>, + <0>, + <&provider4 4 0x100>, + <&provider4 0 0x61>, + <&provider0>, + <&provider4 19 0x20>, + <&provider5 2 7>; + phandle-list-bad-phandle = <12345678 0 0>; + phandle-list-bad-args = <&provider2 1 0>, + <&provider4 0>; + }; + }; + }; +}; diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi new file mode 100644 index 000000000..fa3961107 --- /dev/null +++ b/drivers/of/unittest-data/tests-platform.dtsi @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 + +/ { + testcase-data { + platform-tests { + #address-cells = <1>; + #size-cells = <0>; + + test-device@0 { + compatible = "test-device"; + reg = <0x0>; + + #address-cells = <1>; + #size-cells = <0>; + + dev@100 { + compatible = "test-sub-device"; + reg = <0x100>; + }; + }; + + test-device@1 { + compatible = "test-device"; + reg = <0x1>; + + #address-cells = <1>; + #size-cells = <0>; + + dev@100 { + compatible = "test-sub-device", + "test-compat2", + "test-compat3"; + reg = <0x100>; + }; + }; + }; + }; +}; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c new file mode 100644 index 000000000..e541a8960 --- /dev/null +++ b/drivers/of/unittest.c @@ -0,0 +1,3532 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Self tests for device tree subsystem + */ + +#define pr_fmt(fmt) 
"### dt-test ### " fmt + +#include <linux/memblock.h> +#include <linux/clk.h> +#include <linux/dma-direct.h> /* to test phys_to_dma/dma_to_phys */ +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/hashtable.h> +#include <linux/libfdt.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/kernel.h> + +#include <linux/i2c.h> +#include <linux/i2c-mux.h> +#include <linux/gpio/driver.h> + +#include <linux/bitops.h> + +#include "of_private.h" + +static struct unittest_results { + int passed; + int failed; +} unittest_results; + +#define unittest(result, fmt, ...) ({ \ + bool failed = !(result); \ + if (failed) { \ + unittest_results.failed++; \ + pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \ + } else { \ + unittest_results.passed++; \ + pr_info("pass %s():%i\n", __func__, __LINE__); \ + } \ + failed; \ +}) + +/* + * Expected message may have a message level other than KERN_INFO. + * Print the expected message only if the current loglevel will allow + * the actual message to print. + * + * Do not use EXPECT_BEGIN() or EXPECT_END() for messages generated by + * pr_debug(). + */ +#define EXPECT_BEGIN(level, fmt, ...) \ + printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__) + +#define EXPECT_END(level, fmt, ...) \ + printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__) + +static void __init of_unittest_find_node_by_name(void) +{ + struct device_node *np; + const char *options, *name; + + np = of_find_node_by_path("/testcase-data"); + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && name && !strcmp("/testcase-data", name), + "find /testcase-data failed\n"); + of_node_put(np); + kfree(name); + + /* Test if trailing '/' works */ + np = of_find_node_by_path("/testcase-data/"); + unittest(!np, "trailing '/' on /testcase-data/ should fail\n"); + + np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name), + "find /testcase-data/phandle-tests/consumer-a failed\n"); + of_node_put(np); + kfree(name); + + np = of_find_node_by_path("testcase-alias"); + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && name && !strcmp("/testcase-data", name), + "find testcase-alias failed\n"); + of_node_put(np); + kfree(name); + + /* Test if trailing '/' works on aliases */ + np = of_find_node_by_path("testcase-alias/"); + unittest(!np, "trailing '/' on testcase-alias/ should fail\n"); + + np = of_find_node_by_path("testcase-alias/phandle-tests/consumer-a"); + name = kasprintf(GFP_KERNEL, "%pOF", np); + unittest(np && name && !strcmp("/testcase-data/phandle-tests/consumer-a", name), + "find testcase-alias/phandle-tests/consumer-a failed\n"); + of_node_put(np); + kfree(name); + + np = of_find_node_by_path("/testcase-data/missing-path"); + unittest(!np, "non-existent path returned node %pOF\n", np); + of_node_put(np); + + np = of_find_node_by_path("missing-alias"); + unittest(!np, "non-existent alias returned node %pOF\n", np); + of_node_put(np); + + np = of_find_node_by_path("testcase-alias/missing-path"); + unittest(!np, "non-existent alias with relative path returned node %pOF\n", np); + of_node_put(np); + + np = of_find_node_opts_by_path("/testcase-data:testoption", &options); + unittest(np 
&& !strcmp("testoption", options), + "option path test failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("/testcase-data:test/option", &options); + unittest(np && !strcmp("test/option", options), + "option path test, subcase #1 failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("/testcase-data/testcase-device1:test/option", &options); + unittest(np && !strcmp("test/option", options), + "option path test, subcase #2 failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); + unittest(np, "NULL option path test failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("testcase-alias:testaliasoption", + &options); + unittest(np && !strcmp("testaliasoption", options), + "option alias path test failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("testcase-alias:test/alias/option", + &options); + unittest(np && !strcmp("test/alias/option", options), + "option alias path test, subcase #1 failed\n"); + of_node_put(np); + + np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); + unittest(np, "NULL option alias path test failed\n"); + of_node_put(np); + + options = "testoption"; + np = of_find_node_opts_by_path("testcase-alias", &options); + unittest(np && !options, "option clearing test failed\n"); + of_node_put(np); + + options = "testoption"; + np = of_find_node_opts_by_path("/", &options); + unittest(np && !options, "option clearing root node test failed\n"); + of_node_put(np); +} + +static void __init of_unittest_dynamic(void) +{ + struct device_node *np; + struct property *prop; + + np = of_find_node_by_path("/testcase-data"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + /* Array of 4 properties for the purpose of testing */ + prop = kcalloc(4, sizeof(*prop), GFP_KERNEL); + if (!prop) { + unittest(0, "kzalloc() failed\n"); + return; + } + + /* Add a new property - should pass*/ + prop->name = "new-property"; + prop->value = "new-property-data"; + prop->length = strlen(prop->value) + 1; + unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n"); + + /* Try to add an existing property - should fail */ + prop++; + prop->name = "new-property"; + prop->value = "new-property-data-should-fail"; + prop->length = strlen(prop->value) + 1; + unittest(of_add_property(np, prop) != 0, + "Adding an existing property should have failed\n"); + + /* Try to modify an existing property - should pass */ + prop->value = "modify-property-data-should-pass"; + prop->length = strlen(prop->value) + 1; + unittest(of_update_property(np, prop) == 0, + "Updating an existing property should have passed\n"); + + /* Try to modify non-existent property - should pass*/ + prop++; + prop->name = "modify-property"; + prop->value = "modify-missing-property-data-should-pass"; + prop->length = strlen(prop->value) + 1; + unittest(of_update_property(np, prop) == 0, + "Updating a missing property should have passed\n"); + + /* Remove property - should pass */ + unittest(of_remove_property(np, prop) == 0, + "Removing a property should have passed\n"); + + /* Adding very large property - should pass */ + prop++; + prop->name = "large-property-PAGE_SIZEx8"; + prop->length = PAGE_SIZE * 8; + prop->value = kzalloc(prop->length, GFP_KERNEL); + unittest(prop->value != NULL, "Unable to allocate large buffer\n"); + if (prop->value) + unittest(of_add_property(np, prop) == 0, + "Adding a large property should have passed\n"); +} + +static int __init of_unittest_check_node_linkage(struct 
device_node *np) +{ + struct device_node *child; + int count = 0, rc; + + for_each_child_of_node(np, child) { + if (child->parent != np) { + pr_err("Child node %pOFn links to wrong parent %pOFn\n", + child, np); + rc = -EINVAL; + goto put_child; + } + + rc = of_unittest_check_node_linkage(child); + if (rc < 0) + goto put_child; + count += rc; + } + + return count + 1; +put_child: + of_node_put(child); + return rc; +} + +static void __init of_unittest_check_tree_linkage(void) +{ + struct device_node *np; + int allnode_count = 0, child_count; + + if (!of_root) + return; + + for_each_of_allnodes(np) + allnode_count++; + child_count = of_unittest_check_node_linkage(of_root); + + unittest(child_count > 0, "Device node data structure is corrupted\n"); + unittest(child_count == allnode_count, + "allnodes list size (%i) doesn't match sibling lists size (%i)\n", + allnode_count, child_count); + pr_debug("allnodes list size (%i); sibling lists size (%i)\n", allnode_count, child_count); +} + +static void __init of_unittest_printf_one(struct device_node *np, const char *fmt, + const char *expected) +{ + unsigned char *buf; + int buf_size; + int size, i; + + buf_size = strlen(expected) + 10; + buf = kmalloc(buf_size, GFP_KERNEL); + if (!buf) + return; + + /* Baseline; check conversion with a large size limit */ + memset(buf, 0xff, buf_size); + size = snprintf(buf, buf_size - 2, fmt, np); + + /* use strcmp() instead of strncmp() here to be absolutely sure strings match */ + unittest((strcmp(buf, expected) == 0) && (buf[size+1] == 0xff), + "sprintf failed; fmt='%s' expected='%s' rslt='%s'\n", + fmt, expected, buf); + + /* Make sure length limits work */ + size++; + for (i = 0; i < 2; i++, size--) { + /* Clear the buffer, and make sure it works correctly still */ + memset(buf, 0xff, buf_size); + snprintf(buf, size+1, fmt, np); + unittest(strncmp(buf, expected, size) == 0 && (buf[size+1] == 0xff), + "snprintf failed; size=%i fmt='%s' expected='%s' rslt='%s'\n", + size, fmt, expected, buf); + } + kfree(buf); +} + +static void __init of_unittest_printf(void) +{ + struct device_node *np; + const char *full_name = "/testcase-data/platform-tests/test-device@1/dev@100"; + char phandle_str[16] = ""; + + np = of_find_node_by_path(full_name); + if (!np) { + unittest(np, "testcase data missing\n"); + return; + } + + num_to_str(phandle_str, sizeof(phandle_str), np->phandle, 0); + + of_unittest_printf_one(np, "%pOF", full_name); + of_unittest_printf_one(np, "%pOFf", full_name); + of_unittest_printf_one(np, "%pOFn", "dev"); + of_unittest_printf_one(np, "%2pOFn", "dev"); + of_unittest_printf_one(np, "%5pOFn", " dev"); + of_unittest_printf_one(np, "%pOFnc", "dev:test-sub-device"); + of_unittest_printf_one(np, "%pOFp", phandle_str); + of_unittest_printf_one(np, "%pOFP", "dev@100"); + of_unittest_printf_one(np, "ABC %pOFP ABC", "ABC dev@100 ABC"); + of_unittest_printf_one(np, "%10pOFP", " dev@100"); + of_unittest_printf_one(np, "%-10pOFP", "dev@100 "); + of_unittest_printf_one(of_root, "%pOFP", "/"); + of_unittest_printf_one(np, "%pOFF", "----"); + of_unittest_printf_one(np, "%pOFPF", "dev@100:----"); + of_unittest_printf_one(np, "%pOFPFPc", "dev@100:----:dev@100:test-sub-device"); + of_unittest_printf_one(np, "%pOFc", "test-sub-device"); + of_unittest_printf_one(np, "%pOFC", + "\"test-sub-device\",\"test-compat2\",\"test-compat3\""); +} + +struct node_hash { + struct hlist_node node; + struct device_node *np; +}; + +static DEFINE_HASHTABLE(phandle_ht, 8); +static void __init of_unittest_check_phandles(void) +{ + struct 
device_node *np; + struct node_hash *nh; + struct hlist_node *tmp; + int i, dup_count = 0, phandle_count = 0; + + for_each_of_allnodes(np) { + if (!np->phandle) + continue; + + hash_for_each_possible(phandle_ht, nh, node, np->phandle) { + if (nh->np->phandle == np->phandle) { + pr_info("Duplicate phandle! %i used by %pOF and %pOF\n", + np->phandle, nh->np, np); + dup_count++; + break; + } + } + + nh = kzalloc(sizeof(*nh), GFP_KERNEL); + if (!nh) + return; + + nh->np = np; + hash_add(phandle_ht, &nh->node, np->phandle); + phandle_count++; + } + unittest(dup_count == 0, "Found %i duplicates in %i phandles\n", + dup_count, phandle_count); + + /* Clean up */ + hash_for_each_safe(phandle_ht, i, tmp, nh, node) { + hash_del(&nh->node); + kfree(nh); + } +} + +static void __init of_unittest_parse_phandle_with_args(void) +{ + struct device_node *np; + struct of_phandle_args args; + int i, rc; + + np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); + unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc); + + for (i = 0; i < 8; i++) { + bool passed = true; + + memset(&args, 0, sizeof(args)); + rc = of_parse_phandle_with_args(np, "phandle-list", + "#phandle-cells", i, &args); + + /* Test the values from tests-phandle.dtsi */ + switch (i) { + case 0: + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == (i + 1)); + break; + case 1: + passed &= !rc; + passed &= (args.args_count == 2); + passed &= (args.args[0] == (i + 1)); + passed &= (args.args[1] == 0); + break; + case 2: + passed &= (rc == -ENOENT); + break; + case 3: + passed &= !rc; + passed &= (args.args_count == 3); + passed &= (args.args[0] == (i + 1)); + passed &= (args.args[1] == 4); + passed &= (args.args[2] == 3); + break; + case 4: + passed &= !rc; + passed &= (args.args_count == 2); + passed &= (args.args[0] == (i + 1)); + passed &= (args.args[1] == 100); + break; + case 5: + passed &= !rc; + passed &= (args.args_count == 0); + break; + case 6: + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == (i + 1)); + break; + case 7: + passed &= (rc == -ENOENT); + break; + default: + passed = false; + } + + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); + + if (rc == 0) + of_node_put(args.np); + } + + /* Check for missing list property */ + memset(&args, 0, sizeof(args)); + rc = of_parse_phandle_with_args(np, "phandle-list-missing", + "#phandle-cells", 0, &args); + unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc); + rc = of_count_phandle_with_args(np, "phandle-list-missing", + "#phandle-cells"); + unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc); + + /* Check for missing cells property */ + memset(&args, 0, sizeof(args)); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1"); + + rc = of_parse_phandle_with_args(np, "phandle-list", + "#phandle-cells-missing", 0, &args); + + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1"); + + rc = 
of_count_phandle_with_args(np, "phandle-list", + "#phandle-cells-missing"); + + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not get #phandle-cells-missing for /testcase-data/phandle-tests/provider1"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + /* Check for bad phandle in list */ + memset(&args, 0, sizeof(args)); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle"); + + rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle", + "#phandle-cells", 0, &args); + + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle"); + + rc = of_count_phandle_with_args(np, "phandle-list-bad-phandle", + "#phandle-cells"); + + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: could not find phandle"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + /* Check for incorrectly formed argument list */ + memset(&args, 0, sizeof(args)); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); + + rc = of_parse_phandle_with_args(np, "phandle-list-bad-args", + "#phandle-cells", 1, &args); + + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); + + rc = of_count_phandle_with_args(np, "phandle-list-bad-args", + "#phandle-cells"); + + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); +} + +static void __init of_unittest_parse_phandle_with_args_map(void) +{ + struct device_node *np, *p[6] = {}; + struct of_phandle_args args; + unsigned int prefs[6]; + int i, rc; + + np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0"); + p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1"); + p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2"); + p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3"); + p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4"); + p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5"); + for (i = 0; i < ARRAY_SIZE(p); ++i) { + if (!p[i]) { + pr_err("missing testcase data\n"); + return; + } + prefs[i] = kref_read(&p[i]->kobj.kref); + } + + rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); + unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc); + + for (i = 0; i < 9; i++) { + bool passed = true; + + memset(&args, 0, sizeof(args)); + rc = of_parse_phandle_with_args_map(np, "phandle-list", + "phandle", i, &args); + + /* Test the values from tests-phandle.dtsi */ + switch (i) { + case 0: + passed &= !rc; + passed &= (args.np == p[1]); + passed &= (args.args_count == 1); + passed &= (args.args[0] == 1); + break; + case 1: + passed &= !rc; + passed &= (args.np == p[3]); + passed &= (args.args_count == 3); + passed &= (args.args[0] == 2); + passed &= (args.args[1] == 5); + passed &= (args.args[2] == 3); + 
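+			/* index 1 is <&provider4 2 3>: with phandle-map-mask <0xff 0xf> the
+			 * args stay (2, 3) and match provider4's map entry <2 3 &provider3 2 5 3>;
+			 * phandle-map-pass-thru <0x0 0xf0> passes no bits of arg 3 through, so the
+			 * result is provider3 with args {2, 5, 3} as checked above */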
break; + case 2: + passed &= (rc == -ENOENT); + break; + case 3: + passed &= !rc; + passed &= (args.np == p[0]); + passed &= (args.args_count == 0); + break; + case 4: + passed &= !rc; + passed &= (args.np == p[1]); + passed &= (args.args_count == 1); + passed &= (args.args[0] == 3); + break; + case 5: + passed &= !rc; + passed &= (args.np == p[0]); + passed &= (args.args_count == 0); + break; + case 6: + passed &= !rc; + passed &= (args.np == p[2]); + passed &= (args.args_count == 2); + passed &= (args.args[0] == 15); + passed &= (args.args[1] == 0x20); + break; + case 7: + passed &= !rc; + passed &= (args.np == p[3]); + passed &= (args.args_count == 3); + passed &= (args.args[0] == 2); + passed &= (args.args[1] == 5); + passed &= (args.args[2] == 3); + break; + case 8: + passed &= (rc == -ENOENT); + break; + default: + passed = false; + } + + unittest(passed, "index %i - data error on node %s rc=%i\n", + i, args.np->full_name, rc); + + if (rc == 0) + of_node_put(args.np); + } + + /* Check for missing list property */ + memset(&args, 0, sizeof(args)); + rc = of_parse_phandle_with_args_map(np, "phandle-list-missing", + "phandle", 0, &args); + unittest(rc == -ENOENT, "expected:%i got:%i\n", -ENOENT, rc); + + /* Check for missing cells,map,mask property */ + memset(&args, 0, sizeof(args)); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1"); + + rc = of_parse_phandle_with_args_map(np, "phandle-list", + "phandle-missing", 0, &args); + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-b: could not get #phandle-missing-cells for /testcase-data/phandle-tests/provider1"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + /* Check for bad phandle in list */ + memset(&args, 0, sizeof(args)); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); + + rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle", + "phandle", 0, &args); + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + /* Check for incorrectly formed argument list */ + memset(&args, 0, sizeof(args)); + + EXPECT_BEGIN(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); + + rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args", + "phandle", 1, &args); + EXPECT_END(KERN_INFO, + "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); + + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + for (i = 0; i < ARRAY_SIZE(p); ++i) { + unittest(prefs[i] == kref_read(&p[i]->kobj.kref), + "provider%d: expected:%d got:%d\n", + i, prefs[i], kref_read(&p[i]->kobj.kref)); + of_node_put(p[i]); + } +} + +static void __init of_unittest_property_string(void) +{ + const char *strings[4]; + struct device_node *np; + int rc; + + np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); + if (!np) { + pr_err("No testcase data in device tree\n"); + return; + } + + rc = of_property_match_string(np, "phandle-list-names", "first"); + unittest(rc == 0, "first expected:0 got:%i\n", rc); + rc = of_property_match_string(np, "phandle-list-names", "second"); + unittest(rc == 1, "second expected:1 got:%i\n", rc); + rc = of_property_match_string(np, "phandle-list-names", "third"); + unittest(rc == 2, "third expected:2 got:%i\n", rc); + rc = 
of_property_match_string(np, "phandle-list-names", "fourth"); + unittest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); + rc = of_property_match_string(np, "missing-property", "blah"); + unittest(rc == -EINVAL, "missing property; rc=%i\n", rc); + rc = of_property_match_string(np, "empty-property", "blah"); + unittest(rc == -ENODATA, "empty property; rc=%i\n", rc); + rc = of_property_match_string(np, "unterminated-string", "blah"); + unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + + /* of_property_count_strings() tests */ + rc = of_property_count_strings(np, "string-property"); + unittest(rc == 1, "Incorrect string count; rc=%i\n", rc); + rc = of_property_count_strings(np, "phandle-list-names"); + unittest(rc == 3, "Incorrect string count; rc=%i\n", rc); + rc = of_property_count_strings(np, "unterminated-string"); + unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + rc = of_property_count_strings(np, "unterminated-string-list"); + unittest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); + + /* of_property_read_string_index() tests */ + rc = of_property_read_string_index(np, "string-property", 0, strings); + unittest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "string-property", 1, strings); + unittest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 0, strings); + unittest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 1, strings); + unittest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "phandle-list-names", 2, strings); + unittest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "phandle-list-names", 3, strings); + unittest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "unterminated-string", 0, strings); + unittest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings); + unittest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); + strings[0] = NULL; + rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */ + unittest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); + strings[1] = NULL; + + /* of_property_read_string_array() tests */ + rc = of_property_read_string_array(np, "string-property", strings, 4); + unittest(rc == 1, "Incorrect string count; rc=%i\n", rc); + rc = of_property_read_string_array(np, "phandle-list-names", strings, 4); + unittest(rc == 3, "Incorrect string count; rc=%i\n", rc); + rc = of_property_read_string_array(np, "unterminated-string", strings, 4); + unittest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); + /* -- An incorrectly formed string should cause a failure */ + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4); + unittest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); + /* -- 
parsing the correctly formed strings should still work: */ + strings[2] = NULL; + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2); + unittest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc); + strings[1] = NULL; + rc = of_property_read_string_array(np, "phandle-list-names", strings, 1); + unittest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]); +} + +#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \ + (p1)->value && (p2)->value && \ + !memcmp((p1)->value, (p2)->value, (p1)->length) && \ + !strcmp((p1)->name, (p2)->name)) +static void __init of_unittest_property_copy(void) +{ +#ifdef CONFIG_OF_DYNAMIC + struct property p1 = { .name = "p1", .length = 0, .value = "" }; + struct property p2 = { .name = "p2", .length = 5, .value = "abcd" }; + struct property *new; + + new = __of_prop_dup(&p1, GFP_KERNEL); + unittest(new && propcmp(&p1, new), "empty property didn't copy correctly\n"); + kfree(new->value); + kfree(new->name); + kfree(new); + + new = __of_prop_dup(&p2, GFP_KERNEL); + unittest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n"); + kfree(new->value); + kfree(new->name); + kfree(new); +#endif +} + +static void __init of_unittest_changeset(void) +{ +#ifdef CONFIG_OF_DYNAMIC + struct property *ppadd, padd = { .name = "prop-add", .length = 1, .value = "" }; + struct property *ppname_n1, pname_n1 = { .name = "name", .length = 3, .value = "n1" }; + struct property *ppname_n2, pname_n2 = { .name = "name", .length = 3, .value = "n2" }; + struct property *ppname_n21, pname_n21 = { .name = "name", .length = 3, .value = "n21" }; + struct property *ppupdate, pupdate = { .name = "prop-update", .length = 5, .value = "abcd" }; + struct property *ppremove; + struct device_node *n1, *n2, *n21, *nchangeset, *nremove, *parent, *np; + struct of_changeset chgset; + + n1 = __of_node_dup(NULL, "n1"); + unittest(n1, "testcase setup failure\n"); + + n2 = __of_node_dup(NULL, "n2"); + unittest(n2, "testcase setup failure\n"); + + n21 = __of_node_dup(NULL, "n21"); + unittest(n21, "testcase setup failure %p\n", n21); + + nchangeset = of_find_node_by_path("/testcase-data/changeset"); + nremove = of_get_child_by_name(nchangeset, "node-remove"); + unittest(nremove, "testcase setup failure\n"); + + ppadd = __of_prop_dup(&padd, GFP_KERNEL); + unittest(ppadd, "testcase setup failure\n"); + + ppname_n1 = __of_prop_dup(&pname_n1, GFP_KERNEL); + unittest(ppname_n1, "testcase setup failure\n"); + + ppname_n2 = __of_prop_dup(&pname_n2, GFP_KERNEL); + unittest(ppname_n2, "testcase setup failure\n"); + + ppname_n21 = __of_prop_dup(&pname_n21, GFP_KERNEL); + unittest(ppname_n21, "testcase setup failure\n"); + + ppupdate = __of_prop_dup(&pupdate, GFP_KERNEL); + unittest(ppupdate, "testcase setup failure\n"); + + parent = nchangeset; + n1->parent = parent; + n2->parent = parent; + n21->parent = n2; + + ppremove = of_find_property(parent, "prop-remove", NULL); + unittest(ppremove, "failed to find removal prop"); + + of_changeset_init(&chgset); + + unittest(!of_changeset_attach_node(&chgset, n1), "fail attach n1\n"); + unittest(!of_changeset_add_property(&chgset, n1, ppname_n1), "fail add prop name\n"); + + unittest(!of_changeset_attach_node(&chgset, n2), "fail attach n2\n"); + unittest(!of_changeset_add_property(&chgset, n2, ppname_n2), "fail add prop name\n"); + + unittest(!of_changeset_detach_node(&chgset, nremove), "fail remove node\n"); + 
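+	/* n21 was parented to n2 above; its name property is added before the node
+	 * itself is attached, and n2 is still pending in this same changeset */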
unittest(!of_changeset_add_property(&chgset, n21, ppname_n21), "fail add prop name\n"); + + unittest(!of_changeset_attach_node(&chgset, n21), "fail attach n21\n"); + + unittest(!of_changeset_add_property(&chgset, parent, ppadd), "fail add prop prop-add\n"); + unittest(!of_changeset_update_property(&chgset, parent, ppupdate), "fail update prop\n"); + unittest(!of_changeset_remove_property(&chgset, parent, ppremove), "fail remove prop\n"); + + unittest(!of_changeset_apply(&chgset), "apply failed\n"); + + of_node_put(nchangeset); + + /* Make sure node names are constructed correctly */ + unittest((np = of_find_node_by_path("/testcase-data/changeset/n2/n21")), + "'%pOF' not added\n", n21); + of_node_put(np); + + unittest(!of_changeset_revert(&chgset), "revert failed\n"); + + of_changeset_destroy(&chgset); + + of_node_put(n1); + of_node_put(n2); + of_node_put(n21); +#endif +} + +static void __init of_unittest_dma_get_max_cpu_address(void) +{ + struct device_node *np; + phys_addr_t cpu_addr; + + if (!IS_ENABLED(CONFIG_OF_ADDRESS)) + return; + + np = of_find_node_by_path("/testcase-data/address-tests"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + cpu_addr = of_dma_get_max_cpu_address(np); + unittest(cpu_addr == 0x4fffffff, + "of_dma_get_max_cpu_address: wrong CPU addr %pad (expecting %x)\n", + &cpu_addr, 0x4fffffff); +} + +static void __init of_unittest_dma_ranges_one(const char *path, + u64 expect_dma_addr, u64 expect_paddr) +{ +#ifdef CONFIG_HAS_DMA + struct device_node *np; + const struct bus_dma_region *map = NULL; + int rc; + + np = of_find_node_by_path(path); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + rc = of_dma_get_range(np, &map); + + unittest(!rc, "of_dma_get_range failed on node %pOF rc=%i\n", np, rc); + + if (!rc) { + phys_addr_t paddr; + dma_addr_t dma_addr; + struct device *dev_bogus; + + dev_bogus = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!dev_bogus) { + unittest(0, "kzalloc() failed\n"); + kfree(map); + return; + } + + dev_bogus->dma_range_map = map; + paddr = dma_to_phys(dev_bogus, expect_dma_addr); + dma_addr = phys_to_dma(dev_bogus, expect_paddr); + + unittest(paddr == expect_paddr, + "of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n", + &paddr, expect_paddr, np); + unittest(dma_addr == expect_dma_addr, + "of_dma_get_range: wrong DMA addr %pad (expecting %llx) on node %pOF\n", + &dma_addr, expect_dma_addr, np); + + kfree(map); + kfree(dev_bogus); + } + of_node_put(np); +#endif +} + +static void __init of_unittest_parse_dma_ranges(void) +{ + of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000", + 0x0, 0x20000000); + if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) + of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000", + 0x100000000, 0x20000000); + of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000", + 0x80000000, 0x20000000); +} + +static void __init of_unittest_pci_dma_ranges(void) +{ + struct device_node *np; + struct of_pci_range range; + struct of_pci_range_parser parser; + int i = 0; + + if (!IS_ENABLED(CONFIG_PCI)) + return; + + np = of_find_node_by_path("/testcase-data/address-tests/pci@90000000"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + if (of_pci_dma_range_parser_init(&parser, np)) { + pr_err("missing dma-ranges property\n"); + return; + } + + /* + * Get the dma-ranges from the device tree + */ + for_each_of_pci_range(&parser, &range) { + if (!i) { + unittest(range.size == 0x10000000, + 
"for_each_of_pci_range wrong size on node %pOF size=%llx\n", + np, range.size); + unittest(range.cpu_addr == 0x20000000, + "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF", + range.cpu_addr, np); + unittest(range.pci_addr == 0x80000000, + "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF", + range.pci_addr, np); + } else { + unittest(range.size == 0x10000000, + "for_each_of_pci_range wrong size on node %pOF size=%llx\n", + np, range.size); + unittest(range.cpu_addr == 0x40000000, + "for_each_of_pci_range wrong CPU addr (%llx) on node %pOF", + range.cpu_addr, np); + unittest(range.pci_addr == 0xc0000000, + "for_each_of_pci_range wrong DMA addr (%llx) on node %pOF", + range.pci_addr, np); + } + i++; + } + + of_node_put(np); +} + +static void __init of_unittest_parse_interrupts(void) +{ + struct device_node *np; + struct of_phandle_args args; + int i, rc; + + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) + return; + + np = of_find_node_by_path("/testcase-data/interrupts/interrupts0"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + for (i = 0; i < 4; i++) { + bool passed = true; + + memset(&args, 0, sizeof(args)); + rc = of_irq_parse_one(np, i, &args); + + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == (i + 1)); + + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); + } + of_node_put(np); + + np = of_find_node_by_path("/testcase-data/interrupts/interrupts1"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + for (i = 0; i < 4; i++) { + bool passed = true; + + memset(&args, 0, sizeof(args)); + rc = of_irq_parse_one(np, i, &args); + + /* Test the values from tests-phandle.dtsi */ + switch (i) { + case 0: + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == 9); + break; + case 1: + passed &= !rc; + passed &= (args.args_count == 3); + passed &= (args.args[0] == 10); + passed &= (args.args[1] == 11); + passed &= (args.args[2] == 12); + break; + case 2: + passed &= !rc; + passed &= (args.args_count == 2); + passed &= (args.args[0] == 13); + passed &= (args.args[1] == 14); + break; + case 3: + passed &= !rc; + passed &= (args.args_count == 2); + passed &= (args.args[0] == 15); + passed &= (args.args[1] == 16); + break; + default: + passed = false; + } + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); + } + of_node_put(np); +} + +static void __init of_unittest_parse_interrupts_extended(void) +{ + struct device_node *np; + struct of_phandle_args args; + int i, rc; + + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) + return; + + np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + for (i = 0; i < 7; i++) { + bool passed = true; + + memset(&args, 0, sizeof(args)); + rc = of_irq_parse_one(np, i, &args); + + /* Test the values from tests-phandle.dtsi */ + switch (i) { + case 0: + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == 1); + break; + case 1: + passed &= !rc; + passed &= (args.args_count == 3); + passed &= (args.args[0] == 2); + passed &= (args.args[1] == 3); + passed &= (args.args[2] == 4); + break; + case 2: + passed &= !rc; + passed &= (args.args_count == 2); + passed &= (args.args[0] == 5); + passed &= (args.args[1] == 6); + break; + case 3: + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == 9); + break; + case 4: + passed &= !rc; + passed &= (args.args_count == 
3); + passed &= (args.args[0] == 10); + passed &= (args.args[1] == 11); + passed &= (args.args[2] == 12); + break; + case 5: + passed &= !rc; + passed &= (args.args_count == 2); + passed &= (args.args[0] == 13); + passed &= (args.args[1] == 14); + break; + case 6: + /* + * Tests child node that is missing property + * #address-cells. See the comments in + * drivers/of/unittest-data/tests-interrupts.dtsi + * nodes intmap1 and interrupts-extended0 + */ + passed &= !rc; + passed &= (args.args_count == 1); + passed &= (args.args[0] == 15); + break; + default: + passed = false; + } + + unittest(passed, "index %i - data error on node %pOF rc=%i\n", + i, args.np, rc); + } + of_node_put(np); +} + +static const struct of_device_id match_node_table[] = { + { .data = "A", .name = "name0", }, /* Name alone is lowest priority */ + { .data = "B", .type = "type1", }, /* followed by type alone */ + + { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */ + { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */ + { .data = "Cc", .name = "name2", .type = "type2", }, + + { .data = "E", .compatible = "compat3" }, + { .data = "G", .compatible = "compat2", }, + { .data = "H", .compatible = "compat2", .name = "name5", }, + { .data = "I", .compatible = "compat2", .type = "type1", }, + { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", }, + { .data = "K", .compatible = "compat2", .name = "name9", }, + {} +}; + +static struct { + const char *path; + const char *data; +} match_node_tests[] = { + { .path = "/testcase-data/match-node/name0", .data = "A", }, + { .path = "/testcase-data/match-node/name1", .data = "B", }, + { .path = "/testcase-data/match-node/a/name2", .data = "Ca", }, + { .path = "/testcase-data/match-node/b/name2", .data = "Cb", }, + { .path = "/testcase-data/match-node/c/name2", .data = "Cc", }, + { .path = "/testcase-data/match-node/name3", .data = "E", }, + { .path = "/testcase-data/match-node/name4", .data = "G", }, + { .path = "/testcase-data/match-node/name5", .data = "H", }, + { .path = "/testcase-data/match-node/name6", .data = "G", }, + { .path = "/testcase-data/match-node/name7", .data = "I", }, + { .path = "/testcase-data/match-node/name8", .data = "J", }, + { .path = "/testcase-data/match-node/name9", .data = "K", }, +}; + +static void __init of_unittest_match_node(void) +{ + struct device_node *np; + const struct of_device_id *match; + int i; + + for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) { + np = of_find_node_by_path(match_node_tests[i].path); + if (!np) { + unittest(0, "missing testcase node %s\n", + match_node_tests[i].path); + continue; + } + + match = of_match_node(match_node_table, np); + if (!match) { + unittest(0, "%s didn't match anything\n", + match_node_tests[i].path); + continue; + } + + if (strcmp(match->data, match_node_tests[i].data) != 0) { + unittest(0, "%s got wrong match. 
expected %s, got %s\n", + match_node_tests[i].path, match_node_tests[i].data, + (const char *)match->data); + continue; + } + unittest(1, "passed"); + } +} + +static struct resource test_bus_res = DEFINE_RES_MEM(0xfffffff8, 2); +static const struct platform_device_info test_bus_info = { + .name = "unittest-bus", +}; +static void __init of_unittest_platform_populate(void) +{ + int irq, rc; + struct device_node *np, *child, *grandchild; + struct platform_device *pdev, *test_bus; + const struct of_device_id match[] = { + { .compatible = "test-device", }, + {} + }; + + np = of_find_node_by_path("/testcase-data"); + of_platform_default_populate(np, NULL, NULL); + + /* Test that a missing irq domain returns -EPROBE_DEFER */ + np = of_find_node_by_path("/testcase-data/testcase-device1"); + pdev = of_find_device_by_node(np); + unittest(pdev, "device 1 creation failed\n"); + + if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) { + irq = platform_get_irq(pdev, 0); + unittest(irq == -EPROBE_DEFER, + "device deferred probe failed - %d\n", irq); + + /* Test that a parsing failure does not return -EPROBE_DEFER */ + np = of_find_node_by_path("/testcase-data/testcase-device2"); + pdev = of_find_device_by_node(np); + unittest(pdev, "device 2 creation failed\n"); + + EXPECT_BEGIN(KERN_INFO, + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); + + irq = platform_get_irq(pdev, 0); + + EXPECT_END(KERN_INFO, + "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found"); + + unittest(irq < 0 && irq != -EPROBE_DEFER, + "device parsing error failed - %d\n", irq); + } + + np = of_find_node_by_path("/testcase-data/platform-tests"); + unittest(np, "No testcase data in device tree\n"); + if (!np) + return; + + test_bus = platform_device_register_full(&test_bus_info); + rc = PTR_ERR_OR_ZERO(test_bus); + unittest(!rc, "testbus registration failed; rc=%i\n", rc); + if (rc) { + of_node_put(np); + return; + } + test_bus->dev.of_node = np; + + /* + * Add a dummy resource to the test bus node after it is + * registered to catch problems with un-inserted resources. The + * DT code doesn't insert the resources, and it has caused the + * kernel to oops in the past. This makes sure the same bug + * doesn't crop up again. + */ + platform_device_add_resources(test_bus, &test_bus_res, 1); + + of_platform_populate(np, match, NULL, &test_bus->dev); + for_each_child_of_node(np, child) { + for_each_child_of_node(child, grandchild) { + pdev = of_find_device_by_node(grandchild); + unittest(pdev, + "Could not create device for node '%pOFn'\n", + grandchild); + platform_device_put(pdev); + } + } + + of_platform_depopulate(&test_bus->dev); + for_each_child_of_node(np, child) { + for_each_child_of_node(child, grandchild) + unittest(!of_find_device_by_node(grandchild), + "device didn't get destroyed '%pOFn'\n", + grandchild); + } + + platform_device_unregister(test_bus); + of_node_put(np); +} + +/** + * update_node_properties - adds the properties + * of np into dup node (present in live tree) and + * updates parent of children of np to dup. 
+ * + * @np: node whose properties are being added to the live tree + * @dup: node present in live tree to be updated + */ +static void update_node_properties(struct device_node *np, + struct device_node *dup) +{ + struct property *prop; + struct property *save_next; + struct device_node *child; + int ret; + + for_each_child_of_node(np, child) + child->parent = dup; + + /* + * "unittest internal error: unable to add testdata property" + * + * If this message reports a property in node '/__symbols__' then + * the respective unittest overlay contains a label that has the + * same name as a label in the live devicetree. The label will + * be in the live devicetree only if the devicetree source was + * compiled with the '-@' option. If you encounter this error, + * please consider renaming __all__ of the labels in the unittest + * overlay dts files with an odd prefix that is unlikely to be + * used in a real devicetree. + */ + + /* + * open code for_each_property_of_node() because of_add_property() + * sets prop->next to NULL + */ + for (prop = np->properties; prop != NULL; prop = save_next) { + save_next = prop->next; + ret = of_add_property(dup, prop); + if (ret) { + if (ret == -EEXIST && !strcmp(prop->name, "name")) + continue; + pr_err("unittest internal error: unable to add testdata property %pOF/%s", + np, prop->name); + } + } +} + +/** + * attach_node_and_children - attaches nodes + * and its children to live tree. + * CAUTION: misleading function name - if node @np already exists in + * the live tree then children of @np are *not* attached to the live + * tree. This works for the current test devicetree nodes because such + * nodes do not have child nodes. + * + * @np: Node to attach to live tree + */ +static void attach_node_and_children(struct device_node *np) +{ + struct device_node *next, *dup, *child; + unsigned long flags; + const char *full_name; + + full_name = kasprintf(GFP_KERNEL, "%pOF", np); + if (!full_name) + return; + + if (!strcmp(full_name, "/__local_fixups__") || + !strcmp(full_name, "/__fixups__")) { + kfree(full_name); + return; + } + + dup = of_find_node_by_path(full_name); + kfree(full_name); + if (dup) { + update_node_properties(np, dup); + return; + } + + child = np->child; + np->child = NULL; + + mutex_lock(&of_mutex); + raw_spin_lock_irqsave(&devtree_lock, flags); + np->sibling = np->parent->child; + np->parent->child = np; + of_node_clear_flag(np, OF_DETACHED); + raw_spin_unlock_irqrestore(&devtree_lock, flags); + + __of_attach_node_sysfs(np); + mutex_unlock(&of_mutex); + + while (child) { + next = child->sibling; + attach_node_and_children(child); + child = next; + } +} + +/** + * unittest_data_add - Reads, copies data from + * linked tree and attaches it to the live tree + */ +static int __init unittest_data_add(void) +{ + void *unittest_data; + void *unittest_data_align; + struct device_node *unittest_data_node = NULL, *np; + /* + * __dtb_testcases_begin[] and __dtb_testcases_end[] are magically + * created by cmd_dt_S_dtb in scripts/Makefile.lib + */ + extern uint8_t __dtb_testcases_begin[]; + extern uint8_t __dtb_testcases_end[]; + const int size = __dtb_testcases_end - __dtb_testcases_begin; + int rc; + void *ret; + + if (!size) { + pr_warn("%s: testcases is empty\n", __func__); + return -ENODATA; + } + + /* creating copy */ + unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL); + if (!unittest_data) + return -ENOMEM; + + unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE); + memcpy(unittest_data_align, __dtb_testcases_begin, size); + 
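For context, the kmalloc() plus PTR_ALIGN() copy above is what gets the embedded blob onto an FDT-aligned address before it is unflattened. A minimal standalone helper (hypothetical name) that validates an already-aligned blob with the in-kernel libfdt accessors before handing it to of_fdt_unflatten_tree(), the call made just below, might look roughly like this:

#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>

/* Illustrative only: basic validation before unflattening an embedded blob. */
static struct device_node *unflatten_checked(void *blob, size_t avail)
{
	struct device_node *root = NULL;

	/* magic, version and totalsize fields must be sane */
	if (fdt_check_header(blob))
		return NULL;

	/* the size recorded in the header must fit the copied buffer */
	if (fdt_totalsize(blob) > avail)
		return NULL;

	if (!of_fdt_unflatten_tree(blob, NULL, &root))
		return NULL;

	return root;
}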
+ ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node); + if (!ret) { + pr_warn("%s: unflatten testcases tree failed\n", __func__); + kfree(unittest_data); + return -ENODATA; + } + if (!unittest_data_node) { + pr_warn("%s: testcases tree is empty\n", __func__); + kfree(unittest_data); + return -ENODATA; + } + + /* + * This lock normally encloses of_resolve_phandles() + */ + of_overlay_mutex_lock(); + + rc = of_resolve_phandles(unittest_data_node); + if (rc) { + pr_err("%s: Failed to resolve phandles (rc=%i)\n", __func__, rc); + of_overlay_mutex_unlock(); + return -EINVAL; + } + + if (!of_root) { + of_root = unittest_data_node; + for_each_of_allnodes(np) + __of_attach_node_sysfs(np); + of_aliases = of_find_node_by_path("/aliases"); + of_chosen = of_find_node_by_path("/chosen"); + of_overlay_mutex_unlock(); + return 0; + } + + EXPECT_BEGIN(KERN_INFO, + "Duplicate name in testcase-data, renamed to \"duplicate-name#1\""); + + /* attach the sub-tree to live tree */ + np = unittest_data_node->child; + while (np) { + struct device_node *next = np->sibling; + + np->parent = of_root; + attach_node_and_children(np); + np = next; + } + + EXPECT_END(KERN_INFO, + "Duplicate name in testcase-data, renamed to \"duplicate-name#1\""); + + of_overlay_mutex_unlock(); + + return 0; +} + +#ifdef CONFIG_OF_OVERLAY +static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id); + +static int unittest_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + if (np == NULL) { + dev_err(dev, "No OF data for device\n"); + return -EINVAL; + + } + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + + of_platform_populate(np, NULL, NULL, &pdev->dev); + + return 0; +} + +static int unittest_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + return 0; +} + +static const struct of_device_id unittest_match[] = { + { .compatible = "unittest", }, + {}, +}; + +static struct platform_driver unittest_driver = { + .probe = unittest_probe, + .remove = unittest_remove, + .driver = { + .name = "unittest", + .of_match_table = of_match_ptr(unittest_match), + }, +}; + +/* get the platform device instantiated at the path */ +static struct platform_device *of_path_to_platform_device(const char *path) +{ + struct device_node *np; + struct platform_device *pdev; + + np = of_find_node_by_path(path); + if (np == NULL) + return NULL; + + pdev = of_find_device_by_node(np); + of_node_put(np); + + return pdev; +} + +/* find out if a platform device exists at that path */ +static int of_path_platform_device_exists(const char *path) +{ + struct platform_device *pdev; + + pdev = of_path_to_platform_device(path); + platform_device_put(pdev); + return pdev != NULL; +} + +#ifdef CONFIG_OF_GPIO + +struct unittest_gpio_dev { + struct gpio_chip chip; +}; + +static int unittest_gpio_chip_request_count; +static int unittest_gpio_probe_count; +static int unittest_gpio_probe_pass_count; + +static int unittest_gpio_chip_request(struct gpio_chip *chip, unsigned int offset) +{ + unittest_gpio_chip_request_count++; + + pr_debug("%s(): %s %d %d\n", __func__, chip->label, offset, + unittest_gpio_chip_request_count); + return 0; +} + +static int unittest_gpio_probe(struct platform_device *pdev) +{ + struct unittest_gpio_dev *devptr; + int ret; + + unittest_gpio_probe_count++; + + devptr = kzalloc(sizeof(*devptr), GFP_KERNEL); + if (!devptr) + 
return -ENOMEM; + + platform_set_drvdata(pdev, devptr); + + devptr->chip.fwnode = dev_fwnode(&pdev->dev); + devptr->chip.label = "of-unittest-gpio"; + devptr->chip.base = -1; /* dynamic allocation */ + devptr->chip.ngpio = 5; + devptr->chip.request = unittest_gpio_chip_request; + + ret = gpiochip_add_data(&devptr->chip, NULL); + + unittest(!ret, + "gpiochip_add_data() for node @%pfw failed, ret = %d\n", devptr->chip.fwnode, ret); + + if (!ret) + unittest_gpio_probe_pass_count++; + return ret; +} + +static int unittest_gpio_remove(struct platform_device *pdev) +{ + struct unittest_gpio_dev *devptr = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + + dev_dbg(dev, "%s for node @%pfw\n", __func__, devptr->chip.fwnode); + + if (!devptr) + return -EINVAL; + + if (devptr->chip.base != -1) + gpiochip_remove(&devptr->chip); + + platform_set_drvdata(pdev, NULL); + kfree(devptr); + + return 0; +} + +static const struct of_device_id unittest_gpio_id[] = { + { .compatible = "unittest-gpio", }, + {} +}; + +static struct platform_driver unittest_gpio_driver = { + .probe = unittest_gpio_probe, + .remove = unittest_gpio_remove, + .driver = { + .name = "unittest-gpio", + .of_match_table = of_match_ptr(unittest_gpio_id), + }, +}; + +static void __init of_unittest_overlay_gpio(void) +{ + int chip_request_count; + int probe_pass_count; + int ret; + + /* + * tests: apply overlays before registering driver + * Similar to installing a driver as a module, the + * driver is registered after applying the overlays. + * + * The overlays are applied by overlay_data_apply() + * instead of of_unittest_apply_overlay() so that they + * will not be tracked. Thus they will not be removed + * by of_unittest_remove_tracked_overlays(). + * + * - apply overlay_gpio_01 + * - apply overlay_gpio_02a + * - apply overlay_gpio_02b + * - register driver + * + * register driver will result in + * - probe and processing gpio hog for overlay_gpio_01 + * - probe for overlay_gpio_02a + * - processing gpio for overlay_gpio_02b + */ + + probe_pass_count = unittest_gpio_probe_pass_count; + chip_request_count = unittest_gpio_chip_request_count; + + /* + * overlay_gpio_01 contains gpio node and child gpio hog node + * overlay_gpio_02a contains gpio node + * overlay_gpio_02b contains child gpio hog node + */ + + unittest(overlay_data_apply("overlay_gpio_01", NULL), + "Adding overlay 'overlay_gpio_01' failed\n"); + + unittest(overlay_data_apply("overlay_gpio_02a", NULL), + "Adding overlay 'overlay_gpio_02a' failed\n"); + + unittest(overlay_data_apply("overlay_gpio_02b", NULL), + "Adding overlay 'overlay_gpio_02b' failed\n"); + + /* + * messages are the result of the probes, after the + * driver is registered + */ + + EXPECT_BEGIN(KERN_INFO, + "gpio-<<int>> (line-B-input): hogged as input\n"); + + EXPECT_BEGIN(KERN_INFO, + "gpio-<<int>> (line-A-input): hogged as input\n"); + + ret = platform_driver_register(&unittest_gpio_driver); + if (unittest(ret == 0, "could not register unittest gpio driver\n")) + return; + + EXPECT_END(KERN_INFO, + "gpio-<<int>> (line-A-input): hogged as input\n"); + EXPECT_END(KERN_INFO, + "gpio-<<int>> (line-B-input): hogged as input\n"); + + unittest(probe_pass_count + 2 == unittest_gpio_probe_pass_count, + "unittest_gpio_probe() failed or not called\n"); + + unittest(chip_request_count + 2 == unittest_gpio_chip_request_count, + "unittest_gpio_chip_request() called %d times (expected 1 time)\n", + unittest_gpio_chip_request_count - chip_request_count); + + /* + * tests: apply overlays after registering 
driver + * + * Similar to a driver built-in to the kernel, the + * driver is registered before applying the overlays. + * + * overlay_gpio_03 contains gpio node and child gpio hog node + * + * - apply overlay_gpio_03 + * + * apply overlay will result in + * - probe and processing gpio hog. + */ + + probe_pass_count = unittest_gpio_probe_pass_count; + chip_request_count = unittest_gpio_chip_request_count; + + EXPECT_BEGIN(KERN_INFO, + "gpio-<<int>> (line-D-input): hogged as input\n"); + + /* overlay_gpio_03 contains gpio node and child gpio hog node */ + + unittest(overlay_data_apply("overlay_gpio_03", NULL), + "Adding overlay 'overlay_gpio_03' failed\n"); + + EXPECT_END(KERN_INFO, + "gpio-<<int>> (line-D-input): hogged as input\n"); + + unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count, + "unittest_gpio_probe() failed or not called\n"); + + unittest(chip_request_count + 1 == unittest_gpio_chip_request_count, + "unittest_gpio_chip_request() called %d times (expected 1 time)\n", + unittest_gpio_chip_request_count - chip_request_count); + + /* + * overlay_gpio_04a contains gpio node + * + * - apply overlay_gpio_04a + * + * apply the overlay will result in + * - probe for overlay_gpio_04a + */ + + probe_pass_count = unittest_gpio_probe_pass_count; + chip_request_count = unittest_gpio_chip_request_count; + + /* overlay_gpio_04a contains gpio node */ + + unittest(overlay_data_apply("overlay_gpio_04a", NULL), + "Adding overlay 'overlay_gpio_04a' failed\n"); + + unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count, + "unittest_gpio_probe() failed or not called\n"); + + /* + * overlay_gpio_04b contains child gpio hog node + * + * - apply overlay_gpio_04b + * + * apply the overlay will result in + * - processing gpio for overlay_gpio_04b + */ + + EXPECT_BEGIN(KERN_INFO, + "gpio-<<int>> (line-C-input): hogged as input\n"); + + /* overlay_gpio_04b contains child gpio hog node */ + + unittest(overlay_data_apply("overlay_gpio_04b", NULL), + "Adding overlay 'overlay_gpio_04b' failed\n"); + + EXPECT_END(KERN_INFO, + "gpio-<<int>> (line-C-input): hogged as input\n"); + + unittest(chip_request_count + 1 == unittest_gpio_chip_request_count, + "unittest_gpio_chip_request() called %d times (expected 1 time)\n", + unittest_gpio_chip_request_count - chip_request_count); +} + +#else + +static void __init of_unittest_overlay_gpio(void) +{ + /* skip tests */ +} + +#endif + +#if IS_BUILTIN(CONFIG_I2C) + +/* get the i2c client device instantiated at the path */ +static struct i2c_client *of_path_to_i2c_client(const char *path) +{ + struct device_node *np; + struct i2c_client *client; + + np = of_find_node_by_path(path); + if (np == NULL) + return NULL; + + client = of_find_i2c_device_by_node(np); + of_node_put(np); + + return client; +} + +/* find out if a i2c client device exists at that path */ +static int of_path_i2c_client_exists(const char *path) +{ + struct i2c_client *client; + + client = of_path_to_i2c_client(path); + if (client) + put_device(&client->dev); + return client != NULL; +} +#else +static int of_path_i2c_client_exists(const char *path) +{ + return 0; +} +#endif + +enum overlay_type { + PDEV_OVERLAY, + I2C_OVERLAY +}; + +static int of_path_device_type_exists(const char *path, + enum overlay_type ovtype) +{ + switch (ovtype) { + case PDEV_OVERLAY: + return of_path_platform_device_exists(path); + case I2C_OVERLAY: + return of_path_i2c_client_exists(path); + } + return 0; +} + +static const char *unittest_path(int nr, enum overlay_type ovtype) +{ + const char *base; + static 
char buf[256]; + + switch (ovtype) { + case PDEV_OVERLAY: + base = "/testcase-data/overlay-node/test-bus"; + break; + case I2C_OVERLAY: + base = "/testcase-data/overlay-node/test-bus/i2c-test-bus"; + break; + default: + buf[0] = '\0'; + return buf; + } + snprintf(buf, sizeof(buf) - 1, "%s/test-unittest%d", base, nr); + buf[sizeof(buf) - 1] = '\0'; + return buf; +} + +static int of_unittest_device_exists(int unittest_nr, enum overlay_type ovtype) +{ + const char *path; + + path = unittest_path(unittest_nr, ovtype); + + switch (ovtype) { + case PDEV_OVERLAY: + return of_path_platform_device_exists(path); + case I2C_OVERLAY: + return of_path_i2c_client_exists(path); + } + return 0; +} + +static const char *overlay_name_from_nr(int nr) +{ + static char buf[256]; + + snprintf(buf, sizeof(buf) - 1, + "overlay_%d", nr); + buf[sizeof(buf) - 1] = '\0'; + + return buf; +} + +static const char *bus_path = "/testcase-data/overlay-node/test-bus"; + +#define MAX_TRACK_OVCS_IDS 256 + +static int track_ovcs_id[MAX_TRACK_OVCS_IDS]; +static int track_ovcs_id_overlay_nr[MAX_TRACK_OVCS_IDS]; +static int track_ovcs_id_cnt; + +static void of_unittest_track_overlay(int ovcs_id, int overlay_nr) +{ + if (WARN_ON(track_ovcs_id_cnt >= MAX_TRACK_OVCS_IDS)) + return; + + track_ovcs_id[track_ovcs_id_cnt] = ovcs_id; + track_ovcs_id_overlay_nr[track_ovcs_id_cnt] = overlay_nr; + track_ovcs_id_cnt++; +} + +static void of_unittest_untrack_overlay(int ovcs_id) +{ + if (WARN_ON(track_ovcs_id_cnt < 1)) + return; + + track_ovcs_id_cnt--; + + /* If out of synch then test is broken. Do not try to recover. */ + WARN_ON(track_ovcs_id[track_ovcs_id_cnt] != ovcs_id); +} + +static void of_unittest_remove_tracked_overlays(void) +{ + int ret, ovcs_id, overlay_nr, save_ovcs_id; + const char *overlay_name; + + while (track_ovcs_id_cnt > 0) { + + ovcs_id = track_ovcs_id[track_ovcs_id_cnt - 1]; + overlay_nr = track_ovcs_id_overlay_nr[track_ovcs_id_cnt - 1]; + save_ovcs_id = ovcs_id; + ret = of_overlay_remove(&ovcs_id); + if (ret == -ENODEV) { + overlay_name = overlay_name_from_nr(overlay_nr); + pr_warn("%s: of_overlay_remove() for overlay \"%s\" failed, ret = %d\n", + __func__, overlay_name, ret); + } + of_unittest_untrack_overlay(save_ovcs_id); + } + +} + +static int __init of_unittest_apply_overlay(int overlay_nr, int *ovcs_id) +{ + /* + * The overlay will be tracked, thus it will be removed + * by of_unittest_remove_tracked_overlays(). + */ + + const char *overlay_name; + + overlay_name = overlay_name_from_nr(overlay_nr); + + if (!overlay_data_apply(overlay_name, ovcs_id)) { + unittest(0, "could not apply overlay \"%s\"\n", overlay_name); + return -EFAULT; + } + of_unittest_track_overlay(*ovcs_id, overlay_nr); + + return 0; +} + +/* apply an overlay while checking before and after states */ +static int __init of_unittest_apply_overlay_check(int overlay_nr, + int unittest_nr, int before, int after, + enum overlay_type ovtype) +{ + int ret, ovcs_id; + + /* unittest device must not be in before state */ + if (of_unittest_device_exists(unittest_nr, ovtype) != before) { + unittest(0, "%s with device @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr), + unittest_path(unittest_nr, ovtype), + !before ? 
"enabled" : "disabled"); + return -EINVAL; + } + + ovcs_id = 0; + ret = of_unittest_apply_overlay(overlay_nr, &ovcs_id); + if (ret != 0) { + /* of_unittest_apply_overlay already called unittest() */ + return ret; + } + + /* unittest device must be to set to after state */ + if (of_unittest_device_exists(unittest_nr, ovtype) != after) { + unittest(0, "%s failed to create @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr), + unittest_path(unittest_nr, ovtype), + !after ? "enabled" : "disabled"); + return -EINVAL; + } + + return 0; +} + +/* apply an overlay and then revert it while checking before, after states */ +static int __init of_unittest_apply_revert_overlay_check(int overlay_nr, + int unittest_nr, int before, int after, + enum overlay_type ovtype) +{ + int ret, ovcs_id, save_ovcs_id; + + /* unittest device must be in before state */ + if (of_unittest_device_exists(unittest_nr, ovtype) != before) { + unittest(0, "%s with device @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr), + unittest_path(unittest_nr, ovtype), + !before ? "enabled" : "disabled"); + return -EINVAL; + } + + /* apply the overlay */ + ovcs_id = 0; + ret = of_unittest_apply_overlay(overlay_nr, &ovcs_id); + if (ret != 0) { + /* of_unittest_apply_overlay already called unittest() */ + return ret; + } + + /* unittest device must be in after state */ + if (of_unittest_device_exists(unittest_nr, ovtype) != after) { + unittest(0, "%s failed to create @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr), + unittest_path(unittest_nr, ovtype), + !after ? "enabled" : "disabled"); + return -EINVAL; + } + + save_ovcs_id = ovcs_id; + ret = of_overlay_remove(&ovcs_id); + if (ret != 0) { + unittest(0, "%s failed to be destroyed @\"%s\"\n", + overlay_name_from_nr(overlay_nr), + unittest_path(unittest_nr, ovtype)); + return ret; + } + of_unittest_untrack_overlay(save_ovcs_id); + + /* unittest device must be again in before state */ + if (of_unittest_device_exists(unittest_nr, ovtype) != before) { + unittest(0, "%s with device @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr), + unittest_path(unittest_nr, ovtype), + !before ? 
"enabled" : "disabled"); + return -EINVAL; + } + + return 0; +} + +/* test activation of device */ +static void __init of_unittest_overlay_0(void) +{ + int ret; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status"); + + /* device should enable */ + ret = of_unittest_apply_overlay_check(0, 0, 0, 1, PDEV_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest0/status"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 0); +} + +/* test deactivation of device */ +static void __init of_unittest_overlay_1(void) +{ + int ret; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status"); + + /* device should disable */ + ret = of_unittest_apply_overlay_check(1, 1, 1, 0, PDEV_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest1/status"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 1); + +} + +/* test activation of device */ +static void __init of_unittest_overlay_2(void) +{ + int ret; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status"); + + /* device should enable */ + ret = of_unittest_apply_overlay_check(2, 2, 0, 1, PDEV_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest2/status"); + + if (ret) + return; + unittest(1, "overlay test %d passed\n", 2); +} + +/* test deactivation of device */ +static void __init of_unittest_overlay_3(void) +{ + int ret; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status"); + + /* device should disable */ + ret = of_unittest_apply_overlay_check(3, 3, 1, 0, PDEV_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest3/status"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 3); +} + +/* test activation of a full device node */ +static void __init of_unittest_overlay_4(void) +{ + /* device should disable */ + if (of_unittest_apply_overlay_check(4, 4, 0, 1, PDEV_OVERLAY)) + return; + + unittest(1, "overlay test %d passed\n", 4); +} + +/* test overlay apply/revert sequence */ +static void __init of_unittest_overlay_5(void) +{ + int ret; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status"); + + /* device should disable */ + ret = of_unittest_apply_revert_overlay_check(5, 5, 0, 1, PDEV_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest5/status"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 5); +} + +/* test overlay application in sequence */ +static void __init of_unittest_overlay_6(void) +{ + int i, save_ovcs_id[2], ovcs_id; + int overlay_nr = 6, unittest_nr = 6; + int before = 0, after = 
1; + const char *overlay_name; + + int ret; + + /* unittest device must be in before state */ + for (i = 0; i < 2; i++) { + if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY) + != before) { + unittest(0, "%s with device @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr + i), + unittest_path(unittest_nr + i, + PDEV_OVERLAY), + !before ? "enabled" : "disabled"); + return; + } + } + + /* apply the overlays */ + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status"); + + overlay_name = overlay_name_from_nr(overlay_nr + 0); + + ret = overlay_data_apply(overlay_name, &ovcs_id); + + if (!ret) { + unittest(0, "could not apply overlay \"%s\"\n", overlay_name); + return; + } + save_ovcs_id[0] = ovcs_id; + of_unittest_track_overlay(ovcs_id, overlay_nr + 0); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status"); + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status"); + + overlay_name = overlay_name_from_nr(overlay_nr + 1); + + ret = overlay_data_apply(overlay_name, &ovcs_id); + + if (!ret) { + unittest(0, "could not apply overlay \"%s\"\n", overlay_name); + return; + } + save_ovcs_id[1] = ovcs_id; + of_unittest_track_overlay(ovcs_id, overlay_nr + 1); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status"); + + + for (i = 0; i < 2; i++) { + /* unittest device must be in after state */ + if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY) + != after) { + unittest(0, "overlay @\"%s\" failed @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr + i), + unittest_path(unittest_nr + i, + PDEV_OVERLAY), + !after ? "enabled" : "disabled"); + return; + } + } + + for (i = 1; i >= 0; i--) { + ovcs_id = save_ovcs_id[i]; + if (of_overlay_remove(&ovcs_id)) { + unittest(0, "%s failed destroy @\"%s\"\n", + overlay_name_from_nr(overlay_nr + i), + unittest_path(unittest_nr + i, + PDEV_OVERLAY)); + return; + } + of_unittest_untrack_overlay(save_ovcs_id[i]); + } + + for (i = 0; i < 2; i++) { + /* unittest device must be again in before state */ + if (of_unittest_device_exists(unittest_nr + i, PDEV_OVERLAY) + != before) { + unittest(0, "%s with device @\"%s\" %s\n", + overlay_name_from_nr(overlay_nr + i), + unittest_path(unittest_nr + i, + PDEV_OVERLAY), + !before ? 
"enabled" : "disabled"); + return; + } + } + + unittest(1, "overlay test %d passed\n", 6); + +} + +/* test overlay application in sequence */ +static void __init of_unittest_overlay_8(void) +{ + int i, save_ovcs_id[2], ovcs_id; + int overlay_nr = 8, unittest_nr = 8; + const char *overlay_name; + int ret; + + /* we don't care about device state in this test */ + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status"); + + overlay_name = overlay_name_from_nr(overlay_nr + 0); + + ret = overlay_data_apply(overlay_name, &ovcs_id); + if (!ret) + unittest(0, "could not apply overlay \"%s\"\n", overlay_name); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/status"); + + if (!ret) + return; + + save_ovcs_id[0] = ovcs_id; + of_unittest_track_overlay(ovcs_id, overlay_nr + 0); + + overlay_name = overlay_name_from_nr(overlay_nr + 1); + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo"); + + /* apply the overlays */ + ret = overlay_data_apply(overlay_name, &ovcs_id); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest8/property-foo"); + + if (!ret) { + unittest(0, "could not apply overlay \"%s\"\n", overlay_name); + return; + } + + save_ovcs_id[1] = ovcs_id; + of_unittest_track_overlay(ovcs_id, overlay_nr + 1); + + /* now try to remove first overlay (it should fail) */ + ovcs_id = save_ovcs_id[0]; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8"); + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: overlay #6 is not topmost"); + + ret = of_overlay_remove(&ovcs_id); + + EXPECT_END(KERN_INFO, + "OF: overlay: overlay #6 is not topmost"); + + EXPECT_END(KERN_INFO, + "OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8"); + + if (!ret) { + /* + * Should never get here. If we do, expect a lot of + * subsequent tracking and overlay removal related errors. 
+ */ + unittest(0, "%s was destroyed @\"%s\"\n", + overlay_name_from_nr(overlay_nr + 0), + unittest_path(unittest_nr, + PDEV_OVERLAY)); + return; + } + + /* removing them in order should work */ + for (i = 1; i >= 0; i--) { + ovcs_id = save_ovcs_id[i]; + if (of_overlay_remove(&ovcs_id)) { + unittest(0, "%s not destroyed @\"%s\"\n", + overlay_name_from_nr(overlay_nr + i), + unittest_path(unittest_nr, + PDEV_OVERLAY)); + return; + } + of_unittest_untrack_overlay(save_ovcs_id[i]); + } + + unittest(1, "overlay test %d passed\n", 8); +} + +/* test insertion of a bus with parent devices */ +static void __init of_unittest_overlay_10(void) +{ + int ret; + char *child_path; + + /* device should disable */ + ret = of_unittest_apply_overlay_check(10, 10, 0, 1, PDEV_OVERLAY); + + if (unittest(ret == 0, + "overlay test %d failed; overlay application\n", 10)) + return; + + child_path = kasprintf(GFP_KERNEL, "%s/test-unittest101", + unittest_path(10, PDEV_OVERLAY)); + if (unittest(child_path, "overlay test %d failed; kasprintf\n", 10)) + return; + + ret = of_path_device_type_exists(child_path, PDEV_OVERLAY); + kfree(child_path); + + unittest(ret, "overlay test %d failed; no child device\n", 10); +} + +/* test insertion of a bus with parent devices (and revert) */ +static void __init of_unittest_overlay_11(void) +{ + int ret; + + /* device should disable */ + ret = of_unittest_apply_revert_overlay_check(11, 11, 0, 1, + PDEV_OVERLAY); + + unittest(ret == 0, "overlay test %d failed; overlay apply\n", 11); +} + +#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) + +struct unittest_i2c_bus_data { + struct platform_device *pdev; + struct i2c_adapter adap; +}; + +static int unittest_i2c_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct unittest_i2c_bus_data *std = i2c_get_adapdata(adap); + + (void)std; + + return num; +} + +static u32 unittest_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm unittest_i2c_algo = { + .master_xfer = unittest_i2c_master_xfer, + .functionality = unittest_i2c_functionality, +}; + +static int unittest_i2c_bus_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct unittest_i2c_bus_data *std; + struct i2c_adapter *adap; + int ret; + + if (np == NULL) { + dev_err(dev, "No OF data for device\n"); + return -EINVAL; + + } + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + + std = devm_kzalloc(dev, sizeof(*std), GFP_KERNEL); + if (!std) + return -ENOMEM; + + /* link them together */ + std->pdev = pdev; + platform_set_drvdata(pdev, std); + + adap = &std->adap; + i2c_set_adapdata(adap, std); + adap->nr = -1; + strscpy(adap->name, pdev->name, sizeof(adap->name)); + adap->class = I2C_CLASS_DEPRECATED; + adap->algo = &unittest_i2c_algo; + adap->dev.parent = dev; + adap->dev.of_node = dev->of_node; + adap->timeout = 5 * HZ; + adap->retries = 3; + + ret = i2c_add_numbered_adapter(adap); + if (ret != 0) { + dev_err(dev, "Failed to add I2C adapter\n"); + return ret; + } + + return 0; +} + +static int unittest_i2c_bus_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct unittest_i2c_bus_data *std = platform_get_drvdata(pdev); + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + i2c_del_adapter(&std->adap); + + return 0; +} + +static const struct of_device_id unittest_i2c_bus_match[] = { + { .compatible = "unittest-i2c-bus", }, + 
{}, +}; + +static struct platform_driver unittest_i2c_bus_driver = { + .probe = unittest_i2c_bus_probe, + .remove = unittest_i2c_bus_remove, + .driver = { + .name = "unittest-i2c-bus", + .of_match_table = of_match_ptr(unittest_i2c_bus_match), + }, +}; + +static int unittest_i2c_dev_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *np = client->dev.of_node; + + if (!np) { + dev_err(dev, "No OF node\n"); + return -EINVAL; + } + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + + return 0; +}; + +static void unittest_i2c_dev_remove(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct device_node *np = client->dev.of_node; + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); +} + +static const struct i2c_device_id unittest_i2c_dev_id[] = { + { .name = "unittest-i2c-dev" }, + { } +}; + +static struct i2c_driver unittest_i2c_dev_driver = { + .driver = { + .name = "unittest-i2c-dev", + }, + .probe = unittest_i2c_dev_probe, + .remove = unittest_i2c_dev_remove, + .id_table = unittest_i2c_dev_id, +}; + +#if IS_BUILTIN(CONFIG_I2C_MUX) + +static int unittest_i2c_mux_select_chan(struct i2c_mux_core *muxc, u32 chan) +{ + return 0; +} + +static int unittest_i2c_mux_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int i, nchans; + struct device *dev = &client->dev; + struct i2c_adapter *adap = client->adapter; + struct device_node *np = client->dev.of_node, *child; + struct i2c_mux_core *muxc; + u32 reg, max_reg; + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + + if (!np) { + dev_err(dev, "No OF node\n"); + return -EINVAL; + } + + max_reg = (u32)-1; + for_each_child_of_node(np, child) { + if (of_property_read_u32(child, "reg", &reg)) + continue; + if (max_reg == (u32)-1 || reg > max_reg) + max_reg = reg; + } + nchans = max_reg == (u32)-1 ? 
0 : max_reg + 1; + if (nchans == 0) { + dev_err(dev, "No channels\n"); + return -EINVAL; + } + + muxc = i2c_mux_alloc(adap, dev, nchans, 0, 0, + unittest_i2c_mux_select_chan, NULL); + if (!muxc) + return -ENOMEM; + for (i = 0; i < nchans; i++) { + if (i2c_mux_add_adapter(muxc, 0, i, 0)) { + dev_err(dev, "Failed to register mux #%d\n", i); + i2c_mux_del_adapters(muxc); + return -ENODEV; + } + } + + i2c_set_clientdata(client, muxc); + + return 0; +}; + +static void unittest_i2c_mux_remove(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct device_node *np = client->dev.of_node; + struct i2c_mux_core *muxc = i2c_get_clientdata(client); + + dev_dbg(dev, "%s for node @%pOF\n", __func__, np); + i2c_mux_del_adapters(muxc); +} + +static const struct i2c_device_id unittest_i2c_mux_id[] = { + { .name = "unittest-i2c-mux" }, + { } +}; + +static struct i2c_driver unittest_i2c_mux_driver = { + .driver = { + .name = "unittest-i2c-mux", + }, + .probe = unittest_i2c_mux_probe, + .remove = unittest_i2c_mux_remove, + .id_table = unittest_i2c_mux_id, +}; + +#endif + +static int of_unittest_overlay_i2c_init(void) +{ + int ret; + + ret = i2c_add_driver(&unittest_i2c_dev_driver); + if (unittest(ret == 0, + "could not register unittest i2c device driver\n")) + return ret; + + ret = platform_driver_register(&unittest_i2c_bus_driver); + + if (unittest(ret == 0, + "could not register unittest i2c bus driver\n")) + return ret; + +#if IS_BUILTIN(CONFIG_I2C_MUX) + + EXPECT_BEGIN(KERN_INFO, + "i2c i2c-1: Added multiplexed i2c bus 2"); + + ret = i2c_add_driver(&unittest_i2c_mux_driver); + + EXPECT_END(KERN_INFO, + "i2c i2c-1: Added multiplexed i2c bus 2"); + + if (unittest(ret == 0, + "could not register unittest i2c mux driver\n")) + return ret; +#endif + + return 0; +} + +static void of_unittest_overlay_i2c_cleanup(void) +{ +#if IS_BUILTIN(CONFIG_I2C_MUX) + i2c_del_driver(&unittest_i2c_mux_driver); +#endif + platform_driver_unregister(&unittest_i2c_bus_driver); + i2c_del_driver(&unittest_i2c_dev_driver); +} + +static void __init of_unittest_overlay_i2c_12(void) +{ + int ret; + + /* device should enable */ + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status"); + + ret = of_unittest_apply_overlay_check(12, 12, 0, 1, I2C_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest12/status"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 12); +} + +/* test deactivation of device */ +static void __init of_unittest_overlay_i2c_13(void) +{ + int ret; + + EXPECT_BEGIN(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status"); + + /* device should disable */ + ret = of_unittest_apply_overlay_check(13, 13, 1, 0, I2C_OVERLAY); + + EXPECT_END(KERN_INFO, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/i2c-test-bus/test-unittest13/status"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 13); +} + +/* just check for i2c mux existence */ +static void of_unittest_overlay_i2c_14(void) +{ +} + +static void __init of_unittest_overlay_i2c_15(void) +{ + int ret; + + /* device should enable */ + EXPECT_BEGIN(KERN_INFO, + "i2c i2c-1: Added multiplexed i2c bus 3"); + + 
ret = of_unittest_apply_overlay_check(15, 15, 0, 1, I2C_OVERLAY); + + EXPECT_END(KERN_INFO, + "i2c i2c-1: Added multiplexed i2c bus 3"); + + if (ret) + return; + + unittest(1, "overlay test %d passed\n", 15); +} + +#else + +static inline void of_unittest_overlay_i2c_14(void) { } +static inline void of_unittest_overlay_i2c_15(void) { } + +#endif + +static int of_notify(struct notifier_block *nb, unsigned long action, + void *arg) +{ + struct of_overlay_notify_data *nd = arg; + struct device_node *found; + int ret; + + /* + * For overlay_16 .. overlay_19, check that returning an error + * works for each of the actions by setting an arbitrary return + * error number that matches the test number. e.g. for unittest16, + * ret = -EBUSY which is -16. + * + * OVERLAY_INFO() for the overlays is declared to expect the same + * error number, so overlay_data_apply() will return no error. + * + * overlay_20 will return NOTIFY_DONE + */ + + ret = 0; + of_node_get(nd->overlay); + + switch (action) { + + case OF_OVERLAY_PRE_APPLY: + found = of_find_node_by_name(nd->overlay, "test-unittest16"); + if (found) { + of_node_put(found); + ret = -EBUSY; + } + break; + + case OF_OVERLAY_POST_APPLY: + found = of_find_node_by_name(nd->overlay, "test-unittest17"); + if (found) { + of_node_put(found); + ret = -EEXIST; + } + break; + + case OF_OVERLAY_PRE_REMOVE: + found = of_find_node_by_name(nd->overlay, "test-unittest18"); + if (found) { + of_node_put(found); + ret = -EXDEV; + } + break; + + case OF_OVERLAY_POST_REMOVE: + found = of_find_node_by_name(nd->overlay, "test-unittest19"); + if (found) { + of_node_put(found); + ret = -ENODEV; + } + break; + + default: /* should not happen */ + of_node_put(nd->overlay); + ret = -EINVAL; + break; + } + + if (ret) + return notifier_from_errno(ret); + + return NOTIFY_DONE; +} + +static struct notifier_block of_nb = { + .notifier_call = of_notify, +}; + +static void __init of_unittest_overlay_notify(void) +{ + int ovcs_id; + int ret; + + ret = of_overlay_notifier_register(&of_nb); + unittest(!ret, + "of_overlay_notifier_register() failed, ret = %d\n", ret); + if (ret) + return; + + /* + * The overlays are applied by overlay_data_apply() + * instead of of_unittest_apply_overlay() so that they + * will not be tracked. Thus they will not be removed + * by of_unittest_remove_tracked_overlays(). + * + * Applying overlays 16 - 19 will each trigger an error for a + * different action in of_notify(). + * + * Applying overlay 20 will not trigger any error in of_notify(). 
+ */ + + /* --- overlay 16 --- */ + + EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset pre-apply notifier error -16, target: /testcase-data/overlay-node/test-bus"); + + unittest(overlay_data_apply("overlay_16", &ovcs_id), + "test OF_OVERLAY_PRE_APPLY notify injected error\n"); + + EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset pre-apply notifier error -16, target: /testcase-data/overlay-node/test-bus"); + + unittest(ovcs_id, "ovcs_id not created for overlay_16\n"); + + /* --- overlay 17 --- */ + + EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset post-apply notifier error -17, target: /testcase-data/overlay-node/test-bus"); + + unittest(overlay_data_apply("overlay_17", &ovcs_id), + "test OF_OVERLAY_POST_APPLY notify injected error\n"); + + EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset post-apply notifier error -17, target: /testcase-data/overlay-node/test-bus"); + + unittest(ovcs_id, "ovcs_id not created for overlay_17\n"); + + if (ovcs_id) { + ret = of_overlay_remove(&ovcs_id); + unittest(!ret, + "overlay_17 of_overlay_remove(), ret = %d\n", ret); + } + + /* --- overlay 18 --- */ + + unittest(overlay_data_apply("overlay_18", &ovcs_id), + "OF_OVERLAY_PRE_REMOVE notify injected error\n"); + + unittest(ovcs_id, "ovcs_id not created for overlay_18\n"); + + if (ovcs_id) { + EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset pre-remove notifier error -18, target: /testcase-data/overlay-node/test-bus"); + + ret = of_overlay_remove(&ovcs_id); + EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset pre-remove notifier error -18, target: /testcase-data/overlay-node/test-bus"); + if (ret == -EXDEV) { + /* + * change set ovcs_id should still exist + */ + unittest(1, "overlay_18 of_overlay_remove() injected error for OF_OVERLAY_PRE_REMOVE\n"); + } else { + unittest(0, "overlay_18 of_overlay_remove() injected error for OF_OVERLAY_PRE_REMOVE not returned\n"); + } + } else { + unittest(1, "ovcs_id not created for overlay_18\n"); + } + + unittest(ovcs_id, "ovcs_id removed for overlay_18\n"); + + /* --- overlay 19 --- */ + + unittest(overlay_data_apply("overlay_19", &ovcs_id), + "OF_OVERLAY_POST_REMOVE notify injected error\n"); + + unittest(ovcs_id, "ovcs_id not created for overlay_19\n"); + + if (ovcs_id) { + EXPECT_BEGIN(KERN_INFO, "OF: overlay: overlay changeset post-remove notifier error -19, target: /testcase-data/overlay-node/test-bus"); + ret = of_overlay_remove(&ovcs_id); + EXPECT_END(KERN_INFO, "OF: overlay: overlay changeset post-remove notifier error -19, target: /testcase-data/overlay-node/test-bus"); + if (ret == -ENODEV) + unittest(1, "overlay_19 of_overlay_remove() injected error for OF_OVERLAY_POST_REMOVE\n"); + else + unittest(0, "overlay_19 of_overlay_remove() injected error for OF_OVERLAY_POST_REMOVE not returned\n"); + } else { + unittest(1, "ovcs_id removed for overlay_19\n"); + } + + unittest(!ovcs_id, "changeset ovcs_id = %d not removed for overlay_19\n", + ovcs_id); + + /* --- overlay 20 --- */ + + unittest(overlay_data_apply("overlay_20", &ovcs_id), + "overlay notify no injected error\n"); + + if (ovcs_id) { + ret = of_overlay_remove(&ovcs_id); + if (ret) + unittest(1, "overlay_20 failed to be destroyed, ret = %d\n", + ret); + } else { + unittest(1, "ovcs_id not created for overlay_20\n"); + } + + unittest(!of_overlay_notifier_unregister(&of_nb), + "of_overlay_notifier_unregister() failed, ret = %d\n", ret); +} + +static void __init of_unittest_overlay(void) +{ + struct device_node *bus_np = NULL; + + if (platform_driver_register(&unittest_driver)) 
{ + unittest(0, "could not register unittest driver\n"); + goto out; + } + + bus_np = of_find_node_by_path(bus_path); + if (bus_np == NULL) { + unittest(0, "could not find bus_path \"%s\"\n", bus_path); + goto out; + } + + if (of_platform_default_populate(bus_np, NULL, NULL)) { + unittest(0, "could not populate bus @ \"%s\"\n", bus_path); + goto out; + } + + if (!of_unittest_device_exists(100, PDEV_OVERLAY)) { + unittest(0, "could not find unittest0 @ \"%s\"\n", + unittest_path(100, PDEV_OVERLAY)); + goto out; + } + + if (of_unittest_device_exists(101, PDEV_OVERLAY)) { + unittest(0, "unittest1 @ \"%s\" should not exist\n", + unittest_path(101, PDEV_OVERLAY)); + goto out; + } + + unittest(1, "basic infrastructure of overlays passed"); + + /* tests in sequence */ + of_unittest_overlay_0(); + of_unittest_overlay_1(); + of_unittest_overlay_2(); + of_unittest_overlay_3(); + of_unittest_overlay_4(); + of_unittest_overlay_5(); + of_unittest_overlay_6(); + of_unittest_overlay_8(); + + of_unittest_overlay_10(); + of_unittest_overlay_11(); + +#if IS_BUILTIN(CONFIG_I2C) + if (unittest(of_unittest_overlay_i2c_init() == 0, "i2c init failed\n")) + goto out; + + of_unittest_overlay_i2c_12(); + of_unittest_overlay_i2c_13(); + of_unittest_overlay_i2c_14(); + of_unittest_overlay_i2c_15(); + + of_unittest_overlay_i2c_cleanup(); +#endif + + of_unittest_overlay_gpio(); + + of_unittest_remove_tracked_overlays(); + + of_unittest_overlay_notify(); + +out: + of_node_put(bus_np); +} + +#else +static inline void __init of_unittest_overlay(void) { } +#endif + +#ifdef CONFIG_OF_OVERLAY + +/* + * __dtb_ot_begin[] and __dtb_ot_end[] are created by cmd_dt_S_dtb + * in scripts/Makefile.lib + */ + +#define OVERLAY_INFO_EXTERN(name) \ + extern uint8_t __dtb_##name##_begin[]; \ + extern uint8_t __dtb_##name##_end[] + +#define OVERLAY_INFO(overlay_name, expected) \ +{ .dtb_begin = __dtb_##overlay_name##_begin, \ + .dtb_end = __dtb_##overlay_name##_end, \ + .expected_result = expected, \ + .name = #overlay_name, \ +} + +struct overlay_info { + uint8_t *dtb_begin; + uint8_t *dtb_end; + int expected_result; + int ovcs_id; + char *name; +}; + +OVERLAY_INFO_EXTERN(overlay_base); +OVERLAY_INFO_EXTERN(overlay); +OVERLAY_INFO_EXTERN(overlay_0); +OVERLAY_INFO_EXTERN(overlay_1); +OVERLAY_INFO_EXTERN(overlay_2); +OVERLAY_INFO_EXTERN(overlay_3); +OVERLAY_INFO_EXTERN(overlay_4); +OVERLAY_INFO_EXTERN(overlay_5); +OVERLAY_INFO_EXTERN(overlay_6); +OVERLAY_INFO_EXTERN(overlay_7); +OVERLAY_INFO_EXTERN(overlay_8); +OVERLAY_INFO_EXTERN(overlay_9); +OVERLAY_INFO_EXTERN(overlay_10); +OVERLAY_INFO_EXTERN(overlay_11); +OVERLAY_INFO_EXTERN(overlay_12); +OVERLAY_INFO_EXTERN(overlay_13); +OVERLAY_INFO_EXTERN(overlay_15); +OVERLAY_INFO_EXTERN(overlay_16); +OVERLAY_INFO_EXTERN(overlay_17); +OVERLAY_INFO_EXTERN(overlay_18); +OVERLAY_INFO_EXTERN(overlay_19); +OVERLAY_INFO_EXTERN(overlay_20); +OVERLAY_INFO_EXTERN(overlay_gpio_01); +OVERLAY_INFO_EXTERN(overlay_gpio_02a); +OVERLAY_INFO_EXTERN(overlay_gpio_02b); +OVERLAY_INFO_EXTERN(overlay_gpio_03); +OVERLAY_INFO_EXTERN(overlay_gpio_04a); +OVERLAY_INFO_EXTERN(overlay_gpio_04b); +OVERLAY_INFO_EXTERN(overlay_bad_add_dup_node); +OVERLAY_INFO_EXTERN(overlay_bad_add_dup_prop); +OVERLAY_INFO_EXTERN(overlay_bad_phandle); +OVERLAY_INFO_EXTERN(overlay_bad_symbol); + +/* entries found by name */ +static struct overlay_info overlays[] = { + OVERLAY_INFO(overlay_base, -9999), + OVERLAY_INFO(overlay, 0), + OVERLAY_INFO(overlay_0, 0), + OVERLAY_INFO(overlay_1, 0), + OVERLAY_INFO(overlay_2, 0), + OVERLAY_INFO(overlay_3, 0), 
+ OVERLAY_INFO(overlay_4, 0), + OVERLAY_INFO(overlay_5, 0), + OVERLAY_INFO(overlay_6, 0), + OVERLAY_INFO(overlay_7, 0), + OVERLAY_INFO(overlay_8, 0), + OVERLAY_INFO(overlay_9, 0), + OVERLAY_INFO(overlay_10, 0), + OVERLAY_INFO(overlay_11, 0), + OVERLAY_INFO(overlay_12, 0), + OVERLAY_INFO(overlay_13, 0), + OVERLAY_INFO(overlay_15, 0), + OVERLAY_INFO(overlay_16, -EBUSY), + OVERLAY_INFO(overlay_17, -EEXIST), + OVERLAY_INFO(overlay_18, 0), + OVERLAY_INFO(overlay_19, 0), + OVERLAY_INFO(overlay_20, 0), + OVERLAY_INFO(overlay_gpio_01, 0), + OVERLAY_INFO(overlay_gpio_02a, 0), + OVERLAY_INFO(overlay_gpio_02b, 0), + OVERLAY_INFO(overlay_gpio_03, 0), + OVERLAY_INFO(overlay_gpio_04a, 0), + OVERLAY_INFO(overlay_gpio_04b, 0), + OVERLAY_INFO(overlay_bad_add_dup_node, -EINVAL), + OVERLAY_INFO(overlay_bad_add_dup_prop, -EINVAL), + OVERLAY_INFO(overlay_bad_phandle, -EINVAL), + OVERLAY_INFO(overlay_bad_symbol, -EINVAL), + /* end marker */ + {.dtb_begin = NULL, .dtb_end = NULL, .expected_result = 0, .name = NULL} +}; + +static struct device_node *overlay_base_root; + +static void * __init dt_alloc_memory(u64 size, u64 align) +{ + void *ptr = memblock_alloc(size, align); + + if (!ptr) + panic("%s: Failed to allocate %llu bytes align=0x%llx\n", + __func__, size, align); + + return ptr; +} + +/* + * Create base device tree for the overlay unittest. + * + * This is called from very early boot code. + * + * Do as much as possible the same way as done in __unflatten_device_tree + * and other early boot steps for the normal FDT so that the overlay base + * unflattened tree will have the same characteristics as the real tree + * (such as having memory allocated by the early allocator). The goal + * is to test "the real thing" as much as possible, and test "test setup + * code" as little as possible. + * + * Have to stop before resolving phandles, because that uses kmalloc. + */ +void __init unittest_unflatten_overlay_base(void) +{ + struct overlay_info *info; + u32 data_size; + void *new_fdt; + u32 size; + int found = 0; + const char *overlay_name = "overlay_base"; + + for (info = overlays; info && info->name; info++) { + if (!strcmp(overlay_name, info->name)) { + found = 1; + break; + } + } + if (!found) { + pr_err("no overlay data for %s\n", overlay_name); + return; + } + + info = &overlays[0]; + + if (info->expected_result != -9999) { + pr_err("No dtb 'overlay_base' to attach\n"); + return; + } + + data_size = info->dtb_end - info->dtb_begin; + if (!data_size) { + pr_err("No dtb 'overlay_base' to attach\n"); + return; + } + + size = fdt_totalsize(info->dtb_begin); + if (size != data_size) { + pr_err("dtb 'overlay_base' header totalsize != actual size"); + return; + } + + new_fdt = dt_alloc_memory(size, roundup_pow_of_two(FDT_V17_SIZE)); + if (!new_fdt) { + pr_err("alloc for dtb 'overlay_base' failed"); + return; + } + + memcpy(new_fdt, info->dtb_begin, size); + + __unflatten_device_tree(new_fdt, NULL, &overlay_base_root, + dt_alloc_memory, true); +} + +/* + * The purpose of of_unittest_overlay_data_add is to add an + * overlay in the normal fashion. This is a test of the whole + * picture, instead of testing individual elements. + * + * A secondary purpose is to be able to verify that the contents of + * /proc/device-tree/ contains the updated structure and values from + * the overlay. That must be verified separately in user space. + * + * Return 0 on unexpected error. 
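+ *
+ * More precisely: a nonzero return means of_overlay_fdt_apply() returned
+ * exactly the expected_result recorded for this overlay in overlays[], so
+ * overlays that are expected to be rejected also count as a pass here.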
+ */ +static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id) +{ + struct overlay_info *info; + int found = 0; + int ret; + u32 size; + + for (info = overlays; info && info->name; info++) { + if (!strcmp(overlay_name, info->name)) { + found = 1; + break; + } + } + if (!found) { + pr_err("no overlay data for %s\n", overlay_name); + return 0; + } + + size = info->dtb_end - info->dtb_begin; + if (!size) + pr_err("no overlay data for %s\n", overlay_name); + + ret = of_overlay_fdt_apply(info->dtb_begin, size, &info->ovcs_id); + if (ovcs_id) + *ovcs_id = info->ovcs_id; + if (ret < 0) + goto out; + + pr_debug("%s applied\n", overlay_name); + +out: + if (ret != info->expected_result) + pr_err("of_overlay_fdt_apply() expected %d, ret=%d, %s\n", + info->expected_result, ret, overlay_name); + + return (ret == info->expected_result); +} + +/* + * The purpose of of_unittest_overlay_high_level is to add an overlay + * in the normal fashion. This is a test of the whole picture, + * instead of individual elements. + * + * The first part of the function is _not_ normal overlay usage; it is + * finishing splicing the base overlay device tree into the live tree. + */ +static __init void of_unittest_overlay_high_level(void) +{ + struct device_node *last_sibling; + struct device_node *np; + struct device_node *of_symbols; + struct device_node *overlay_base_symbols; + struct device_node **pprev; + struct property *prop; + int ret; + + if (!overlay_base_root) { + unittest(0, "overlay_base_root not initialized\n"); + return; + } + + /* + * Could not fixup phandles in unittest_unflatten_overlay_base() + * because kmalloc() was not yet available. + */ + of_overlay_mutex_lock(); + of_resolve_phandles(overlay_base_root); + of_overlay_mutex_unlock(); + + + /* + * do not allow overlay_base to duplicate any node already in + * tree, this greatly simplifies the code + */ + + /* + * remove overlay_base_root node "__local_fixups", after + * being used by of_resolve_phandles() + */ + pprev = &overlay_base_root->child; + for (np = overlay_base_root->child; np; np = np->sibling) { + if (of_node_name_eq(np, "__local_fixups__")) { + *pprev = np->sibling; + break; + } + pprev = &np->sibling; + } + + /* remove overlay_base_root node "__symbols__" if in live tree */ + of_symbols = of_get_child_by_name(of_root, "__symbols__"); + if (of_symbols) { + /* will have to graft properties from node into live tree */ + pprev = &overlay_base_root->child; + for (np = overlay_base_root->child; np; np = np->sibling) { + if (of_node_name_eq(np, "__symbols__")) { + overlay_base_symbols = np; + *pprev = np->sibling; + break; + } + pprev = &np->sibling; + } + } + + for_each_child_of_node(overlay_base_root, np) { + struct device_node *base_child; + for_each_child_of_node(of_root, base_child) { + if (!strcmp(np->full_name, base_child->full_name)) { + unittest(0, "illegal node name in overlay_base %pOFn", + np); + of_node_put(np); + of_node_put(base_child); + return; + } + } + } + + /* + * overlay 'overlay_base' is not allowed to have root + * properties, so only need to splice nodes into main device tree. + * + * root node of *overlay_base_root will not be freed, it is lost + * memory. 
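+	 * (The loop below re-parents overlay_base_root's children to of_root
+	 * and splices them into the live tree; the detached root node itself
+	 * is part of the early memblock allocation made in
+	 * unittest_unflatten_overlay_base(), so it is simply left in place.)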
+ */ + + for (np = overlay_base_root->child; np; np = np->sibling) + np->parent = of_root; + + mutex_lock(&of_mutex); + + for (last_sibling = np = of_root->child; np; np = np->sibling) + last_sibling = np; + + if (last_sibling) + last_sibling->sibling = overlay_base_root->child; + else + of_root->child = overlay_base_root->child; + + for_each_of_allnodes_from(overlay_base_root, np) + __of_attach_node_sysfs(np); + + if (of_symbols) { + struct property *new_prop; + for_each_property_of_node(overlay_base_symbols, prop) { + + new_prop = __of_prop_dup(prop, GFP_KERNEL); + if (!new_prop) { + unittest(0, "__of_prop_dup() of '%s' from overlay_base node __symbols__", + prop->name); + goto err_unlock; + } + if (__of_add_property(of_symbols, new_prop)) { + kfree(new_prop->name); + kfree(new_prop->value); + kfree(new_prop); + /* "name" auto-generated by unflatten */ + if (!strcmp(prop->name, "name")) + continue; + unittest(0, "duplicate property '%s' in overlay_base node __symbols__", + prop->name); + goto err_unlock; + } + if (__of_add_property_sysfs(of_symbols, new_prop)) { + unittest(0, "unable to add property '%s' in overlay_base node __symbols__ to sysfs", + prop->name); + goto err_unlock; + } + } + } + + mutex_unlock(&of_mutex); + + + /* now do the normal overlay usage test */ + + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right"); + + ret = overlay_data_apply("overlay", NULL); + + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_right"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200_left"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/ride_200"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /__symbols__/hvac_2"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/rate"); + 
EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/color"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/lights@40000/status"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@40/incline-up"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/ride@100/track@30/incline-up"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/fairway-1/status"); + EXPECT_END(KERN_ERR, + "OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data-2/substation@100/status"); + + unittest(ret, "Adding overlay 'overlay' failed\n"); + + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name"); + + unittest(overlay_data_apply("overlay_bad_add_dup_node", NULL), + "Adding overlay 'overlay_bad_add_dup_node' failed\n"); + + EXPECT_END(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/controller/name"); + EXPECT_END(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/controller"); + + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail"); + EXPECT_BEGIN(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name"); + + unittest(overlay_data_apply("overlay_bad_add_dup_prop", NULL), + "Adding overlay 'overlay_bad_add_dup_prop' failed\n"); + + EXPECT_END(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/name"); + EXPECT_END(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add, update, and/or delete property /testcase-data-2/substation@100/motor-1/electric/rpm_avail"); + EXPECT_END(KERN_ERR, + "OF: overlay: ERROR: multiple fragments add and/or delete node /testcase-data-2/substation@100/motor-1/electric"); + + unittest(overlay_data_apply("overlay_bad_phandle", NULL), + "Adding overlay 'overlay_bad_phandle' failed\n"); + + unittest(overlay_data_apply("overlay_bad_symbol", NULL), + "Adding overlay 'overlay_bad_symbol' failed\n"); + + return; + +err_unlock: + mutex_unlock(&of_mutex); +} + +#else + +static inline __init void of_unittest_overlay_high_level(void) {} + +#endif + +static int __init of_unittest(void) +{ + struct device_node *np; + int res; + + pr_info("start of unittest - you will see error messages\n"); + + /* Taint the kernel so we know we've run tests. 
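+	 * TAINT_TEST makes the test run visible in the kernel's taint flags
+	 * (and thus in oops output and bug reports); LOCKDEP_STILL_OK tells
+	 * the taint code that lockdep results are still trustworthy.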
 */
+	add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+	/* adding data for unittest */
+
+	if (IS_ENABLED(CONFIG_UML))
+		unittest_unflatten_overlay_base();
+
+	res = unittest_data_add();
+	if (res)
+		return res;
+	if (!of_aliases)
+		of_aliases = of_find_node_by_path("/aliases");
+
+	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+	if (!np) {
+		pr_info("No testcase data in device tree; not running tests\n");
+		return 0;
+	}
+	of_node_put(np);
+
+	of_unittest_check_tree_linkage();
+	of_unittest_check_phandles();
+	of_unittest_find_node_by_name();
+	of_unittest_dynamic();
+	of_unittest_parse_phandle_with_args();
+	of_unittest_parse_phandle_with_args_map();
+	of_unittest_printf();
+	of_unittest_property_string();
+	of_unittest_property_copy();
+	of_unittest_changeset();
+	of_unittest_parse_interrupts();
+	of_unittest_parse_interrupts_extended();
+	of_unittest_dma_get_max_cpu_address();
+	of_unittest_parse_dma_ranges();
+	of_unittest_pci_dma_ranges();
+	of_unittest_match_node();
+	of_unittest_platform_populate();
+	of_unittest_overlay();
+
+	/* Double check linkage after removing testcase data */
+	of_unittest_check_tree_linkage();
+
+	of_unittest_overlay_high_level();
+
+	pr_info("end of unittest - %i passed, %i failed\n",
+		unittest_results.passed, unittest_results.failed);
+
+	return 0;
+}
+late_initcall(of_unittest);
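A minimal sketch of the overlay apply/remove cycle that the unittest code above drives through overlay_data_apply() and of_unittest_remove_tracked_overlays(); it is illustrative only and not part of this patch. The helper name apply_and_remove_overlay() is made up for the example, and overlay_0 is chosen arbitrarily from the blobs declared above; of_overlay_fdt_apply() and of_overlay_remove() are the same kernel APIs used in unittest.c.

/*
 * Illustrative sketch (not part of the patch): apply one of the linked-in
 * overlay blobs and then revert it.
 */
static int __init apply_and_remove_overlay(void)
{
	int ovcs_id = 0;
	u32 size = __dtb_overlay_0_end - __dtb_overlay_0_begin;
	int ret;

	/* Unflatten the blob and apply it to the live tree as a changeset */
	ret = of_overlay_fdt_apply(__dtb_overlay_0_begin, size, &ovcs_id);
	if (ret < 0)
		return ret;

	/* Removing the changeset by id reverts the live tree */
	return of_overlay_remove(&ovcs_id);
}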