author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit    7f3a4257159dea8e7ef66d1a539dc6df708b8ed3
tree      bcc69b5f4609f348fac49e2f59e210b29eaea783 /drivers/base
parent    Adding upstream version 6.9.12.
Adding upstream version 6.10.3. (upstream/6.10.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/base')
 drivers/base/arch_topology.c          |  26
 drivers/base/core.c                   |  62
 drivers/base/cpu.c                    |   2
 drivers/base/devcoredump.c            |  23
 drivers/base/devres.c                 |  11
 drivers/base/memory.c                 |   2
 drivers/base/power/main.c             |   6
 drivers/base/power/wakeup.c           |  11
 drivers/base/property.c               |  16
 drivers/base/regmap/internal.h        |  14
 drivers/base/regmap/regcache-maple.c  |   2
 drivers/base/regmap/regmap-i3c.c      |   2
 drivers/base/regmap/regmap-kunit.c    | 999
 drivers/base/regmap/regmap-mdio.c     |   2
 drivers/base/regmap/regmap-ram.c      |   5
 drivers/base/regmap/regmap-raw-ram.c  |   5
 drivers/base/regmap/regmap-sdw-mbq.c  |   2
 drivers/base/regmap/regmap-sdw.c      |   2
 drivers/base/regmap/regmap-spi.c      |   1
 drivers/base/regmap/trace.h           |  18
 drivers/base/trace.h                  |   2
 21 files changed, 854 insertions(+), 359 deletions(-)
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 024b78a0cf..c66d070207 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -22,7 +22,7 @@
#include <linux/units.h>
#define CREATE_TRACE_POINTS
-#include <trace/events/thermal_pressure.h>
+#include <trace/events/hw_pressure.h>
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
@@ -160,26 +160,26 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
per_cpu(cpu_scale, cpu) = capacity;
}
-DEFINE_PER_CPU(unsigned long, thermal_pressure);
+DEFINE_PER_CPU(unsigned long, hw_pressure);
/**
- * topology_update_thermal_pressure() - Update thermal pressure for CPUs
+ * topology_update_hw_pressure() - Update HW pressure for CPUs
* @cpus : The related CPUs for which capacity has been reduced
* @capped_freq : The maximum allowed frequency that CPUs can run at
*
- * Update the value of thermal pressure for all @cpus in the mask. The
+ * Update the value of HW pressure for all @cpus in the mask. The
* cpumask should include all (online+offline) affected CPUs, to avoid
* operating on stale data when hot-plug is used for some CPUs. The
* @capped_freq reflects the currently allowed max CPUs frequency due to
- * thermal capping. It might be also a boost frequency value, which is bigger
+ * HW capping. It might also be a boost frequency value, which is bigger
* than the internal 'capacity_freq_ref' max frequency. In such case the
* pressure value should simply be removed, since this is an indication that
- * there is no thermal throttling. The @capped_freq must be provided in kHz.
+ * there is no HW throttling. The @capped_freq must be provided in kHz.
*/
-void topology_update_thermal_pressure(const struct cpumask *cpus,
+void topology_update_hw_pressure(const struct cpumask *cpus,
unsigned long capped_freq)
{
- unsigned long max_capacity, capacity, th_pressure;
+ unsigned long max_capacity, capacity, pressure;
u32 max_freq;
int cpu;
@@ -189,21 +189,21 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
/*
* Handle properly the boost frequencies, which should simply clean
- * the thermal pressure value.
+ * the HW pressure value.
*/
if (max_freq <= capped_freq)
capacity = max_capacity;
else
capacity = mult_frac(max_capacity, capped_freq, max_freq);
- th_pressure = max_capacity - capacity;
+ pressure = max_capacity - capacity;
- trace_thermal_pressure_update(cpu, th_pressure);
+ trace_hw_pressure_update(cpu, pressure);
for_each_cpu(cpu, cpus)
- WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
+ WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
}
-EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
+EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,
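For orientation, a minimal sketch of how a throttling driver would report a frequency cap through the renamed helper; the caller function and the 1.4 GHz cap are made up for illustration, only topology_update_hw_pressure() itself comes from this patch:

/* Hypothetical caller: firmware capped CPUs 0-3 to 1.4 GHz */
static void example_report_freq_cap(void)
{
	struct cpumask cpus;
	int cpu;

	cpumask_clear(&cpus);
	for (cpu = 0; cpu < 4; cpu++)
		cpumask_set_cpu(cpu, &cpus);

	/* @capped_freq is in kHz, per the kernel-doc above */
	topology_update_hw_pressure(&cpus, 1400000);
}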
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8e3bd230b1..2b4c0624b7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2346,8 +2346,6 @@ static void fw_devlink_link_device(struct device *dev)
/* Device links support end. */
-int (*platform_notify)(struct device *dev) = NULL;
-int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
/* /sys/dev/char */
@@ -2395,16 +2393,10 @@ static void device_platform_notify(struct device *dev)
acpi_device_notify(dev);
software_node_notify(dev);
-
- if (platform_notify)
- platform_notify(dev);
}
static void device_platform_notify_remove(struct device *dev)
{
- if (platform_notify_remove)
- platform_notify_remove(dev);
-
software_node_notify_remove(dev);
acpi_device_notify_remove(dev);
@@ -2546,6 +2538,15 @@ ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(device_show_bool);
+ssize_t device_show_string(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return sysfs_emit(buf, "%s\n", (char *)ea->var);
+}
+EXPORT_SYMBOL_GPL(device_show_string);
+
/**
* device_release - free device structure.
* @kobj: device's kobject.
@@ -2847,15 +2848,6 @@ static void devm_attr_group_remove(struct device *dev, void *res)
sysfs_remove_group(&dev->kobj, group);
}
-static void devm_attr_groups_remove(struct device *dev, void *res)
-{
- union device_attr_group_devres *devres = res;
- const struct attribute_group **groups = devres->groups;
-
- dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
- sysfs_remove_groups(&dev->kobj, groups);
-}
-
/**
* devm_device_add_group - given a device, create a managed attribute group
* @dev: The device to create the group for
@@ -2888,42 +2880,6 @@ int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
}
EXPORT_SYMBOL_GPL(devm_device_add_group);
-/**
- * devm_device_add_groups - create a bunch of managed attribute groups
- * @dev: The device to create the group for
- * @groups: The attribute groups to create, NULL terminated
- *
- * This function creates a bunch of managed attribute groups. If an error
- * occurs when creating a group, all previously created groups will be
- * removed, unwinding everything back to the original state when this
- * function was called. It will explicitly warn and error if any of the
- * attribute files being created already exist.
- *
- * Returns 0 on success or error code from sysfs_create_group on failure.
- */
-int devm_device_add_groups(struct device *dev,
- const struct attribute_group **groups)
-{
- union device_attr_group_devres *devres;
- int error;
-
- devres = devres_alloc(devm_attr_groups_remove,
- sizeof(*devres), GFP_KERNEL);
- if (!devres)
- return -ENOMEM;
-
- error = sysfs_create_groups(&dev->kobj, groups);
- if (error) {
- devres_free(devres);
- return error;
- }
-
- devres->groups = groups;
- devres_add(dev, devres);
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_device_add_groups);
-
static int device_add_attrs(struct device *dev)
{
const struct class *class = dev->class;
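To show what the new device_show_string() helper is for, a hedged sketch of a caller using the existing struct dev_ext_attribute; the attribute name and value are invented for the example:

/* Hypothetical driver snippet, not part of this patch */
static struct dev_ext_attribute board_name_attr = {
	.attr = __ATTR(board_name, 0444, device_show_string, NULL),
	.var  = "acme-eval-board",	/* string emitted via sysfs_emit() */
};

/*
 * In probe():
 *	ret = device_create_file(dev, &board_name_attr.attr);
 * Reading the sysfs file then returns "acme-eval-board\n".
 */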
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 56fba44ba3..c61ecb0c2a 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -306,7 +306,7 @@ static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support());
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
#endif
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 7e2d1f0d90..82aeb09b3d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -305,6 +305,29 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
}
/**
+ * dev_coredump_put - remove device coredump
+ * @dev: the struct device for the crashed device
+ *
+ * dev_coredump_put() removes the coredump for a given device from the file
+ * system and frees its associated data, if one exists; otherwise it does
+ * nothing.
+ *
+ * It is useful for modules that do not want to keep the coredump
+ * available after they are unloaded.
+ */
+void dev_coredump_put(struct device *dev)
+{
+ struct device *existing;
+
+ existing = class_find_device(&devcd_class, NULL, dev,
+ devcd_match_failing);
+ if (existing) {
+ devcd_free(existing, NULL);
+ put_device(existing);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_coredump_put);
+
+/**
* dev_coredumpm - create device coredump with read/free methods
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
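A hedged sketch of the intended caller pattern for the new helper; the driver structure and teardown function are hypothetical, only dev_coredump_put() comes from this patch:

/* Hypothetical teardown path: drop any coredump created earlier so it
 * does not outlive the module. */
static void example_drv_remove(struct platform_device *pdev)
{
	struct example_drv *drv = platform_get_drvdata(pdev);

	/* Removes this device's coredump if one exists, otherwise a no-op */
	dev_coredump_put(&pdev->dev);

	example_drv_stop_hw(drv);
}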
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 3df0025d12..8d709dbd4e 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -896,9 +896,12 @@ void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
/*
* Otherwise: allocate new, larger chunk. We need to allocate before
* taking the lock as most probably the caller uses GFP_KERNEL.
+ * alloc_dr() will call check_dr_size() to reserve the extra memory
+ * needed for struct devres automatically, so the @new_size requested by
+ * the caller is passed to it directly, just as devm_kmalloc() does.
*/
new_dr = alloc_dr(devm_kmalloc_release,
- total_new_size, gfp, dev_to_node(dev));
+ new_size, gfp, dev_to_node(dev));
if (!new_dr)
return NULL;
@@ -1222,7 +1225,11 @@ EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
*/
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
- WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
+ /*
+ * Use devres_release(), which also invokes the release function, so the
+ * per-CPU data is actually freed, as devm_free_pages() does.
+ */
+ WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
(__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
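For context, a short (assumed) usage sketch of devm_krealloc(): callers pass only the payload size they want, and the struct devres bookkeeping is added internally, matching devm_kmalloc(); the names here are hypothetical:

/* Hypothetical driver snippet, not part of this patch */
static int example_grow_samples(struct device *dev, u32 **samples,
				size_t new_count)
{
	u32 *tmp;

	/* Only the requested payload size is passed; the devres header is
	 * accounted for inside alloc_dr()/check_dr_size(). */
	tmp = devm_krealloc(dev, *samples, new_count * sizeof(*tmp),
			    GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	*samples = tmp;
	return 0;
}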
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c0436f46cf..67858eeb92 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -535,7 +535,7 @@ static DEVICE_ATTR_RW(auto_online_blocks);
static ssize_t crash_hotplug_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%d\n", crash_hotplug_memory_support());
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_RO(crash_hotplug);
#endif
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5679f966f6..4a67e83300 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -208,7 +208,7 @@ static ktime_t initcall_debug_start(struct device *dev, void *cb)
if (!pm_print_times_enabled)
return 0;
- dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
+ dev_info(dev, "calling %ps @ %i, parent: %s\n", cb,
task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
return ktime_get();
@@ -223,7 +223,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
return;
rettime = ktime_get();
- dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
+ dev_info(dev, "%ps returned %d after %Ld usecs\n", cb, error,
(unsigned long long)ktime_us_delta(rettime, calltime));
}
@@ -1927,7 +1927,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_start);
void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret)
{
if (ret)
- dev_err(dev, "%s(): %pS returns %d\n", function, fn, ret);
+ dev_err(dev, "%s(): %ps returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
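The %pS to %ps switch only changes how the callback pointer is rendered: per Documentation/core-api/printk-formats.rst, %ps prints the bare symbol name while %pS appends the offset/size, so %ps is the tidier choice for direct callback pointers. A snippet with an illustrative symbol name:

	dev_info(dev, "calling %ps\n", cb);	/* e.g. "foo_resume" */
	dev_info(dev, "calling %pS\n", cb);	/* e.g. "foo_resume+0x0/0x40" */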
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index a917219fee..752b417e81 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev)
* Detach the @dev's wakeup source object from it, unregister this wakeup source
* object and destroy it.
*/
-int device_wakeup_disable(struct device *dev)
+void device_wakeup_disable(struct device *dev)
{
struct wakeup_source *ws;
if (!dev || !dev->power.can_wakeup)
- return -EINVAL;
+ return;
ws = device_wakeup_detach(dev);
wakeup_source_unregister(ws);
- return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);
@@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
*/
int device_set_wakeup_enable(struct device *dev, bool enable)
{
- return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
+ if (enable)
+ return device_wakeup_enable(dev);
+
+ device_wakeup_disable(dev);
+ return 0;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
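A hedged before/after sketch of a typical caller, since device_wakeup_disable() no longer reports an error; the surrounding driver code is hypothetical:

	/* Before: the return value had to be checked or ignored explicitly */
	ret = device_wakeup_disable(&pdev->dev);
	if (ret)
		dev_warn(&pdev->dev, "failed to disable wakeup\n");

	/* After: the call cannot fail (it is a no-op for devices that cannot
	 * wake the system), so callers simply invoke it. */
	device_wakeup_disable(&pdev->dev);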
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 7324a704a9..837d77e3af 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -869,20 +869,6 @@ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_handle_get);
/**
- * fwnode_handle_put - Drop reference to a device node
- * @fwnode: Pointer to the device node to drop the reference to.
- *
- * This has to be used when terminating device_for_each_child_node() iteration
- * with break or return to prevent stale device node references from being left
- * behind.
- */
-void fwnode_handle_put(struct fwnode_handle *fwnode)
-{
- fwnode_call_void_op(fwnode, put);
-}
-EXPORT_SYMBOL_GPL(fwnode_handle_put);
-
-/**
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*
@@ -905,7 +891,7 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available);
/**
* device_get_child_node_count - return the number of child nodes for device
- * @dev: Device to cound the child nodes for
+ * @dev: Device to count the child nodes for
*
* Return: the number of child nodes for a given device.
*/
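The kernel-doc removed above describes a caller rule that still applies (fwnode_handle_put() is only moved out of this file, not dropped from the API, as far as this hunk shows). A sketch of the usual pattern when leaving device_for_each_child_node() early, with a made-up property name:

	struct fwnode_handle *child;

	device_for_each_child_node(dev, child) {
		if (fwnode_property_present(child, "enable-gpios")) {
			/* Drop the reference taken by the iterator before breaking out */
			fwnode_handle_put(child);
			break;
		}
	}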
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index bcdb25bec7..83acccdc10 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -326,20 +326,22 @@ struct regmap_ram_data {
* Create a test register map with data stored in RAM, not intended
* for practical use.
*/
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_ram, #config, config, data)
+#define regmap_init_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_ram, #dev, dev, config, data)
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name);
-#define regmap_init_raw_ram(config, data) \
- __regmap_lockdep_wrapper(__regmap_init_raw_ram, #config, config, data)
+#define regmap_init_raw_ram(dev, config, data) \
+ __regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data)
#endif
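In short, the test-only constructors now take the owning device as their first argument; a minimal sketch of an updated call, assuming priv->dev is the KUnit-managed device introduced later in this patch:

	/* old: map = regmap_init_ram(&config, data); */
	map = regmap_init_ram(priv->dev, &config, data);
	if (IS_ERR(map))
		return PTR_ERR(map);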
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 55999a50cc..e424334048 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -294,7 +294,7 @@ static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
MA_STATE(mas, mt, 0, UINT_MAX);
- unsigned int *entry;;
+ unsigned int *entry;
/* if we've already been called then just return */
if (!mt)
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index 0328b0b342..b5300b7c47 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -56,5 +56,5 @@ struct regmap *__devm_regmap_init_i3c(struct i3c_device *i3c,
EXPORT_SYMBOL_GPL(__devm_regmap_init_i3c);
MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
-MODULE_DESCRIPTION("Regmap I3C Module");
+MODULE_DESCRIPTION("regmap I3C Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index bb2ab6129f..be32cd4e84 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -4,11 +4,26 @@
//
// Copyright 2023 Arm Ltd
+#include <kunit/device.h>
+#include <kunit/resource.h>
#include <kunit/test.h>
#include "internal.h"
#define BLOCK_TEST_SIZE 12
+KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
+
+struct regmap_test_priv {
+ struct device *dev;
+};
+
+struct regmap_test_param {
+ enum regcache_type cache;
+ enum regmap_endian val_endian;
+
+ unsigned int from_reg;
+};
+
static void get_changed_bytes(void *orig, void *new, size_t size)
{
char *o = orig;
@@ -27,57 +42,128 @@ static void get_changed_bytes(void *orig, void *new, size_t size)
}
static const struct regmap_config test_regmap_config = {
- .max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
.val_bits = sizeof(unsigned int) * 8,
};
-struct regcache_types {
- enum regcache_type type;
- const char *name;
-};
+static const char *regcache_type_name(enum regcache_type type)
+{
+ switch (type) {
+ case REGCACHE_NONE:
+ return "none";
+ case REGCACHE_FLAT:
+ return "flat";
+ case REGCACHE_RBTREE:
+ return "rbtree";
+ case REGCACHE_MAPLE:
+ return "maple";
+ default:
+ return NULL;
+ }
+}
+
+static const char *regmap_endian_name(enum regmap_endian endian)
+{
+ switch (endian) {
+ case REGMAP_ENDIAN_BIG:
+ return "big";
+ case REGMAP_ENDIAN_LITTLE:
+ return "little";
+ case REGMAP_ENDIAN_DEFAULT:
+ return "default";
+ case REGMAP_ENDIAN_NATIVE:
+ return "native";
+ default:
+ return NULL;
+ }
+}
-static void case_to_desc(const struct regcache_types *t, char *desc)
+static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
- strcpy(desc, t->name);
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
+ regcache_type_name(param->cache),
+ regmap_endian_name(param->val_endian),
+ param->from_reg);
}
-static const struct regcache_types regcache_types_list[] = {
- { REGCACHE_NONE, "none" },
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param regcache_types_list[] = {
+ { .cache = REGCACHE_NONE },
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_MAPLE },
};
-KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
-static const struct regcache_types real_cache_types_list[] = {
- { REGCACHE_FLAT, "flat" },
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+static const struct regmap_test_param real_cache_types_only_list[] = {
+ { .cache = REGCACHE_FLAT },
+ { .cache = REGCACHE_RBTREE },
+ { .cache = REGCACHE_MAPLE },
};
-KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);
+
+static const struct regmap_test_param real_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
+};
-static const struct regcache_types sparse_cache_types_list[] = {
- { REGCACHE_RBTREE, "rbtree" },
- { REGCACHE_MAPLE, "maple" },
+KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
+
+static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_RBTREE, .from_reg = 0 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
+ { .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};
-KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
+KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
-static struct regmap *gen_regmap(struct regmap_config *config,
+static struct regmap *gen_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap_test_priv *priv = test->priv;
unsigned int *buf;
struct regmap *ret;
- size_t size = (config->max_register + 1) * sizeof(unsigned int);
+ size_t size;
int i;
struct reg_default *defaults;
+ config->cache_type = param->cache;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
+ if (config->max_register == 0) {
+ config->max_register = param->from_reg;
+ if (config->num_reg_defaults)
+ config->max_register += (config->num_reg_defaults - 1) *
+ config->reg_stride;
+ else
+ config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
+ }
+
+ size = (config->max_register + 1) * sizeof(unsigned int);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -98,37 +184,40 @@ static struct regmap *gen_regmap(struct regmap_config *config,
config->reg_defaults = defaults;
for (i = 0; i < config->num_reg_defaults; i++) {
- defaults[i].reg = i * config->reg_stride;
- defaults[i].def = buf[i * config->reg_stride];
+ defaults[i].reg = param->from_reg + (i * config->reg_stride);
+ defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
}
}
- ret = regmap_init_ram(config, *data);
+ ret = regmap_init_ram(priv->dev, config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
+ } else {
+ kunit_add_action(test, regmap_exit_action, ret);
}
return ret;
}
-static bool reg_5_false(struct device *context, unsigned int reg)
+static bool reg_5_false(struct device *dev, unsigned int reg)
{
- return reg != 5;
+ struct kunit *test = dev_get_drvdata(dev);
+ const struct regmap_test_param *param = test->param_value;
+
+ return reg != (param->from_reg + 5);
}
static void basic_read_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -141,14 +230,11 @@ static void basic_read_write(struct kunit *test)
KUNIT_EXPECT_EQ(test, val, rval);
/* If using a cache the cache satisfied the read */
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}
static void bulk_write(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -156,9 +242,8 @@ static void bulk_write(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -178,14 +263,11 @@ static void bulk_write(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void bulk_read(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -193,9 +275,8 @@ static void bulk_read(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -211,14 +292,140 @@ static void bulk_read(struct kunit *test)
/* If using a cache the cache satisfied the read */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
+}
+
+static void read_bypassed(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only mode, regmap_read_bypassed() should return the
+ * register value and leave the map in cache-only mode.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
- regmap_exit(map);
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_NE(test, val[i], rval);
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+}
+
+static void read_bypassed_volatile(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE], rval;
+ int i;
+
+ config = test_regmap_config;
+ /* All registers except #5 volatile */
+ config.volatile_reg = reg_5_false;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+
+ get_random_bytes(&val, sizeof(val));
+
+ /* Write some test values */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));
+
+ regcache_cache_only(map, true);
+
+ /*
+ * While in cache-only mode, regmap_read_bypassed() should return the
+ * register value and leave the map in cache-only mode.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ /* Register #5 is non-volatile so should read from cache */
+ KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
+ regmap_read(map, param->from_reg + i, &rval));
+
+ /* Put inverted bits in rval to prove we really read the value */
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
+
+ /*
+ * Change the underlying register values to prove it is returning
+ * real values not cached values.
+ */
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] = ~val[i];
+ data->vals[param->from_reg + i] = val[i];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ if (i == 5)
+ continue;
+
+ rval = ~val[i];
+ KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
+ KUNIT_EXPECT_EQ(test, val[i], rval);
+ KUNIT_EXPECT_TRUE(test, map->cache_only);
+ KUNIT_EXPECT_FALSE(test, map->cache_bypass);
+ }
}
static void write_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -226,11 +433,10 @@ static void write_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -247,13 +453,10 @@ static void write_readonly(struct kunit *test)
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
}
static void read_writeonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -261,10 +464,9 @@ static void read_writeonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.readable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -277,7 +479,7 @@ static void read_writeonly(struct kunit *test)
* fail if we aren't using the flat cache.
*/
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- if (t->type != REGCACHE_FLAT) {
+ if (config.cache_type != REGCACHE_FLAT) {
KUNIT_EXPECT_EQ(test, i != 5,
regmap_read(map, i, &val) == 0);
} else {
@@ -287,13 +489,10 @@ static void read_writeonly(struct kunit *test)
/* Did we trigger a hardware access? */
KUNIT_EXPECT_FALSE(test, data->read[5]);
-
- regmap_exit(map);
}
static void reg_defaults(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -301,10 +500,9 @@ static void reg_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -316,12 +514,11 @@ static void reg_defaults(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void reg_defaults_read_dev(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -329,17 +526,16 @@ static void reg_defaults_read_dev(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* We should have read the cache defaults back from the map */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
data->read[i] = false;
}
@@ -350,12 +546,11 @@ static void reg_defaults_read_dev(struct kunit *test)
/* The data should have been read from cache if there was one */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
static void register_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -365,10 +560,9 @@ static void register_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -401,13 +595,10 @@ static void register_patch(struct kunit *test)
break;
}
}
-
- regmap_exit(map);
}
static void stride(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -415,16 +606,22 @@ static void stride(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.reg_stride = 2;
config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
- map = gen_regmap(&config, &data);
+ /*
+ * Allow one extra register so that the read/written arrays
+ * are sized big enough to include an entry for the odd
+ * address past the final reg_default register.
+ */
+ config.max_register = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
- /* Only even registers can be accessed, try both read and write */
+ /* Only even addresses can be accessed, try both read and write */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
data->read[i] = false;
data->written[i] = false;
@@ -437,15 +634,13 @@ static void stride(struct kunit *test)
} else {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, data->vals[i], rval);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
data->read[i]);
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
KUNIT_EXPECT_TRUE(test, data->written[i]);
}
}
-
- regmap_exit(map);
}
static struct regmap_range_cfg test_range = {
@@ -481,7 +676,6 @@ static bool test_range_all_volatile(struct device *dev, unsigned int reg)
static void basic_ranges(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -489,13 +683,12 @@ static void basic_ranges(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_all_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -546,14 +739,11 @@ static void basic_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -562,10 +752,9 @@ static void stress_insert(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.max_register = 300;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -599,24 +788,21 @@ static void stress_insert(struct kunit *test)
for (i = 0; i < config.max_register; i ++) {
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, rval, vals[i]);
- KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
+ KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}
-
- regmap_exit(map);
}
static void cache_bypass(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
unsigned int val, rval;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -624,28 +810,26 @@ static void cache_bypass(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Ensure the cache has a value in it */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));
/* Bypass then write a different value */
regcache_cache_bypass(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));
/* Read the bypassed value */
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val + 1, rval);
- KUNIT_EXPECT_EQ(test, data->vals[0], rval);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);
/* Disable bypass, the cache should still return the original value */
regcache_cache_bypass(map, false);
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
-static void cache_sync(struct kunit *test)
+static void cache_sync_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -653,9 +837,8 @@ static void cache_sync(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -663,10 +846,10 @@ static void cache_sync(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Put some data into the cache */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
/* Trash the data on the device itself then resync */
regcache_mark_dirty(map);
@@ -674,16 +857,63 @@ static void cache_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just write the correct data out? */
- KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+}
- regmap_exit(map);
+static void cache_sync_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[BLOCK_TEST_SIZE];
+ unsigned int val_mask;
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ val_mask = GENMASK(config.val_bits - 1, 0);
+ get_random_bytes(&val, sizeof(val));
+
+ /* Put some data into the cache */
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ /* Set cache-only and change the values */
+ regcache_cache_only(map, true);
+ for (i = 0; i < ARRAY_SIZE(val); ++i)
+ val[i] = ~val[i] & val_mask;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
+ BLOCK_TEST_SIZE));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+
+ KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+
+ /* Exit cache-only and sync the cache without marking hardware registers dirty */
+ regcache_cache_only(map, false);
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Did we just write the correct data out? */
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}
-static void cache_sync_defaults(struct kunit *test)
+static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -691,10 +921,9 @@ static void cache_sync_defaults(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -702,24 +931,85 @@ static void cache_sync_defaults(struct kunit *test)
get_random_bytes(&val, sizeof(val));
/* Change the value of one register */
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));
/* Resync */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did we just sync the one register we touched? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
+ KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);
+
+ /* Rewrite registers back to their defaults */
+ for (i = 0; i < config.num_reg_defaults; ++i)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
+ config.reg_defaults[i].def));
+
+ /*
+ * Resync after regcache_mark_dirty() should not write out registers
+ * that are at default value
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ regcache_mark_dirty(map);
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
+}
+
+static void cache_sync_default_after_cache_only(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int orig_val;
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));
+
+ /* Enter cache-only and change the value of one register */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));
+
+ /* Exit cache-only and resync, should write out the changed register */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
- regmap_exit(map);
+ /* Was the register written out? */
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);
+
+ /* Enter cache-only and write register back to its default value */
+ regcache_cache_only(map, true);
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));
+
+ /* Resync should write out the new value */
+ regcache_cache_only(map, false);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}
static void cache_sync_readonly(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -727,40 +1017,37 @@ static void cache_sync_readonly(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.writeable_reg = reg_5_false;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Read all registers to fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Change the value of all registers, readonly should fail */
get_random_bytes(&val, sizeof(val));
regcache_cache_only(map, true);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
+ KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
regcache_cache_only(map, false);
/* Resync */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* Did that match what we see on the device? */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
-
- regmap_exit(map);
+ KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}
static void cache_sync_patch(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -770,23 +1057,22 @@ static void cache_sync_patch(struct kunit *test)
/* We need defaults so readback works */
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Stash the original values */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
/* Patch a couple of values */
- patch[0].reg = 2;
+ patch[0].reg = param->from_reg + 2;
patch[0].def = rval[2] + 1;
patch[0].delay_us = 0;
- patch[1].reg = 5;
+ patch[1].reg = param->from_reg + 5;
patch[1].def = rval[5] + 1;
patch[1].delay_us = 0;
KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
@@ -795,33 +1081,31 @@ static void cache_sync_patch(struct kunit *test)
/* Sync the cache */
regcache_mark_dirty(map);
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->written[i] = false;
+ data->written[param->from_reg + i] = false;
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The patch should be on the device but not in the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
KUNIT_EXPECT_EQ(test, val, rval[i]);
switch (i) {
case 2:
case 5:
- KUNIT_EXPECT_EQ(test, true, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
+ KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
break;
default:
- KUNIT_EXPECT_EQ(test, false, data->written[i]);
- KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
+ KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
+ KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
break;
}
}
-
- regmap_exit(map);
}
static void cache_drop(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -829,41 +1113,267 @@ static void cache_drop(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.num_reg_defaults = BLOCK_TEST_SIZE;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
/* Ensure the data is read from the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++) {
- KUNIT_EXPECT_FALSE(test, data->read[i]);
- data->read[i] = false;
+ KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
+ data->read[param->from_reg + i] = false;
}
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
/* Drop some registers */
- KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
+ param->from_reg + 5));
/* Reread and check only the dropped registers hit the device. */
- KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
BLOCK_TEST_SIZE));
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
- KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
+ KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+}
+
+static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val[4][BLOCK_TEST_SIZE];
+ unsigned int reg;
+ const int num_ranges = ARRAY_SIZE(val) * 2;
+ int rangeidx, i;
+
+ static_assert(ARRAY_SIZE(val) == 4);
+
+ config = test_regmap_config;
+ config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ for (i = 0; i < config.max_register + 1; i++)
+ data->written[i] = false;
+
+ /* Create non-contiguous cache blocks by writing every other range */
+ get_random_bytes(&val, sizeof(val));
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
+ &val[rangeidx / 2],
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Drop range 2 */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));
+
+ /* Drop part of range 4 */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));
+
+ /* Mark dirty and reset mock registers to 0 */
+ regcache_mark_dirty(map);
+ for (i = 0; i < config.max_register + 1; i++) {
+ data->vals[i] = 0;
+ data->written[i] = false;
+ }
+
+ /* The registers that were dropped from range 4 should now remain at 0 */
+ val[4 / 2][3] = 0;
+ val[4 / 2][4] = 0;
+ val[4 / 2][5] = 0;
+
+ /* Sync and check that the expected register ranges were written */
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+
+ /* Check that odd ranges weren't written */
+ for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+ }
+
+ /* Check that even ranges (except 2 and 4) were written */
+ for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
+ if ((rangeidx == 2) || (rangeidx == 4))
+ continue;
+
+ reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_TRUE(test, data->written[reg + i]);
+
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
+ &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
+ }
+
+ /* Check that range 2 wasn't written */
+ reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
+
+ /* Check that range 4 was partially written */
+ reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);
- regmap_exit(map);
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));
+
+ /* Nothing before param->from_reg should have been written */
+ for (i = 0; i < param->from_reg; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /* Mark dirty and cache sync should not write anything. */
+ regcache_mark_dirty(map);
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
+}
+
+static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
+{
+ const struct regmap_test_param *param = test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int rval[BLOCK_TEST_SIZE];
+ int i;
+
+ config = test_regmap_config;
+ config.num_reg_defaults = BLOCK_TEST_SIZE;
+
+ map = gen_regmap(test, &config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Ensure the data is read from the cache */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->read[param->from_reg + i] = false;
+ KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
+ BLOCK_TEST_SIZE));
+ KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
+
+ /* Change all values in cache from defaults */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));
+
+ /* Drop all registers */
+ KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));
+
+ /*
+ * Sync cache without marking it dirty. All registers were dropped
+ * so the cache should not have any entries to write out.
+ */
+ for (i = 0; i < BLOCK_TEST_SIZE; i++)
+ data->written[param->from_reg + i] = false;
+
+ KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
+ for (i = 0; i <= config.max_register; i++)
+ KUNIT_EXPECT_FALSE(test, data->written[i]);
}
static void cache_present(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
+ const struct regmap_test_param *param = test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -871,39 +1381,35 @@ static void cache_present(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- data->read[i] = false;
+ data->read[param->from_reg + i] = false;
/* No defaults so no registers cached. */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
+ KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));
/* We didn't trigger any reads */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_FALSE(test, data->read[i]);
+ KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);
/* Fill the cache */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
+ KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
/* Now everything should be cached */
for (i = 0; i < BLOCK_TEST_SIZE; i++)
- KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
-
- regmap_exit(map);
+ KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}
/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
- struct regcache_types *t = (struct regcache_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -911,13 +1417,12 @@ static void cache_range_window_reg(struct kunit *test)
int i;
config = test_regmap_config;
- config.cache_type = t->type;
config.volatile_reg = test_range_window_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_regmap(&config, &data);
+ map = gen_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -953,41 +1458,29 @@ static void cache_range_window_reg(struct kunit *test)
KUNIT_ASSERT_EQ(test, val, 2);
}
-struct raw_test_types {
- const char *name;
-
- enum regcache_type cache_type;
- enum regmap_endian val_endian;
-};
-
-static void raw_to_desc(const struct raw_test_types *t, char *desc)
-{
- strcpy(desc, t->name);
-}
-
-static const struct raw_test_types raw_types_list[] = {
- { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
- { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_types_list[] = {
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
-static const struct raw_test_types raw_cache_types_list[] = {
- { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
- { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
- { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
- { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
- { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
- { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
+static const struct regmap_test_param raw_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};
-KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
+KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
static const struct regmap_config raw_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
@@ -997,18 +1490,20 @@ static const struct regmap_config raw_regmap_config = {
.val_bits = 16,
};
-static struct regmap *gen_raw_regmap(struct regmap_config *config,
- struct raw_test_types *test_type,
+static struct regmap *gen_raw_regmap(struct kunit *test,
+ struct regmap_config *config,
struct regmap_ram_data **data)
{
+ struct regmap_test_priv *priv = test->priv;
+ const struct regmap_test_param *param = test->param_value;
u16 *buf;
struct regmap *ret;
size_t size = (config->max_register + 1) * config->reg_bits / 8;
int i;
struct reg_default *defaults;
- config->cache_type = test_type->cache_type;
- config->val_format_endian = test_type->val_endian;
+ config->cache_type = param->cache;
+ config->val_format_endian = param->val_endian;
config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
config->cache_type == REGCACHE_MAPLE;
@@ -1033,7 +1528,7 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
for (i = 0; i < config->num_reg_defaults; i++) {
defaults[i].reg = i;
- switch (test_type->val_endian) {
+ switch (param->val_endian) {
case REGMAP_ENDIAN_LITTLE:
defaults[i].def = le16_to_cpu(buf[i]);
break;
@@ -1052,10 +1547,12 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
if (config->cache_type == REGCACHE_NONE)
config->num_reg_defaults = 0;
- ret = regmap_init_raw_ram(config, *data);
+ ret = regmap_init_raw_ram(priv->dev, config, *data);
if (IS_ERR(ret)) {
kfree(buf);
kfree(*data);
+ } else {
+ kunit_add_action(test, regmap_exit_action, ret);
}
return ret;
@@ -1063,7 +1560,6 @@ static struct regmap *gen_raw_regmap(struct regmap_config *config,
static void raw_read_defaults_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1072,7 +1568,7 @@ static void raw_read_defaults_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1082,13 +1578,10 @@ static void raw_read_defaults_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
}
-
- regmap_exit(map);
}
static void raw_read_defaults(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1099,35 +1592,31 @@ static void raw_read_defaults(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
val_len = sizeof(*rval) * (config.max_register + 1);
- rval = kmalloc(val_len, GFP_KERNEL);
+ rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
KUNIT_ASSERT_TRUE(test, rval != NULL);
if (!rval)
return;
-
+
/* Check that we can read the defaults via the API */
KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
for (i = 0; i < config.max_register + 1; i++) {
def = config.reg_defaults[i].def;
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
- KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
} else {
- KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
+ KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
}
}
-
- kfree(rval);
- regmap_exit(map);
}
static void raw_write_read_single(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1136,7 +1625,7 @@ static void raw_write_read_single(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1147,13 +1636,10 @@ static void raw_write_read_single(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
KUNIT_EXPECT_EQ(test, val, rval);
-
- regmap_exit(map);
}
static void raw_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1164,7 +1650,7 @@ static void raw_write(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1185,10 +1671,10 @@ static void raw_write(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu((__force __be16)val[i % 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu((__force __le16)val[i % 2]));
}
break;
default:
@@ -1199,8 +1685,6 @@ static void raw_write(struct kunit *test)
/* The values should appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
-
- regmap_exit(map);
}
static bool reg_zero(struct device *dev, unsigned int reg)
@@ -1215,7 +1699,6 @@ static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
static void raw_noinc_write(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1228,7 +1711,7 @@ static void raw_noinc_write(struct kunit *test)
config.writeable_noinc_reg = reg_zero;
config.readable_noinc_reg = reg_zero;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1259,13 +1742,10 @@ static void raw_noinc_write(struct kunit *test)
/* Make sure we didn't touch the register after the noinc register */
KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
KUNIT_ASSERT_EQ(test, val_test, val);
-
- regmap_exit(map);
}
static void raw_sync(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1276,7 +1756,7 @@ static void raw_sync(struct kunit *test)
config = raw_regmap_config;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1300,10 +1780,10 @@ static void raw_sync(struct kunit *test)
case 3:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i - 2]));
+ be16_to_cpu((__force __be16)val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i - 2]));
+ le16_to_cpu((__force __le16)val[i - 2]));
}
break;
case 4:
@@ -1323,7 +1803,7 @@ static void raw_sync(struct kunit *test)
val[2] = cpu_to_be16(val[2]);
else
val[2] = cpu_to_le16(val[2]);
-
+
/* The values should not appear in the "hardware" */
KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
@@ -1337,13 +1817,10 @@ static void raw_sync(struct kunit *test)
/* The values should now appear in the "hardware" */
KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
-
- regmap_exit(map);
}
static void raw_ranges(struct kunit *test)
{
- struct raw_test_types *t = (struct raw_test_types *)test->param_value;
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
@@ -1356,7 +1833,7 @@ static void raw_ranges(struct kunit *test)
config.num_ranges = 1;
config.max_register = test_range.range_max;
- map = gen_raw_regmap(&config, t, &data);
+ map = gen_raw_regmap(test, &config, &data);
KUNIT_ASSERT_FALSE(test, IS_ERR(map));
if (IS_ERR(map))
return;
@@ -1402,12 +1879,12 @@ static void raw_ranges(struct kunit *test)
KUNIT_EXPECT_FALSE(test, data->read[i]);
KUNIT_EXPECT_FALSE(test, data->written[i]);
}
-
- regmap_exit(map);
}
static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
@@ -1419,13 +1896,19 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
- KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
@@ -1437,8 +1920,40 @@ static struct kunit_case regmap_test_cases[] = {
{}
};
+static int regmap_test_init(struct kunit *test)
+{
+ struct regmap_test_priv *priv;
+ struct device *dev;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ test->priv = priv;
+
+ dev = kunit_device_register(test, "regmap_test");
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ priv->dev = get_device(dev);
+ dev_set_drvdata(dev, test);
+
+ return 0;
+}
+
+static void regmap_test_exit(struct kunit *test)
+{
+ struct regmap_test_priv *priv = test->priv;
+
+ /* Destroy the dummy struct device */
+ if (priv && priv->dev)
+ put_device(priv->dev);
+}
+
static struct kunit_suite regmap_test_suite = {
.name = "regmap",
+ .init = regmap_test_init,
+ .exit = regmap_test_exit,
.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index 6aa6a24094..9573bf3b52 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -117,5 +117,5 @@ struct regmap *__devm_regmap_init_mdio(struct mdio_device *mdio_dev,
EXPORT_SYMBOL_GPL(__devm_regmap_init_mdio);
MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
-MODULE_DESCRIPTION("Regmap MDIO Module");
+MODULE_DESCRIPTION("regmap MDIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-ram.c b/drivers/base/regmap/regmap-ram.c
index 192d6b131d..5b4cbf982a 100644
--- a/drivers/base/regmap/regmap-ram.c
+++ b/drivers/base/regmap/regmap-ram.c
@@ -53,7 +53,8 @@ static const struct regmap_bus regmap_ram = {
.free_context = regmap_ram_free_context,
};
-struct regmap *__regmap_init_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -75,7 +76,7 @@ struct regmap *__regmap_init_ram(const struct regmap_config *config,
if (!data->written)
return ERR_PTR(-ENOMEM);
- map = __regmap_init(NULL, &regmap_ram, data, config,
+ map = __regmap_init(dev, &regmap_ram, data, config,
lock_key, lock_name);
return map;
diff --git a/drivers/base/regmap/regmap-raw-ram.c b/drivers/base/regmap/regmap-raw-ram.c
index 93ae07b503..69eabfb89e 100644
--- a/drivers/base/regmap/regmap-raw-ram.c
+++ b/drivers/base/regmap/regmap-raw-ram.c
@@ -107,7 +107,8 @@ static const struct regmap_bus regmap_raw_ram = {
.free_context = regmap_raw_ram_free_context,
};
-struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
+struct regmap *__regmap_init_raw_ram(struct device *dev,
+ const struct regmap_config *config,
struct regmap_ram_data *data,
struct lock_class_key *lock_key,
const char *lock_name)
@@ -134,7 +135,7 @@ struct regmap *__regmap_init_raw_ram(const struct regmap_config *config,
data->reg_endian = config->reg_format_endian;
- map = __regmap_init(NULL, &regmap_raw_ram, data, config,
+ map = __regmap_init(dev, &regmap_raw_ram, data, config,
lock_key, lock_name);
return map;
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 388c3a087b..c99eada837 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -97,5 +97,5 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
-MODULE_DESCRIPTION("Regmap SoundWire MBQ Module");
+MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
index 159c0b740b..ea631ac7c7 100644
--- a/drivers/base/regmap/regmap-sdw.c
+++ b/drivers/base/regmap/regmap-sdw.c
@@ -98,5 +98,5 @@ struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw);
-MODULE_DESCRIPTION("Regmap SoundWire Module");
+MODULE_DESCRIPTION("regmap SoundWire Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 37ab23a9d0..094cf2a2ca 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -165,4 +165,5 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
+MODULE_DESCRIPTION("regmap SPI Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/trace.h b/drivers/base/regmap/trace.h
index 704e106e5d..bcc5a8b226 100644
--- a/drivers/base/regmap/trace.h
+++ b/drivers/base/regmap/trace.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(regmap_reg,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->val = val;
),
@@ -74,7 +74,7 @@ DECLARE_EVENT_CLASS(regmap_bulk,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->val_len = val_len;
memcpy(__get_dynamic_array(buf), val, val_len);
@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(regmap_block,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->reg = reg;
__entry->count = count;
),
@@ -163,9 +163,9 @@ TRACE_EVENT(regcache_sync,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
- __assign_str(status, status);
- __assign_str(type, type);
+ __assign_str(name);
+ __assign_str(status);
+ __assign_str(type);
),
TP_printk("%s type=%s status=%s", __get_str(name),
@@ -184,7 +184,7 @@ DECLARE_EVENT_CLASS(regmap_bool,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->flag = flag;
),
@@ -216,7 +216,7 @@ DECLARE_EVENT_CLASS(regmap_async,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
),
TP_printk("%s", __get_str(name))
@@ -264,7 +264,7 @@ TRACE_EVENT(regcache_drop_region,
),
TP_fast_assign(
- __assign_str(name, regmap_name(map));
+ __assign_str(name);
__entry->from = from;
__entry->to = to;
),
diff --git a/drivers/base/trace.h b/drivers/base/trace.h
index 3192e18f87..e52b6eae06 100644
--- a/drivers/base/trace.h
+++ b/drivers/base/trace.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(devres,
__field(size_t, size)
),
TP_fast_assign(
- __assign_str(devname, dev_name(dev));
+ __assign_str(devname);
__entry->op = op;
__entry->node = node;
__entry->name = name;