author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 17:39:57 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 17:39:57 +0000
commit      dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree        c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/base
parent      Adding debian version 6.6.15-2. (diff)
download    linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz
            linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/arch_numa.c                       |  2
-rw-r--r--  drivers/base/cacheinfo.c                       | 51
-rw-r--r--  drivers/base/class.c                           |  6
-rw-r--r--  drivers/base/core.c                            | 16
-rw-r--r--  drivers/base/firmware_loader/fallback.c        | 10
-rw-r--r--  drivers/base/firmware_loader/fallback.h        |  4
-rw-r--r--  drivers/base/firmware_loader/fallback_table.c  |  1
-rw-r--r--  drivers/base/firmware_loader/firmware.h        |  1
-rw-r--r--  drivers/base/firmware_loader/main.c            |  9
-rw-r--r--  drivers/base/platform.c                        | 71
-rw-r--r--  drivers/base/power/common.c                    | 21
-rw-r--r--  drivers/base/power/domain.c                    | 46
-rw-r--r--  drivers/base/regmap/regmap-kunit.c             | 68
-rw-r--r--  drivers/base/test/Kconfig                      |  4
-rw-r--r--  drivers/base/test/property-entry-test.c        |  4
15 files changed, 236 insertions(+), 78 deletions(-)
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index eaa31e567d..5b59d133b6 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -144,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
-static int __init early_cpu_to_node(int cpu)
+int __init early_cpu_to_node(int cpu)
{
return cpu_to_node_map[cpu];
}
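
The arch_numa.c hunk drops the static qualifier so early_cpu_to_node() can be called from outside this file during early boot, before the per-CPU areas are set up. A minimal sketch of such a caller, assuming a matching declaration is visible (for instance via an arch NUMA header); pick_boot_node() is a hypothetical helper:

/* Hypothetical early-boot helper: early_cpu_to_node() may still return
 * NUMA_NO_NODE if the firmware has not described the CPU yet. */
static int __init pick_boot_node(unsigned int cpu)
{
	int nid = early_cpu_to_node(cpu);

	return (nid == NUMA_NO_NODE) ? 0 : nid;
}
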
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cbae8be1fe..f1e79263fe 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -898,6 +898,48 @@ err:
return rc;
}
+/*
+ * Calculate the size of the per-CPU data cache slice. This can be
+ * used to estimate the size of the data cache slice that can be used
+ * by one CPU under ideal circumstances. UNIFIED caches are counted
+ * in addition to DATA caches. So, please consider code cache usage
+ * when using the result.
+ *
+ * Because the cache inclusive/non-inclusive information isn't
+ * available, we just use the size of the per-CPU slice of LLC to make
+ * the result more predictable across architectures.
+ */
+static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
+{
+ struct cpu_cacheinfo *ci;
+ struct cacheinfo *llc;
+ unsigned int nr_shared;
+
+ if (!last_level_cache_is_valid(cpu))
+ return;
+
+ ci = ci_cacheinfo(cpu);
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+ return;
+
+ nr_shared = cpumask_weight(&llc->shared_cpu_map);
+ if (nr_shared)
+ ci->per_cpu_data_slice_size = llc->size / nr_shared;
+}
+
+static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu)
+{
+ unsigned int icpu;
+
+ for_each_online_cpu(icpu) {
+ if (!cpu_online && icpu == cpu)
+ continue;
+ update_per_cpu_data_slice_size_cpu(icpu);
+ }
+}
+
static int cacheinfo_cpu_online(unsigned int cpu)
{
int rc = detect_cache_attributes(cpu);
@@ -906,7 +948,12 @@ static int cacheinfo_cpu_online(unsigned int cpu)
return rc;
rc = cache_add_dev(cpu);
if (rc)
- free_cache_attributes(cpu);
+ goto err;
+ update_per_cpu_data_slice_size(true, cpu);
+ setup_pcp_cacheinfo();
+ return 0;
+err:
+ free_cache_attributes(cpu);
return rc;
}
@@ -916,6 +963,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
cpu_cache_sysfs_exit(cpu);
free_cache_attributes(cpu);
+ update_per_cpu_data_slice_size(false, cpu);
+ setup_pcp_cacheinfo();
return 0;
}
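
The new cacheinfo code stores, per CPU, the last-level cache size divided by the number of CPUs sharing that cache, recomputes it on every hotplug event (skipping the departing CPU on the offline path), and then calls setup_pcp_cacheinfo() so the page allocator can adapt to the new value. A worked example of the arithmetic, with illustrative numbers rather than values from any real machine:

/* Illustrative only: a 32 MiB unified LLC shared by 16 CPUs gives each
 * CPU a 2 MiB data cache slice under this heuristic. */
unsigned int llc_size  = 32 * 1024 * 1024;
unsigned int nr_shared = 16;
unsigned int slice     = llc_size / nr_shared;	/* 2097152 bytes */
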
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 9cd489a577..7b38fdf8e1 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -193,10 +193,8 @@ int class_register(const struct class *cls)
lockdep_register_key(key);
__mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
- if (error) {
- kfree(cp);
- return error;
- }
+ if (error)
+ goto err_out;
cp->subsys.kobj.kset = class_kset;
cp->subsys.kobj.ktype = &class_ktype;
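
The class.c hunk folds the inline kfree()-and-return into the function's common err_out error path. The label itself sits outside the hunk; the sketch below only illustrates the single-exit cleanup idiom being adopted, with hypothetical names (example_register(), do_setup()) rather than the real class_register() internals:

static int example_register(void)
{
	void *cp;
	int error;

	cp = kzalloc(64, GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	error = do_setup(cp);	/* hypothetical setup step */
	if (error)
		goto err_out;

	return 0;

err_out:
	kfree(cp);
	return error;
}
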
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4d8b315c48..f9fb692491 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -49,6 +49,7 @@ static bool fw_devlink_best_effort;
* __fwnode_link_add - Create a link between two fwnode_handles.
* @con: Consumer end of the link.
* @sup: Supplier end of the link.
+ * @flags: Link flags.
*
* Create a fwnode link between fwnode handles @con and @sup. The fwnode link
* represents the detail that the firmware lists @sup fwnode as supplying a
@@ -283,10 +284,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE | \
+ DL_FLAG_MANAGED)
static inline bool device_link_flag_is_sync_state_only(u32 flags)
{
- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
/**
@@ -2057,9 +2060,14 @@ static int fw_devlink_create_devlink(struct device *con,
/*
* SYNC_STATE_ONLY device links don't block probing and supports cycles.
- * So cycle detection isn't necessary and shouldn't be done.
+ * So, one might expect that cycle detection isn't necessary for them.
+ * However, if the device link was marked as SYNC_STATE_ONLY because
+ * it's part of a cycle, then we still need to do cycle detection. This
+ * is because the consumer and supplier might be part of multiple cycles
+ * and we need to detect all those cycles.
*/
- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ if (!device_link_flag_is_sync_state_only(flags) ||
+ flags & DL_FLAG_CYCLE) {
device_links_write_lock();
if (__fw_devlink_relax_cycles(con, sup_handle)) {
__fwnode_link_cycle(link);
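
The core.c hunks gather the link "marker" flags (INFERRED, CYCLE, MANAGED) into DL_MARKER_FLAGS, redefine device_link_flag_is_sync_state_only() in terms of it, and make fw_devlink_create_devlink() keep doing cycle detection for SYNC_STATE_ONLY links that carry DL_FLAG_CYCLE, since such links were only demoted to SYNC_STATE_ONLY because they sit on a cycle and may belong to further cycles. Condensed, the resulting decision reads (a restatement of the patch, not extra kernel code):

/* Skip cycle detection only for pure SYNC_STATE_ONLY links that are not
 * already known to be part of a cycle. */
bool need_cycle_detection =
	!device_link_flag_is_sync_state_only(flags) ||
	(flags & DL_FLAG_CYCLE);
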
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index bf68e39478..3ef0b312ae 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -46,7 +46,7 @@ static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
static LIST_HEAD(pending_fw_head);
-void kill_pending_fw_fallback_reqs(bool only_kill_custom)
+void kill_pending_fw_fallback_reqs(bool kill_all)
{
struct fw_priv *fw_priv;
struct fw_priv *next;
@@ -54,9 +54,13 @@ void kill_pending_fw_fallback_reqs(bool only_kill_custom)
mutex_lock(&fw_lock);
list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
pending_list) {
- if (!fw_priv->need_uevent || !only_kill_custom)
+ if (kill_all || !fw_priv->need_uevent)
__fw_load_abort(fw_priv);
}
+
+ if (kill_all)
+ fw_load_abort_all = true;
+
mutex_unlock(&fw_lock);
}
@@ -86,7 +90,7 @@ static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs, long timeout)
}
mutex_lock(&fw_lock);
- if (fw_state_is_aborted(fw_priv)) {
+ if (fw_load_abort_all || fw_state_is_aborted(fw_priv)) {
mutex_unlock(&fw_lock);
retval = -EINTR;
goto out;
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 1441485956..ccf912bef6 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -13,7 +13,7 @@ int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
u32 opt_flags,
int ret);
-void kill_pending_fw_fallback_reqs(bool only_kill_custom);
+void kill_pending_fw_fallback_reqs(bool kill_all);
void fw_fallback_set_cache_timeout(void);
void fw_fallback_set_default_timeout(void);
@@ -28,7 +28,7 @@ static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
return ret;
}
-static inline void kill_pending_fw_fallback_reqs(bool only_kill_custom) { }
+static inline void kill_pending_fw_fallback_reqs(bool kill_all) { }
static inline void fw_fallback_set_cache_timeout(void) { }
static inline void fw_fallback_set_default_timeout(void) { }
#endif /* CONFIG_FW_LOADER_USER_HELPER */
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index e5ac098d07..8432ab2c3b 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -44,7 +44,6 @@ static struct ctl_table firmware_config_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { }
};
static struct ctl_table_header *firmware_config_sysct_table_header;
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index bf549d6500..e891742ba2 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -86,6 +86,7 @@ struct fw_priv {
extern struct mutex fw_lock;
extern struct firmware_cache fw_cache;
+extern bool fw_load_abort_all;
static inline bool __fw_state_check(struct fw_priv *fw_priv,
enum fw_status status)
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index b58c42f1b1..ea28102d42 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -93,6 +93,7 @@ static inline struct fw_priv *to_fw_priv(struct kref *ref)
DEFINE_MUTEX(fw_lock);
struct firmware_cache fw_cache;
+bool fw_load_abort_all;
void fw_state_init(struct fw_priv *fw_priv)
{
@@ -1524,10 +1525,10 @@ static int fw_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
/*
- * kill pending fallback requests with a custom fallback
- * to avoid stalling suspend.
+ * Here, killing pending fallback requests only kills
+ * non-uevent firmware requests, to avoid stalling suspend.
*/
- kill_pending_fw_fallback_reqs(true);
+ kill_pending_fw_fallback_reqs(false);
device_cache_fw_images();
break;
@@ -1612,7 +1613,7 @@ static int fw_shutdown_notify(struct notifier_block *unused1,
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
*/
- kill_pending_fw_fallback_reqs(false);
+ kill_pending_fw_fallback_reqs(true);
return NOTIFY_DONE;
}
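
After the rename, the boolean passed to kill_pending_fw_fallback_reqs() states directly whether every pending request is aborted: suspend/restore preparation only aborts requests without a uevent (so a user-space helper that will never run cannot stall suspend), while shutdown aborts everything and latches fw_load_abort_all so later sysfs fallback loads fail with -EINTR. Condensed, the two call sites now read:

/* Suspend/restore preparation: abort only the custom (non-uevent)
 * fallback requests. */
kill_pending_fw_fallback_reqs(false);

/* Shutdown/reboot: abort every pending request and set
 * fw_load_abort_all so new fallback loads bail out immediately. */
kill_pending_fw_fallback_reqs(true);
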
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 76bfcba250..10c5779634 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -178,18 +178,19 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
ret = dev->archdata.irqs[num];
goto out;
#else
+ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
struct resource *r;
- if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
- ret = of_irq_get(dev->dev.of_node, num);
+ if (is_of_node(fwnode)) {
+ ret = of_irq_get(to_of_node(fwnode), num);
if (ret > 0 || ret == -EPROBE_DEFER)
goto out;
}
r = platform_get_resource(dev, IORESOURCE_IRQ, num);
- if (has_acpi_companion(&dev->dev)) {
+ if (is_acpi_device_node(fwnode)) {
if (r && r->flags & IORESOURCE_DISABLED) {
- ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r);
+ ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r);
if (ret)
goto out;
}
@@ -222,8 +223,8 @@ int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
* the device will only expose one IRQ, and this fallback
* allows a common code path across either kind of resource.
*/
- if (num == 0 && has_acpi_companion(&dev->dev)) {
- ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
+ if (num == 0 && is_acpi_device_node(fwnode)) {
+ ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num);
/* Our callers expect -ENXIO for missing IRQs. */
if (ret >= 0 || ret == -EPROBE_DEFER)
goto out;
@@ -291,7 +292,7 @@ EXPORT_SYMBOL_GPL(platform_irq_count);
struct irq_affinity_devres {
unsigned int count;
- unsigned int irq[];
+ unsigned int irq[] __counted_by(count);
};
static void platform_disable_acpi_irq(struct platform_device *pdev, int index)
@@ -312,7 +313,7 @@ static void devm_platform_get_irqs_affinity_release(struct device *dev,
for (i = 0; i < ptr->count; i++) {
irq_dispose_mapping(ptr->irq[i]);
- if (has_acpi_companion(dev))
+ if (is_acpi_device_node(dev_fwnode(dev)))
platform_disable_acpi_irq(to_platform_device(dev), i);
}
}
@@ -655,23 +656,21 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
*/
int platform_device_add(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
u32 i;
int ret;
- if (!pdev)
- return -EINVAL;
-
- if (!pdev->dev.parent)
- pdev->dev.parent = &platform_bus;
+ if (!dev->parent)
+ dev->parent = &platform_bus;
- pdev->dev.bus = &platform_bus_type;
+ dev->bus = &platform_bus_type;
switch (pdev->id) {
default:
- dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
+ dev_set_name(dev, "%s.%d", pdev->name, pdev->id);
break;
case PLATFORM_DEVID_NONE:
- dev_set_name(&pdev->dev, "%s", pdev->name);
+ dev_set_name(dev, "%s", pdev->name);
break;
case PLATFORM_DEVID_AUTO:
/*
@@ -681,10 +680,10 @@ int platform_device_add(struct platform_device *pdev)
*/
ret = ida_alloc(&platform_devid_ida, GFP_KERNEL);
if (ret < 0)
- goto err_out;
+ return ret;
pdev->id = ret;
pdev->id_auto = true;
- dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
+ dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id);
break;
}
@@ -692,7 +691,7 @@ int platform_device_add(struct platform_device *pdev)
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
- r->name = dev_name(&pdev->dev);
+ r->name = dev_name(dev);
p = r->parent;
if (!p) {
@@ -705,18 +704,20 @@ int platform_device_add(struct platform_device *pdev)
if (p) {
ret = insert_resource(p, r);
if (ret) {
- dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
+ dev_err(dev, "failed to claim resource %d: %pR\n", i, r);
goto failed;
}
}
}
- pr_debug("Registering platform device '%s'. Parent at %s\n",
- dev_name(&pdev->dev), dev_name(pdev->dev.parent));
+ pr_debug("Registering platform device '%s'. Parent at %s\n", dev_name(dev),
+ dev_name(dev->parent));
- ret = device_add(&pdev->dev);
- if (ret == 0)
- return ret;
+ ret = device_add(dev);
+ if (ret)
+ goto failed;
+
+ return 0;
failed:
if (pdev->id_auto) {
@@ -730,7 +731,6 @@ int platform_device_add(struct platform_device *pdev)
release_resource(r);
}
- err_out:
return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
@@ -1447,21 +1447,22 @@ static void platform_shutdown(struct device *_dev)
static int platform_dma_configure(struct device *dev)
{
struct platform_driver *drv = to_platform_driver(dev->driver);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
enum dev_dma_attr attr;
int ret = 0;
- if (dev->of_node) {
- ret = of_dma_configure(dev, dev->of_node, true);
- } else if (has_acpi_companion(dev)) {
- attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode));
+ if (is_of_node(fwnode)) {
+ ret = of_dma_configure(dev, to_of_node(fwnode), true);
+ } else if (is_acpi_device_node(fwnode)) {
+ attr = acpi_get_dma_attr(to_acpi_device_node(fwnode));
ret = acpi_dma_configure(dev, attr);
}
+ if (ret || drv->driver_managed_dma)
+ return ret;
- if (!ret && !drv->driver_managed_dma) {
- ret = iommu_device_use_default_domain(dev);
- if (ret)
- arch_teardown_dma_ops(dev);
- }
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
return ret;
}
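
Several platform.c hunks replace of_node/ACPI-companion checks on struct device with a single dev_fwnode() lookup followed by is_of_node()/is_acpi_device_node() dispatch, and platform_device_add() now works through a local struct device pointer. A minimal sketch of the fwnode dispatch pattern, assuming <linux/property.h>, <linux/of.h> and <linux/acpi.h>; describe_fw_backing() is a hypothetical helper:

static void describe_fw_backing(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_of_node(fwnode))
		dev_info(dev, "described by DT node %pOF\n", to_of_node(fwnode));
	else if (is_acpi_device_node(fwnode))
		dev_info(dev, "described by an ACPI companion\n");
	else
		dev_info(dev, "no firmware node\n");
}

The same file also annotates the flexible irq[] array with __counted_by(count), letting compiler bounds checks tie the array length to the count member.
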
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 72115917e0..44ec20918a 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -228,3 +228,24 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
device_pm_check_callbacks(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_domain_set);
+
+/**
+ * dev_pm_domain_set_performance_state - Request a new performance state.
+ * @dev: The device to make the request for.
+ * @state: Target performance state for the device.
+ *
+ * This function should be called when a new performance state needs to be
+ * requested for a device that is attached to a PM domain. Note that, the
+ * support for performance scaling for PM domains is optional.
+ *
+ * Returns 0 on success and when performance scaling isn't supported, negative
+ * error code on failure.
+ */
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state)
+{
+ if (dev->pm_domain && dev->pm_domain->set_performance_state)
+ return dev->pm_domain->set_performance_state(dev, state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_set_performance_state);
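
dev_pm_domain_set_performance_state() gives consumer drivers a genpd-agnostic way to request a performance level from whatever PM domain they are attached to, returning 0 when the domain does not implement performance scaling. A hypothetical consumer call, assuming the matching declaration is exposed through <linux/pm_domain.h>:

/* Hypothetical consumer: request performance state 3 before a
 * bandwidth-heavy operation; 0 also means "not supported, carry on". */
static int example_boost(struct device *dev)
{
	int ret = dev_pm_domain_set_performance_state(dev, 3);

	if (ret)
		dev_err(dev, "performance state request failed: %d\n", ret);
	return ret;
}
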
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5cb2023581..011948f02b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -130,6 +130,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_active_wakeup(genpd) (genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd) (genpd->flags & GENPD_FLAG_CPU_DOMAIN)
#define genpd_is_rpm_always_on(genpd) (genpd->flags & GENPD_FLAG_RPM_ALWAYS_ON)
+#define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -419,6 +420,25 @@ static void genpd_restore_performance_state(struct device *dev,
genpd_set_performance_state(dev, state);
}
+static int genpd_dev_pm_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+ int ret = 0;
+
+ genpd_lock(genpd);
+ if (pm_runtime_suspended(dev)) {
+ dev_gpd_data(dev)->rpm_pstate = state;
+ } else {
+ ret = genpd_set_performance_state(dev, state);
+ if (!ret)
+ dev_gpd_data(dev)->rpm_pstate = 0;
+ }
+ genpd_unlock(genpd);
+
+ return ret;
+}
+
/**
* dev_pm_genpd_set_performance_state- Set performance state of device's power
* domain.
@@ -437,7 +457,6 @@ static void genpd_restore_performance_state(struct device *dev,
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
- int ret = 0;
genpd = dev_to_genpd_safe(dev);
if (!genpd)
@@ -447,17 +466,7 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
!dev->power.subsys_data->domain_data))
return -EINVAL;
- genpd_lock(genpd);
- if (pm_runtime_suspended(dev)) {
- dev_gpd_data(dev)->rpm_pstate = state;
- } else {
- ret = genpd_set_performance_state(dev, state);
- if (!ret)
- dev_gpd_data(dev)->rpm_pstate = 0;
- }
- genpd_unlock(genpd);
-
- return ret;
+ return genpd_dev_pm_set_performance_state(dev, state);
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
@@ -1102,7 +1111,7 @@ static int __init genpd_power_off_unused(void)
return 0;
}
-late_initcall(genpd_power_off_unused);
+late_initcall_sync(genpd_power_off_unused);
#ifdef CONFIG_PM_SLEEP
@@ -2079,6 +2088,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.restore_noirq = genpd_restore_noirq;
genpd->domain.ops.complete = genpd_complete;
genpd->domain.start = genpd_dev_pm_start;
+ genpd->domain.set_performance_state = genpd_dev_pm_set_performance_state;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
@@ -2328,7 +2338,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
genpd->dev.of_node = np;
/* Parse genpd OPP table */
- if (genpd->set_performance_state) {
+ if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table(&genpd->dev);
if (ret)
return dev_err_probe(&genpd->dev, ret, "Failed to add OPP table\n");
@@ -2343,7 +2353,7 @@ int of_genpd_add_provider_simple(struct device_node *np,
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
- if (genpd->set_performance_state) {
+ if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
@@ -2387,7 +2397,7 @@ int of_genpd_add_provider_onecell(struct device_node *np,
genpd->dev.of_node = np;
/* Parse genpd OPP table */
- if (genpd->set_performance_state) {
+ if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
ret = dev_pm_opp_of_add_table_indexed(&genpd->dev, i);
if (ret) {
dev_err_probe(&genpd->dev, ret,
@@ -2423,7 +2433,7 @@ error:
genpd->provider = NULL;
genpd->has_provider = false;
- if (genpd->set_performance_state) {
+ if (!genpd_is_opp_table_fw(genpd) && genpd->set_performance_state) {
dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
}
@@ -2455,7 +2465,7 @@ void of_genpd_del_provider(struct device_node *np)
if (gpd->provider == &np->fwnode) {
gpd->has_provider = false;
- if (!gpd->set_performance_state)
+ if (genpd_is_opp_table_fw(gpd) || !gpd->set_performance_state)
continue;
dev_pm_opp_put_opp_table(gpd->opp_table);
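
The domain.c changes hook the new genpd_dev_pm_set_performance_state() helper into the domain's dev_pm_domain callbacks and add GENPD_FLAG_OPP_TABLE_FW, which tells the framework that the provider's OPP table comes from firmware, so dev_pm_opp_of_add_table() and its removal counterpart must be skipped. A hypothetical provider using the flag; my_pd_set_perf_state() is an assumed driver callback:

static int my_pd_set_perf_state(struct generic_pm_domain *pd,
				unsigned int state)
{
	/* Program the hardware or firmware interface here. */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name = "example-pd",
	.flags = GENPD_FLAG_OPP_TABLE_FW,
	.set_performance_state = my_pd_set_perf_state,
};

Such a domain would still be registered with pm_genpd_init() and of_genpd_add_provider_simple() as usual; the flag only suppresses the devicetree OPP table handling shown in the hunks above.
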
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 264d29b3fc..e14cc03a17 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -442,10 +442,18 @@ static struct regmap_range_cfg test_range = {
.range_max = 40,
};
-static bool test_range_volatile(struct device *dev, unsigned int reg)
+static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
if (reg >= test_range.window_start &&
- reg <= test_range.selector_reg + test_range.window_len)
+ reg <= test_range.window_start + test_range.window_len)
+ return true;
+
+ return false;
+}
+
+static bool test_range_all_volatile(struct device *dev, unsigned int reg)
+{
+ if (test_range_window_volatile(dev, reg))
return true;
if (reg >= test_range.range_min && reg <= test_range.range_max)
@@ -465,7 +473,7 @@ static void basic_ranges(struct kunit *test)
config = test_regmap_config;
config.cache_type = t->type;
- config.volatile_reg = test_range_volatile;
+ config.volatile_reg = test_range_all_volatile;
config.ranges = &test_range;
config.num_ranges = 1;
config.max_register = test_range.range_max;
@@ -875,6 +883,59 @@ static void cache_present(struct kunit *test)
regmap_exit(map);
}
+/* Check that caching the window register works with sync */
+static void cache_range_window_reg(struct kunit *test)
+{
+ struct regcache_types *t = (struct regcache_types *)test->param_value;
+ struct regmap *map;
+ struct regmap_config config;
+ struct regmap_ram_data *data;
+ unsigned int val;
+ int i;
+
+ config = test_regmap_config;
+ config.cache_type = t->type;
+ config.volatile_reg = test_range_window_volatile;
+ config.ranges = &test_range;
+ config.num_ranges = 1;
+ config.max_register = test_range.range_max;
+
+ map = gen_regmap(&config, &data);
+ KUNIT_ASSERT_FALSE(test, IS_ERR(map));
+ if (IS_ERR(map))
+ return;
+
+ /* Write new values to the entire range */
+ for (i = test_range.range_min; i <= test_range.range_max; i++)
+ KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
+
+ val = data->vals[test_range.selector_reg] & test_range.selector_mask;
+ KUNIT_ASSERT_EQ(test, val, 2);
+
+ /* Write to the first register in the range to reset the page */
+ KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ val = data->vals[test_range.selector_reg] & test_range.selector_mask;
+ KUNIT_ASSERT_EQ(test, val, 0);
+
+ /* Trigger a cache sync */
+ regcache_mark_dirty(map);
+ KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
+
+ /* Write to the first register again, the page should be reset */
+ KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
+ val = data->vals[test_range.selector_reg] & test_range.selector_mask;
+ KUNIT_ASSERT_EQ(test, val, 0);
+
+ /* Trigger another cache sync */
+ regcache_mark_dirty(map);
+ KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
+
+ /* Write to the last register again, the page should be reset */
+ KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
+ val = data->vals[test_range.selector_reg] & test_range.selector_mask;
+ KUNIT_ASSERT_EQ(test, val, 2);
+}
+
struct raw_test_types {
const char *name;
@@ -1217,6 +1278,7 @@ static struct kunit_case regmap_test_cases[] = {
KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
+ KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
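
The regmap-kunit changes fix the window-volatility predicate (it previously keyed off selector_reg instead of window_start) and add cache_range_window_reg(), which checks that the selector register is cached and replayed correctly across regcache_sync(). For readers unfamiliar with indirect ranges, an illustrative regmap_range_cfg with made-up register numbers, not the test's actual values:

/* Registers range_min..range_max are reached indirectly: a page is
 * selected via selector_reg and data flows through the window. */
static const struct regmap_range_cfg example_range = {
	.name          = "example",
	.range_min     = 20,
	.range_max     = 40,
	.selector_reg  = 2,
	.selector_mask = 0xff,
	.window_start  = 10,
	.window_len    = 10,
};
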
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 9d42051f8f..5c7fac8061 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -14,6 +14,6 @@ config DM_KUNIT_TEST
depends on KUNIT
config DRIVER_PE_KUNIT_TEST
- bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
- depends on KUNIT=y
+ tristate "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
+ depends on KUNIT
default KUNIT_ALL_TESTS
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
index dd2b606d76..a8657eb06f 100644
--- a/drivers/base/test/property-entry-test.c
+++ b/drivers/base/test/property-entry-test.c
@@ -506,3 +506,7 @@ static struct kunit_suite property_entry_test_suite = {
};
kunit_test_suite(property_entry_test_suite);
+
+MODULE_DESCRIPTION("Test module for the property entry API");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
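
With DRIVER_PE_KUNIT_TEST now tristate, the suite can be built as a loadable module, which is why the MODULE_DESCRIPTION/MODULE_AUTHOR/MODULE_LICENSE macros become necessary. A minimal sketch of what a modular KUnit suite needs; the names here are illustrative, not taken from property-entry-test.c:

#include <kunit/test.h>
#include <linux/module.h>

static void example_case(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_case),
	{}
};

static struct kunit_suite example_suite = {
	.name = "example",
	.test_cases = example_cases,
};
kunit_test_suite(example_suite);

MODULE_DESCRIPTION("Illustrative modular KUnit suite");
MODULE_LICENSE("GPL");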