author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:40 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:40 +0000
commit     8b0a8165cdad0f4133837d753649ef4682e42c3b
tree       5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /drivers/perf
parent     Releasing progress-linux version 6.8.12-1~progress7.99u1.
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/perf')
30 files changed, 899 insertions, 217 deletions
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index ec6e0d9194..7526a9e714 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -86,6 +86,30 @@ config RISCV_PMU_SBI full perf feature support i.e. counter overflow, privilege mode filtering, counter configuration. +config STARFIVE_STARLINK_PMU + depends on ARCH_STARFIVE || COMPILE_TEST + depends on 64BIT + bool "StarFive StarLink PMU" + help + Provide support for StarLink Performance Monitor Unit. + StarLink Performance Monitor Unit integrates one or more cores with + an L3 memory system. The L3 cache events are added into perf event + subsystem, allowing monitoring of various L3 cache perf events. + +config ANDES_CUSTOM_PMU + bool "Andes custom PMU support" + depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI + default y + help + The Andes cores implement the PMU overflow extension very + similar to the standard Sscofpmf and Smcntrpmf extension. + + This will patch the overflow and pending CSRs and handle the + non-standard behaviour via the regular SBI PMU driver and + interface. + + If you don't know what to do here, say "Y". + config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index a06338e340..29b1c28203 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o obj-$(CONFIG_RISCV_PMU) += riscv_pmu.o obj-$(CONFIG_RISCV_PMU_LEGACY) += riscv_pmu_legacy.o obj-$(CONFIG_RISCV_PMU_SBI) += riscv_pmu_sbi.o +obj-$(CONFIG_STARFIVE_STARLINK_PMU) += starfive_starlink_pmu.o obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o diff --git a/drivers/perf/alibaba_uncore_drw_pmu.c b/drivers/perf/alibaba_uncore_drw_pmu.c index 19d459a36b..a9277dcf90 100644 --- a/drivers/perf/alibaba_uncore_drw_pmu.c +++ b/drivers/perf/alibaba_uncore_drw_pmu.c @@ -729,7 +729,7 @@ static int ali_drw_pmu_probe(struct platform_device *pdev) return ret; } -static int ali_drw_pmu_remove(struct platform_device *pdev) +static void ali_drw_pmu_remove(struct platform_device *pdev) { struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev); @@ -739,8 +739,6 @@ static int ali_drw_pmu_remove(struct platform_device *pdev) ali_drw_pmu_uninit_irq(drw_pmu); perf_pmu_unregister(&drw_pmu->pmu); - - return 0; } static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) @@ -795,7 +793,7 @@ static struct platform_driver ali_drw_pmu_driver = { .acpi_match_table = ali_drw_acpi_match, }, .probe = ali_drw_pmu_probe, - .remove = ali_drw_pmu_remove, + .remove_new = ali_drw_pmu_remove, }; static int __init ali_drw_pmu_init(void) diff --git a/drivers/perf/amlogic/meson_g12_ddr_pmu.c b/drivers/perf/amlogic/meson_g12_ddr_pmu.c index 15d52ab327..99cc791892 100644 --- a/drivers/perf/amlogic/meson_g12_ddr_pmu.c +++ b/drivers/perf/amlogic/meson_g12_ddr_pmu.c @@ -355,11 +355,9 @@ static int g12_ddr_pmu_probe(struct platform_device *pdev) return meson_ddr_pmu_create(pdev); } -static int g12_ddr_pmu_remove(struct platform_device *pdev) +static void g12_ddr_pmu_remove(struct platform_device *pdev) { meson_ddr_pmu_remove(pdev); - - return 0; } static const struct of_device_id meson_ddr_pmu_dt_match[] = { @@ -381,7 +379,7 @@ MODULE_DEVICE_TABLE(of, meson_ddr_pmu_dt_match); static struct platform_driver g12_ddr_pmu_driver = { .probe = g12_ddr_pmu_probe, - .remove = g12_ddr_pmu_remove, + .remove_new = g12_ddr_pmu_remove, .driver = { .name = "meson-g12-ddr-pmu", 
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 61de861eaf..6be03f81ae 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1697,16 +1697,14 @@ error_pmu_init: return ret; } -static int cci_pmu_remove(struct platform_device *pdev) +static void cci_pmu_remove(struct platform_device *pdev) { if (!g_cci_pmu) - return 0; + return; cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); perf_pmu_unregister(&g_cci_pmu->pmu); g_cci_pmu = NULL; - - return 0; } static struct platform_driver cci_pmu_driver = { @@ -1716,7 +1714,7 @@ static struct platform_driver cci_pmu_driver = { .suppress_bind_attrs = true, }, .probe = cci_pmu_probe, - .remove = cci_pmu_remove, + .remove_new = cci_pmu_remove, }; module_platform_driver(cci_pmu_driver); diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 728d13d8e9..641471bd5e 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -1515,13 +1515,11 @@ static int arm_ccn_probe(struct platform_device *pdev) return arm_ccn_pmu_init(ccn); } -static int arm_ccn_remove(struct platform_device *pdev) +static void arm_ccn_remove(struct platform_device *pdev) { struct arm_ccn *ccn = platform_get_drvdata(pdev); arm_ccn_pmu_cleanup(ccn); - - return 0; } static const struct of_device_id arm_ccn_match[] = { @@ -1539,7 +1537,7 @@ static struct platform_driver arm_ccn_driver = { .suppress_bind_attrs = true, }, .probe = arm_ccn_probe, - .remove = arm_ccn_remove, + .remove_new = arm_ccn_remove, }; static int __init arm_ccn_init(void) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 7e3aa7e234..7ef9c7e483 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -493,6 +493,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) for (dn = cmn->dns; dn->type; dn++) { struct arm_cmn_nodeid nid = arm_cmn_nid(cmn, dn->id); + int pad = dn->logid < 10; if (dn->type == CMN_TYPE_XP) continue; @@ -503,7 +504,7 @@ static void arm_cmn_show_logid(struct seq_file *s, int x, int y, int p, int d) if (nid.x != x || nid.y != y || nid.port != p || nid.dev != d) continue; - seq_printf(s, " #%-2d |", dn->logid); + seq_printf(s, " %*c#%-*d |", pad + 1, ' ', 3 - pad, dn->logid); return; } seq_puts(s, " |"); @@ -516,7 +517,7 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) seq_puts(s, " X"); for (x = 0; x < cmn->mesh_x; x++) - seq_printf(s, " %d ", x); + seq_printf(s, " %-2d ", x); seq_puts(s, "\nY P D+"); y = cmn->mesh_y; while (y--) { @@ -526,13 +527,13 @@ static int arm_cmn_map_show(struct seq_file *s, void *data) for (x = 0; x < cmn->mesh_x; x++) seq_puts(s, "--------+"); - seq_printf(s, "\n%d |", y); + seq_printf(s, "\n%-2d |", y); for (x = 0; x < cmn->mesh_x; x++) { struct arm_cmn_node *xp = cmn->xps + xp_base + x; for (p = 0; p < CMN_MAX_PORTS; p++) port[p][x] = arm_cmn_device_connect_info(cmn, xp, p); - seq_printf(s, " XP #%-2d |", xp_base + x); + seq_printf(s, " XP #%-3d|", xp_base + x); } seq_puts(s, "\n |"); @@ -2515,7 +2516,7 @@ static int arm_cmn_probe(struct platform_device *pdev) return err; } -static int arm_cmn_remove(struct platform_device *pdev) +static void arm_cmn_remove(struct platform_device *pdev) { struct arm_cmn *cmn = platform_get_drvdata(pdev); @@ -2524,7 +2525,6 @@ static int arm_cmn_remove(struct platform_device *pdev) perf_pmu_unregister(&cmn->pmu); cpuhp_state_remove_instance_nocalls(arm_cmn_hp_state, &cmn->cpuhp_node); debugfs_remove(cmn->debug); - return 0; } #ifdef CONFIG_OF @@ -2555,7 +2555,7 @@ static struct platform_driver arm_cmn_driver = { 
.acpi_match_table = ACPI_PTR(arm_cmn_acpi_match), }, .probe = arm_cmn_probe, - .remove = arm_cmn_remove, + .remove_new = arm_cmn_remove, }; static int __init arm_cmn_init(void) diff --git a/drivers/perf/arm_cspmu/arm_cspmu.c b/drivers/perf/arm_cspmu/arm_cspmu.c index 50b89b989c..b9a252272f 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.c +++ b/drivers/perf/arm_cspmu/arm_cspmu.c @@ -27,6 +27,7 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/of.h> #include <linux/perf_event.h> #include <linux/platform_device.h> @@ -100,13 +101,6 @@ #define ARM_CSPMU_ACTIVE_CPU_MASK 0x0 #define ARM_CSPMU_ASSOCIATED_CPU_MASK 0x1 -/* Check and use default if implementer doesn't provide attribute callback */ -#define CHECK_DEFAULT_IMPL_OPS(ops, callback) \ - do { \ - if (!ops->callback) \ - ops->callback = arm_cspmu_ ## callback; \ - } while (0) - /* * Maximum poll count for reading counter value using high-low-high sequence. */ @@ -121,7 +115,9 @@ static void arm_cspmu_set_ev_filter(struct arm_cspmu *cspmu, static struct acpi_apmt_node *arm_cspmu_apmt_node(struct device *dev) { - return *(struct acpi_apmt_node **)dev_get_platdata(dev); + struct acpi_apmt_node **ptr = dev_get_platdata(dev); + + return ptr ? *ptr : NULL; } /* @@ -317,6 +313,10 @@ static const char *arm_cspmu_get_name(const struct arm_cspmu *cspmu) dev = cspmu->dev; apmt_node = arm_cspmu_apmt_node(dev); + if (!apmt_node) + return devm_kasprintf(dev, GFP_KERNEL, PMUNAME "_%u", + atomic_fetch_inc(&pmu_idx[0])); + pmu_type = apmt_node->type; if (pmu_type >= ACPI_APMT_NODE_TYPE_COUNT) { @@ -408,21 +408,32 @@ static struct arm_cspmu_impl_match *arm_cspmu_impl_match_get(u32 pmiidr) return NULL; } +#define DEFAULT_IMPL_OP(name) .name = arm_cspmu_##name + static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu) { int ret = 0; - struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops; struct acpi_apmt_node *apmt_node = arm_cspmu_apmt_node(cspmu->dev); struct arm_cspmu_impl_match *match; - /* - * Get PMU implementer and product id from APMT node. - * If APMT node doesn't have implementer/product id, try get it - * from PMIIDR. - */ - cspmu->impl.pmiidr = - (apmt_node->impl_id) ? apmt_node->impl_id : - readl(cspmu->base0 + PMIIDR); + /* Start with a default PMU implementation */ + cspmu->impl.module = THIS_MODULE; + cspmu->impl.pmiidr = readl(cspmu->base0 + PMIIDR); + cspmu->impl.ops = (struct arm_cspmu_impl_ops) { + DEFAULT_IMPL_OP(get_event_attrs), + DEFAULT_IMPL_OP(get_format_attrs), + DEFAULT_IMPL_OP(get_identifier), + DEFAULT_IMPL_OP(get_name), + DEFAULT_IMPL_OP(is_cycle_counter_event), + DEFAULT_IMPL_OP(event_type), + DEFAULT_IMPL_OP(event_filter), + DEFAULT_IMPL_OP(set_ev_filter), + DEFAULT_IMPL_OP(event_attr_is_visible), + }; + + /* Firmware may override implementer/product ID from PMIIDR */ + if (apmt_node && apmt_node->impl_id) + cspmu->impl.pmiidr = apmt_node->impl_id; /* Find implementer specific attribute ops. */ match = arm_cspmu_impl_match_get(cspmu->impl.pmiidr); @@ -450,24 +461,9 @@ static int arm_cspmu_init_impl_ops(struct arm_cspmu *cspmu) } mutex_unlock(&arm_cspmu_lock); + } - if (ret) - return ret; - } else - cspmu->impl.module = THIS_MODULE; - - /* Use default callbacks if implementer doesn't provide one. 
*/ - CHECK_DEFAULT_IMPL_OPS(impl_ops, get_event_attrs); - CHECK_DEFAULT_IMPL_OPS(impl_ops, get_format_attrs); - CHECK_DEFAULT_IMPL_OPS(impl_ops, get_identifier); - CHECK_DEFAULT_IMPL_OPS(impl_ops, get_name); - CHECK_DEFAULT_IMPL_OPS(impl_ops, is_cycle_counter_event); - CHECK_DEFAULT_IMPL_OPS(impl_ops, event_type); - CHECK_DEFAULT_IMPL_OPS(impl_ops, event_filter); - CHECK_DEFAULT_IMPL_OPS(impl_ops, event_attr_is_visible); - CHECK_DEFAULT_IMPL_OPS(impl_ops, set_ev_filter); - - return 0; + return ret; } static struct attribute_group * @@ -512,23 +508,16 @@ arm_cspmu_alloc_format_attr_group(struct arm_cspmu *cspmu) return format_group; } -static struct attribute_group ** -arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu) +static int arm_cspmu_alloc_attr_groups(struct arm_cspmu *cspmu) { - struct attribute_group **attr_groups = NULL; - struct device *dev = cspmu->dev; + const struct attribute_group **attr_groups = cspmu->attr_groups; const struct arm_cspmu_impl_ops *impl_ops = &cspmu->impl.ops; cspmu->identifier = impl_ops->get_identifier(cspmu); cspmu->name = impl_ops->get_name(cspmu); if (!cspmu->identifier || !cspmu->name) - return NULL; - - attr_groups = devm_kcalloc(dev, 5, sizeof(struct attribute_group *), - GFP_KERNEL); - if (!attr_groups) - return NULL; + return -ENOMEM; attr_groups[0] = arm_cspmu_alloc_event_attr_group(cspmu); attr_groups[1] = arm_cspmu_alloc_format_attr_group(cspmu); @@ -536,18 +525,14 @@ arm_cspmu_alloc_attr_group(struct arm_cspmu *cspmu) attr_groups[3] = &arm_cspmu_cpumask_attr_group; if (!attr_groups[0] || !attr_groups[1]) - return NULL; + return -ENOMEM; - return attr_groups; + return 0; } static inline void arm_cspmu_reset_counters(struct arm_cspmu *cspmu) { - u32 pmcr = 0; - - pmcr |= PMCR_P; - pmcr |= PMCR_C; - writel(pmcr, cspmu->base0 + PMCR); + writel(PMCR_C | PMCR_P, cspmu->base0 + PMCR); } static inline void arm_cspmu_start_counters(struct arm_cspmu *cspmu) @@ -962,7 +947,14 @@ static struct arm_cspmu *arm_cspmu_alloc(struct platform_device *pdev) platform_set_drvdata(pdev, cspmu); apmt_node = arm_cspmu_apmt_node(dev); - cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC; + if (apmt_node) { + cspmu->has_atomic_dword = apmt_node->flags & ACPI_APMT_FLAGS_ATOMIC; + } else { + u32 width = 0; + + device_property_read_u32(dev, "reg-io-width", &width); + cspmu->has_atomic_dword = (width == 8); + } return cspmu; } @@ -1153,11 +1145,6 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu) } } - if (cpumask_empty(&cspmu->associated_cpus)) { - dev_dbg(cspmu->dev, "No cpu associated with the PMU\n"); - return -ENODEV; - } - return 0; } #else @@ -1167,19 +1154,45 @@ static int arm_cspmu_acpi_get_cpus(struct arm_cspmu *cspmu) } #endif +static int arm_cspmu_of_get_cpus(struct arm_cspmu *cspmu) +{ + struct of_phandle_iterator it; + int ret, cpu; + + of_for_each_phandle(&it, ret, dev_of_node(cspmu->dev), "cpus", NULL, 0) { + cpu = of_cpu_node_to_id(it.node); + if (cpu < 0) + continue; + cpumask_set_cpu(cpu, &cspmu->associated_cpus); + } + return ret == -ENOENT ? 
0 : ret; +} + static int arm_cspmu_get_cpus(struct arm_cspmu *cspmu) { - return arm_cspmu_acpi_get_cpus(cspmu); + int ret = 0; + + if (arm_cspmu_apmt_node(cspmu->dev)) + ret = arm_cspmu_acpi_get_cpus(cspmu); + else if (device_property_present(cspmu->dev, "cpus")) + ret = arm_cspmu_of_get_cpus(cspmu); + else + cpumask_copy(&cspmu->associated_cpus, cpu_possible_mask); + + if (!ret && cpumask_empty(&cspmu->associated_cpus)) { + dev_dbg(cspmu->dev, "No cpu associated with the PMU\n"); + ret = -ENODEV; + } + return ret; } static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu) { int ret, capabilities; - struct attribute_group **attr_groups; - attr_groups = arm_cspmu_alloc_attr_group(cspmu); - if (!attr_groups) - return -ENOMEM; + ret = arm_cspmu_alloc_attr_groups(cspmu); + if (ret) + return ret; ret = cpuhp_state_add_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node); @@ -1201,12 +1214,11 @@ static int arm_cspmu_register_pmu(struct arm_cspmu *cspmu) .start = arm_cspmu_start, .stop = arm_cspmu_stop, .read = arm_cspmu_read, - .attr_groups = (const struct attribute_group **)attr_groups, + .attr_groups = cspmu->attr_groups, .capabilities = capabilities, }; /* Hardware counter init */ - arm_cspmu_stop_counters(cspmu); arm_cspmu_reset_counters(cspmu); ret = perf_pmu_register(&cspmu->pmu, cspmu->name, -1); @@ -1252,14 +1264,12 @@ static int arm_cspmu_device_probe(struct platform_device *pdev) return ret; } -static int arm_cspmu_device_remove(struct platform_device *pdev) +static void arm_cspmu_device_remove(struct platform_device *pdev) { struct arm_cspmu *cspmu = platform_get_drvdata(pdev); perf_pmu_unregister(&cspmu->pmu); cpuhp_state_remove_instance(arm_cspmu_cpuhp_state, &cspmu->cpuhp_node); - - return 0; } static const struct platform_device_id arm_cspmu_id[] = { @@ -1268,13 +1278,20 @@ static const struct platform_device_id arm_cspmu_id[] = { }; MODULE_DEVICE_TABLE(platform, arm_cspmu_id); +static const struct of_device_id arm_cspmu_of_match[] = { + { .compatible = "arm,coresight-pmu" }, + {} +}; +MODULE_DEVICE_TABLE(of, arm_cspmu_of_match); + static struct platform_driver arm_cspmu_driver = { .driver = { - .name = DRVNAME, - .suppress_bind_attrs = true, - }, + .name = DRVNAME, + .of_match_table = arm_cspmu_of_match, + .suppress_bind_attrs = true, + }, .probe = arm_cspmu_device_probe, - .remove = arm_cspmu_device_remove, + .remove_new = arm_cspmu_device_remove, .id_table = arm_cspmu_id, }; diff --git a/drivers/perf/arm_cspmu/arm_cspmu.h b/drivers/perf/arm_cspmu/arm_cspmu.h index 2fe723555a..c9163acfe8 100644 --- a/drivers/perf/arm_cspmu/arm_cspmu.h +++ b/drivers/perf/arm_cspmu/arm_cspmu.h @@ -157,6 +157,7 @@ struct arm_cspmu { int cycle_counter_logical_idx; struct arm_cspmu_hw_events hw_events; + const struct attribute_group *attr_groups[5]; struct arm_cspmu_impl impl; }; diff --git a/drivers/perf/arm_cspmu/nvidia_cspmu.c b/drivers/perf/arm_cspmu/nvidia_cspmu.c index 0382b702f0..5b84b701ad 100644 --- a/drivers/perf/arm_cspmu/nvidia_cspmu.c +++ b/drivers/perf/arm_cspmu/nvidia_cspmu.c @@ -388,12 +388,6 @@ static int nv_cspmu_init_ops(struct arm_cspmu *cspmu) impl_ops->get_format_attrs = nv_cspmu_get_format_attrs; impl_ops->get_name = nv_cspmu_get_name; - /* Set others to NULL to use default callback. 
*/ - impl_ops->event_type = NULL; - impl_ops->event_attr_is_visible = NULL; - impl_ops->get_identifier = NULL; - impl_ops->is_cycle_counter_event = NULL; - return 0; } diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c index 30cea68595..88c17c1d6d 100644 --- a/drivers/perf/arm_dmc620_pmu.c +++ b/drivers/perf/arm_dmc620_pmu.c @@ -542,12 +542,16 @@ static int dmc620_pmu_event_init(struct perf_event *event) if (event->cpu < 0) return -EINVAL; + hwc->idx = -1; + + if (event->group_leader == event) + return 0; + /* * We can't atomically disable all HW counters so only one event allowed, * although software events are acceptable. */ - if (event->group_leader != event && - !is_software_event(event->group_leader)) + if (!is_software_event(event->group_leader)) return -EINVAL; for_each_sibling_event(sibling, event->group_leader) { @@ -556,7 +560,6 @@ static int dmc620_pmu_event_init(struct perf_event *event) return -EINVAL; } - hwc->idx = -1; return 0; } @@ -724,7 +727,7 @@ out_teardown_dev: return ret; } -static int dmc620_pmu_device_remove(struct platform_device *pdev) +static void dmc620_pmu_device_remove(struct platform_device *pdev) { struct dmc620_pmu *dmc620_pmu = platform_get_drvdata(pdev); @@ -732,8 +735,6 @@ static int dmc620_pmu_device_remove(struct platform_device *pdev) /* perf will synchronise RCU before devres can free dmc620_pmu */ perf_pmu_unregister(&dmc620_pmu->pmu); - - return 0; } static const struct acpi_device_id dmc620_acpi_match[] = { @@ -748,7 +749,7 @@ static struct platform_driver dmc620_pmu_driver = { .suppress_bind_attrs = true, }, .probe = dmc620_pmu_device_probe, - .remove = dmc620_pmu_device_remove, + .remove_new = dmc620_pmu_device_remove, }; static int __init dmc620_pmu_init(void) diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 7ec4498e31..bae3ca37f8 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -774,14 +774,12 @@ static int dsu_pmu_device_probe(struct platform_device *pdev) return rc; } -static int dsu_pmu_device_remove(struct platform_device *pdev) +static void dsu_pmu_device_remove(struct platform_device *pdev) { struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&dsu_pmu->pmu); cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node); - - return 0; } static const struct of_device_id dsu_pmu_of_match[] = { @@ -806,7 +804,7 @@ static struct platform_driver dsu_pmu_driver = { .suppress_bind_attrs = true, }, .probe = dsu_pmu_device_probe, - .remove = dsu_pmu_device_remove, + .remove_new = dsu_pmu_device_remove, }; static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 6303b82566..719aa953a1 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -716,7 +716,7 @@ static void smmu_pmu_free_msis(void *data) { struct device *dev = data; - platform_msi_domain_free_irqs(dev); + platform_device_msi_free_irqs_all(dev); } static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) @@ -746,7 +746,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI)) return; - ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg); + ret = platform_device_msi_init_and_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg); if (ret) { dev_warn(dev, "failed to allocate MSIs\n"); return; @@ -965,14 +965,12 @@ out_unregister: return err; } -static int 
smmu_pmu_remove(struct platform_device *pdev) +static void smmu_pmu_remove(struct platform_device *pdev) { struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&smmu_pmu->pmu); cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); - - return 0; } static void smmu_pmu_shutdown(struct platform_device *pdev) @@ -997,7 +995,7 @@ static struct platform_driver smmu_pmu_driver = { .suppress_bind_attrs = true, }, .probe = smmu_pmu_probe, - .remove = smmu_pmu_remove, + .remove_new = smmu_pmu_remove, .shutdown = smmu_pmu_shutdown, }; diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index b622d75d8c..35f0de0341 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -1263,14 +1263,13 @@ out_free_handle: return ret; } -static int arm_spe_pmu_device_remove(struct platform_device *pdev) +static void arm_spe_pmu_device_remove(struct platform_device *pdev) { struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev); arm_spe_pmu_perf_destroy(spe_pmu); arm_spe_pmu_dev_teardown(spe_pmu); free_percpu(spe_pmu->handle); - return 0; } static struct platform_driver arm_spe_pmu_driver = { @@ -1281,7 +1280,7 @@ static struct platform_driver arm_spe_pmu_driver = { .suppress_bind_attrs = true, }, .probe = arm_spe_pmu_device_probe, - .remove = arm_spe_pmu_device_remove, + .remove_new = arm_spe_pmu_device_remove, }; static int __init arm_spe_pmu_init(void) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 7dbfaee372..4e8fa5a48f 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -826,7 +826,7 @@ cpuhp_state_err: return ret; } -static int ddr_perf_remove(struct platform_device *pdev) +static void ddr_perf_remove(struct platform_device *pdev) { struct ddr_pmu *pmu = platform_get_drvdata(pdev); @@ -836,7 +836,6 @@ static int ddr_perf_remove(struct platform_device *pdev) perf_pmu_unregister(&pmu->pmu); ida_free(&ddr_ida, pmu->id); - return 0; } static struct platform_driver imx_ddr_pmu_driver = { @@ -846,7 +845,7 @@ static struct platform_driver imx_ddr_pmu_driver = { .suppress_bind_attrs = true, }, .probe = ddr_perf_probe, - .remove = ddr_perf_remove, + .remove_new = ddr_perf_remove, }; module_platform_driver(imx_ddr_pmu_driver); diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c index 9685645bfe..72c2d3074c 100644 --- a/drivers/perf/fsl_imx9_ddr_perf.c +++ b/drivers/perf/fsl_imx9_ddr_perf.c @@ -679,7 +679,7 @@ format_string_err: return ret; } -static int ddr_perf_remove(struct platform_device *pdev) +static void ddr_perf_remove(struct platform_device *pdev) { struct ddr_pmu *pmu = platform_get_drvdata(pdev); @@ -689,8 +689,6 @@ static int ddr_perf_remove(struct platform_device *pdev) perf_pmu_unregister(&pmu->pmu); ida_free(&ddr_ida, pmu->id); - - return 0; } static struct platform_driver imx_ddr_pmu_driver = { @@ -700,7 +698,7 @@ static struct platform_driver imx_ddr_pmu_driver = { .suppress_bind_attrs = true, }, .probe = ddr_perf_probe, - .remove = ddr_perf_remove, + .remove_new = ddr_perf_remove, }; module_platform_driver(imx_ddr_pmu_driver); diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c index ec4d5fc307..dba3991256 100644 --- a/drivers/perf/hisilicon/hisi_pcie_pmu.c +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -216,10 +216,8 @@ static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, writeq_relaxed(val, pcie_pmu->base + offset); } -static void 
hisi_pcie_pmu_config_filter(struct perf_event *event) +static u64 hisi_pcie_pmu_get_event_ctrl_val(struct perf_event *event) { - struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); - struct hw_perf_event *hwc = &event->hw; u64 port, trig_len, thr_len, len_mode; u64 reg = HISI_PCIE_INIT_SET; @@ -256,10 +254,19 @@ static void hisi_pcie_pmu_config_filter(struct perf_event *event) else reg |= FIELD_PREP(HISI_PCIE_LEN_M, HISI_PCIE_LEN_M_DEFAULT); + return reg; +} + +static void hisi_pcie_pmu_config_event_ctrl(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 reg = hisi_pcie_pmu_get_event_ctrl_val(event); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg); } -static void hisi_pcie_pmu_clear_filter(struct perf_event *event) +static void hisi_pcie_pmu_clear_event_ctrl(struct perf_event *event) { struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; @@ -299,18 +306,24 @@ static bool hisi_pcie_pmu_valid_filter(struct perf_event *event, if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL) return false; - if (requester_id) { - if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id)) - return false; - } + /* Need to explicitly set filter of "port" or "bdf" */ + if (!hisi_pcie_get_port(event) && + !hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id)) + return false; return true; } +/* + * Check Whether two events share the same config. The same config means not + * only the event code, but also the filter settings of the two events are + * the same. + */ static bool hisi_pcie_pmu_cmp_event(struct perf_event *target, struct perf_event *event) { - return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event); + return hisi_pcie_pmu_get_event_ctrl_val(target) == + hisi_pcie_pmu_get_event_ctrl_val(event); } static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event) @@ -397,40 +410,32 @@ static u64 hisi_pcie_pmu_read_counter(struct perf_event *event) return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx); } -static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu, - struct perf_event *event) +/* + * Check all work events, if a relevant event is found then we return it + * first, otherwise return the first idle counter (need to reset). 
+ */ +static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu, + struct perf_event *event) { + int first_idle = -EAGAIN; struct perf_event *sibling; int idx; for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { sibling = pcie_pmu->hw_events[idx]; - if (!sibling) - continue; - - if (!hisi_pcie_pmu_cmp_event(sibling, event)) + if (!sibling) { + if (first_idle == -EAGAIN) + first_idle = idx; continue; + } /* Related events must be used in group */ - if (sibling->group_leader == event->group_leader) + if (hisi_pcie_pmu_cmp_event(sibling, event) && + sibling->group_leader == event->group_leader) return idx; - else - return -EINVAL; } - return idx; -} - -static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu) -{ - int idx; - - for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { - if (!pcie_pmu->hw_events[idx]) - return idx; - } - - return -EINVAL; + return first_idle; } static void hisi_pcie_pmu_event_update(struct perf_event *event) @@ -517,7 +522,7 @@ static void hisi_pcie_pmu_start(struct perf_event *event, int flags) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; - hisi_pcie_pmu_config_filter(event); + hisi_pcie_pmu_config_event_ctrl(event); hisi_pcie_pmu_enable_counter(pcie_pmu, hwc); hisi_pcie_pmu_enable_int(pcie_pmu, hwc); hisi_pcie_pmu_set_period(event); @@ -538,7 +543,7 @@ static void hisi_pcie_pmu_stop(struct perf_event *event, int flags) hisi_pcie_pmu_event_update(event); hisi_pcie_pmu_disable_int(pcie_pmu, hwc); hisi_pcie_pmu_disable_counter(pcie_pmu, hwc); - hisi_pcie_pmu_clear_filter(event); + hisi_pcie_pmu_clear_event_ctrl(event); WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); hwc->state |= PERF_HES_STOPPED; @@ -556,27 +561,18 @@ static int hisi_pcie_pmu_add(struct perf_event *event, int flags) hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; - /* Check all working events to find a related event. */ - idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event); - if (idx < 0) - return idx; - - /* Current event shares an enabled counter with the related event */ - if (idx < HISI_PCIE_MAX_COUNTERS) { - hwc->idx = idx; - goto start_count; - } - - idx = hisi_pcie_pmu_get_event_idx(pcie_pmu); + idx = hisi_pcie_pmu_get_event_idx(pcie_pmu, event); if (idx < 0) return idx; hwc->idx = idx; - pcie_pmu->hw_events[idx] = event; - /* Reset Counter to avoid previous statistic interference. 
*/ - hisi_pcie_pmu_reset_counter(pcie_pmu, idx); -start_count: + /* No enabled counter found with related event, reset it */ + if (!pcie_pmu->hw_events[idx]) { + hisi_pcie_pmu_reset_counter(pcie_pmu, idx); + pcie_pmu->hw_events[idx] = event; + } + if (flags & PERF_EF_START) hisi_pcie_pmu_start(event, PERF_EF_RELOAD); @@ -726,10 +722,18 @@ static struct attribute *hisi_pcie_pmu_events_attr[] = { HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210), HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011), HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011), + HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_flux, 0x0104), + HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_time, 0x10104), HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804), HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804), + HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_flux, 0x2004), + HISI_PCIE_PMU_EVENT_ATTR(rx_cpl_time, 0x12004), + HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_flux, 0x0105), + HISI_PCIE_PMU_EVENT_ATTR(tx_mwr_time, 0x10105), HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405), HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405), + HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_flux, 0x1005), + HISI_PCIE_PMU_EVENT_ATTR(tx_cpl_time, 0x11005), NULL }; diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c index 40f1bc9f9b..0e923f94fa 100644 --- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c @@ -341,7 +341,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev) return ret; } -static int hisi_cpa_pmu_remove(struct platform_device *pdev) +static void hisi_cpa_pmu_remove(struct platform_device *pdev) { struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev); @@ -349,7 +349,6 @@ static int hisi_cpa_pmu_remove(struct platform_device *pdev) cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node); hisi_cpa_pmu_enable_pm(cpa_pmu); - return 0; } static struct platform_driver hisi_cpa_pmu_driver = { @@ -359,7 +358,7 @@ static struct platform_driver hisi_cpa_pmu_driver = { .suppress_bind_attrs = true, }, .probe = hisi_cpa_pmu_probe, - .remove = hisi_cpa_pmu_remove, + .remove_new = hisi_cpa_pmu_remove, }; static int __init hisi_cpa_pmu_module_init(void) diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index ffb039d05d..b804e37381 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -531,14 +531,13 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) return ret; } -static int hisi_ddrc_pmu_remove(struct platform_device *pdev) +static void hisi_ddrc_pmu_remove(struct platform_device *pdev) { struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&ddrc_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node); - return 0; } static struct platform_driver hisi_ddrc_pmu_driver = { @@ -548,7 +547,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = { .suppress_bind_attrs = true, }, .probe = hisi_ddrc_pmu_probe, - .remove = hisi_ddrc_pmu_remove, + .remove_new = hisi_ddrc_pmu_remove, }; static int __init hisi_ddrc_pmu_module_init(void) diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 15caf99e1e..21e69b1cdd 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -534,14 +534,13 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) return ret; } -static int hisi_hha_pmu_remove(struct 
platform_device *pdev) +static void hisi_hha_pmu_remove(struct platform_device *pdev) { struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&hha_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); - return 0; } static struct platform_driver hisi_hha_pmu_driver = { @@ -551,7 +550,7 @@ static struct platform_driver hisi_hha_pmu_driver = { .suppress_bind_attrs = true, }, .probe = hisi_hha_pmu_probe, - .remove = hisi_hha_pmu_remove, + .remove_new = hisi_hha_pmu_remove, }; static int __init hisi_hha_pmu_module_init(void) diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 794dbcd19b..51ba768710 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -568,14 +568,13 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) return ret; } -static int hisi_l3c_pmu_remove(struct platform_device *pdev) +static void hisi_l3c_pmu_remove(struct platform_device *pdev) { struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&l3c_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node); - return 0; } static struct platform_driver hisi_l3c_pmu_driver = { @@ -585,7 +584,7 @@ static struct platform_driver hisi_l3c_pmu_driver = { .suppress_bind_attrs = true, }, .probe = hisi_l3c_pmu_probe, - .remove = hisi_l3c_pmu_remove, + .remove_new = hisi_l3c_pmu_remove, }; static int __init hisi_l3c_pmu_module_init(void) diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c index 797cf20199..3cdb35c741 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -514,14 +514,13 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev) return ret; } -static int hisi_pa_pmu_remove(struct platform_device *pdev) +static void hisi_pa_pmu_remove(struct platform_device *pdev) { struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&pa_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, &pa_pmu->node); - return 0; } static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = { @@ -539,7 +538,7 @@ static struct platform_driver hisi_pa_pmu_driver = { .suppress_bind_attrs = true, }, .probe = hisi_pa_pmu_probe, - .remove = hisi_pa_pmu_remove, + .remove_new = hisi_pa_pmu_remove, }; static int __init hisi_pa_pmu_module_init(void) diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c index e706ca5676..765bbd61db 100644 --- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -460,14 +460,13 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev) return ret; } -static int hisi_sllc_pmu_remove(struct platform_device *pdev) +static void hisi_sllc_pmu_remove(struct platform_device *pdev) { struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev); perf_pmu_unregister(&sllc_pmu->pmu); cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, &sllc_pmu->node); - return 0; } static struct platform_driver hisi_sllc_pmu_driver = { @@ -477,7 +476,7 @@ static struct platform_driver hisi_sllc_pmu_driver = { .suppress_bind_attrs = true, }, .probe = hisi_sllc_pmu_probe, - .remove = hisi_sllc_pmu_remove, + .remove_new = hisi_sllc_pmu_remove, }; static int __init hisi_sllc_pmu_module_init(void) diff --git 
a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c index 524ba82bfc..e2abca188d 100644 --- a/drivers/perf/marvell_cn10k_ddr_pmu.c +++ b/drivers/perf/marvell_cn10k_ddr_pmu.c @@ -697,7 +697,7 @@ error: return ret; } -static int cn10k_ddr_perf_remove(struct platform_device *pdev) +static void cn10k_ddr_perf_remove(struct platform_device *pdev) { struct cn10k_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev); @@ -706,7 +706,6 @@ static int cn10k_ddr_perf_remove(struct platform_device *pdev) &ddr_pmu->node); perf_pmu_unregister(&ddr_pmu->pmu); - return 0; } #ifdef CONFIG_OF @@ -733,7 +732,7 @@ static struct platform_driver cn10k_ddr_pmu_driver = { .suppress_bind_attrs = true, }, .probe = cn10k_ddr_perf_probe, - .remove = cn10k_ddr_perf_remove, + .remove_new = cn10k_ddr_perf_remove, }; static int __init cn10k_ddr_pmu_init(void) diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c index fec8e82edb..9e635f3554 100644 --- a/drivers/perf/marvell_cn10k_tad_pmu.c +++ b/drivers/perf/marvell_cn10k_tad_pmu.c @@ -351,15 +351,13 @@ static int tad_pmu_probe(struct platform_device *pdev) return ret; } -static int tad_pmu_remove(struct platform_device *pdev) +static void tad_pmu_remove(struct platform_device *pdev) { struct tad_pmu *pmu = platform_get_drvdata(pdev); cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state, &pmu->node); perf_pmu_unregister(&pmu->pmu); - - return 0; } #ifdef CONFIG_OF @@ -385,7 +383,7 @@ static struct platform_driver tad_pmu_driver = { .suppress_bind_attrs = true, }, .probe = tad_pmu_probe, - .remove = tad_pmu_remove, + .remove_new = tad_pmu_remove, }; static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 3f9a98c17a..148df5ae8e 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -965,7 +965,7 @@ out_unregister: return err; } -static int l2_cache_pmu_remove(struct platform_device *pdev) +static void l2_cache_pmu_remove(struct platform_device *pdev) { struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(platform_get_drvdata(pdev)); @@ -973,7 +973,6 @@ static int l2_cache_pmu_remove(struct platform_device *pdev) perf_pmu_unregister(&l2cache_pmu->pmu); cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, &l2cache_pmu->node); - return 0; } static struct platform_driver l2_cache_pmu_driver = { @@ -983,7 +982,7 @@ static struct platform_driver l2_cache_pmu_driver = { .suppress_bind_attrs = true, }, .probe = l2_cache_pmu_probe, - .remove = l2_cache_pmu_remove, + .remove_new = l2_cache_pmu_remove, }; static int __init register_l2_cache_pmu_driver(void) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 452aab49db..3e44d2fb8b 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -19,11 +19,33 @@ #include <linux/of.h> #include <linux/cpu_pm.h> #include <linux/sched/clock.h> +#include <linux/soc/andes/irq.h> #include <asm/errata_list.h> #include <asm/sbi.h> #include <asm/cpufeature.h> +#define ALT_SBI_PMU_OVERFLOW(__ovl) \ +asm volatile(ALTERNATIVE_2( \ + "csrr %0, " __stringify(CSR_SCOUNTOVF), \ + "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \ + THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \ + CONFIG_ERRATA_THEAD_PMU, \ + "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \ + 0, RISCV_ISA_EXT_XANDESPMU, \ + CONFIG_ANDES_CUSTOM_PMU) \ + : "=r" (__ovl) : \ + : "memory") + +#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \ +asm volatile(ALTERNATIVE( \ + "csrc " 
__stringify(CSR_IP) ", %0\n\t", \ + "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \ + 0, RISCV_ISA_EXT_XANDESPMU, \ + CONFIG_ANDES_CUSTOM_PMU) \ + : : "r"(__irq_mask) \ + : "memory") + #define SYSCTL_NO_USER_ACCESS 0 #define SYSCTL_USER_ACCESS 1 #define SYSCTL_LEGACY 2 @@ -61,6 +83,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS; static union sbi_pmu_ctr_info *pmu_ctr_list; static bool riscv_pmu_use_irq; static unsigned int riscv_pmu_irq_num; +static unsigned int riscv_pmu_irq_mask; static unsigned int riscv_pmu_irq; /* Cache the available counters in a bitmask */ @@ -694,7 +717,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) event = cpu_hw_evt->events[fidx]; if (!event) { - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); + ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); return IRQ_NONE; } @@ -708,7 +731,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) * Overflow interrupt pending bit should only be cleared after stopping * all the counters to avoid any race condition. */ - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); + ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); /* No overflow bit is set */ if (!overflow) @@ -780,8 +803,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) if (riscv_pmu_use_irq) { cpu_hw_evt->irq = riscv_pmu_irq; - csr_clear(CSR_IP, BIT(riscv_pmu_irq_num)); - csr_set(CSR_IE, BIT(riscv_pmu_irq_num)); + ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); } @@ -792,7 +814,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) { if (riscv_pmu_use_irq) { disable_percpu_irq(riscv_pmu_irq); - csr_clear(CSR_IE, BIT(riscv_pmu_irq_num)); } /* Disable all counters access for user mode now */ @@ -816,8 +837,14 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde riscv_cached_mimpid(0) == 0) { riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU; riscv_pmu_use_irq = true; + } else if (riscv_isa_extension_available(NULL, XANDESPMU) && + IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) { + riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI; + riscv_pmu_use_irq = true; } + riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG); + if (!riscv_pmu_use_irq) return -EOPNOTSUPP; diff --git a/drivers/perf/starfive_starlink_pmu.c b/drivers/perf/starfive_starlink_pmu.c new file mode 100644 index 0000000000..5e5a672b42 --- /dev/null +++ b/drivers/perf/starfive_starlink_pmu.c @@ -0,0 +1,642 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * StarFive's StarLink PMU driver + * + * Copyright (C) 2023 StarFive Technology Co., Ltd. 
+ * + * Author: Ji Sheng Teoh <jisheng.teoh@starfivetech.com> + * + */ + +#define STARLINK_PMU_PDEV_NAME "starfive_starlink_pmu" +#define pr_fmt(fmt) STARLINK_PMU_PDEV_NAME ": " fmt + +#include <linux/bitmap.h> +#include <linux/cpu_pm.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/sysfs.h> + +#define STARLINK_PMU_MAX_COUNTERS 64 +#define STARLINK_PMU_NUM_COUNTERS 16 +#define STARLINK_PMU_IDX_CYCLE_COUNTER 63 + +#define STARLINK_PMU_EVENT_SELECT 0x060 +#define STARLINK_PMU_EVENT_COUNTER 0x160 +#define STARLINK_PMU_COUNTER_MASK GENMASK_ULL(63, 0) +#define STARLINK_PMU_CYCLE_COUNTER 0x058 + +#define STARLINK_PMU_CONTROL 0x040 +#define STARLINK_PMU_GLOBAL_ENABLE BIT_ULL(0) + +#define STARLINK_PMU_INTERRUPT_ENABLE 0x050 +#define STARLINK_PMU_COUNTER_OVERFLOW_STATUS 0x048 +#define STARLINK_PMU_CYCLE_OVERFLOW_MASK BIT_ULL(63) + +#define STARLINK_CYCLES 0x058 +#define CACHE_READ_REQUEST 0x04000701 +#define CACHE_WRITE_REQUEST 0x03000001 +#define CACHE_RELEASE_REQUEST 0x0003e001 +#define CACHE_READ_HIT 0x00901202 +#define CACHE_READ_MISS 0x04008002 +#define CACHE_WRITE_HIT 0x006c0002 +#define CACHE_WRITE_MISS 0x03000002 +#define CACHE_WRITEBACK 0x00000403 + +#define to_starlink_pmu(p) (container_of(p, struct starlink_pmu, pmu)) + +#define STARLINK_FORMAT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, 0444, starlink_pmu_sysfs_format_show, NULL), \ + .var = (void *)_config, } \ + })[0].attr.attr) + +#define STARLINK_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, starlink_pmu_sysfs_event_show, _id) + +static int starlink_pmu_cpuhp_state; + +struct starlink_hw_events { + struct perf_event *events[STARLINK_PMU_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, STARLINK_PMU_MAX_COUNTERS); +}; + +struct starlink_pmu { + struct pmu pmu; + struct starlink_hw_events __percpu *hw_events; + struct hlist_node node; + struct notifier_block starlink_pmu_pm_nb; + void __iomem *pmu_base; + cpumask_t cpumask; + int irq; +}; + +static ssize_t +starlink_pmu_sysfs_format_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static struct attribute *starlink_pmu_format_attrs[] = { + STARLINK_FORMAT_ATTR(event, "config:0-31"), + NULL +}; + +static const struct attribute_group starlink_pmu_format_attr_group = { + .name = "format", + .attrs = starlink_pmu_format_attrs, +}; + +static ssize_t +starlink_pmu_sysfs_event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct perf_pmu_events_attr *eattr = container_of(attr, + struct perf_pmu_events_attr, attr); + + return sysfs_emit(buf, "event=0x%02llx\n", eattr->id); +} + +static struct attribute *starlink_pmu_event_attrs[] = { + STARLINK_EVENT_ATTR(cycles, STARLINK_CYCLES), + STARLINK_EVENT_ATTR(read_request, CACHE_READ_REQUEST), + STARLINK_EVENT_ATTR(write_request, CACHE_WRITE_REQUEST), + STARLINK_EVENT_ATTR(release_request, CACHE_RELEASE_REQUEST), + STARLINK_EVENT_ATTR(read_hit, CACHE_READ_HIT), + STARLINK_EVENT_ATTR(read_miss, CACHE_READ_MISS), + STARLINK_EVENT_ATTR(write_hit, CACHE_WRITE_HIT), + STARLINK_EVENT_ATTR(write_miss, CACHE_WRITE_MISS), + STARLINK_EVENT_ATTR(writeback, CACHE_WRITEBACK), + NULL +}; + +static const struct attribute_group 
starlink_pmu_events_attr_group = { + .name = "events", + .attrs = starlink_pmu_event_attrs, +}; + +static ssize_t +cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &starlink_pmu->cpumask); +} + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *starlink_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group starlink_pmu_cpumask_attr_group = { + .attrs = starlink_pmu_cpumask_attrs, +}; + +static const struct attribute_group *starlink_pmu_attr_groups[] = { + &starlink_pmu_format_attr_group, + &starlink_pmu_events_attr_group, + &starlink_pmu_cpumask_attr_group, + NULL +}; + +static void starlink_pmu_set_event_period(struct perf_event *event) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = event->hw.idx; + + /* + * Program counter to half of it's max count to handle + * cases of extreme interrupt latency. + */ + u64 val = STARLINK_PMU_COUNTER_MASK >> 1; + + local64_set(&hwc->prev_count, val); + if (hwc->config == STARLINK_CYCLES) + writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CYCLE_COUNTER); + else + writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_EVENT_COUNTER + + idx * sizeof(u64)); +} + +static void starlink_pmu_counter_start(struct perf_event *event, + struct starlink_pmu *starlink_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = event->hw.idx; + u64 val; + + /* + * Enable counter overflow interrupt[63:0], + * which is mapped as follow: + * + * event counter 0 - Bit [0] + * event counter 1 - Bit [1] + * ... + * cycle counter - Bit [63] + */ + val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE); + + if (hwc->config == STARLINK_CYCLES) { + /* + * Cycle count has its dedicated register, and it starts + * counting as soon as STARLINK_PMU_GLOBAL_ENABLE is set. 
+ */ + val |= STARLINK_PMU_CYCLE_OVERFLOW_MASK; + } else { + writeq(event->hw.config, starlink_pmu->pmu_base + + STARLINK_PMU_EVENT_SELECT + idx * sizeof(u64)); + + val |= BIT_ULL(idx); + } + + writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE); + + writeq(STARLINK_PMU_GLOBAL_ENABLE, starlink_pmu->pmu_base + + STARLINK_PMU_CONTROL); +} + +static void starlink_pmu_counter_stop(struct perf_event *event, + struct starlink_pmu *starlink_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = event->hw.idx; + u64 val; + + val = readq(starlink_pmu->pmu_base + STARLINK_PMU_CONTROL); + val &= ~STARLINK_PMU_GLOBAL_ENABLE; + writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_CONTROL); + + val = readq(starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE); + if (hwc->config == STARLINK_CYCLES) + val &= ~STARLINK_PMU_CYCLE_OVERFLOW_MASK; + else + val &= ~BIT_ULL(idx); + + writeq(val, starlink_pmu->pmu_base + STARLINK_PMU_INTERRUPT_ENABLE); +} + +static void starlink_pmu_update(struct perf_event *event) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + u64 prev_raw_count, new_raw_count; + u64 oldval; + u64 delta; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + if (hwc->config == STARLINK_CYCLES) + new_raw_count = readq(starlink_pmu->pmu_base + + STARLINK_PMU_CYCLE_COUNTER); + else + new_raw_count = readq(starlink_pmu->pmu_base + + STARLINK_PMU_EVENT_COUNTER + + idx * sizeof(u64)); + oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count); + } while (oldval != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & STARLINK_PMU_COUNTER_MASK; + local64_add(delta, &event->count); +} + +static void starlink_pmu_start(struct perf_event *event, int flags) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + starlink_pmu_set_event_period(event); + starlink_pmu_counter_start(event, starlink_pmu); + + perf_event_update_userpage(event); +} + +static void starlink_pmu_stop(struct perf_event *event, int flags) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (hwc->state & PERF_HES_STOPPED) + return; + + starlink_pmu_counter_stop(event, starlink_pmu); + starlink_pmu_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int starlink_pmu_add(struct perf_event *event, int flags) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct starlink_hw_events *hw_events = + this_cpu_ptr(starlink_pmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long *used_mask = hw_events->used_mask; + u32 n_events = STARLINK_PMU_NUM_COUNTERS; + int idx; + + /* + * Cycle counter has dedicated register to hold counter value. + * Event other than cycle count has to be enabled through + * event select register, and assigned with independent counter + * as they appear. 
+ */ + + if (hwc->config == STARLINK_CYCLES) { + idx = STARLINK_PMU_IDX_CYCLE_COUNTER; + } else { + idx = find_first_zero_bit(used_mask, n_events); + /* All counter are in use */ + if (idx < 0) + return idx; + + set_bit(idx, used_mask); + } + + hwc->idx = idx; + hw_events->events[idx] = event; + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + starlink_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static void starlink_pmu_del(struct perf_event *event, int flags) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct starlink_hw_events *hw_events = + this_cpu_ptr(starlink_pmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + + starlink_pmu_stop(event, PERF_EF_UPDATE); + hw_events->events[hwc->idx] = NULL; + clear_bit(hwc->idx, hw_events->used_mask); + + perf_event_update_userpage(event); +} + +static bool starlink_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct perf_event *sibling; + int counter = 1; + + /* + * Ensure hardware events in the group are on the same PMU, + * software events are acceptable. + */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return false; + + for_each_sibling_event(sibling, leader) { + if (sibling->pmu != event->pmu && !is_software_event(sibling)) + return false; + + counter++; + } + + return counter <= STARLINK_PMU_NUM_COUNTERS; +} + +static int starlink_pmu_event_init(struct perf_event *event) +{ + struct starlink_pmu *starlink_pmu = to_starlink_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * Sampling is not supported, as counters are shared + * by all CPU. + */ + if (hwc->sample_period) + return -EOPNOTSUPP; + + /* + * Per-task and attach to a task are not supported, + * as uncore events are not specific to any CPU. 
+ */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + if (!starlink_pmu_validate_event_group(event)) + return -EINVAL; + + hwc->idx = -1; + hwc->config = event->attr.config; + event->cpu = cpumask_first(&starlink_pmu->cpumask); + + return 0; +} + +static irqreturn_t starlink_pmu_handle_irq(int irq_num, void *data) +{ + struct starlink_pmu *starlink_pmu = data; + struct starlink_hw_events *hw_events = + this_cpu_ptr(starlink_pmu->hw_events); + bool handled = false; + int idx; + u64 overflow_status; + + for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) { + struct perf_event *event = hw_events->events[idx]; + + if (!event) + continue; + + overflow_status = readq(starlink_pmu->pmu_base + + STARLINK_PMU_COUNTER_OVERFLOW_STATUS); + if (!(overflow_status & BIT_ULL(idx))) + continue; + + writeq(BIT_ULL(idx), starlink_pmu->pmu_base + + STARLINK_PMU_COUNTER_OVERFLOW_STATUS); + + starlink_pmu_update(event); + starlink_pmu_set_event_period(event); + handled = true; + } + return IRQ_RETVAL(handled); +} + +static int starlink_setup_irqs(struct starlink_pmu *starlink_pmu, + struct platform_device *pdev) +{ + int ret, irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + ret = devm_request_irq(&pdev->dev, irq, starlink_pmu_handle_irq, + 0, STARLINK_PMU_PDEV_NAME, starlink_pmu); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to request IRQ\n"); + + starlink_pmu->irq = irq; + + return 0; +} + +static int starlink_pmu_pm_notify(struct notifier_block *b, + unsigned long cmd, void *v) +{ + struct starlink_pmu *starlink_pmu = container_of(b, struct starlink_pmu, + starlink_pmu_pm_nb); + struct starlink_hw_events *hw_events = + this_cpu_ptr(starlink_pmu->hw_events); + int enabled = bitmap_weight(hw_events->used_mask, + STARLINK_PMU_MAX_COUNTERS); + struct perf_event *event; + int idx; + + if (!enabled) + return NOTIFY_OK; + + for (idx = 0; idx < STARLINK_PMU_MAX_COUNTERS; idx++) { + event = hw_events->events[idx]; + if (!event) + continue; + + switch (cmd) { + case CPU_PM_ENTER: + /* Stop and update the counter */ + starlink_pmu_stop(event, PERF_EF_UPDATE); + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: + /* Restore and enable the counter */ + starlink_pmu_start(event, PERF_EF_RELOAD); + break; + default: + break; + } + } + + return NOTIFY_OK; +} + +static int starlink_pmu_pm_register(struct starlink_pmu *starlink_pmu) +{ + if (!IS_ENABLED(CONFIG_CPU_PM)) + return 0; + + starlink_pmu->starlink_pmu_pm_nb.notifier_call = starlink_pmu_pm_notify; + return cpu_pm_register_notifier(&starlink_pmu->starlink_pmu_pm_nb); +} + +static void starlink_pmu_pm_unregister(struct starlink_pmu *starlink_pmu) +{ + if (!IS_ENABLED(CONFIG_CPU_PM)) + return; + + cpu_pm_unregister_notifier(&starlink_pmu->starlink_pmu_pm_nb); +} + +static void starlink_pmu_destroy(struct starlink_pmu *starlink_pmu) +{ + starlink_pmu_pm_unregister(starlink_pmu); + cpuhp_state_remove_instance(starlink_pmu_cpuhp_state, + &starlink_pmu->node); +} + +static int starlink_pmu_probe(struct platform_device *pdev) +{ + struct starlink_pmu *starlink_pmu; + struct starlink_hw_events *hw_events; + struct resource *res; + int cpuid, i, ret; + + starlink_pmu = devm_kzalloc(&pdev->dev, sizeof(*starlink_pmu), GFP_KERNEL); + if (!starlink_pmu) + return -ENOMEM; + + starlink_pmu->pmu_base = + devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(starlink_pmu->pmu_base)) + return PTR_ERR(starlink_pmu->pmu_base); + + starlink_pmu->hw_events = alloc_percpu_gfp(struct 
starlink_hw_events, + GFP_KERNEL); + if (!starlink_pmu->hw_events) { + dev_err(&pdev->dev, "Failed to allocate per-cpu PMU data\n"); + return -ENOMEM; + } + + for_each_possible_cpu(cpuid) { + hw_events = per_cpu_ptr(starlink_pmu->hw_events, cpuid); + for (i = 0; i < STARLINK_PMU_MAX_COUNTERS; i++) + hw_events->events[i] = NULL; + } + + ret = starlink_setup_irqs(starlink_pmu, pdev); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(starlink_pmu_cpuhp_state, + &starlink_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Failed to register hotplug\n"); + return ret; + } + + ret = starlink_pmu_pm_register(starlink_pmu); + if (ret) { + cpuhp_state_remove_instance(starlink_pmu_cpuhp_state, + &starlink_pmu->node); + return ret; + } + + starlink_pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + .event_init = starlink_pmu_event_init, + .add = starlink_pmu_add, + .del = starlink_pmu_del, + .start = starlink_pmu_start, + .stop = starlink_pmu_stop, + .read = starlink_pmu_update, + .attr_groups = starlink_pmu_attr_groups, + }; + + ret = perf_pmu_register(&starlink_pmu->pmu, STARLINK_PMU_PDEV_NAME, -1); + if (ret) + starlink_pmu_destroy(starlink_pmu); + + return ret; +} + +static const struct of_device_id starlink_pmu_of_match[] = { + { .compatible = "starfive,jh8100-starlink-pmu" }, + {} +}; +MODULE_DEVICE_TABLE(of, starlink_pmu_of_match); + +static struct platform_driver starlink_pmu_driver = { + .driver = { + .name = STARLINK_PMU_PDEV_NAME, + .of_match_table = starlink_pmu_of_match, + .suppress_bind_attrs = true, + }, + .probe = starlink_pmu_probe, +}; + +static int +starlink_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct starlink_pmu *starlink_pmu = hlist_entry_safe(node, + struct starlink_pmu, + node); + + if (cpumask_empty(&starlink_pmu->cpumask)) + cpumask_set_cpu(cpu, &starlink_pmu->cpumask); + + WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(cpu))); + + return 0; +} + +static int +starlink_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct starlink_pmu *starlink_pmu = hlist_entry_safe(node, + struct starlink_pmu, + node); + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &starlink_pmu->cpumask)) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&starlink_pmu->pmu, cpu, target); + + cpumask_set_cpu(target, &starlink_pmu->cpumask); + WARN_ON(irq_set_affinity(starlink_pmu->irq, cpumask_of(target))); + + return 0; +} + +static int __init starlink_pmu_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "soc/starfive/starlink_pmu:online", + starlink_pmu_online_cpu, + starlink_pmu_offline_cpu); + if (ret < 0) + return ret; + + starlink_pmu_cpuhp_state = ret; + + return platform_driver_register(&starlink_pmu_driver); +} + +device_initcall(starlink_pmu_init); diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index 1edb9c0370..e16d10c763 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -993,7 +993,7 @@ static int tx2_uncore_probe(struct platform_device *pdev) return 0; } -static int tx2_uncore_remove(struct platform_device *pdev) +static void tx2_uncore_remove(struct platform_device *pdev) { struct tx2_uncore_pmu *tx2_pmu, *temp; struct device *dev = &pdev->dev; @@ -1009,7 +1009,6 @@ static int tx2_uncore_remove(struct platform_device *pdev) } } } - return 0; } static struct platform_driver tx2_uncore_driver = { @@ -1019,7 +1018,7 @@ static struct 
platform_driver tx2_uncore_driver = { .suppress_bind_attrs = true, }, .probe = tx2_uncore_probe, - .remove = tx2_uncore_remove, + .remove_new = tx2_uncore_remove, }; static int __init tx2_uncore_driver_init(void) diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 7ce344248d..0d49343d70 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1937,7 +1937,7 @@ xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus) } } -static int xgene_pmu_remove(struct platform_device *pdev) +static void xgene_pmu_remove(struct platform_device *pdev) { struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev); @@ -1947,13 +1947,11 @@ static int xgene_pmu_remove(struct platform_device *pdev) xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus); cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, &xgene_pmu->node); - - return 0; } static struct platform_driver xgene_pmu_driver = { .probe = xgene_pmu_probe, - .remove = xgene_pmu_remove, + .remove_new = xgene_pmu_remove, .driver = { .name = "xgene-pmu", .of_match_table = xgene_pmu_of_match, |
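
Most of the repetitive churn in the diff above is the conversion of platform drivers from the int-returning .remove callback to the void-returning .remove_new callback: each driver drops its trailing "return 0;" and switches the struct field. A minimal sketch of the pattern, using a made-up "foo" driver rather than any of the drivers touched here:

/*
 * Minimal sketch of the .remove_new conversion (hypothetical "foo" driver,
 * not one of the drivers changed above).
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* acquire resources, register the PMU with perf, etc. */
	return 0;
}

/*
 * The old callback was "static int foo_remove(...)" ending in "return 0;".
 * The driver core cannot do anything useful with an error at remove time,
 * so the new callback returns void and is wired up via .remove_new.
 */
static void foo_remove(struct platform_device *pdev)
{
	/* tear down in reverse order of probe; nothing to return */
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-pmu",
	},
	.probe		= foo_probe,
	.remove_new	= foo_remove,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");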
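
The new StarFive StarLink driver exposes its events through the usual perf sysfs event_source interface (the "event=0x%02llx" strings and the "config:0-31" format attribute above). A hedged userspace sketch of counting one of those events with perf_event_open(2): the PMU name comes from STARLINK_PMU_PDEV_NAME and 0x04008002 is the driver's CACHE_READ_MISS encoding; the sysfs path is the standard event_source layout, and the surrounding program is illustrative only.

/* Userspace sketch: count StarLink L3 read misses for one second. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			    int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	/* The dynamic PMU type id is exported by the driver at registration. */
	f = fopen("/sys/bus/event_source/devices/starfive_starlink_pmu/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1) {
		perror("pmu type");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x04008002;	/* CACHE_READ_MISS in the driver */

	/* Uncore PMU: must be a per-CPU event (cpu >= 0, pid == -1),
	 * which needs root or CAP_PERFMON. */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("read_miss: %llu\n", (unsigned long long)count);
	return 0;
}

Once the driver is loaded, the same event should also be reachable from the perf tool as starfive_starlink_pmu/read_miss/.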