author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit:    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree:      a94efe259b9009378be6d90eb30d2b019d95c194  /drivers/perf
parent:    Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/perf')
23 files changed, 17400 insertions, 0 deletions
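All of the PMU drivers added in this series follow the same perf-core registration pattern, which cci_pmu_init() in arm-cci.c below illustrates: a driver-private structure embeds a struct pmu, the standard callbacks are filled in, and perf_pmu_register() publishes the device. The sketch below shows only that skeleton; the example_* names are hypothetical and the counter-management callbacks (.add, .del, .start, .stop, .read) that each real driver provides are left out.

#include <linux/io.h>
#include <linux/module.h>
#include <linux/perf_event.h>

/* Hypothetical uncore PMU instance; the real drivers also track counters and IRQs. */
struct example_pmu {
	struct pmu pmu;		/* perf-core interface */
	void __iomem *base;	/* counter/control registers */
	int cpu;		/* CPU all events are routed to */
};

static int example_pmu_event_init(struct perf_event *event)
{
	/* Only accept events opened against this PMU's registered type. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Uncore PMUs are system-wide: no sampling, no per-task counting. */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	return 0;
}

static int example_pmu_register(struct example_pmu *epmu)
{
	epmu->pmu = (struct pmu) {
		.module		= THIS_MODULE,
		.task_ctx_nr	= perf_invalid_context,	/* system-wide only */
		.event_init	= example_pmu_event_init,
		/* .add, .del, .start, .stop, .read omitted in this sketch */
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	return perf_pmu_register(&epmu->pmu, "example_pmu", -1);
}

Once registered, the PMU appears under /sys/bus/event_source/devices/ and its events, format and cpumask attribute groups (as set up by the drivers below) become visible to the perf tool.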
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig new file mode 100644 index 000000000..130327ff0 --- /dev/null +++ b/drivers/perf/Kconfig @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Performance Monitor Drivers +# + +menu "Performance monitor support" + depends on PERF_EVENTS + +config ARM_CCI_PMU + tristate "ARM CCI PMU driver" + depends on (ARM && CPU_V7) || ARM64 + select ARM_CCI + help + Support for PMU events monitoring on the ARM CCI (Cache Coherent + Interconnect) family of products. + + If compiled as a module, it will be called arm-cci. + +config ARM_CCI400_PMU + bool "support CCI-400" + default y + depends on ARM_CCI_PMU + select ARM_CCI400_COMMON + help + CCI-400 provides 4 independent event counters counting events related + to the connected slave/master interfaces, plus a cycle counter. + +config ARM_CCI5xx_PMU + bool "support CCI-500/CCI-550" + default y + depends on ARM_CCI_PMU + help + CCI-500/CCI-550 both provide 8 independent event counters, which can + count events pertaining to the slave/master interfaces as well as the + internal events to the CCI. + +config ARM_CCN + tristate "ARM CCN driver support" + depends on ARM || ARM64 + help + PMU (perf) driver supporting the ARM CCN (Cache Coherent Network) + interconnect. + +config ARM_CMN + tristate "Arm CMN-600 PMU support" + depends on ARM64 || (COMPILE_TEST && 64BIT) + help + Support for PMU events monitoring on the Arm CMN-600 Coherent Mesh + Network interconnect. + +config ARM_PMU + depends on ARM || ARM64 + bool "ARM PMU framework" + default y + help + Say y if you want to use CPU performance monitors on ARM-based + systems. + +config ARM_PMU_ACPI + depends on ARM_PMU && ACPI + def_bool y + +config ARM_SMMU_V3_PMU + tristate "ARM SMMUv3 Performance Monitors Extension" + depends on ARM64 && ACPI && ARM_SMMU_V3 + help + Provides support for the ARM SMMUv3 Performance Monitor Counter + Groups (PMCG), which provide monitoring of transactions passing + through the SMMU and allow the resulting information to be filtered + based on the Stream ID of the corresponding master. + +config ARM_DSU_PMU + tristate "ARM DynamIQ Shared Unit (DSU) PMU" + depends on ARM64 + help + Provides support for performance monitor unit in ARM DynamIQ Shared + Unit (DSU). The DSU integrates one or more cores with an L3 memory + system, control logic. The PMU allows counting various events related + to DSU. + +config FSL_IMX8_DDR_PMU + tristate "Freescale i.MX8 DDR perf monitor" + depends on ARCH_MXC + help + Provides support for the DDR performance monitor in i.MX8, which + can give information about memory throughput and other related + events. + +config QCOM_L2_PMU + bool "Qualcomm Technologies L2-cache PMU" + depends on ARCH_QCOM && ARM64 && ACPI + select QCOM_KRYO_L2_ACCESSORS + help + Provides support for the L2 cache performance monitor unit (PMU) + in Qualcomm Technologies processors. + Adds the L2 cache PMU into the perf events subsystem for + monitoring L2 cache events. + +config QCOM_L3_PMU + bool "Qualcomm Technologies L3-cache PMU" + depends on ARCH_QCOM && ARM64 && ACPI + select QCOM_IRQ_COMBINER + help + Provides support for the L3 cache performance monitor unit (PMU) + in Qualcomm Technologies processors. + Adds the L3 cache PMU into the perf events subsystem for + monitoring L3 cache events. + +config THUNDERX2_PMU + tristate "Cavium ThunderX2 SoC PMU UNCORE" + depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA + default m + help + Provides support for ThunderX2 UNCORE events. 
+ The SoC has PMU support in its L3 cache controller (L3C) and + in the DDR4 Memory Controller (DMC). + +config XGENE_PMU + depends on ARCH_XGENE + bool "APM X-Gene SoC PMU" + default n + help + Say y if you want to use APM X-Gene SoC performance monitors. + +config ARM_SPE_PMU + tristate "Enable support for the ARMv8.2 Statistical Profiling Extension" + depends on ARM64 + help + Enable perf support for the ARMv8.2 Statistical Profiling + Extension, which provides periodic sampling of operations in + the CPU pipeline and reports this via the perf AUX interface. + +source "drivers/perf/hisilicon/Kconfig" + +endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile new file mode 100644 index 000000000..5365fd56f --- /dev/null +++ b/drivers/perf/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ARM_CCI_PMU) += arm-cci.o +obj-$(CONFIG_ARM_CCN) += arm-ccn.o +obj-$(CONFIG_ARM_CMN) += arm-cmn.o +obj-$(CONFIG_ARM_DSU_PMU) += arm_dsu_pmu.o +obj-$(CONFIG_ARM_PMU) += arm_pmu.o arm_pmu_platform.o +obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o +obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o +obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o +obj-$(CONFIG_HISI_PMU) += hisilicon/ +obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o +obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o +obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o +obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o +obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c new file mode 100644 index 000000000..87c4be9dd --- /dev/null +++ b/drivers/perf/arm-cci.c @@ -0,0 +1,1729 @@ +// SPDX-License-Identifier: GPL-2.0 +// CCI Cache Coherent Interconnect PMU driver +// Copyright (C) 2013-2018 Arm Ltd. +// Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com> + +#include <linux/arm-cci.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#define DRIVER_NAME "ARM-CCI PMU" + +#define CCI_PMCR 0x0100 +#define CCI_PID2 0x0fe8 + +#define CCI_PMCR_CEN 0x00000001 +#define CCI_PMCR_NCNT_MASK 0x0000f800 +#define CCI_PMCR_NCNT_SHIFT 11 + +#define CCI_PID2_REV_MASK 0xf0 +#define CCI_PID2_REV_SHIFT 4 + +#define CCI_PMU_EVT_SEL 0x000 +#define CCI_PMU_CNTR 0x004 +#define CCI_PMU_CNTR_CTRL 0x008 +#define CCI_PMU_OVRFLW 0x00c + +#define CCI_PMU_OVRFLW_FLAG 1 + +#define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) +#define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) +#define CCI_PMU_CNTR_MASK ((1ULL << 32) -1) +#define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) + +#define CCI_PMU_MAX_HW_CNTRS(model) \ + ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) + +/* Types of interfaces that can generate events */ +enum { + CCI_IF_SLAVE, + CCI_IF_MASTER, +#ifdef CONFIG_ARM_CCI5xx_PMU + CCI_IF_GLOBAL, +#endif + CCI_IF_MAX, +}; + +#define NUM_HW_CNTRS_CII_4XX 4 +#define NUM_HW_CNTRS_CII_5XX 8 +#define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX + +#define FIXED_HW_CNTRS_CII_4XX 1 +#define FIXED_HW_CNTRS_CII_5XX 0 +#define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX + +#define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX) + +struct event_range { + u32 min; + u32 max; +}; + +struct cci_pmu_hw_events { + struct perf_event **events; + unsigned long *used_mask; + raw_spinlock_t 
pmu_lock; +}; + +struct cci_pmu; +/* + * struct cci_pmu_model: + * @fixed_hw_cntrs - Number of fixed event counters + * @num_hw_cntrs - Maximum number of programmable event counters + * @cntr_size - Size of an event counter mapping + */ +struct cci_pmu_model { + char *name; + u32 fixed_hw_cntrs; + u32 num_hw_cntrs; + u32 cntr_size; + struct attribute **format_attrs; + struct attribute **event_attrs; + struct event_range event_ranges[CCI_IF_MAX]; + int (*validate_hw_event)(struct cci_pmu *, unsigned long); + int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); + void (*write_counters)(struct cci_pmu *, unsigned long *); +}; + +static struct cci_pmu_model cci_pmu_models[]; + +struct cci_pmu { + void __iomem *base; + void __iomem *ctrl_base; + struct pmu pmu; + int cpu; + int nr_irqs; + int *irqs; + unsigned long active_irqs; + const struct cci_pmu_model *model; + struct cci_pmu_hw_events hw_events; + struct platform_device *plat_device; + int num_cntrs; + atomic_t active_events; + struct mutex reserve_mutex; +}; + +#define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) + +static struct cci_pmu *g_cci_pmu; + +enum cci_models { +#ifdef CONFIG_ARM_CCI400_PMU + CCI400_R0, + CCI400_R1, +#endif +#ifdef CONFIG_ARM_CCI5xx_PMU + CCI500_R0, + CCI550_R0, +#endif + CCI_MODEL_MAX +}; + +static void pmu_write_counters(struct cci_pmu *cci_pmu, + unsigned long *mask); +static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +#define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ + &((struct dev_ext_attribute[]) { \ + { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \ + })[0].attr.attr + +#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config) +#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) + +/* CCI400 PMU Specific definitions */ + +#ifdef CONFIG_ARM_CCI400_PMU + +/* Port ids */ +#define CCI400_PORT_S0 0 +#define CCI400_PORT_S1 1 +#define CCI400_PORT_S2 2 +#define CCI400_PORT_S3 3 +#define CCI400_PORT_S4 4 +#define CCI400_PORT_M0 5 +#define CCI400_PORT_M1 6 +#define CCI400_PORT_M2 7 + +#define CCI400_R1_PX 5 + +/* + * Instead of an event id to monitor CCI cycles, a dedicated counter is + * provided. Use 0xff to represent CCI cycles and hope that no future revisions + * make use of this event in hardware. + */ +enum cci400_perf_events { + CCI400_PMU_CYCLES = 0xff +}; + +#define CCI400_PMU_CYCLE_CNTR_IDX 0 +#define CCI400_PMU_CNTR0_IDX 1 + +/* + * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 + * ports and bits 4:0 are event codes. There are different event codes + * associated with each port type. + * + * Additionally, the range of events associated with the port types changed + * between Rev0 and Rev1. + * + * The constants below define the range of valid codes for each port type for + * the different revisions and are used to validate the event to be monitored. 
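+ * For example, a config value of 0xb4 encodes source 0x5 (master interface M0)
+ * and event code 0x14, which on Rev0 is mi_retry_speculative_fetch.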
+ */ + +#define CCI400_PMU_EVENT_MASK 0xffUL +#define CCI400_PMU_EVENT_SOURCE_SHIFT 5 +#define CCI400_PMU_EVENT_SOURCE_MASK 0x7 +#define CCI400_PMU_EVENT_CODE_SHIFT 0 +#define CCI400_PMU_EVENT_CODE_MASK 0x1f +#define CCI400_PMU_EVENT_SOURCE(event) \ + ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ + CCI400_PMU_EVENT_SOURCE_MASK) +#define CCI400_PMU_EVENT_CODE(event) \ + ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) + +#define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 +#define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 +#define CCI400_R0_MASTER_PORT_MIN_EV 0x14 +#define CCI400_R0_MASTER_PORT_MAX_EV 0x1a + +#define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 +#define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 +#define CCI400_R1_MASTER_PORT_MIN_EV 0x00 +#define CCI400_R1_MASTER_PORT_MAX_EV 0x11 + +#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ + (unsigned long)_config) + +static ssize_t cci400_pmu_cycle_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static struct attribute *cci400_pmu_format_attrs[] = { + CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), + CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), + NULL +}; + +static struct attribute *cci400_r0_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), + /* Special event for cycles counter */ + CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), + NULL +}; + +static struct attribute *cci400_r1_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), 
+ CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), + /* Special event for cycles counter */ + CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), + NULL +}; + +static ssize_t cci400_pmu_cycle_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var); +} + +static int cci400_get_event_idx(struct cci_pmu *cci_pmu, + struct cci_pmu_hw_events *hw, + unsigned long cci_event) +{ + int idx; + + /* cycles event idx is fixed */ + if (cci_event == CCI400_PMU_CYCLES) { + if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) + return -EAGAIN; + + return CCI400_PMU_CYCLE_CNTR_IDX; + } + + for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) + if (!test_and_set_bit(idx, hw->used_mask)) + return idx; + + /* No counters available */ + return -EAGAIN; +} + +static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) +{ + u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); + u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI400_PMU_EVENT_MASK) + return -ENOENT; + + if (hw_event == CCI400_PMU_CYCLES) + return hw_event; + + switch (ev_source) { + case CCI400_PORT_S0: + case CCI400_PORT_S1: + case CCI400_PORT_S2: + case CCI400_PORT_S3: + case CCI400_PORT_S4: + /* Slave Interface */ + if_type = CCI_IF_SLAVE; + break; + case CCI400_PORT_M0: + case CCI400_PORT_M1: + case CCI400_PORT_M2: + /* Master Interface */ + if_type = CCI_IF_MASTER; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= 
cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} + +static int probe_cci400_revision(struct cci_pmu *cci_pmu) +{ + int rev; + rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; + rev >>= CCI_PID2_REV_SHIFT; + + if (rev < CCI400_R1_PX) + return CCI400_R0; + else + return CCI400_R1; +} + +static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu) +{ + if (platform_has_secure_cci_access()) + return &cci_pmu_models[probe_cci400_revision(cci_pmu)]; + return NULL; +} +#else /* !CONFIG_ARM_CCI400_PMU */ +static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu) +{ + return NULL; +} +#endif /* CONFIG_ARM_CCI400_PMU */ + +#ifdef CONFIG_ARM_CCI5xx_PMU + +/* + * CCI5xx PMU event id is an 9-bit value made of two parts. + * bits [8:5] - Source for the event + * bits [4:0] - Event code (specific to type of interface) + * + * + */ + +/* Port ids */ +#define CCI5xx_PORT_S0 0x0 +#define CCI5xx_PORT_S1 0x1 +#define CCI5xx_PORT_S2 0x2 +#define CCI5xx_PORT_S3 0x3 +#define CCI5xx_PORT_S4 0x4 +#define CCI5xx_PORT_S5 0x5 +#define CCI5xx_PORT_S6 0x6 + +#define CCI5xx_PORT_M0 0x8 +#define CCI5xx_PORT_M1 0x9 +#define CCI5xx_PORT_M2 0xa +#define CCI5xx_PORT_M3 0xb +#define CCI5xx_PORT_M4 0xc +#define CCI5xx_PORT_M5 0xd +#define CCI5xx_PORT_M6 0xe + +#define CCI5xx_PORT_GLOBAL 0xf + +#define CCI5xx_PMU_EVENT_MASK 0x1ffUL +#define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5 +#define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf +#define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0 +#define CCI5xx_PMU_EVENT_CODE_MASK 0x1f + +#define CCI5xx_PMU_EVENT_SOURCE(event) \ + ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK) +#define CCI5xx_PMU_EVENT_CODE(event) \ + ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK) + +#define CCI5xx_SLAVE_PORT_MIN_EV 0x00 +#define CCI5xx_SLAVE_PORT_MAX_EV 0x1f +#define CCI5xx_MASTER_PORT_MIN_EV 0x00 +#define CCI5xx_MASTER_PORT_MAX_EV 0x06 +#define CCI5xx_GLOBAL_PORT_MIN_EV 0x00 +#define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f + + +#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ + CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \ + (unsigned long) _config) + +static ssize_t cci5xx_pmu_global_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +static struct attribute *cci5xx_pmu_format_attrs[] = { + CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), + CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), + NULL, +}; + +static struct attribute *cci5xx_pmu_event_attrs[] = { + /* Slave events */ + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 
0x12), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), + CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), + CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), + CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), + CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), + CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), + CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), + CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), + + /* Master events */ + CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), + CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), + CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), + CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), + CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), + + /* Global events */ + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE), + CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), + NULL +}; + +static ssize_t cci5xx_pmu_global_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + /* Global events have single fixed source code */ + return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n", + (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); +} + +/* + * CCI500 provides 8 independent event counters that can count + * any of the events available. 
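+ * Unlike CCI-400 there is no dedicated cycle counter (FIXED_HW_CNTRS_CII_5XX
+ * is zero), so all eight counters are programmable.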
+ * CCI500 PMU event source ids + * 0x0-0x6 - Slave interfaces + * 0x8-0xD - Master interfaces + * 0xf - Global Events + * 0x7,0xe - Reserved + */ +static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, + unsigned long hw_event) +{ + u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); + u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI5xx_PMU_EVENT_MASK) + return -ENOENT; + + switch (ev_source) { + case CCI5xx_PORT_S0: + case CCI5xx_PORT_S1: + case CCI5xx_PORT_S2: + case CCI5xx_PORT_S3: + case CCI5xx_PORT_S4: + case CCI5xx_PORT_S5: + case CCI5xx_PORT_S6: + if_type = CCI_IF_SLAVE; + break; + case CCI5xx_PORT_M0: + case CCI5xx_PORT_M1: + case CCI5xx_PORT_M2: + case CCI5xx_PORT_M3: + case CCI5xx_PORT_M4: + case CCI5xx_PORT_M5: + if_type = CCI_IF_MASTER; + break; + case CCI5xx_PORT_GLOBAL: + if_type = CCI_IF_GLOBAL; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} + +/* + * CCI550 provides 8 independent event counters that can count + * any of the events available. + * CCI550 PMU event source ids + * 0x0-0x6 - Slave interfaces + * 0x8-0xe - Master interfaces + * 0xf - Global Events + * 0x7 - Reserved + */ +static int cci550_validate_hw_event(struct cci_pmu *cci_pmu, + unsigned long hw_event) +{ + u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); + u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); + int if_type; + + if (hw_event & ~CCI5xx_PMU_EVENT_MASK) + return -ENOENT; + + switch (ev_source) { + case CCI5xx_PORT_S0: + case CCI5xx_PORT_S1: + case CCI5xx_PORT_S2: + case CCI5xx_PORT_S3: + case CCI5xx_PORT_S4: + case CCI5xx_PORT_S5: + case CCI5xx_PORT_S6: + if_type = CCI_IF_SLAVE; + break; + case CCI5xx_PORT_M0: + case CCI5xx_PORT_M1: + case CCI5xx_PORT_M2: + case CCI5xx_PORT_M3: + case CCI5xx_PORT_M4: + case CCI5xx_PORT_M5: + case CCI5xx_PORT_M6: + if_type = CCI_IF_MASTER; + break; + case CCI5xx_PORT_GLOBAL: + if_type = CCI_IF_GLOBAL; + break; + default: + return -ENOENT; + } + + if (ev_code >= cci_pmu->model->event_ranges[if_type].min && + ev_code <= cci_pmu->model->event_ranges[if_type].max) + return hw_event; + + return -ENOENT; +} + +#endif /* CONFIG_ARM_CCI5xx_PMU */ + +/* + * Program the CCI PMU counters which have PERF_HES_ARCH set + * with the event period and mark them ready before we enable + * PMU. + */ +static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) +{ + int i; + struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; + DECLARE_BITMAP(mask, HW_CNTRS_MAX); + + bitmap_zero(mask, cci_pmu->num_cntrs); + for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { + struct perf_event *event = cci_hw->events[i]; + + if (WARN_ON(!event)) + continue; + + /* Leave the events which are not counting */ + if (event->hw.state & PERF_HES_STOPPED) + continue; + if (event->hw.state & PERF_HES_ARCH) { + set_bit(i, mask); + event->hw.state &= ~PERF_HES_ARCH; + } + } + + pmu_write_counters(cci_pmu, mask); +} + +/* Should be called with cci_pmu->hw_events->pmu_lock held */ +static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) +{ + u32 val; + + /* Enable all the PMU counters. 
*/ + val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; + writel(val, cci_pmu->ctrl_base + CCI_PMCR); +} + +/* Should be called with cci_pmu->hw_events->pmu_lock held */ +static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu) +{ + cci_pmu_sync_counters(cci_pmu); + __cci_pmu_enable_nosync(cci_pmu); +} + +/* Should be called with cci_pmu->hw_events->pmu_lock held */ +static void __cci_pmu_disable(struct cci_pmu *cci_pmu) +{ + u32 val; + + /* Disable all the PMU counters. */ + val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; + writel(val, cci_pmu->ctrl_base + CCI_PMCR); +} + +static ssize_t cci_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var); +} + +static ssize_t cci_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + /* source parameter is mandatory for normal PMU events */ + return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n", + (unsigned long)eattr->var); +} + +static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) +{ + return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); +} + +static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) +{ + return readl_relaxed(cci_pmu->base + + CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); +} + +static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, + int idx, unsigned int offset) +{ + writel_relaxed(value, cci_pmu->base + + CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); +} + +static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) +{ + pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); +} + +static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) +{ + pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); +} + +static bool __maybe_unused +pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx) +{ + return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0; +} + +static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) +{ + pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); +} + +/* + * For all counters on the CCI-PMU, disable any 'enabled' counters, + * saving the changed counters in the mask, so that we can restore + * it later using pmu_restore_counters. The mask is private to the + * caller. We cannot rely on the used_mask maintained by the CCI_PMU + * as it only tells us if the counter is assigned to perf_event or not. + * The state of the perf_event cannot be locked by the PMU layer, hence + * we check the individual counter status (which can be locked by + * cci_pm->hw_events->pmu_lock). + * + * @mask should be initialised to empty by the caller. + */ +static void __maybe_unused +pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask) +{ + int i; + + for (i = 0; i < cci_pmu->num_cntrs; i++) { + if (pmu_counter_is_enabled(cci_pmu, i)) { + set_bit(i, mask); + pmu_disable_counter(cci_pmu, i); + } + } +} + +/* + * Restore the status of the counters. Reversal of the pmu_save_counters(). + * For each counter set in the mask, enable the counter back. 
+ */ +static void __maybe_unused +pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) +{ + int i; + + for_each_set_bit(i, mask, cci_pmu->num_cntrs) + pmu_enable_counter(cci_pmu, i); +} + +/* + * Returns the number of programmable counters actually implemented + * by the cci + */ +static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu) +{ + return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & + CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; +} + +static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + unsigned long cci_event = event->hw.config_base; + int idx; + + if (cci_pmu->model->get_event_idx) + return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); + + /* Generic code to find an unused idx from the mask */ + for(idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) + if (!test_and_set_bit(idx, hw->used_mask)) + return idx; + + /* No counters available */ + return -EAGAIN; +} + +static int pmu_map_event(struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + + if (event->attr.type < PERF_TYPE_MAX || + !cci_pmu->model->validate_hw_event) + return -ENOENT; + + return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); +} + +static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) +{ + int i; + struct platform_device *pmu_device = cci_pmu->plat_device; + + if (unlikely(!pmu_device)) + return -ENODEV; + + if (cci_pmu->nr_irqs < 1) { + dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); + return -ENODEV; + } + + /* + * Register all available CCI PMU interrupts. In the interrupt handler + * we iterate over the counters checking for interrupt source (the + * overflowing counter) and clear it. + * + * This should allow handling of non-unique interrupt for the counters. 
+ */ + for (i = 0; i < cci_pmu->nr_irqs; i++) { + int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, + "arm-cci-pmu", cci_pmu); + if (err) { + dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", + cci_pmu->irqs[i]); + return err; + } + + set_bit(i, &cci_pmu->active_irqs); + } + + return 0; +} + +static void pmu_free_irq(struct cci_pmu *cci_pmu) +{ + int i; + + for (i = 0; i < cci_pmu->nr_irqs; i++) { + if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) + continue; + + free_irq(cci_pmu->irqs[i], cci_pmu); + } +} + +static u32 pmu_read_counter(struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct hw_perf_event *hw_counter = &event->hw; + int idx = hw_counter->idx; + u32 value; + + if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return 0; + } + value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); + + return value; +} + +static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx) +{ + pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); +} + +static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) +{ + int i; + struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; + + for_each_set_bit(i, mask, cci_pmu->num_cntrs) { + struct perf_event *event = cci_hw->events[i]; + + if (WARN_ON(!event)) + continue; + pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); + } +} + +static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) +{ + if (cci_pmu->model->write_counters) + cci_pmu->model->write_counters(cci_pmu, mask); + else + __pmu_write_counters(cci_pmu, mask); +} + +#ifdef CONFIG_ARM_CCI5xx_PMU + +/* + * CCI-500/CCI-550 has advanced power saving policies, which could gate the + * clocks to the PMU counters, which makes the writes to them ineffective. + * The only way to write to those counters is when the global counters + * are enabled and the particular counter is enabled. + * + * So we do the following : + * + * 1) Disable all the PMU counters, saving their current state + * 2) Enable the global PMU profiling, now that all counters are + * disabled. + * + * For each counter to be programmed, repeat steps 3-7: + * + * 3) Write an invalid event code to the event control register for the + counter, so that the counters are not modified. + * 4) Enable the counter control for the counter. + * 5) Set the counter value + * 6) Disable the counter + * 7) Restore the event in the target counter + * + * 8) Disable the global PMU. + * 9) Restore the status of the rest of the counters. + * + * We choose an event which for CCI-5xx is guaranteed not to count. + * We use the highest possible event code (0x1f) for the master interface 0. 
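+ * With the encoding macros above this works out to
+ * ((CCI5xx_PORT_M0 << 5) | 0x1f) = ((0x8 << 5) | 0x1f) = 0x11f.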
+ */ +#define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \ + (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT)) +static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) +{ + int i; + DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX); + + bitmap_zero(saved_mask, cci_pmu->num_cntrs); + pmu_save_counters(cci_pmu, saved_mask); + + /* + * Now that all the counters are disabled, we can safely turn the PMU on, + * without syncing the status of the counters + */ + __cci_pmu_enable_nosync(cci_pmu); + + for_each_set_bit(i, mask, cci_pmu->num_cntrs) { + struct perf_event *event = cci_pmu->hw_events.events[i]; + + if (WARN_ON(!event)) + continue; + + pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT); + pmu_enable_counter(cci_pmu, i); + pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); + pmu_disable_counter(cci_pmu, i); + pmu_set_event(cci_pmu, i, event->hw.config_base); + } + + __cci_pmu_disable(cci_pmu); + + pmu_restore_counters(cci_pmu, saved_mask); +} + +#endif /* CONFIG_ARM_CCI5xx_PMU */ + +static u64 pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK; + + local64_add(delta, &event->count); + + return new_raw_count; +} + +static void pmu_read(struct perf_event *event) +{ + pmu_event_update(event); +} + +static void pmu_event_set_period(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + /* + * The CCI PMU counters have a period of 2^32. To account for the + * possiblity of extreme interrupt latency we program for a period of + * half that. Hopefully we can handle the interrupt before another 2^31 + * events occur and the counter overtakes its previous value. + */ + u64 val = 1ULL << 31; + local64_set(&hwc->prev_count, val); + + /* + * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose + * values needs to be sync-ed with the s/w state before the PMU is + * enabled. + * Mark this counter for sync. + */ + hwc->state |= PERF_HES_ARCH; +} + +static irqreturn_t pmu_handle_irq(int irq_num, void *dev) +{ + unsigned long flags; + struct cci_pmu *cci_pmu = dev; + struct cci_pmu_hw_events *events = &cci_pmu->hw_events; + int idx, handled = IRQ_NONE; + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* Disable the PMU while we walk through the counters */ + __cci_pmu_disable(cci_pmu); + /* + * Iterate over counters and update the corresponding perf events. + * This should work regardless of whether we have per-counter overflow + * interrupt or a combined overflow interrupt. + */ + for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { + struct perf_event *event = events->events[idx]; + + if (!event) + continue; + + /* Did this counter overflow? 
*/ + if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & + CCI_PMU_OVRFLW_FLAG)) + continue; + + pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, + CCI_PMU_OVRFLW); + + pmu_event_update(event); + pmu_event_set_period(event); + handled = IRQ_HANDLED; + } + + /* Enable the PMU and sync possibly overflowed counters */ + __cci_pmu_enable_sync(cci_pmu); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); + + return IRQ_RETVAL(handled); +} + +static int cci_pmu_get_hw(struct cci_pmu *cci_pmu) +{ + int ret = pmu_request_irq(cci_pmu, pmu_handle_irq); + if (ret) { + pmu_free_irq(cci_pmu); + return ret; + } + return 0; +} + +static void cci_pmu_put_hw(struct cci_pmu *cci_pmu) +{ + pmu_free_irq(cci_pmu); +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + atomic_t *active_events = &cci_pmu->active_events; + struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; + + if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) { + cci_pmu_put_hw(cci_pmu); + mutex_unlock(reserve_mutex); + } +} + +static void cci_pmu_enable(struct pmu *pmu) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs); + unsigned long flags; + + if (!enabled) + return; + + raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); + __cci_pmu_enable_sync(cci_pmu); + raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); + +} + +static void cci_pmu_disable(struct pmu *pmu) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + unsigned long flags; + + raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); + __cci_pmu_disable(cci_pmu); + raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); +} + +/* + * Check if the idx represents a non-programmable counter. + * All the fixed event counters are mapped before the programmable + * counters. + */ +static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) +{ + return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); +} + +static void cci_pmu_start(struct perf_event *event, int pmu_flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + unsigned long flags; + + /* + * To handle interrupt latency, we always reprogram the period + * regardlesss of PERF_EF_RELOAD. 
+ */ + if (pmu_flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return; + } + + raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); + + /* Configure the counter unless you are counting a fixed event */ + if (!pmu_fixed_hw_idx(cci_pmu, idx)) + pmu_set_event(cci_pmu, idx, hwc->config_base); + + pmu_event_set_period(event); + pmu_enable_counter(cci_pmu, idx); + + raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); +} + +static void cci_pmu_stop(struct perf_event *event, int pmu_flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (hwc->state & PERF_HES_STOPPED) + return; + + if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { + dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); + return; + } + + /* + * We always reprogram the counter, so ignore PERF_EF_UPDATE. See + * cci_pmu_start() + */ + pmu_disable_counter(cci_pmu, idx); + pmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int cci_pmu_add(struct perf_event *event, int flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx; + + /* If we don't have a space for the counter then finish early. */ + idx = pmu_get_event_idx(hw_events, event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + hw_events->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + cci_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + + return 0; +} + +static void cci_pmu_del(struct perf_event *event, int flags) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + cci_pmu_stop(event, PERF_EF_UPDATE); + hw_events->events[idx] = NULL; + clear_bit(idx, hw_events->used_mask); + + perf_event_update_userpage(event); +} + +static int validate_event(struct pmu *cci_pmu, + struct cci_pmu_hw_events *hw_events, + struct perf_event *event) +{ + if (is_software_event(event)) + return 1; + + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). + */ + if (event->pmu != cci_pmu) + return 0; + + if (event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) + return 1; + + return pmu_get_event_idx(hw_events, event) >= 0; +} + +static int validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)]; + struct cci_pmu_hw_events fake_pmu = { + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. 
+ */ + .used_mask = mask, + }; + memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long)); + + if (!validate_event(event->pmu, &fake_pmu, leader)) + return -EINVAL; + + for_each_sibling_event(sibling, leader) { + if (!validate_event(event->pmu, &fake_pmu, sibling)) + return -EINVAL; + } + + if (!validate_event(event->pmu, &fake_pmu, event)) + return -EINVAL; + + return 0; +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int mapping; + + mapping = pmu_map_event(event); + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. + */ + hwc->idx = -1; + hwc->config_base = 0; + hwc->config = 0; + hwc->event_base = 0; + + /* + * Store the event encoding into the config_base field. + */ + hwc->config_base |= (unsigned long)mapping; + + if (event->group_leader != event) { + if (validate_group(event) != 0) + return -EINVAL; + } + + return 0; +} + +static int cci_pmu_event_init(struct perf_event *event) +{ + struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); + atomic_t *active_events = &cci_pmu->active_events; + int err = 0; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Shared by all CPUs, no meaningful state to sample */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + /* + * Following the example set by other "uncore" PMUs, we accept any CPU + * and rewrite its affinity dynamically rather than having perf core + * handle cpu == -1 and pid == -1 for this case. + * + * The perf core will pin online CPUs for the duration of this call and + * the event being installed into its context, so the PMU's CPU can't + * change under our feet. 
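+ * The chosen CPU is advertised to userspace via the "cpumask" sysfs attribute
+ * (pmu_cpumask_attr below), which the perf tool consults for uncore PMUs.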
+ */ + if (event->cpu < 0) + return -EINVAL; + event->cpu = cci_pmu->cpu; + + event->destroy = hw_perf_event_destroy; + if (!atomic_inc_not_zero(active_events)) { + mutex_lock(&cci_pmu->reserve_mutex); + if (atomic_read(active_events) == 0) + err = cci_pmu_get_hw(cci_pmu); + if (!err) + atomic_inc(active_events); + mutex_unlock(&cci_pmu->reserve_mutex); + } + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static ssize_t pmu_cpumask_attr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct cci_pmu *cci_pmu = to_cci_pmu(pmu); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu)); +} + +static struct device_attribute pmu_cpumask_attr = + __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL); + +static struct attribute *pmu_attrs[] = { + &pmu_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group pmu_attr_group = { + .attrs = pmu_attrs, +}; + +static struct attribute_group pmu_format_attr_group = { + .name = "format", + .attrs = NULL, /* Filled in cci_pmu_init_attrs */ +}; + +static struct attribute_group pmu_event_attr_group = { + .name = "events", + .attrs = NULL, /* Filled in cci_pmu_init_attrs */ +}; + +static const struct attribute_group *pmu_attr_groups[] = { + &pmu_attr_group, + &pmu_format_attr_group, + &pmu_event_attr_group, + NULL +}; + +static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) +{ + const struct cci_pmu_model *model = cci_pmu->model; + char *name = model->name; + u32 num_cntrs; + + if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX)) + return -EINVAL; + if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX)) + return -EINVAL; + + pmu_event_attr_group.attrs = model->event_attrs; + pmu_format_attr_group.attrs = model->format_attrs; + + cci_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, + .name = cci_pmu->model->name, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = cci_pmu_enable, + .pmu_disable = cci_pmu_disable, + .event_init = cci_pmu_event_init, + .add = cci_pmu_add, + .del = cci_pmu_del, + .start = cci_pmu_start, + .stop = cci_pmu_stop, + .read = pmu_read, + .attr_groups = pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + cci_pmu->plat_device = pdev; + num_cntrs = pmu_get_max_counters(cci_pmu); + if (num_cntrs > cci_pmu->model->num_hw_cntrs) { + dev_warn(&pdev->dev, + "PMU implements more counters(%d) than supported by" + " the model(%d), truncated.", + num_cntrs, cci_pmu->model->num_hw_cntrs); + num_cntrs = cci_pmu->model->num_hw_cntrs; + } + cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; + + return perf_pmu_register(&cci_pmu->pmu, name, -1); +} + +static int cci_pmu_offline_cpu(unsigned int cpu) +{ + int target; + + if (!g_cci_pmu || cpu != g_cci_pmu->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target); + g_cci_pmu->cpu = target; + return 0; +} + +static __maybe_unused struct cci_pmu_model cci_pmu_models[] = { +#ifdef CONFIG_ARM_CCI400_PMU + [CCI400_R0] = { + .name = "CCI_400", + .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ + .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, + .cntr_size = SZ_4K, + .format_attrs = cci400_pmu_format_attrs, + .event_attrs = cci400_r0_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI400_R0_SLAVE_PORT_MIN_EV, + CCI400_R0_SLAVE_PORT_MAX_EV, + }, + 
[CCI_IF_MASTER] = { + CCI400_R0_MASTER_PORT_MIN_EV, + CCI400_R0_MASTER_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci400_validate_hw_event, + .get_event_idx = cci400_get_event_idx, + }, + [CCI400_R1] = { + .name = "CCI_400_r1", + .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ + .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, + .cntr_size = SZ_4K, + .format_attrs = cci400_pmu_format_attrs, + .event_attrs = cci400_r1_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI400_R1_SLAVE_PORT_MIN_EV, + CCI400_R1_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI400_R1_MASTER_PORT_MIN_EV, + CCI400_R1_MASTER_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci400_validate_hw_event, + .get_event_idx = cci400_get_event_idx, + }, +#endif +#ifdef CONFIG_ARM_CCI5xx_PMU + [CCI500_R0] = { + .name = "CCI_500", + .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, + .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, + .cntr_size = SZ_64K, + .format_attrs = cci5xx_pmu_format_attrs, + .event_attrs = cci5xx_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI5xx_SLAVE_PORT_MIN_EV, + CCI5xx_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI5xx_MASTER_PORT_MIN_EV, + CCI5xx_MASTER_PORT_MAX_EV, + }, + [CCI_IF_GLOBAL] = { + CCI5xx_GLOBAL_PORT_MIN_EV, + CCI5xx_GLOBAL_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci500_validate_hw_event, + .write_counters = cci5xx_pmu_write_counters, + }, + [CCI550_R0] = { + .name = "CCI_550", + .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, + .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, + .cntr_size = SZ_64K, + .format_attrs = cci5xx_pmu_format_attrs, + .event_attrs = cci5xx_pmu_event_attrs, + .event_ranges = { + [CCI_IF_SLAVE] = { + CCI5xx_SLAVE_PORT_MIN_EV, + CCI5xx_SLAVE_PORT_MAX_EV, + }, + [CCI_IF_MASTER] = { + CCI5xx_MASTER_PORT_MIN_EV, + CCI5xx_MASTER_PORT_MAX_EV, + }, + [CCI_IF_GLOBAL] = { + CCI5xx_GLOBAL_PORT_MIN_EV, + CCI5xx_GLOBAL_PORT_MAX_EV, + }, + }, + .validate_hw_event = cci550_validate_hw_event, + .write_counters = cci5xx_pmu_write_counters, + }, +#endif +}; + +static const struct of_device_id arm_cci_pmu_matches[] = { +#ifdef CONFIG_ARM_CCI400_PMU + { + .compatible = "arm,cci-400-pmu", + .data = NULL, + }, + { + .compatible = "arm,cci-400-pmu,r0", + .data = &cci_pmu_models[CCI400_R0], + }, + { + .compatible = "arm,cci-400-pmu,r1", + .data = &cci_pmu_models[CCI400_R1], + }, +#endif +#ifdef CONFIG_ARM_CCI5xx_PMU + { + .compatible = "arm,cci-500-pmu,r0", + .data = &cci_pmu_models[CCI500_R0], + }, + { + .compatible = "arm,cci-550-pmu,r0", + .data = &cci_pmu_models[CCI550_R0], + }, +#endif + {}, +}; +MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches); + +static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) + if (irq == irqs[i]) + return true; + + return false; +} + +static struct cci_pmu *cci_pmu_alloc(struct device *dev) +{ + struct cci_pmu *cci_pmu; + const struct cci_pmu_model *model; + + /* + * All allocations are devm_* hence we don't have to free + * them explicitly on an error, as it would end up in driver + * detach. 
+ */ + cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL); + if (!cci_pmu) + return ERR_PTR(-ENOMEM); + + cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data; + + model = of_device_get_match_data(dev); + if (!model) { + dev_warn(dev, + "DEPRECATED compatible property, requires secure access to CCI registers"); + model = probe_cci_model(cci_pmu); + } + if (!model) { + dev_warn(dev, "CCI PMU version not supported\n"); + return ERR_PTR(-ENODEV); + } + + cci_pmu->model = model; + cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), + sizeof(*cci_pmu->irqs), GFP_KERNEL); + if (!cci_pmu->irqs) + return ERR_PTR(-ENOMEM); + cci_pmu->hw_events.events = devm_kcalloc(dev, + CCI_PMU_MAX_HW_CNTRS(model), + sizeof(*cci_pmu->hw_events.events), + GFP_KERNEL); + if (!cci_pmu->hw_events.events) + return ERR_PTR(-ENOMEM); + cci_pmu->hw_events.used_mask = devm_kcalloc(dev, + BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), + sizeof(*cci_pmu->hw_events.used_mask), + GFP_KERNEL); + if (!cci_pmu->hw_events.used_mask) + return ERR_PTR(-ENOMEM); + + return cci_pmu; +} + +static int cci_pmu_probe(struct platform_device *pdev) +{ + struct cci_pmu *cci_pmu; + int i, ret, irq; + + cci_pmu = cci_pmu_alloc(&pdev->dev); + if (IS_ERR(cci_pmu)) + return PTR_ERR(cci_pmu); + + cci_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cci_pmu->base)) + return -ENOMEM; + + /* + * CCI PMU has one overflow interrupt per counter; but some may be tied + * together to a common interrupt. + */ + cci_pmu->nr_irqs = 0; + for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) + break; + + if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) + continue; + + cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; + } + + /* + * Ensure that the device tree has as many interrupts as the number + * of counters. 
+ */ + if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { + dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n", + i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); + return -EINVAL; + } + + raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); + mutex_init(&cci_pmu->reserve_mutex); + atomic_set(&cci_pmu->active_events, 0); + + cci_pmu->cpu = raw_smp_processor_id(); + g_cci_pmu = cci_pmu; + cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, + "perf/arm/cci:online", NULL, + cci_pmu_offline_cpu); + + ret = cci_pmu_init(cci_pmu, pdev); + if (ret) + goto error_pmu_init; + + pr_info("ARM %s PMU driver probed", cci_pmu->model->name); + return 0; + +error_pmu_init: + cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); + g_cci_pmu = NULL; + return ret; +} + +static int cci_pmu_remove(struct platform_device *pdev) +{ + if (!g_cci_pmu) + return 0; + + cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); + perf_pmu_unregister(&g_cci_pmu->pmu); + g_cci_pmu = NULL; + + return 0; +} + +static struct platform_driver cci_pmu_driver = { + .driver = { + .name = DRIVER_NAME, + .of_match_table = arm_cci_pmu_matches, + .suppress_bind_attrs = true, + }, + .probe = cci_pmu_probe, + .remove = cci_pmu_remove, +}; + +module_platform_driver(cci_pmu_driver); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ARM CCI PMU support"); diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c new file mode 100644 index 000000000..a0a71c1df --- /dev/null +++ b/drivers/perf/arm-ccn.c @@ -0,0 +1,1583 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/ctype.h> +#include <linux/hrtimer.h> +#include <linux/idr.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define CCN_NUM_XP_PORTS 2 +#define CCN_NUM_VCS 4 +#define CCN_NUM_REGIONS 256 +#define CCN_REGION_SIZE 0x10000 + +#define CCN_ALL_OLY_ID 0xff00 +#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0 +#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f +#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8 +#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f + +#define CCN_MN_ERRINT_STATUS 0x0008 +#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20 +#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40 +#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80 +#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88 +#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0 +#define CCN_MN_ERR_SIG_VAL_63_0 0x0300 +#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1) + +#define CCN_DT_ACTIVE_DSM 0x0000 +#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8) +#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff +#define CCN_DT_CTL 0x0028 +#define CCN_DT_CTL__DT_EN (1 << 0) +#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8) +#define CCN_DT_PMCCNTR 0x0140 +#define CCN_DT_PMCCNTRSR 0x0190 +#define CCN_DT_PMOVSR 0x0198 +#define CCN_DT_PMOVSR_CLR 0x01a0 +#define CCN_DT_PMOVSR_CLR__MASK 0x1f +#define CCN_DT_PMCR 0x01a8 +#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6) +#define CCN_DT_PMCR__PMU_EN (1 << 0) +#define CCN_DT_PMSR 0x01b0 +#define CCN_DT_PMSR_REQ 0x01b8 +#define CCN_DT_PMSR_CLR 0x01c0 + 
+#define CCN_HNF_PMU_EVENT_SEL 0x0600 +#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_XP_DT_CONFIG 0x0300 +#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4) +#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf +#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0 +#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1 +#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n)) +#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n)) +#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n)) +#define CCN_XP_DT_INTERFACE_SEL 0x0308 +#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1 +#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1 +#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8) +#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3 +#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40) +#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40) +#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40) +#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40) +#define CCN_XP_DT_CONTROL 0x0370 +#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0) +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4) +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf +#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf +#define CCN_XP_PMU_EVENT_SEL 0x0600 +#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7) +#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f + +#define CCN_SBAS_PMU_EVENT_SEL 0x0600 +#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_RNI_PMU_EVENT_SEL 0x0600 +#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4) +#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf + +#define CCN_TYPE_MN 0x01 +#define CCN_TYPE_DT 0x02 +#define CCN_TYPE_HNF 0x04 +#define CCN_TYPE_HNI 0x05 +#define CCN_TYPE_XP 0x08 +#define CCN_TYPE_SBSX 0x0c +#define CCN_TYPE_SBAS 0x10 +#define CCN_TYPE_RNI_1P 0x14 +#define CCN_TYPE_RNI_2P 0x15 +#define CCN_TYPE_RNI_3P 0x16 +#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */ +#define CCN_TYPE_RND_2P 0x19 +#define CCN_TYPE_RND_3P 0x1a +#define CCN_TYPE_CYCLES 0xff /* Pseudotype */ + +#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */ + +#define CCN_NUM_PMU_EVENTS 4 +#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */ +#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */ +#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS + +#define CCN_NUM_PREDEFINED_MASKS 4 +#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0) +#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1) +#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2) +#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3) + +struct arm_ccn_component { + void __iomem *base; + u32 type; + + DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS); + union { + struct { + DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS); + } xp; + }; +}; + +#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \ + struct arm_ccn_dt, pmu), struct arm_ccn, dt) + +struct arm_ccn_dt { + int id; + void __iomem *base; + + spinlock_t config_lock; + + DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1); + struct { + struct arm_ccn_component *source; + struct perf_event *event; + } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1]; + + struct { + u64 l, h; + } 
cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS]; + + struct hrtimer hrtimer; + + unsigned int cpu; + struct hlist_node node; + + struct pmu pmu; +}; + +struct arm_ccn { + struct device *dev; + void __iomem *base; + unsigned int irq; + + unsigned sbas_present:1; + unsigned sbsx_present:1; + + int num_nodes; + struct arm_ccn_component *node; + + int num_xps; + struct arm_ccn_component *xp; + + struct arm_ccn_dt dt; + int mn_id; +}; + +static int arm_ccn_node_to_xp(int node) +{ + return node / CCN_NUM_XP_PORTS; +} + +static int arm_ccn_node_to_xp_port(int node) +{ + return node % CCN_NUM_XP_PORTS; +} + + +/* + * Bit shifts and masks in these defines must be kept in sync with + * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below! + */ +#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff) +#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff) +#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff) +#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff) +#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3) +#define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3) +#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7) +#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1) +#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf) + +static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port) +{ + *config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24)); + *config |= (node_xp << 0) | (type << 8) | (port << 24); +} + +static ssize_t arm_ccn_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *ea = container_of(attr, + struct dev_ext_attribute, attr); + + return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var); +} + +#define CCN_FORMAT_ATTR(_name, _config) \ + struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \ + { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \ + NULL), _config } + +static CCN_FORMAT_ATTR(node, "config:0-7"); +static CCN_FORMAT_ATTR(xp, "config:0-7"); +static CCN_FORMAT_ATTR(type, "config:8-15"); +static CCN_FORMAT_ATTR(event, "config:16-23"); +static CCN_FORMAT_ATTR(port, "config:24-25"); +static CCN_FORMAT_ATTR(bus, "config:24-25"); +static CCN_FORMAT_ATTR(vc, "config:26-28"); +static CCN_FORMAT_ATTR(dir, "config:29-29"); +static CCN_FORMAT_ATTR(mask, "config:30-33"); +static CCN_FORMAT_ATTR(cmp_l, "config1:0-62"); +static CCN_FORMAT_ATTR(cmp_h, "config2:0-59"); + +static struct attribute *arm_ccn_pmu_format_attrs[] = { + &arm_ccn_pmu_format_attr_node.attr.attr, + &arm_ccn_pmu_format_attr_xp.attr.attr, + &arm_ccn_pmu_format_attr_type.attr.attr, + &arm_ccn_pmu_format_attr_event.attr.attr, + &arm_ccn_pmu_format_attr_port.attr.attr, + &arm_ccn_pmu_format_attr_bus.attr.attr, + &arm_ccn_pmu_format_attr_vc.attr.attr, + &arm_ccn_pmu_format_attr_dir.attr.attr, + &arm_ccn_pmu_format_attr_mask.attr.attr, + &arm_ccn_pmu_format_attr_cmp_l.attr.attr, + &arm_ccn_pmu_format_attr_cmp_h.attr.attr, + NULL +}; + +static const struct attribute_group arm_ccn_pmu_format_attr_group = { + .name = "format", + .attrs = arm_ccn_pmu_format_attrs, +}; + + +struct arm_ccn_pmu_event { + struct device_attribute attr; + u32 type; + u32 event; + int num_ports; + int num_vcs; + const char *def; + int mask; +}; + +#define CCN_EVENT_ATTR(_name) \ + __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL) + +/* + * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on + * their ports in XP they are connected to. 
For the sake of usability they are + * explicitly defined here (and translated into a relevant watchpoint in + * arm_ccn_pmu_event_init()) so the user can easily request them without deep + * knowledge of the flit format. + */ + +#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \ + .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \ + .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \ + .def = _def, .mask = _mask, } + +#define CCN_EVENT_HNI(_name, _def, _mask) { \ + .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \ + .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ + .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } + +#define CCN_EVENT_SBSX(_name, _def, _mask) { \ + .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \ + .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \ + .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, } + +#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \ + .type = CCN_TYPE_HNF, .event = _event, } + +#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \ + .type = CCN_TYPE_XP, .event = _event, \ + .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, } + +/* + * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending + * on configuration. One of them is picked to represent the whole group, + * as they all share the same event types. + */ +#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \ + .type = CCN_TYPE_RNI_3P, .event = _event, } + +#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \ + .type = CCN_TYPE_SBAS, .event = _event, } + +#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \ + .type = CCN_TYPE_CYCLES } + + +static ssize_t arm_ccn_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + struct arm_ccn_pmu_event *event = container_of(attr, + struct arm_ccn_pmu_event, attr); + ssize_t res; + + res = scnprintf(buf, PAGE_SIZE, "type=0x%x", event->type); + if (event->event) + res += scnprintf(buf + res, PAGE_SIZE - res, ",event=0x%x", + event->event); + if (event->def) + res += scnprintf(buf + res, PAGE_SIZE - res, ",%s", + event->def); + if (event->mask) + res += scnprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x", + event->mask); + + /* Arguments required by an event */ + switch (event->type) { + case CCN_TYPE_CYCLES: + break; + case CCN_TYPE_XP: + res += scnprintf(buf + res, PAGE_SIZE - res, + ",xp=?,vc=?"); + if (event->event == CCN_EVENT_WATCHPOINT) + res += scnprintf(buf + res, PAGE_SIZE - res, + ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?"); + else + res += scnprintf(buf + res, PAGE_SIZE - res, + ",bus=?"); + + break; + case CCN_TYPE_MN: + res += scnprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id); + break; + default: + res += scnprintf(buf + res, PAGE_SIZE - res, ",node=?"); + break; + } + + res += scnprintf(buf + res, PAGE_SIZE - res, "\n"); + + return res; +} + +static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + struct device_attribute *dev_attr = container_of(attr, + struct device_attribute, attr); + struct arm_ccn_pmu_event *event = container_of(dev_attr, + struct arm_ccn_pmu_event, attr); + + if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present) + return 0; + if (event->type == 
CCN_TYPE_SBSX && !ccn->sbsx_present) + return 0; + + return attr->mode; +} + +static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = { + CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE), + CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE), + CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", + CCN_IDX_MASK_ORDER), + CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY), + CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000", + CCN_IDX_MASK_ORDER), + CCN_EVENT_HNF(cache_miss, 0x1), + CCN_EVENT_HNF(l3_sf_cache_access, 0x02), + CCN_EVENT_HNF(cache_fill, 0x3), + CCN_EVENT_HNF(pocq_retry, 0x4), + CCN_EVENT_HNF(pocq_reqs_recvd, 0x5), + CCN_EVENT_HNF(sf_hit, 0x6), + CCN_EVENT_HNF(sf_evictions, 0x7), + CCN_EVENT_HNF(snoops_sent, 0x8), + CCN_EVENT_HNF(snoops_broadcast, 0x9), + CCN_EVENT_HNF(l3_eviction, 0xa), + CCN_EVENT_HNF(l3_fill_invalid_way, 0xb), + CCN_EVENT_HNF(mc_retries, 0xc), + CCN_EVENT_HNF(mc_reqs, 0xd), + CCN_EVENT_HNF(qos_hh_retry, 0xe), + CCN_EVENT_RNI(rdata_beats_p0, 0x1), + CCN_EVENT_RNI(rdata_beats_p1, 0x2), + CCN_EVENT_RNI(rdata_beats_p2, 0x3), + CCN_EVENT_RNI(rxdat_flits, 0x4), + CCN_EVENT_RNI(txdat_flits, 0x5), + CCN_EVENT_RNI(txreq_flits, 0x6), + CCN_EVENT_RNI(txreq_flits_retried, 0x7), + CCN_EVENT_RNI(rrt_full, 0x8), + CCN_EVENT_RNI(wrt_full, 0x9), + CCN_EVENT_RNI(txreq_flits_replayed, 0xa), + CCN_EVENT_XP(upload_starvation, 0x1), + CCN_EVENT_XP(download_starvation, 0x2), + CCN_EVENT_XP(respin, 0x3), + CCN_EVENT_XP(valid_flit, 0x4), + CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT), + CCN_EVENT_SBAS(rdata_beats_p0, 0x1), + CCN_EVENT_SBAS(rxdat_flits, 0x4), + CCN_EVENT_SBAS(txdat_flits, 0x5), + CCN_EVENT_SBAS(txreq_flits, 0x6), + CCN_EVENT_SBAS(txreq_flits_retried, 0x7), + CCN_EVENT_SBAS(rrt_full, 0x8), + CCN_EVENT_SBAS(wrt_full, 0x9), + CCN_EVENT_SBAS(txreq_flits_replayed, 0xa), + CCN_EVENT_CYCLES(cycles), +}; + +/* Populated in arm_ccn_init() */ +static struct attribute + *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1]; + +static const struct attribute_group arm_ccn_pmu_events_attr_group = { + .name = "events", + .is_visible = arm_ccn_pmu_events_is_visible, + .attrs = arm_ccn_pmu_events_attrs, +}; + + +static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name) +{ + unsigned long i; + + if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1])) + return NULL; + i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a'; + + switch (name[1]) { + case 'l': + return &ccn->dt.cmp_mask[i].l; + case 'h': + return &ccn->dt.cmp_mask[i].h; + default: + return NULL; + } +} + +static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); + + return mask ? 
snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL; +} + +static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name); + int err = -EINVAL; + + if (mask) + err = kstrtoull(buf, 0, mask); + + return err ? err : count; +} + +#define CCN_CMP_MASK_ATTR(_name) \ + struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ + __ATTR(_name, S_IRUGO | S_IWUSR, \ + arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store) + +#define CCN_CMP_MASK_ATTR_RO(_name) \ + struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \ + __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL) + +static CCN_CMP_MASK_ATTR(0l); +static CCN_CMP_MASK_ATTR(0h); +static CCN_CMP_MASK_ATTR(1l); +static CCN_CMP_MASK_ATTR(1h); +static CCN_CMP_MASK_ATTR(2l); +static CCN_CMP_MASK_ATTR(2h); +static CCN_CMP_MASK_ATTR(3l); +static CCN_CMP_MASK_ATTR(3h); +static CCN_CMP_MASK_ATTR(4l); +static CCN_CMP_MASK_ATTR(4h); +static CCN_CMP_MASK_ATTR(5l); +static CCN_CMP_MASK_ATTR(5h); +static CCN_CMP_MASK_ATTR(6l); +static CCN_CMP_MASK_ATTR(6h); +static CCN_CMP_MASK_ATTR(7l); +static CCN_CMP_MASK_ATTR(7h); +static CCN_CMP_MASK_ATTR_RO(8l); +static CCN_CMP_MASK_ATTR_RO(8h); +static CCN_CMP_MASK_ATTR_RO(9l); +static CCN_CMP_MASK_ATTR_RO(9h); +static CCN_CMP_MASK_ATTR_RO(al); +static CCN_CMP_MASK_ATTR_RO(ah); +static CCN_CMP_MASK_ATTR_RO(bl); +static CCN_CMP_MASK_ATTR_RO(bh); + +static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = { + &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr, + &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr, + &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr, + &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr, + &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr, + &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr, + &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr, + &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr, + &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr, + &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr, + &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr, + &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr, + NULL +}; + +static const struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = { + .name = "cmp_mask", + .attrs = arm_ccn_pmu_cmp_mask_attrs, +}; + +static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu)); +} + +static struct device_attribute arm_ccn_pmu_cpumask_attr = + __ATTR(cpumask, S_IRUGO, arm_ccn_pmu_cpumask_show, NULL); + +static struct attribute *arm_ccn_pmu_cpumask_attrs[] = { + &arm_ccn_pmu_cpumask_attr.attr, + NULL, +}; + +static const struct attribute_group arm_ccn_pmu_cpumask_attr_group = { + .attrs = arm_ccn_pmu_cpumask_attrs, +}; + +/* + * Default poll period is 10ms, which is way over the top anyway, + * as in the worst case scenario (an event every cycle), with 1GHz + * clocked bus, the smallest, 32 bit counter will overflow in + * more than 4s. 
+ */ +static unsigned int arm_ccn_pmu_poll_period_us = 10000; +module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint, + S_IRUGO | S_IWUSR); + +static ktime_t arm_ccn_pmu_timer_period(void) +{ + return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000); +} + + +static const struct attribute_group *arm_ccn_pmu_attr_groups[] = { + &arm_ccn_pmu_events_attr_group, + &arm_ccn_pmu_format_attr_group, + &arm_ccn_pmu_cmp_mask_attr_group, + &arm_ccn_pmu_cpumask_attr_group, + NULL +}; + + +static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size) +{ + int bit; + + do { + bit = find_first_zero_bit(bitmap, size); + if (bit >= size) + return -EAGAIN; + } while (test_and_set_bit(bit, bitmap)); + + return bit; +} + +/* All RN-I and RN-D nodes have identical PMUs */ +static int arm_ccn_pmu_type_eq(u32 a, u32 b) +{ + if (a == b) + return 1; + + switch (a) { + case CCN_TYPE_RNI_1P: + case CCN_TYPE_RNI_2P: + case CCN_TYPE_RNI_3P: + case CCN_TYPE_RND_1P: + case CCN_TYPE_RND_2P: + case CCN_TYPE_RND_3P: + switch (b) { + case CCN_TYPE_RNI_1P: + case CCN_TYPE_RNI_2P: + case CCN_TYPE_RNI_3P: + case CCN_TYPE_RND_1P: + case CCN_TYPE_RND_2P: + case CCN_TYPE_RND_3P: + return 1; + } + break; + } + + return 0; +} + +static int arm_ccn_pmu_event_alloc(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u32 node_xp, type, event_id; + struct arm_ccn_component *source; + int bit; + + node_xp = CCN_CONFIG_NODE(event->attr.config); + type = CCN_CONFIG_TYPE(event->attr.config); + event_id = CCN_CONFIG_EVENT(event->attr.config); + + /* Allocate the cycle counter */ + if (type == CCN_TYPE_CYCLES) { + if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER, + ccn->dt.pmu_counters_mask)) + return -EAGAIN; + + hw->idx = CCN_IDX_PMU_CYCLE_COUNTER; + ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event; + + return 0; + } + + /* Allocate an event counter */ + hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask, + CCN_NUM_PMU_EVENT_COUNTERS); + if (hw->idx < 0) { + dev_dbg(ccn->dev, "No more counters available!\n"); + return -EAGAIN; + } + + if (type == CCN_TYPE_XP) + source = &ccn->xp[node_xp]; + else + source = &ccn->node[node_xp]; + ccn->dt.pmu_counters[hw->idx].source = source; + + /* Allocate an event source or a watchpoint */ + if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT) + bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask, + CCN_NUM_XP_WATCHPOINTS); + else + bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask, + CCN_NUM_PMU_EVENTS); + if (bit < 0) { + dev_dbg(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n", + node_xp); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + return -EAGAIN; + } + hw->config_base = bit; + + ccn->dt.pmu_counters[hw->idx].event = event; + + return 0; +} + +static void arm_ccn_pmu_event_release(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { + clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); + } else { + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && + CCN_CONFIG_EVENT(event->attr.config) == + CCN_EVENT_WATCHPOINT) + clear_bit(hw->config_base, source->xp.dt_cmp_mask); + else + clear_bit(hw->config_base, source->pmu_events_mask); + clear_bit(hw->idx, ccn->dt.pmu_counters_mask); + } + + ccn->dt.pmu_counters[hw->idx].source = NULL; + 
ccn->dt.pmu_counters[hw->idx].event = NULL; +} + +static int arm_ccn_pmu_event_init(struct perf_event *event) +{ + struct arm_ccn *ccn; + struct hw_perf_event *hw = &event->hw; + u32 node_xp, type, event_id; + int valid; + int i; + struct perf_event *sibling; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + ccn = pmu_to_arm_ccn(event->pmu); + + if (hw->sample_period) { + dev_dbg(ccn->dev, "Sampling not supported!\n"); + return -EOPNOTSUPP; + } + + if (has_branch_stack(event)) { + dev_dbg(ccn->dev, "Can't exclude execution levels!\n"); + return -EINVAL; + } + + if (event->cpu < 0) { + dev_dbg(ccn->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, like CCN, where each + * event could be theoretically assigned to a different CPU. To + * mitigate this, we enforce CPU assignment to one, selected + * processor (the one described in the "cpumask" attribute). + */ + event->cpu = ccn->dt.cpu; + + node_xp = CCN_CONFIG_NODE(event->attr.config); + type = CCN_CONFIG_TYPE(event->attr.config); + event_id = CCN_CONFIG_EVENT(event->attr.config); + + /* Validate node/xp vs topology */ + switch (type) { + case CCN_TYPE_MN: + if (node_xp != ccn->mn_id) { + dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp); + return -EINVAL; + } + break; + case CCN_TYPE_XP: + if (node_xp >= ccn->num_xps) { + dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp); + return -EINVAL; + } + break; + case CCN_TYPE_CYCLES: + break; + default: + if (node_xp >= ccn->num_nodes) { + dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp); + return -EINVAL; + } + if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { + dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n", + type, node_xp); + return -EINVAL; + } + break; + } + + /* Validate event ID vs available for the type */ + for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid; + i++) { + struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i]; + u32 port = CCN_CONFIG_PORT(event->attr.config); + u32 vc = CCN_CONFIG_VC(event->attr.config); + + if (!arm_ccn_pmu_type_eq(type, e->type)) + continue; + if (event_id != e->event) + continue; + if (e->num_ports && port >= e->num_ports) { + dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n", + port, node_xp); + return -EINVAL; + } + if (e->num_vcs && vc >= e->num_vcs) { + dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n", + vc, node_xp); + return -EINVAL; + } + valid = 1; + } + if (!valid) { + dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", + event_id, node_xp); + return -EINVAL; + } + + /* Watchpoint-based event for a node is actually set on XP */ + if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) { + u32 port; + + type = CCN_TYPE_XP; + port = arm_ccn_node_to_xp_port(node_xp); + node_xp = arm_ccn_node_to_xp(node_xp); + + arm_ccn_pmu_config_set(&event->attr.config, + node_xp, type, port); + } + + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable (for example to create a CCN group + * periodically read when a hrtimer aka cpu-clock leader triggers). 
+ */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; + } + + return 0; +} + +static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx) +{ + u64 res; + + if (idx == CCN_IDX_PMU_CYCLE_COUNTER) { +#ifdef readq + res = readq(ccn->dt.base + CCN_DT_PMCCNTR); +#else + /* 40 bit counter, can do snapshot and read in two parts */ + writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ); + while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1)) + ; + writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); + res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff; + res <<= 32; + res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR); +#endif + } else { + res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx)); + } + + return res; +} + +static void arm_ccn_pmu_event_update(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u64 prev_count, new_count, mask; + + do { + prev_count = local64_read(&hw->prev_count); + new_count = arm_ccn_pmu_read_counter(ccn, hw->idx); + } while (local64_xchg(&hw->prev_count, new_count) != prev_count); + + mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1; + + local64_add((new_count - prev_count) & mask, &event->count); +} + +static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *xp; + u32 val, dt_cfg; + + /* Nothing to do for cycle counter */ + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) + return; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) + xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)]; + else + xp = &ccn->xp[arm_ccn_node_to_xp( + CCN_CONFIG_NODE(event->attr.config))]; + + if (enable) + dt_cfg = hw->event_base; + else + dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH; + + spin_lock(&ccn->dt.config_lock); + + val = readl(xp->base + CCN_XP_DT_CONFIG); + val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK << + CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx)); + val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx); + writel(val, xp->base + CCN_XP_DT_CONFIG); + + spin_unlock(&ccn->dt.config_lock); +} + +static void arm_ccn_pmu_event_start(struct perf_event *event, int flags) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + + local64_set(&event->hw.prev_count, + arm_ccn_pmu_read_counter(ccn, hw->idx)); + hw->state = 0; + + /* Set the DT bus input, engaging the counter */ + arm_ccn_pmu_xp_dt_config(event, 1); +} + +static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hw = &event->hw; + + /* Disable counting, setting the DT bus to pass-through mode */ + arm_ccn_pmu_xp_dt_config(event, 0); + + if (flags & PERF_EF_UPDATE) + arm_ccn_pmu_event_update(event); + + hw->state |= PERF_HES_STOPPED; +} + +static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + unsigned long wp = hw->config_base; + u32 val; + u64 cmp_l = event->attr.config1; + u64 cmp_h = event->attr.config2; + u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l; + u64 mask_h = 
ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h; + + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp); + + /* Direction (RX/TX), device (port) & virtual channel */ + val = readl(source->base + CCN_XP_DT_INTERFACE_SEL); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp)); + val |= CCN_CONFIG_DIR(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp)); + val |= CCN_CONFIG_PORT(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp); + val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK << + CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp)); + val |= CCN_CONFIG_VC(event->attr.config) << + CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp); + writel(val, source->base + CCN_XP_DT_INTERFACE_SEL); + + /* Comparison values */ + writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp)); + writel((cmp_l >> 32) & 0x7fffffff, + source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4); + writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp)); + writel((cmp_h >> 32) & 0x0fffffff, + source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4); + + /* Mask */ + writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp)); + writel((mask_l >> 32) & 0x7fffffff, + source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4); + writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp)); + writel((mask_h >> 32) & 0x0fffffff, + source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4); +} + +static void arm_ccn_pmu_xp_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + u32 val, id; + + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base); + + id = (CCN_CONFIG_VC(event->attr.config) << 4) | + (CCN_CONFIG_BUS(event->attr.config) << 3) | + (CCN_CONFIG_EVENT(event->attr.config) << 0); + + val = readl(source->base + CCN_XP_PMU_EVENT_SEL); + val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK << + CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); + val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); + writel(val, source->base + CCN_XP_PMU_EVENT_SEL); +} + +static void arm_ccn_pmu_node_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct arm_ccn_component *source = + ccn->dt.pmu_counters[hw->idx].source; + u32 type = CCN_CONFIG_TYPE(event->attr.config); + u32 val, port; + + port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config)); + hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port, + hw->config_base); + + /* These *_event_sel regs should be identical, but let's make sure... 
*/ + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL); + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) != + CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1)); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) != + CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1)); + BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK != + CCN_SBAS_PMU_EVENT_SEL__ID__MASK); + BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK != + CCN_RNI_PMU_EVENT_SEL__ID__MASK); + if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS && + !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P))) + return; + + /* Set the event id for the pre-allocated counter */ + val = readl(source->base + CCN_HNF_PMU_EVENT_SEL); + val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK << + CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base)); + val |= CCN_CONFIG_EVENT(event->attr.config) << + CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base); + writel(val, source->base + CCN_HNF_PMU_EVENT_SEL); +} + +static void arm_ccn_pmu_event_config(struct perf_event *event) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + struct hw_perf_event *hw = &event->hw; + u32 xp, offset, val; + + /* Cycle counter requires no setup */ + if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) + return; + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) + xp = CCN_CONFIG_XP(event->attr.config); + else + xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config)); + + spin_lock(&ccn->dt.config_lock); + + /* Set the DT bus "distance" register */ + offset = (hw->idx / 4) * 4; + val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); + val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK << + CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4)); + val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4); + writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset); + + if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) { + if (CCN_CONFIG_EVENT(event->attr.config) == + CCN_EVENT_WATCHPOINT) + arm_ccn_pmu_xp_watchpoint_config(event); + else + arm_ccn_pmu_xp_event_config(event); + } else { + arm_ccn_pmu_node_event_config(event); + } + + spin_unlock(&ccn->dt.config_lock); +} + +static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn) +{ + return bitmap_weight(ccn->dt.pmu_counters_mask, + CCN_NUM_PMU_EVENT_COUNTERS + 1); +} + +static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) +{ + int err; + struct hw_perf_event *hw = &event->hw; + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + + err = arm_ccn_pmu_event_alloc(event); + if (err) + return err; + + /* + * Pin the timer, so that the overflows are handled by the chosen + * event->cpu (this is the same one as presented in "cpumask" + * attribute). 
+ */ + if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1) + hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(), + HRTIMER_MODE_REL_PINNED); + + arm_ccn_pmu_event_config(event); + + hw->state = PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + arm_ccn_pmu_event_start(event, PERF_EF_UPDATE); + + return 0; +} + +static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); + + arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); + + arm_ccn_pmu_event_release(event); + + if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0) + hrtimer_cancel(&ccn->dt.hrtimer); +} + +static void arm_ccn_pmu_event_read(struct perf_event *event) +{ + arm_ccn_pmu_event_update(event); +} + +static void arm_ccn_pmu_enable(struct pmu *pmu) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); + + u32 val = readl(ccn->dt.base + CCN_DT_PMCR); + val |= CCN_DT_PMCR__PMU_EN; + writel(val, ccn->dt.base + CCN_DT_PMCR); +} + +static void arm_ccn_pmu_disable(struct pmu *pmu) +{ + struct arm_ccn *ccn = pmu_to_arm_ccn(pmu); + + u32 val = readl(ccn->dt.base + CCN_DT_PMCR); + val &= ~CCN_DT_PMCR__PMU_EN; + writel(val, ccn->dt.base + CCN_DT_PMCR); +} + +static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt) +{ + u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR); + int idx; + + if (!pmovsr) + return IRQ_NONE; + + writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR); + + BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS); + + for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) { + struct perf_event *event = dt->pmu_counters[idx].event; + int overflowed = pmovsr & BIT(idx); + + WARN_ON_ONCE(overflowed && !event && + idx != CCN_IDX_PMU_CYCLE_COUNTER); + + if (!event || !overflowed) + continue; + + arm_ccn_pmu_event_update(event); + } + + return IRQ_HANDLED; +} + +static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) +{ + struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt, + hrtimer); + unsigned long flags; + + local_irq_save(flags); + arm_ccn_pmu_overflow_handler(dt); + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period()); + return HRTIMER_RESTART; +} + + +static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct arm_ccn_dt *dt = hlist_entry_safe(node, struct arm_ccn_dt, node); + struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); + unsigned int target; + + if (cpu != dt->cpu) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + perf_pmu_migrate_context(&dt->pmu, cpu, target); + dt->cpu = target; + if (ccn->irq) + WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu))); + return 0; +} + +static DEFINE_IDA(arm_ccn_pmu_ida); + +static int arm_ccn_pmu_init(struct arm_ccn *ccn) +{ + int i; + char *name; + int err; + + /* Initialize DT subsystem */ + ccn->dt.base = ccn->base + CCN_REGION_SIZE; + spin_lock_init(&ccn->dt.config_lock); + writel(CCN_DT_PMOVSR_CLR__MASK, ccn->dt.base + CCN_DT_PMOVSR_CLR); + writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL); + writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN, + ccn->dt.base + CCN_DT_PMCR); + writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR); + for (i = 0; i < ccn->num_xps; i++) { + writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG); + writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << + CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) | + (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS << + CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) | + 
CCN_XP_DT_CONTROL__DT_ENABLE, + ccn->xp[i].base + CCN_XP_DT_CONTROL); + } + ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0; + ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15); + ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0; + ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9); + + /* Get a convenient /sys/event_source/devices/ name */ + ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL); + if (ccn->dt.id == 0) { + name = "ccn"; + } else { + name = devm_kasprintf(ccn->dev, GFP_KERNEL, "ccn_%d", + ccn->dt.id); + if (!name) { + err = -ENOMEM; + goto error_choose_name; + } + } + + /* Perf driver registration */ + ccn->dt.pmu = (struct pmu) { + .module = THIS_MODULE, + .attr_groups = arm_ccn_pmu_attr_groups, + .task_ctx_nr = perf_invalid_context, + .event_init = arm_ccn_pmu_event_init, + .add = arm_ccn_pmu_event_add, + .del = arm_ccn_pmu_event_del, + .start = arm_ccn_pmu_event_start, + .stop = arm_ccn_pmu_event_stop, + .read = arm_ccn_pmu_event_read, + .pmu_enable = arm_ccn_pmu_enable, + .pmu_disable = arm_ccn_pmu_disable, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* No overflow interrupt? Have to use a timer instead. */ + if (!ccn->irq) { + dev_info(ccn->dev, "No access to interrupts, using timer.\n"); + hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler; + } + + /* Pick one CPU which we will use to collect data from CCN... */ + ccn->dt.cpu = raw_smp_processor_id(); + + /* Also make sure that the overflow interrupt is handled by this CPU */ + if (ccn->irq) { + err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu)); + if (err) { + dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); + goto error_set_affinity; + } + } + + cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); + + err = perf_pmu_register(&ccn->dt.pmu, name, -1); + if (err) + goto error_pmu_register; + + return 0; + +error_pmu_register: + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); +error_set_affinity: +error_choose_name: + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); + for (i = 0; i < ccn->num_xps; i++) + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); + writel(0, ccn->dt.base + CCN_DT_PMCR); + return err; +} + +static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) +{ + int i; + + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + &ccn->dt.node); + if (ccn->irq) + irq_set_affinity_hint(ccn->irq, NULL); + for (i = 0; i < ccn->num_xps; i++) + writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); + writel(0, ccn->dt.base + CCN_DT_PMCR); + perf_pmu_unregister(&ccn->dt.pmu); + ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); +} + +static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, + int (*callback)(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id)) +{ + int region; + + for (region = 0; region < CCN_NUM_REGIONS; region++) { + u32 val, type, id; + void __iomem *base; + int err; + + val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 + + 4 * (region / 32)); + if (!(val & (1 << (region % 32)))) + continue; + + base = ccn->base + region * CCN_REGION_SIZE; + val = readl(base + CCN_ALL_OLY_ID); + type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) & + CCN_ALL_OLY_ID__OLY_ID__MASK; + id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) & + 
CCN_ALL_OLY_ID__NODE_ID__MASK; + + err = callback(ccn, region, base, type, id); + if (err) + return err; + } + + return 0; +} + +static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id) +{ + + if (type == CCN_TYPE_XP && id >= ccn->num_xps) + ccn->num_xps = id + 1; + else if (id >= ccn->num_nodes) + ccn->num_nodes = id + 1; + + return 0; +} + +static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region, + void __iomem *base, u32 type, u32 id) +{ + struct arm_ccn_component *component; + + dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type); + + switch (type) { + case CCN_TYPE_MN: + ccn->mn_id = id; + return 0; + case CCN_TYPE_DT: + return 0; + case CCN_TYPE_XP: + component = &ccn->xp[id]; + break; + case CCN_TYPE_SBSX: + ccn->sbsx_present = 1; + component = &ccn->node[id]; + break; + case CCN_TYPE_SBAS: + ccn->sbas_present = 1; + fallthrough; + default: + component = &ccn->node[id]; + break; + } + + component->base = base; + component->type = type; + + return 0; +} + + +static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn, + const u32 *err_sig_val) +{ + /* This should be really handled by firmware... */ + dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n", + err_sig_val[5], err_sig_val[4], err_sig_val[3], + err_sig_val[2], err_sig_val[1], err_sig_val[0]); + dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n"); + writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + + return IRQ_HANDLED; +} + + +static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id) +{ + irqreturn_t res = IRQ_NONE; + struct arm_ccn *ccn = dev_id; + u32 err_sig_val[6]; + u32 err_or; + int i; + + /* PMU overflow is a special case */ + err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0); + if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) { + err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT; + res = arm_ccn_pmu_overflow_handler(&ccn->dt); + } + + /* Have to read all err_sig_vals to clear them */ + for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) { + err_sig_val[i] = readl(ccn->base + + CCN_MN_ERR_SIG_VAL_63_0 + i * 4); + err_or |= err_sig_val[i]; + } + if (err_or) + res |= arm_ccn_error_handler(ccn, err_sig_val); + + if (res != IRQ_NONE) + writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT, + ccn->base + CCN_MN_ERRINT_STATUS); + + return res; +} + + +static int arm_ccn_probe(struct platform_device *pdev) +{ + struct arm_ccn *ccn; + struct resource *res; + unsigned int irq; + int err; + + ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL); + if (!ccn) + return -ENOMEM; + ccn->dev = &pdev->dev; + platform_set_drvdata(pdev, ccn); + + ccn->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ccn->base)) + return PTR_ERR(ccn->base); + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) + return -EINVAL; + irq = res->start; + + /* Check if we can use the interrupt */ + writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + if (readl(ccn->base + CCN_MN_ERRINT_STATUS) & + CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) { + /* Can set 'disable' bits, so can acknowledge interrupts */ + writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE, + ccn->base + CCN_MN_ERRINT_STATUS); + err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(ccn->dev), ccn); + if (err) + return err; + + ccn->irq = irq; + } + + + /* Build topology */ + + err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num); + if 
(err) + return err; + + ccn->node = devm_kcalloc(ccn->dev, ccn->num_nodes, sizeof(*ccn->node), + GFP_KERNEL); + ccn->xp = devm_kcalloc(ccn->dev, ccn->num_xps, sizeof(*ccn->node), + GFP_KERNEL); + if (!ccn->node || !ccn->xp) + return -ENOMEM; + + err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes); + if (err) + return err; + + return arm_ccn_pmu_init(ccn); +} + +static int arm_ccn_remove(struct platform_device *pdev) +{ + struct arm_ccn *ccn = platform_get_drvdata(pdev); + + arm_ccn_pmu_cleanup(ccn); + + return 0; +} + +static const struct of_device_id arm_ccn_match[] = { + { .compatible = "arm,ccn-502", }, + { .compatible = "arm,ccn-504", }, + { .compatible = "arm,ccn-512", }, + {}, +}; +MODULE_DEVICE_TABLE(of, arm_ccn_match); + +static struct platform_driver arm_ccn_driver = { + .driver = { + .name = "arm-ccn", + .of_match_table = arm_ccn_match, + .suppress_bind_attrs = true, + }, + .probe = arm_ccn_probe, + .remove = arm_ccn_remove, +}; + +static int __init arm_ccn_init(void) +{ + int i, ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CCN_ONLINE, + "perf/arm/ccn:online", NULL, + arm_ccn_pmu_offline_cpu); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) + arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; + + ret = platform_driver_register(&arm_ccn_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); + return ret; +} + +static void __exit arm_ccn_exit(void) +{ + platform_driver_unregister(&arm_ccn_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE); +} + +module_init(arm_ccn_init); +module_exit(arm_ccn_exit); + +MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c new file mode 100644 index 000000000..ac4428e8f --- /dev/null +++ b/drivers/perf/arm-cmn.c @@ -0,0 +1,1639 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2016-2020 Arm Limited +// CMN-600 Coherent Mesh Network PMU driver + +#include <linux/acpi.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/sort.h> + +/* Common register stuff */ +#define CMN_NODE_INFO 0x0000 +#define CMN_NI_NODE_TYPE GENMASK_ULL(15, 0) +#define CMN_NI_NODE_ID GENMASK_ULL(31, 16) +#define CMN_NI_LOGICAL_ID GENMASK_ULL(47, 32) + +#define CMN_NODEID_DEVID(reg) ((reg) & 3) +#define CMN_NODEID_PID(reg) (((reg) >> 2) & 1) +#define CMN_NODEID_X(reg, bits) ((reg) >> (3 + (bits))) +#define CMN_NODEID_Y(reg, bits) (((reg) >> 3) & ((1U << (bits)) - 1)) + +#define CMN_CHILD_INFO 0x0080 +#define CMN_CI_CHILD_COUNT GENMASK_ULL(15, 0) +#define CMN_CI_CHILD_PTR_OFFSET GENMASK_ULL(31, 16) + +#define CMN_CHILD_NODE_ADDR GENMASK(27,0) +#define CMN_CHILD_NODE_EXTERNAL BIT(31) + +#define CMN_ADDR_NODE_PTR GENMASK(27, 14) + +#define CMN_NODE_PTR_DEVID(ptr) (((ptr) >> 2) & 3) +#define CMN_NODE_PTR_PID(ptr) ((ptr) & 1) +#define CMN_NODE_PTR_X(ptr, bits) ((ptr) >> (6 + (bits))) +#define CMN_NODE_PTR_Y(ptr, bits) (((ptr) >> 6) & ((1U << (bits)) - 1)) + +#define CMN_MAX_XPS (8 * 8) + +/* The CFG node has one other useful purpose */ +#define CMN_CFGM_PERIPH_ID_2 0x0010 +#define CMN_CFGM_PID2_REVISION GENMASK(7, 4) + +/* PMU registers occupy the 3rd 4KB page of each node's 16KB space */ +#define CMN_PMU_OFFSET 0x2000 + +/* 
For most nodes, this is all there is */ +#define CMN_PMU_EVENT_SEL 0x000 +#define CMN_PMU_EVENTn_ID_SHIFT(n) ((n) * 8) + +/* DTMs live in the PMU space of XP registers */ +#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18) +#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00) +#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(6) +#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(5) +#define CMN_DTM_WPn_CONFIG_WP_GRP BIT(4) +#define CMN_DTM_WPn_CONFIG_WP_CHN_SEL GENMASK_ULL(3, 1) +#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL BIT(0) +#define CMN_DTM_WPn_VAL(n) (CMN_DTM_WPn(n) + 0x08) +#define CMN_DTM_WPn_MASK(n) (CMN_DTM_WPn(n) + 0x10) + +#define CMN_DTM_PMU_CONFIG 0x210 +#define CMN__PMEVCNT0_INPUT_SEL GENMASK_ULL(37, 32) +#define CMN__PMEVCNT0_INPUT_SEL_WP 0x00 +#define CMN__PMEVCNT0_INPUT_SEL_XP 0x04 +#define CMN__PMEVCNT0_INPUT_SEL_DEV 0x10 +#define CMN__PMEVCNT0_GLOBAL_NUM GENMASK_ULL(18, 16) +#define CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(n) ((n) * 4) +#define CMN__PMEVCNT_PAIRED(n) BIT(4 + (n)) +#define CMN__PMEVCNT23_COMBINED BIT(2) +#define CMN__PMEVCNT01_COMBINED BIT(1) +#define CMN_DTM_PMU_CONFIG_PMU_EN BIT(0) + +#define CMN_DTM_PMEVCNT 0x220 + +#define CMN_DTM_PMEVCNTSR 0x240 + +#define CMN_DTM_NUM_COUNTERS 4 + +/* The DTC node is where the magic happens */ +#define CMN_DT_DTC_CTL 0x0a00 +#define CMN_DT_DTC_CTL_DT_EN BIT(0) + +/* DTC counters are paired in 64-bit registers on a 16-byte stride. Yuck */ +#define _CMN_DT_CNT_REG(n) ((((n) / 2) * 4 + (n) % 2) * 4) +#define CMN_DT_PMEVCNT(n) (CMN_PMU_OFFSET + _CMN_DT_CNT_REG(n)) +#define CMN_DT_PMCCNTR (CMN_PMU_OFFSET + 0x40) + +#define CMN_DT_PMEVCNTSR(n) (CMN_PMU_OFFSET + 0x50 + _CMN_DT_CNT_REG(n)) +#define CMN_DT_PMCCNTRSR (CMN_PMU_OFFSET + 0x90) + +#define CMN_DT_PMCR (CMN_PMU_OFFSET + 0x100) +#define CMN_DT_PMCR_PMU_EN BIT(0) +#define CMN_DT_PMCR_CNTR_RST BIT(5) +#define CMN_DT_PMCR_OVFL_INTR_EN BIT(6) + +#define CMN_DT_PMOVSR (CMN_PMU_OFFSET + 0x118) +#define CMN_DT_PMOVSR_CLR (CMN_PMU_OFFSET + 0x120) + +#define CMN_DT_PMSSR (CMN_PMU_OFFSET + 0x128) +#define CMN_DT_PMSSR_SS_STATUS(n) BIT(n) + +#define CMN_DT_PMSRR (CMN_PMU_OFFSET + 0x130) +#define CMN_DT_PMSRR_SS_REQ BIT(0) + +#define CMN_DT_NUM_COUNTERS 8 +#define CMN_MAX_DTCS 4 + +/* + * Even in the worst case a DTC counter can't wrap in fewer than 2^42 cycles, + * so throwing away one bit to make overflow handling easy is no big deal. 
+ */ +#define CMN_COUNTER_INIT 0x80000000 +/* Similarly for the 40-bit cycle counter */ +#define CMN_CC_INIT 0x8000000000ULL + + +/* Event attributes */ +#define CMN_CONFIG_TYPE GENMASK(15, 0) +#define CMN_CONFIG_EVENTID GENMASK(23, 16) +#define CMN_CONFIG_OCCUPID GENMASK(27, 24) +#define CMN_CONFIG_BYNODEID BIT(31) +#define CMN_CONFIG_NODEID GENMASK(47, 32) + +#define CMN_EVENT_TYPE(event) FIELD_GET(CMN_CONFIG_TYPE, (event)->attr.config) +#define CMN_EVENT_EVENTID(event) FIELD_GET(CMN_CONFIG_EVENTID, (event)->attr.config) +#define CMN_EVENT_OCCUPID(event) FIELD_GET(CMN_CONFIG_OCCUPID, (event)->attr.config) +#define CMN_EVENT_BYNODEID(event) FIELD_GET(CMN_CONFIG_BYNODEID, (event)->attr.config) +#define CMN_EVENT_NODEID(event) FIELD_GET(CMN_CONFIG_NODEID, (event)->attr.config) + +#define CMN_CONFIG_WP_COMBINE GENMASK(27, 24) +#define CMN_CONFIG_WP_DEV_SEL BIT(48) +#define CMN_CONFIG_WP_CHN_SEL GENMASK(50, 49) +#define CMN_CONFIG_WP_GRP BIT(52) +#define CMN_CONFIG_WP_EXCLUSIVE BIT(53) +#define CMN_CONFIG1_WP_VAL GENMASK(63, 0) +#define CMN_CONFIG2_WP_MASK GENMASK(63, 0) + +#define CMN_EVENT_WP_COMBINE(event) FIELD_GET(CMN_CONFIG_WP_COMBINE, (event)->attr.config) +#define CMN_EVENT_WP_DEV_SEL(event) FIELD_GET(CMN_CONFIG_WP_DEV_SEL, (event)->attr.config) +#define CMN_EVENT_WP_CHN_SEL(event) FIELD_GET(CMN_CONFIG_WP_CHN_SEL, (event)->attr.config) +#define CMN_EVENT_WP_GRP(event) FIELD_GET(CMN_CONFIG_WP_GRP, (event)->attr.config) +#define CMN_EVENT_WP_EXCLUSIVE(event) FIELD_GET(CMN_CONFIG_WP_EXCLUSIVE, (event)->attr.config) +#define CMN_EVENT_WP_VAL(event) FIELD_GET(CMN_CONFIG1_WP_VAL, (event)->attr.config1) +#define CMN_EVENT_WP_MASK(event) FIELD_GET(CMN_CONFIG2_WP_MASK, (event)->attr.config2) + +/* Made-up event IDs for watchpoint direction */ +#define CMN_WP_UP 0 +#define CMN_WP_DOWN 2 + + +/* r0px probably don't exist in silicon, thankfully */ +enum cmn_revision { + CMN600_R1P0, + CMN600_R1P1, + CMN600_R1P2, + CMN600_R1P3, + CMN600_R2P0, + CMN600_R3P0, +}; + +enum cmn_node_type { + CMN_TYPE_INVALID, + CMN_TYPE_DVM, + CMN_TYPE_CFG, + CMN_TYPE_DTC, + CMN_TYPE_HNI, + CMN_TYPE_HNF, + CMN_TYPE_XP, + CMN_TYPE_SBSX, + CMN_TYPE_RNI = 0xa, + CMN_TYPE_RND = 0xd, + CMN_TYPE_RNSAM = 0xf, + CMN_TYPE_CXRA = 0x100, + CMN_TYPE_CXHA = 0x101, + CMN_TYPE_CXLA = 0x102, + /* Not a real node type */ + CMN_TYPE_WP = 0x7770 +}; + +struct arm_cmn_node { + void __iomem *pmu_base; + u16 id, logid; + enum cmn_node_type type; + + union { + /* Device node */ + struct { + int to_xp; + /* DN/HN-F/CXHA */ + unsigned int occupid_val; + unsigned int occupid_count; + }; + /* XP */ + struct { + int dtc; + u32 pmu_config_low; + union { + u8 input_sel[4]; + __le32 pmu_config_high; + }; + s8 wp_event[4]; + }; + }; + + union { + u8 event[4]; + __le32 event_sel; + }; +}; + +struct arm_cmn_dtc { + void __iomem *base; + int irq; + int irq_friend; + bool cc_active; + + struct perf_event *counters[CMN_DT_NUM_COUNTERS]; + struct perf_event *cycles; +}; + +#define CMN_STATE_DISABLED BIT(0) +#define CMN_STATE_TXN BIT(1) + +struct arm_cmn { + struct device *dev; + void __iomem *base; + + enum cmn_revision rev; + u8 mesh_x; + u8 mesh_y; + u16 num_xps; + u16 num_dns; + struct arm_cmn_node *xps; + struct arm_cmn_node *dns; + + struct arm_cmn_dtc *dtc; + unsigned int num_dtcs; + + int cpu; + struct hlist_node cpuhp_node; + + unsigned int state; + struct pmu pmu; +}; + +#define to_cmn(p) container_of(p, struct arm_cmn, pmu) + +static int arm_cmn_hp_state; + +struct arm_cmn_hw_event { + struct arm_cmn_node *dn; + u64 dtm_idx[2]; + unsigned int 
dtc_idx; + u8 dtcs_used; + u8 num_dns; +}; + +#define for_each_hw_dn(hw, dn, i) \ + for (i = 0, dn = hw->dn; i < hw->num_dns; i++, dn++) + +static struct arm_cmn_hw_event *to_cmn_hw(struct perf_event *event) +{ + BUILD_BUG_ON(sizeof(struct arm_cmn_hw_event) > offsetof(struct hw_perf_event, target)); + return (struct arm_cmn_hw_event *)&event->hw; +} + +static void arm_cmn_set_index(u64 x[], unsigned int pos, unsigned int val) +{ + x[pos / 32] |= (u64)val << ((pos % 32) * 2); +} + +static unsigned int arm_cmn_get_index(u64 x[], unsigned int pos) +{ + return (x[pos / 32] >> ((pos % 32) * 2)) & 3; +} + +struct arm_cmn_event_attr { + struct device_attribute attr; + enum cmn_node_type type; + u8 eventid; + u8 occupid; +}; + +struct arm_cmn_format_attr { + struct device_attribute attr; + u64 field; + int config; +}; + +static int arm_cmn_xyidbits(const struct arm_cmn *cmn) +{ + return cmn->mesh_x > 4 || cmn->mesh_y > 4 ? 3 : 2; +} + +static void arm_cmn_init_node_to_xp(const struct arm_cmn *cmn, + struct arm_cmn_node *dn) +{ + int bits = arm_cmn_xyidbits(cmn); + int x = CMN_NODEID_X(dn->id, bits); + int y = CMN_NODEID_Y(dn->id, bits); + int xp_idx = cmn->mesh_x * y + x; + + dn->to_xp = (cmn->xps + xp_idx) - dn; +} + +static struct arm_cmn_node *arm_cmn_node_to_xp(struct arm_cmn_node *dn) +{ + return dn->type == CMN_TYPE_XP ? dn : dn + dn->to_xp; +} + +static struct arm_cmn_node *arm_cmn_node(const struct arm_cmn *cmn, + enum cmn_node_type type) +{ + int i; + + for (i = 0; i < cmn->num_dns; i++) + if (cmn->dns[i].type == type) + return &cmn->dns[i]; + return NULL; +} + +#define CMN_EVENT_ATTR(_name, _type, _eventid, _occupid) \ + (&((struct arm_cmn_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \ + .type = _type, \ + .eventid = _eventid, \ + .occupid = _occupid, \ + }})[0].attr.attr) + +static bool arm_cmn_is_occup_event(enum cmn_node_type type, unsigned int id) +{ + return (type == CMN_TYPE_DVM && id == 0x05) || + (type == CMN_TYPE_HNF && id == 0x0f); +} + +static ssize_t arm_cmn_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_cmn_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == CMN_TYPE_DTC) + return snprintf(buf, PAGE_SIZE, "type=0x%x\n", eattr->type); + + if (eattr->type == CMN_TYPE_WP) + return snprintf(buf, PAGE_SIZE, + "type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n", + eattr->type, eattr->eventid); + + if (arm_cmn_is_occup_event(eattr->type, eattr->eventid)) + return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x,occupid=0x%x\n", + eattr->type, eattr->eventid, eattr->occupid); + + return snprintf(buf, PAGE_SIZE, "type=0x%x,eventid=0x%x\n", + eattr->type, eattr->eventid); +} + +static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, + struct attribute *attr, + int unused) +{ + struct device *dev = kobj_to_dev(kobj); + struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev)); + struct arm_cmn_event_attr *eattr; + enum cmn_node_type type; + + eattr = container_of(attr, typeof(*eattr), attr.attr); + type = eattr->type; + + /* Watchpoints aren't nodes */ + if (type == CMN_TYPE_WP) + type = CMN_TYPE_XP; + + /* Revision-specific differences */ + if (cmn->rev < CMN600_R1P2) { + if (type == CMN_TYPE_HNF && eattr->eventid == 0x1b) + return 0; + } + + if (!arm_cmn_node(cmn, type)) + return 0; + + return attr->mode; +} + +#define _CMN_EVENT_DVM(_name, _event, _occup) \ + CMN_EVENT_ATTR(dn_##_name, CMN_TYPE_DVM, _event, _occup) +#define 
CMN_EVENT_DTC(_name) \ + CMN_EVENT_ATTR(dtc_##_name, CMN_TYPE_DTC, 0, 0) +#define _CMN_EVENT_HNF(_name, _event, _occup) \ + CMN_EVENT_ATTR(hnf_##_name, CMN_TYPE_HNF, _event, _occup) +#define CMN_EVENT_HNI(_name, _event) \ + CMN_EVENT_ATTR(hni_##_name, CMN_TYPE_HNI, _event, 0) +#define __CMN_EVENT_XP(_name, _event) \ + CMN_EVENT_ATTR(mxp_##_name, CMN_TYPE_XP, _event, 0) +#define CMN_EVENT_SBSX(_name, _event) \ + CMN_EVENT_ATTR(sbsx_##_name, CMN_TYPE_SBSX, _event, 0) +#define CMN_EVENT_RNID(_name, _event) \ + CMN_EVENT_ATTR(rnid_##_name, CMN_TYPE_RNI, _event, 0) + +#define CMN_EVENT_DVM(_name, _event) \ + _CMN_EVENT_DVM(_name, _event, 0) +#define CMN_EVENT_HNF(_name, _event) \ + _CMN_EVENT_HNF(_name, _event, 0) +#define _CMN_EVENT_XP(_name, _event) \ + __CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \ + __CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \ + __CMN_EVENT_XP(n_##_name, (_event) | (2 << 2)), \ + __CMN_EVENT_XP(s_##_name, (_event) | (3 << 2)), \ + __CMN_EVENT_XP(p0_##_name, (_event) | (4 << 2)), \ + __CMN_EVENT_XP(p1_##_name, (_event) | (5 << 2)) + +/* Good thing there are only 3 fundamental XP events... */ +#define CMN_EVENT_XP(_name, _event) \ + _CMN_EVENT_XP(req_##_name, (_event) | (0 << 5)), \ + _CMN_EVENT_XP(rsp_##_name, (_event) | (1 << 5)), \ + _CMN_EVENT_XP(snp_##_name, (_event) | (2 << 5)), \ + _CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)) + + +static struct attribute *arm_cmn_event_attrs[] = { + CMN_EVENT_DTC(cycles), + + /* + * DVM node events conflict with HN-I events in the equivalent PMU + * slot, but our lazy short-cut of using the DTM counter index for + * the PMU index as well happens to avoid that by construction. + */ + CMN_EVENT_DVM(rxreq_dvmop, 0x01), + CMN_EVENT_DVM(rxreq_dvmsync, 0x02), + CMN_EVENT_DVM(rxreq_dvmop_vmid_filtered, 0x03), + CMN_EVENT_DVM(rxreq_retried, 0x04), + _CMN_EVENT_DVM(rxreq_trk_occupancy_all, 0x05, 0), + _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmop, 0x05, 1), + _CMN_EVENT_DVM(rxreq_trk_occupancy_dvmsync, 0x05, 2), + + CMN_EVENT_HNF(cache_miss, 0x01), + CMN_EVENT_HNF(slc_sf_cache_access, 0x02), + CMN_EVENT_HNF(cache_fill, 0x03), + CMN_EVENT_HNF(pocq_retry, 0x04), + CMN_EVENT_HNF(pocq_reqs_recvd, 0x05), + CMN_EVENT_HNF(sf_hit, 0x06), + CMN_EVENT_HNF(sf_evictions, 0x07), + CMN_EVENT_HNF(dir_snoops_sent, 0x08), + CMN_EVENT_HNF(brd_snoops_sent, 0x09), + CMN_EVENT_HNF(slc_eviction, 0x0a), + CMN_EVENT_HNF(slc_fill_invalid_way, 0x0b), + CMN_EVENT_HNF(mc_retries, 0x0c), + CMN_EVENT_HNF(mc_reqs, 0x0d), + CMN_EVENT_HNF(qos_hh_retry, 0x0e), + _CMN_EVENT_HNF(qos_pocq_occupancy_all, 0x0f, 0), + _CMN_EVENT_HNF(qos_pocq_occupancy_read, 0x0f, 1), + _CMN_EVENT_HNF(qos_pocq_occupancy_write, 0x0f, 2), + _CMN_EVENT_HNF(qos_pocq_occupancy_atomic, 0x0f, 3), + _CMN_EVENT_HNF(qos_pocq_occupancy_stash, 0x0f, 4), + CMN_EVENT_HNF(pocq_addrhaz, 0x10), + CMN_EVENT_HNF(pocq_atomic_addrhaz, 0x11), + CMN_EVENT_HNF(ld_st_swp_adq_full, 0x12), + CMN_EVENT_HNF(cmp_adq_full, 0x13), + CMN_EVENT_HNF(txdat_stall, 0x14), + CMN_EVENT_HNF(txrsp_stall, 0x15), + CMN_EVENT_HNF(seq_full, 0x16), + CMN_EVENT_HNF(seq_hit, 0x17), + CMN_EVENT_HNF(snp_sent, 0x18), + CMN_EVENT_HNF(sfbi_dir_snp_sent, 0x19), + CMN_EVENT_HNF(sfbi_brd_snp_sent, 0x1a), + CMN_EVENT_HNF(snp_sent_untrk, 0x1b), + CMN_EVENT_HNF(intv_dirty, 0x1c), + CMN_EVENT_HNF(stash_snp_sent, 0x1d), + CMN_EVENT_HNF(stash_data_pull, 0x1e), + CMN_EVENT_HNF(snp_fwded, 0x1f), + + CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20), + CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21), + CMN_EVENT_HNI(rdt_rd_occ_cnt_ovfl, 0x22), + 
CMN_EVENT_HNI(rdt_wr_occ_cnt_ovfl, 0x23), + CMN_EVENT_HNI(wdb_occ_cnt_ovfl, 0x24), + CMN_EVENT_HNI(rrt_rd_alloc, 0x25), + CMN_EVENT_HNI(rrt_wr_alloc, 0x26), + CMN_EVENT_HNI(rdt_rd_alloc, 0x27), + CMN_EVENT_HNI(rdt_wr_alloc, 0x28), + CMN_EVENT_HNI(wdb_alloc, 0x29), + CMN_EVENT_HNI(txrsp_retryack, 0x2a), + CMN_EVENT_HNI(arvalid_no_arready, 0x2b), + CMN_EVENT_HNI(arready_no_arvalid, 0x2c), + CMN_EVENT_HNI(awvalid_no_awready, 0x2d), + CMN_EVENT_HNI(awready_no_awvalid, 0x2e), + CMN_EVENT_HNI(wvalid_no_wready, 0x2f), + CMN_EVENT_HNI(txdat_stall, 0x30), + CMN_EVENT_HNI(nonpcie_serialization, 0x31), + CMN_EVENT_HNI(pcie_serialization, 0x32), + + CMN_EVENT_XP(txflit_valid, 0x01), + CMN_EVENT_XP(txflit_stall, 0x02), + CMN_EVENT_XP(partial_dat_flit, 0x03), + /* We treat watchpoints as a special made-up class of XP events */ + CMN_EVENT_ATTR(watchpoint_up, CMN_TYPE_WP, 0, 0), + CMN_EVENT_ATTR(watchpoint_down, CMN_TYPE_WP, 2, 0), + + CMN_EVENT_SBSX(rd_req, 0x01), + CMN_EVENT_SBSX(wr_req, 0x02), + CMN_EVENT_SBSX(cmo_req, 0x03), + CMN_EVENT_SBSX(txrsp_retryack, 0x04), + CMN_EVENT_SBSX(txdat_flitv, 0x05), + CMN_EVENT_SBSX(txrsp_flitv, 0x06), + CMN_EVENT_SBSX(rd_req_trkr_occ_cnt_ovfl, 0x11), + CMN_EVENT_SBSX(wr_req_trkr_occ_cnt_ovfl, 0x12), + CMN_EVENT_SBSX(cmo_req_trkr_occ_cnt_ovfl, 0x13), + CMN_EVENT_SBSX(wdb_occ_cnt_ovfl, 0x14), + CMN_EVENT_SBSX(rd_axi_trkr_occ_cnt_ovfl, 0x15), + CMN_EVENT_SBSX(cmo_axi_trkr_occ_cnt_ovfl, 0x16), + CMN_EVENT_SBSX(arvalid_no_arready, 0x21), + CMN_EVENT_SBSX(awvalid_no_awready, 0x22), + CMN_EVENT_SBSX(wvalid_no_wready, 0x23), + CMN_EVENT_SBSX(txdat_stall, 0x24), + CMN_EVENT_SBSX(txrsp_stall, 0x25), + + CMN_EVENT_RNID(s0_rdata_beats, 0x01), + CMN_EVENT_RNID(s1_rdata_beats, 0x02), + CMN_EVENT_RNID(s2_rdata_beats, 0x03), + CMN_EVENT_RNID(rxdat_flits, 0x04), + CMN_EVENT_RNID(txdat_flits, 0x05), + CMN_EVENT_RNID(txreq_flits_total, 0x06), + CMN_EVENT_RNID(txreq_flits_retried, 0x07), + CMN_EVENT_RNID(rrt_occ_ovfl, 0x08), + CMN_EVENT_RNID(wrt_occ_ovfl, 0x09), + CMN_EVENT_RNID(txreq_flits_replayed, 0x0a), + CMN_EVENT_RNID(wrcancel_sent, 0x0b), + CMN_EVENT_RNID(s0_wdata_beats, 0x0c), + CMN_EVENT_RNID(s1_wdata_beats, 0x0d), + CMN_EVENT_RNID(s2_wdata_beats, 0x0e), + CMN_EVENT_RNID(rrt_alloc, 0x0f), + CMN_EVENT_RNID(wrt_alloc, 0x10), + CMN_EVENT_RNID(rdb_unord, 0x11), + CMN_EVENT_RNID(rdb_replay, 0x12), + CMN_EVENT_RNID(rdb_hybrid, 0x13), + CMN_EVENT_RNID(rdb_ord, 0x14), + + NULL +}; + +static const struct attribute_group arm_cmn_event_attrs_group = { + .name = "events", + .attrs = arm_cmn_event_attrs, + .is_visible = arm_cmn_event_attr_is_visible, +}; + +static ssize_t arm_cmn_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_cmn_format_attr *fmt = container_of(attr, typeof(*fmt), attr); + int lo = __ffs(fmt->field), hi = __fls(fmt->field); + + if (lo == hi) + return snprintf(buf, PAGE_SIZE, "config:%d\n", lo); + + if (!fmt->config) + return snprintf(buf, PAGE_SIZE, "config:%d-%d\n", lo, hi); + + return snprintf(buf, PAGE_SIZE, "config%d:%d-%d\n", fmt->config, lo, hi); +} + +#define _CMN_FORMAT_ATTR(_name, _cfg, _fld) \ + (&((struct arm_cmn_format_attr[]) {{ \ + .attr = __ATTR(_name, 0444, arm_cmn_format_show, NULL), \ + .config = _cfg, \ + .field = _fld, \ + }})[0].attr.attr) +#define CMN_FORMAT_ATTR(_name, _fld) _CMN_FORMAT_ATTR(_name, 0, _fld) + +static struct attribute *arm_cmn_format_attrs[] = { + CMN_FORMAT_ATTR(type, CMN_CONFIG_TYPE), + CMN_FORMAT_ATTR(eventid, CMN_CONFIG_EVENTID), + CMN_FORMAT_ATTR(occupid, CMN_CONFIG_OCCUPID), + 
CMN_FORMAT_ATTR(bynodeid, CMN_CONFIG_BYNODEID), + CMN_FORMAT_ATTR(nodeid, CMN_CONFIG_NODEID), + + CMN_FORMAT_ATTR(wp_dev_sel, CMN_CONFIG_WP_DEV_SEL), + CMN_FORMAT_ATTR(wp_chn_sel, CMN_CONFIG_WP_CHN_SEL), + CMN_FORMAT_ATTR(wp_grp, CMN_CONFIG_WP_GRP), + CMN_FORMAT_ATTR(wp_exclusive, CMN_CONFIG_WP_EXCLUSIVE), + CMN_FORMAT_ATTR(wp_combine, CMN_CONFIG_WP_COMBINE), + + _CMN_FORMAT_ATTR(wp_val, 1, CMN_CONFIG1_WP_VAL), + _CMN_FORMAT_ATTR(wp_mask, 2, CMN_CONFIG2_WP_MASK), + + NULL +}; + +static const struct attribute_group arm_cmn_format_attrs_group = { + .name = "format", + .attrs = arm_cmn_format_attrs, +}; + +static ssize_t arm_cmn_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(cmn->cpu)); +} + +static struct device_attribute arm_cmn_cpumask_attr = + __ATTR(cpumask, 0444, arm_cmn_cpumask_show, NULL); + +static struct attribute *arm_cmn_cpumask_attrs[] = { + &arm_cmn_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group arm_cmn_cpumask_attr_group = { + .attrs = arm_cmn_cpumask_attrs, +}; + +static const struct attribute_group *arm_cmn_attr_groups[] = { + &arm_cmn_event_attrs_group, + &arm_cmn_format_attrs_group, + &arm_cmn_cpumask_attr_group, + NULL +}; + +static int arm_cmn_wp_idx(struct perf_event *event) +{ + return CMN_EVENT_EVENTID(event) + CMN_EVENT_WP_GRP(event); +} + +static u32 arm_cmn_wp_config(struct perf_event *event) +{ + u32 config; + u32 dev = CMN_EVENT_WP_DEV_SEL(event); + u32 chn = CMN_EVENT_WP_CHN_SEL(event); + u32 grp = CMN_EVENT_WP_GRP(event); + u32 exc = CMN_EVENT_WP_EXCLUSIVE(event); + u32 combine = CMN_EVENT_WP_COMBINE(event); + + config = FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_DEV_SEL, dev) | + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_CHN_SEL, chn) | + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_GRP, grp) | + FIELD_PREP(CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE, exc); + if (combine && !grp) + config |= CMN_DTM_WPn_CONFIG_WP_COMBINE; + + return config; +} + +static void arm_cmn_set_state(struct arm_cmn *cmn, u32 state) +{ + if (!cmn->state) + writel_relaxed(0, cmn->dtc[0].base + CMN_DT_PMCR); + cmn->state |= state; +} + +static void arm_cmn_clear_state(struct arm_cmn *cmn, u32 state) +{ + cmn->state &= ~state; + if (!cmn->state) + writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, + cmn->dtc[0].base + CMN_DT_PMCR); +} + +static void arm_cmn_pmu_enable(struct pmu *pmu) +{ + arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_DISABLED); +} + +static void arm_cmn_pmu_disable(struct pmu *pmu) +{ + arm_cmn_set_state(to_cmn(pmu), CMN_STATE_DISABLED); +} + +static u64 arm_cmn_read_dtm(struct arm_cmn *cmn, struct arm_cmn_hw_event *hw, + bool snapshot) +{ + struct arm_cmn_node *dn; + unsigned int i, offset; + u64 count = 0; + + offset = snapshot ? 
CMN_DTM_PMEVCNTSR : CMN_DTM_PMEVCNT; + for_each_hw_dn(hw, dn, i) { + struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn); + int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + u64 reg = readq_relaxed(xp->pmu_base + offset); + u16 dtm_count = reg >> (dtm_idx * 16); + + count += dtm_count; + } + return count; +} + +static u64 arm_cmn_read_cc(struct arm_cmn_dtc *dtc) +{ + u64 val = readq_relaxed(dtc->base + CMN_DT_PMCCNTR); + + writeq_relaxed(CMN_CC_INIT, dtc->base + CMN_DT_PMCCNTR); + return (val - CMN_CC_INIT) & ((CMN_CC_INIT << 1) - 1); +} + +static u32 arm_cmn_read_counter(struct arm_cmn_dtc *dtc, int idx) +{ + u32 val, pmevcnt = CMN_DT_PMEVCNT(idx); + + val = readl_relaxed(dtc->base + pmevcnt); + writel_relaxed(CMN_COUNTER_INIT, dtc->base + pmevcnt); + return val - CMN_COUNTER_INIT; +} + +static void arm_cmn_init_counter(struct perf_event *event) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + unsigned int i, pmevcnt = CMN_DT_PMEVCNT(hw->dtc_idx); + u64 count; + + for (i = 0; hw->dtcs_used & (1U << i); i++) { + writel_relaxed(CMN_COUNTER_INIT, cmn->dtc[i].base + pmevcnt); + cmn->dtc[i].counters[hw->dtc_idx] = event; + } + + count = arm_cmn_read_dtm(cmn, hw, false); + local64_set(&event->hw.prev_count, count); +} + +static void arm_cmn_event_read(struct perf_event *event) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + u64 delta, new, prev; + unsigned long flags; + unsigned int i; + + if (hw->dtc_idx == CMN_DT_NUM_COUNTERS) { + i = __ffs(hw->dtcs_used); + delta = arm_cmn_read_cc(cmn->dtc + i); + local64_add(delta, &event->count); + return; + } + new = arm_cmn_read_dtm(cmn, hw, false); + prev = local64_xchg(&event->hw.prev_count, new); + + delta = new - prev; + + local_irq_save(flags); + for (i = 0; hw->dtcs_used & (1U << i); i++) { + new = arm_cmn_read_counter(cmn->dtc + i, hw->dtc_idx); + delta += new << 16; + } + local_irq_restore(flags); + local64_add(delta, &event->count); +} + +static void arm_cmn_event_start(struct perf_event *event, int flags) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; + enum cmn_node_type type = CMN_EVENT_TYPE(event); + int i; + + if (type == CMN_TYPE_DTC) { + i = __ffs(hw->dtcs_used); + writeq_relaxed(CMN_CC_INIT, cmn->dtc[i].base + CMN_DT_PMCCNTR); + cmn->dtc[i].cc_active = true; + } else if (type == CMN_TYPE_WP) { + int wp_idx = arm_cmn_wp_idx(event); + u64 val = CMN_EVENT_WP_VAL(event); + u64 mask = CMN_EVENT_WP_MASK(event); + + for_each_hw_dn(hw, dn, i) { + writeq_relaxed(val, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx)); + writeq_relaxed(mask, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx)); + } + } else for_each_hw_dn(hw, dn, i) { + int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + + dn->event[dtm_idx] = CMN_EVENT_EVENTID(event); + writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); + } +} + +static void arm_cmn_event_stop(struct perf_event *event, int flags) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; + enum cmn_node_type type = CMN_EVENT_TYPE(event); + int i; + + if (type == CMN_TYPE_DTC) { + i = __ffs(hw->dtcs_used); + cmn->dtc[i].cc_active = false; + } else if (type == CMN_TYPE_WP) { + int wp_idx = arm_cmn_wp_idx(event); + + for_each_hw_dn(hw, dn, i) { + writeq_relaxed(0, dn->pmu_base + CMN_DTM_WPn_MASK(wp_idx)); + writeq_relaxed(~0ULL, dn->pmu_base + CMN_DTM_WPn_VAL(wp_idx)); + } + } 
else for_each_hw_dn(hw, dn, i) { + int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + + dn->event[dtm_idx] = 0; + writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL); + } + + arm_cmn_event_read(event); +} + +struct arm_cmn_val { + u8 dtm_count[CMN_MAX_XPS]; + u8 occupid[CMN_MAX_XPS]; + u8 wp[CMN_MAX_XPS][4]; + int dtc_count; + bool cycles; +}; + +static void arm_cmn_val_add_event(struct arm_cmn_val *val, struct perf_event *event) +{ + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; + enum cmn_node_type type; + int i; + u8 occupid; + + if (is_software_event(event)) + return; + + type = CMN_EVENT_TYPE(event); + if (type == CMN_TYPE_DTC) { + val->cycles = true; + return; + } + + val->dtc_count++; + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + occupid = CMN_EVENT_OCCUPID(event) + 1; + else + occupid = 0; + + for_each_hw_dn(hw, dn, i) { + int wp_idx, xp = arm_cmn_node_to_xp(dn)->logid; + + val->dtm_count[xp]++; + val->occupid[xp] = occupid; + + if (type != CMN_TYPE_WP) + continue; + + wp_idx = arm_cmn_wp_idx(event); + val->wp[xp][wp_idx] = CMN_EVENT_WP_COMBINE(event) + 1; + } +} + +static int arm_cmn_validate_group(struct perf_event *event) +{ + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_node *dn; + struct perf_event *sibling, *leader = event->group_leader; + enum cmn_node_type type; + struct arm_cmn_val val; + int i; + u8 occupid; + + if (leader == event) + return 0; + + if (event->pmu != leader->pmu && !is_software_event(leader)) + return -EINVAL; + + memset(&val, 0, sizeof(val)); + + arm_cmn_val_add_event(&val, leader); + for_each_sibling_event(sibling, leader) + arm_cmn_val_add_event(&val, sibling); + + type = CMN_EVENT_TYPE(event); + if (type == CMN_TYPE_DTC) + return val.cycles ? -EINVAL : 0; + + if (val.dtc_count == CMN_DT_NUM_COUNTERS) + return -EINVAL; + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + occupid = CMN_EVENT_OCCUPID(event) + 1; + else + occupid = 0; + + for_each_hw_dn(hw, dn, i) { + int wp_idx, wp_cmb, xp = arm_cmn_node_to_xp(dn)->logid; + + if (val.dtm_count[xp] == CMN_DTM_NUM_COUNTERS) + return -EINVAL; + + if (occupid && val.occupid[xp] && occupid != val.occupid[xp]) + return -EINVAL; + + if (type != CMN_TYPE_WP) + continue; + + wp_idx = arm_cmn_wp_idx(event); + if (val.wp[xp][wp_idx]) + return -EINVAL; + + wp_cmb = val.wp[xp][wp_idx ^ 1]; + if (wp_cmb && wp_cmb != CMN_EVENT_WP_COMBINE(event) + 1) + return -EINVAL; + } + + return 0; +} + +static int arm_cmn_event_init(struct perf_event *event) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + enum cmn_node_type type; + unsigned int i; + bool bynodeid; + u16 nodeid, eventid; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + event->cpu = cmn->cpu; + if (event->cpu < 0) + return -EINVAL; + + type = CMN_EVENT_TYPE(event); + /* DTC events (i.e. 
cycles) already have everything they need */ + if (type == CMN_TYPE_DTC) + return 0; + + /* For watchpoints we need the actual XP node here */ + if (type == CMN_TYPE_WP) { + type = CMN_TYPE_XP; + /* ...and we need a "real" direction */ + eventid = CMN_EVENT_EVENTID(event); + if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN) + return -EINVAL; + } + + bynodeid = CMN_EVENT_BYNODEID(event); + nodeid = CMN_EVENT_NODEID(event); + + hw->dn = arm_cmn_node(cmn, type); + for (i = hw->dn - cmn->dns; i < cmn->num_dns && cmn->dns[i].type == type; i++) { + if (!bynodeid) { + hw->num_dns++; + } else if (cmn->dns[i].id != nodeid) { + hw->dn++; + } else { + hw->num_dns = 1; + break; + } + } + + if (!hw->num_dns) { + int bits = arm_cmn_xyidbits(cmn); + + dev_dbg(cmn->dev, "invalid node 0x%x (%d,%d,%d,%d) type 0x%x\n", + nodeid, CMN_NODEID_X(nodeid, bits), CMN_NODEID_Y(nodeid, bits), + CMN_NODEID_PID(nodeid), CMN_NODEID_DEVID(nodeid), type); + return -EINVAL; + } + /* + * By assuming events count in all DTC domains, we cunningly avoid + * needing to know anything about how XPs are assigned to domains. + */ + hw->dtcs_used = (1U << cmn->num_dtcs) - 1; + + return arm_cmn_validate_group(event); +} + +static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event, + int i) +{ + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + enum cmn_node_type type = CMN_EVENT_TYPE(event); + + while (i--) { + struct arm_cmn_node *xp = arm_cmn_node_to_xp(hw->dn + i); + unsigned int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i); + + if (type == CMN_TYPE_WP) + hw->dn[i].wp_event[arm_cmn_wp_idx(event)] = -1; + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) + hw->dn[i].occupid_count--; + + xp->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx); + writel_relaxed(xp->pmu_config_low, xp->pmu_base + CMN_DTM_PMU_CONFIG); + } + memset(hw->dtm_idx, 0, sizeof(hw->dtm_idx)); + + for (i = 0; hw->dtcs_used & (1U << i); i++) + cmn->dtc[i].counters[hw->dtc_idx] = NULL; +} + +static int arm_cmn_event_add(struct perf_event *event, int flags) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + struct arm_cmn_dtc *dtc = &cmn->dtc[0]; + struct arm_cmn_node *dn; + enum cmn_node_type type = CMN_EVENT_TYPE(event); + unsigned int i, dtc_idx, input_sel; + + if (type == CMN_TYPE_DTC) { + i = 0; + while (cmn->dtc[i].cycles) + if (++i == cmn->num_dtcs) + return -ENOSPC; + + cmn->dtc[i].cycles = event; + hw->dtc_idx = CMN_DT_NUM_COUNTERS; + hw->dtcs_used = 1U << i; + + if (flags & PERF_EF_START) + arm_cmn_event_start(event, 0); + return 0; + } + + /* Grab a free global counter first... */ + dtc_idx = 0; + while (dtc->counters[dtc_idx]) + if (++dtc_idx == CMN_DT_NUM_COUNTERS) + return -ENOSPC; + + hw->dtc_idx = dtc_idx; + + /* ...then the local counters to feed it. 
*/ + for_each_hw_dn(hw, dn, i) { + struct arm_cmn_node *xp = arm_cmn_node_to_xp(dn); + unsigned int dtm_idx, shift; + u64 reg; + + dtm_idx = 0; + while (xp->pmu_config_low & CMN__PMEVCNT_PAIRED(dtm_idx)) + if (++dtm_idx == CMN_DTM_NUM_COUNTERS) + goto free_dtms; + + if (type == CMN_TYPE_XP) { + input_sel = CMN__PMEVCNT0_INPUT_SEL_XP + dtm_idx; + } else if (type == CMN_TYPE_WP) { + int tmp, wp_idx = arm_cmn_wp_idx(event); + u32 cfg = arm_cmn_wp_config(event); + + if (dn->wp_event[wp_idx] >= 0) + goto free_dtms; + + tmp = dn->wp_event[wp_idx ^ 1]; + if (tmp >= 0 && CMN_EVENT_WP_COMBINE(event) != + CMN_EVENT_WP_COMBINE(dtc->counters[tmp])) + goto free_dtms; + + input_sel = CMN__PMEVCNT0_INPUT_SEL_WP + wp_idx; + dn->wp_event[wp_idx] = dtc_idx; + writel_relaxed(cfg, dn->pmu_base + CMN_DTM_WPn_CONFIG(wp_idx)); + } else { + unsigned int port = CMN_NODEID_PID(dn->id); + unsigned int dev = CMN_NODEID_DEVID(dn->id); + + input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx + + (port << 4) + (dev << 2); + + if (arm_cmn_is_occup_event(type, CMN_EVENT_EVENTID(event))) { + int occupid = CMN_EVENT_OCCUPID(event); + + if (dn->occupid_count == 0) { + dn->occupid_val = occupid; + writel_relaxed(occupid, + dn->pmu_base + CMN_PMU_EVENT_SEL + 4); + } else if (dn->occupid_val != occupid) { + goto free_dtms; + } + dn->occupid_count++; + } + } + + arm_cmn_set_index(hw->dtm_idx, i, dtm_idx); + + xp->input_sel[dtm_idx] = input_sel; + shift = CMN__PMEVCNTn_GLOBAL_NUM_SHIFT(dtm_idx); + xp->pmu_config_low &= ~(CMN__PMEVCNT0_GLOBAL_NUM << shift); + xp->pmu_config_low |= FIELD_PREP(CMN__PMEVCNT0_GLOBAL_NUM, dtc_idx) << shift; + xp->pmu_config_low |= CMN__PMEVCNT_PAIRED(dtm_idx); + reg = (u64)le32_to_cpu(xp->pmu_config_high) << 32 | xp->pmu_config_low; + writeq_relaxed(reg, xp->pmu_base + CMN_DTM_PMU_CONFIG); + } + + /* Go go go! */ + arm_cmn_init_counter(event); + + if (flags & PERF_EF_START) + arm_cmn_event_start(event, 0); + + return 0; + +free_dtms: + arm_cmn_event_clear(cmn, event, i); + return -ENOSPC; +} + +static void arm_cmn_event_del(struct perf_event *event, int flags) +{ + struct arm_cmn *cmn = to_cmn(event->pmu); + struct arm_cmn_hw_event *hw = to_cmn_hw(event); + enum cmn_node_type type = CMN_EVENT_TYPE(event); + + arm_cmn_event_stop(event, PERF_EF_UPDATE); + + if (type == CMN_TYPE_DTC) + cmn->dtc[__ffs(hw->dtcs_used)].cycles = NULL; + else + arm_cmn_event_clear(cmn, event, hw->num_dns); +} + +/* + * We stop the PMU for both add and read, to avoid skew across DTM counters. 
+ * In theory we could use snapshots to read without stopping, but then it + * becomes a lot trickier to deal with overflow and racing against interrupts, + * plus it seems they don't work properly on some hardware anyway :( + */ +static void arm_cmn_start_txn(struct pmu *pmu, unsigned int flags) +{ + arm_cmn_set_state(to_cmn(pmu), CMN_STATE_TXN); +} + +static void arm_cmn_end_txn(struct pmu *pmu) +{ + arm_cmn_clear_state(to_cmn(pmu), CMN_STATE_TXN); +} + +static int arm_cmn_commit_txn(struct pmu *pmu) +{ + arm_cmn_end_txn(pmu); + return 0; +} + +static int arm_cmn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct arm_cmn *cmn; + unsigned int i, target; + + cmn = hlist_entry_safe(node, struct arm_cmn, cpuhp_node); + if (cpu != cmn->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&cmn->pmu, cpu, target); + for (i = 0; i < cmn->num_dtcs; i++) + irq_set_affinity_hint(cmn->dtc[i].irq, cpumask_of(target)); + cmn->cpu = target; + return 0; +} + +static irqreturn_t arm_cmn_handle_irq(int irq, void *dev_id) +{ + struct arm_cmn_dtc *dtc = dev_id; + irqreturn_t ret = IRQ_NONE; + + for (;;) { + u32 status = readl_relaxed(dtc->base + CMN_DT_PMOVSR); + u64 delta; + int i; + + for (i = 0; i < CMN_DT_NUM_COUNTERS; i++) { + if (status & (1U << i)) { + ret = IRQ_HANDLED; + if (WARN_ON(!dtc->counters[i])) + continue; + delta = (u64)arm_cmn_read_counter(dtc, i) << 16; + local64_add(delta, &dtc->counters[i]->count); + } + } + + if (status & (1U << CMN_DT_NUM_COUNTERS)) { + ret = IRQ_HANDLED; + if (dtc->cc_active && !WARN_ON(!dtc->cycles)) { + delta = arm_cmn_read_cc(dtc); + local64_add(delta, &dtc->cycles->count); + } + } + + writel_relaxed(status, dtc->base + CMN_DT_PMOVSR_CLR); + + if (!dtc->irq_friend) + return ret; + dtc += dtc->irq_friend; + } +} + +/* We can reasonably accommodate DTCs of the same CMN sharing IRQs */ +static int arm_cmn_init_irqs(struct arm_cmn *cmn) +{ + int i, j, irq, err; + + for (i = 0; i < cmn->num_dtcs; i++) { + irq = cmn->dtc[i].irq; + for (j = i; j--; ) { + if (cmn->dtc[j].irq == irq) { + cmn->dtc[j].irq_friend = i - j; + goto next; + } + } + err = devm_request_irq(cmn->dev, irq, arm_cmn_handle_irq, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(cmn->dev), &cmn->dtc[i]); + if (err) + return err; + + err = irq_set_affinity_hint(irq, cpumask_of(cmn->cpu)); + if (err) + return err; + next: + ; /* isn't C great? 
*/ + } + return 0; +} + +static void arm_cmn_init_dtm(struct arm_cmn_node *xp) +{ + int i; + + for (i = 0; i < 4; i++) { + xp->wp_event[i] = -1; + writeq_relaxed(0, xp->pmu_base + CMN_DTM_WPn_MASK(i)); + writeq_relaxed(~0ULL, xp->pmu_base + CMN_DTM_WPn_VAL(i)); + } + xp->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN; + xp->dtc = -1; +} + +static int arm_cmn_init_dtc(struct arm_cmn *cmn, struct arm_cmn_node *dn, int idx) +{ + struct arm_cmn_dtc *dtc = cmn->dtc + idx; + struct arm_cmn_node *xp; + + dtc->base = dn->pmu_base - CMN_PMU_OFFSET; + dtc->irq = platform_get_irq(to_platform_device(cmn->dev), idx); + if (dtc->irq < 0) + return dtc->irq; + + writel_relaxed(CMN_DT_DTC_CTL_DT_EN, dtc->base + CMN_DT_DTC_CTL); + writel_relaxed(CMN_DT_PMCR_PMU_EN | CMN_DT_PMCR_OVFL_INTR_EN, dtc->base + CMN_DT_PMCR); + writeq_relaxed(0, dtc->base + CMN_DT_PMCCNTR); + writel_relaxed(0x1ff, dtc->base + CMN_DT_PMOVSR_CLR); + + /* We do at least know that a DTC's XP must be in that DTC's domain */ + xp = arm_cmn_node_to_xp(dn); + xp->dtc = idx; + + return 0; +} + +static int arm_cmn_node_cmp(const void *a, const void *b) +{ + const struct arm_cmn_node *dna = a, *dnb = b; + int cmp; + + cmp = dna->type - dnb->type; + if (!cmp) + cmp = dna->logid - dnb->logid; + return cmp; +} + +static int arm_cmn_init_dtcs(struct arm_cmn *cmn) +{ + struct arm_cmn_node *dn; + int dtc_idx = 0; + + cmn->dtc = devm_kcalloc(cmn->dev, cmn->num_dtcs, sizeof(cmn->dtc[0]), GFP_KERNEL); + if (!cmn->dtc) + return -ENOMEM; + + sort(cmn->dns, cmn->num_dns, sizeof(cmn->dns[0]), arm_cmn_node_cmp, NULL); + + cmn->xps = arm_cmn_node(cmn, CMN_TYPE_XP); + + for (dn = cmn->dns; dn < cmn->dns + cmn->num_dns; dn++) { + if (dn->type != CMN_TYPE_XP) + arm_cmn_init_node_to_xp(cmn, dn); + else if (cmn->num_dtcs == 1) + dn->dtc = 0; + + if (dn->type == CMN_TYPE_DTC) + arm_cmn_init_dtc(cmn, dn, dtc_idx++); + + /* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */ + if (dn->type == CMN_TYPE_RND) + dn->type = CMN_TYPE_RNI; + } + + arm_cmn_set_state(cmn, CMN_STATE_DISABLED); + + return 0; +} + +static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_cmn_node *node) +{ + int level; + u64 reg = readq_relaxed(cmn->base + offset + CMN_NODE_INFO); + + node->type = FIELD_GET(CMN_NI_NODE_TYPE, reg); + node->id = FIELD_GET(CMN_NI_NODE_ID, reg); + node->logid = FIELD_GET(CMN_NI_LOGICAL_ID, reg); + + node->pmu_base = cmn->base + offset + CMN_PMU_OFFSET; + + if (node->type == CMN_TYPE_CFG) + level = 0; + else if (node->type == CMN_TYPE_XP) + level = 1; + else + level = 2; + + dev_dbg(cmn->dev, "node%*c%#06hx%*ctype:%-#6x id:%-4hd off:%#x\n", + (level * 2) + 1, ' ', node->id, 5 - (level * 2), ' ', + node->type, node->logid, offset); +} + +static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) +{ + void __iomem *cfg_region; + struct arm_cmn_node cfg, *dn; + u16 child_count, child_poff; + u32 xp_offset[CMN_MAX_XPS]; + u64 reg; + int i, j; + + cfg_region = cmn->base + rgn_offset; + reg = readl_relaxed(cfg_region + CMN_CFGM_PERIPH_ID_2); + cmn->rev = FIELD_GET(CMN_CFGM_PID2_REVISION, reg); + dev_dbg(cmn->dev, "periph_id_2 revision: %d\n", cmn->rev); + + arm_cmn_init_node_info(cmn, rgn_offset, &cfg); + if (cfg.type != CMN_TYPE_CFG) + return -ENODEV; + + reg = readq_relaxed(cfg_region + CMN_CHILD_INFO); + child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); + child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); + + cmn->num_xps = child_count; + cmn->num_dns = cmn->num_xps; + + /* Pass 1: visit the XPs, enumerate 
their children */ + for (i = 0; i < cmn->num_xps; i++) { + reg = readq_relaxed(cfg_region + child_poff + i * 8); + xp_offset[i] = reg & CMN_CHILD_NODE_ADDR; + + reg = readq_relaxed(cmn->base + xp_offset[i] + CMN_CHILD_INFO); + cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg); + } + + /* Cheeky +1 to help terminate pointer-based iteration */ + cmn->dns = devm_kcalloc(cmn->dev, cmn->num_dns + 1, + sizeof(*cmn->dns), GFP_KERNEL); + if (!cmn->dns) + return -ENOMEM; + + /* Pass 2: now we can actually populate the nodes */ + dn = cmn->dns; + for (i = 0; i < cmn->num_xps; i++) { + void __iomem *xp_region = cmn->base + xp_offset[i]; + struct arm_cmn_node *xp = dn++; + + arm_cmn_init_node_info(cmn, xp_offset[i], xp); + arm_cmn_init_dtm(xp); + /* + * Thanks to the order in which XP logical IDs seem to be + * assigned, we can handily infer the mesh X dimension by + * looking out for the XP at (0,1) without needing to know + * the exact node ID format, which we can later derive. + */ + if (xp->id == (1 << 3)) + cmn->mesh_x = xp->logid; + + reg = readq_relaxed(xp_region + CMN_CHILD_INFO); + child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg); + child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg); + + for (j = 0; j < child_count; j++) { + reg = readq_relaxed(xp_region + child_poff + j * 8); + /* + * Don't even try to touch anything external, since in general + * we haven't a clue how to power up arbitrary CHI requesters. + * As of CMN-600r1 these could only be RN-SAMs or CXLAs, + * neither of which have any PMU events anyway. + * (Actually, CXLAs do seem to have grown some events in r1p2, + * but they don't go to regular XP DTMs, and they depend on + * secure configuration which we can't easily deal with) + */ + if (reg & CMN_CHILD_NODE_EXTERNAL) { + dev_dbg(cmn->dev, "ignoring external node %llx\n", reg); + continue; + } + + arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); + + switch (dn->type) { + case CMN_TYPE_DTC: + cmn->num_dtcs++; + dn++; + break; + /* These guys have PMU events */ + case CMN_TYPE_DVM: + case CMN_TYPE_HNI: + case CMN_TYPE_HNF: + case CMN_TYPE_SBSX: + case CMN_TYPE_RNI: + case CMN_TYPE_RND: + case CMN_TYPE_CXRA: + case CMN_TYPE_CXHA: + dn++; + break; + /* Nothing to see here */ + case CMN_TYPE_RNSAM: + case CMN_TYPE_CXLA: + break; + /* Something has gone horribly wrong */ + default: + dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type); + return -ENODEV; + } + } + } + + /* Correct for any nodes we skipped */ + cmn->num_dns = dn - cmn->dns; + + /* + * If mesh_x wasn't set during discovery then we never saw + * an XP at (0,1), thus we must have an Nx1 configuration. + */ + if (!cmn->mesh_x) + cmn->mesh_x = cmn->num_xps; + cmn->mesh_y = cmn->num_xps / cmn->mesh_x; + + dev_dbg(cmn->dev, "mesh %dx%d, ID width %d\n", + cmn->mesh_x, cmn->mesh_y, arm_cmn_xyidbits(cmn)); + + return 0; +} + +static int arm_cmn_acpi_probe(struct platform_device *pdev, struct arm_cmn *cmn) +{ + struct resource *cfg, *root; + + cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!cfg) + return -EINVAL; + + root = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!root) + return -EINVAL; + + if (!resource_contains(cfg, root)) + swap(cfg, root); + /* + * Note that devm_ioremap_resource() is dumb and won't let the platform + * device claim cfg when the ACPI companion device has already claimed + * root within it. But since they *are* already both claimed in the + * appropriate name, we don't really need to do it again here anyway. 
+ */ + cmn->base = devm_ioremap(cmn->dev, cfg->start, resource_size(cfg)); + if (!cmn->base) + return -ENOMEM; + + return root->start - cfg->start; +} + +static int arm_cmn_of_probe(struct platform_device *pdev, struct arm_cmn *cmn) +{ + struct device_node *np = pdev->dev.of_node; + u32 rootnode; + int ret; + + cmn->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cmn->base)) + return PTR_ERR(cmn->base); + + ret = of_property_read_u32(np, "arm,root-node", &rootnode); + if (ret) + return ret; + + return rootnode; +} + +static int arm_cmn_probe(struct platform_device *pdev) +{ + struct arm_cmn *cmn; + const char *name; + static atomic_t id; + int err, rootnode; + + cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); + if (!cmn) + return -ENOMEM; + + cmn->dev = &pdev->dev; + platform_set_drvdata(pdev, cmn); + + if (has_acpi_companion(cmn->dev)) + rootnode = arm_cmn_acpi_probe(pdev, cmn); + else + rootnode = arm_cmn_of_probe(pdev, cmn); + if (rootnode < 0) + return rootnode; + + err = arm_cmn_discover(cmn, rootnode); + if (err) + return err; + + err = arm_cmn_init_dtcs(cmn); + if (err) + return err; + + err = arm_cmn_init_irqs(cmn); + if (err) + return err; + + cmn->cpu = raw_smp_processor_id(); + cmn->pmu = (struct pmu) { + .module = THIS_MODULE, + .attr_groups = arm_cmn_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = arm_cmn_pmu_enable, + .pmu_disable = arm_cmn_pmu_disable, + .event_init = arm_cmn_event_init, + .add = arm_cmn_event_add, + .del = arm_cmn_event_del, + .start = arm_cmn_event_start, + .stop = arm_cmn_event_stop, + .read = arm_cmn_event_read, + .start_txn = arm_cmn_start_txn, + .commit_txn = arm_cmn_commit_txn, + .cancel_txn = arm_cmn_end_txn, + }; + + name = devm_kasprintf(cmn->dev, GFP_KERNEL, "arm_cmn_%d", atomic_fetch_inc(&id)); + if (!name) + return -ENOMEM; + + err = cpuhp_state_add_instance(arm_cmn_hp_state, &cmn->cpuhp_node); + if (err) + return err; + + err = perf_pmu_register(&cmn->pmu, name, -1); + if (err) + cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); + return err; +} + +static int arm_cmn_remove(struct platform_device *pdev) +{ + struct arm_cmn *cmn = platform_get_drvdata(pdev); + int i; + + writel_relaxed(0, cmn->dtc[0].base + CMN_DT_DTC_CTL); + + perf_pmu_unregister(&cmn->pmu); + cpuhp_state_remove_instance(arm_cmn_hp_state, &cmn->cpuhp_node); + + for (i = 0; i < cmn->num_dtcs; i++) + irq_set_affinity_hint(cmn->dtc[i].irq, NULL); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id arm_cmn_of_match[] = { + { .compatible = "arm,cmn-600", }, + {} +}; +MODULE_DEVICE_TABLE(of, arm_cmn_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id arm_cmn_acpi_match[] = { + { "ARMHC600", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match); +#endif + +static struct platform_driver arm_cmn_driver = { + .driver = { + .name = "arm-cmn", + .of_match_table = of_match_ptr(arm_cmn_of_match), + .acpi_match_table = ACPI_PTR(arm_cmn_acpi_match), + }, + .probe = arm_cmn_probe, + .remove = arm_cmn_remove, +}; + +static int __init arm_cmn_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/arm/cmn:online", NULL, + arm_cmn_pmu_offline_cpu); + if (ret < 0) + return ret; + + arm_cmn_hp_state = ret; + ret = platform_driver_register(&arm_cmn_driver); + if (ret) + cpuhp_remove_multi_state(arm_cmn_hp_state); + return ret; +} + +static void __exit arm_cmn_exit(void) +{ + platform_driver_unregister(&arm_cmn_driver); + 
cpuhp_remove_multi_state(arm_cmn_hp_state); +} + +module_init(arm_cmn_init); +module_exit(arm_cmn_exit); + +MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>"); +MODULE_DESCRIPTION("Arm CMN-600 PMU driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c new file mode 100644 index 000000000..1db8eccc9 --- /dev/null +++ b/drivers/perf/arm_dsu_pmu.c @@ -0,0 +1,887 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ARM DynamIQ Shared Unit (DSU) PMU driver + * + * Copyright (C) ARM Limited, 2017. + * + * Based on ARM CCI-PMU, ARMv8 PMU-v3 drivers. + */ + +#define PMUNAME "arm_dsu" +#define DRVNAME PMUNAME "_pmu" +#define pr_fmt(fmt) DRVNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/smp.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include <asm/arm_dsu_pmu.h> +#include <asm/local64.h> + +/* PMU event codes */ +#define DSU_PMU_EVT_CYCLES 0x11 +#define DSU_PMU_EVT_CHAIN 0x1e + +#define DSU_PMU_MAX_COMMON_EVENTS 0x40 + +#define DSU_PMU_MAX_HW_CNTRS 32 +#define DSU_PMU_HW_COUNTER_MASK (DSU_PMU_MAX_HW_CNTRS - 1) + +#define CLUSTERPMCR_E BIT(0) +#define CLUSTERPMCR_P BIT(1) +#define CLUSTERPMCR_C BIT(2) +#define CLUSTERPMCR_N_SHIFT 11 +#define CLUSTERPMCR_N_MASK 0x1f +#define CLUSTERPMCR_IDCODE_SHIFT 16 +#define CLUSTERPMCR_IDCODE_MASK 0xff +#define CLUSTERPMCR_IMP_SHIFT 24 +#define CLUSTERPMCR_IMP_MASK 0xff +#define CLUSTERPMCR_RES_MASK 0x7e8 +#define CLUSTERPMCR_RES_VAL 0x40 + +#define DSU_ACTIVE_CPU_MASK 0x0 +#define DSU_ASSOCIATED_CPU_MASK 0x1 + +/* + * We use the index of the counters as they appear in the counter + * bit maps in the PMU registers (e.g CLUSTERPMSELR). + * i.e, + * counter 0 - Bit 0 + * counter 1 - Bit 1 + * ... + * Cycle counter - Bit 31 + */ +#define DSU_PMU_IDX_CYCLE_COUNTER 31 + +/* All event counters are 32bit, with a 64bit Cycle counter */ +#define DSU_PMU_COUNTER_WIDTH(idx) \ + (((idx) == DSU_PMU_IDX_CYCLE_COUNTER) ? 64 : 32) + +#define DSU_PMU_COUNTER_MASK(idx) \ + GENMASK_ULL((DSU_PMU_COUNTER_WIDTH((idx)) - 1), 0) + +#define DSU_EXT_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { \ + .attr = __ATTR(_name, 0444, _func, NULL), \ + .var = (void *)_config \ + } \ + })[0].attr.attr) + +#define DSU_EVENT_ATTR(_name, _config) \ + DSU_EXT_ATTR(_name, dsu_pmu_sysfs_event_show, (unsigned long)_config) + +#define DSU_FORMAT_ATTR(_name, _config) \ + DSU_EXT_ATTR(_name, dsu_pmu_sysfs_format_show, (char *)_config) + +#define DSU_CPUMASK_ATTR(_name, _config) \ + DSU_EXT_ATTR(_name, dsu_pmu_cpumask_show, (unsigned long)_config) + +struct dsu_hw_events { + DECLARE_BITMAP(used_mask, DSU_PMU_MAX_HW_CNTRS); + struct perf_event *events[DSU_PMU_MAX_HW_CNTRS]; +}; + +/* + * struct dsu_pmu - DSU PMU descriptor + * + * @pmu_lock : Protects accesses to DSU PMU register from normal vs + * interrupt handler contexts. + * @hw_events : Holds the event counter state. + * @associated_cpus : CPUs attached to the DSU. + * @active_cpu : CPU to which the PMU is bound for accesses. + * @cpuhp_node : Node for CPU hotplug notifier link. + * @num_counters : Number of event counters implemented by the PMU, + * excluding the cycle counter. 
+ * @irq : Interrupt line for counter overflow. + * @cpmceid_bitmap : Bitmap for the availability of architected common + * events (event_code < 0x40). + */ +struct dsu_pmu { + struct pmu pmu; + struct device *dev; + raw_spinlock_t pmu_lock; + struct dsu_hw_events hw_events; + cpumask_t associated_cpus; + cpumask_t active_cpu; + struct hlist_node cpuhp_node; + s8 num_counters; + int irq; + DECLARE_BITMAP(cpmceid_bitmap, DSU_PMU_MAX_COMMON_EVENTS); +}; + +static unsigned long dsu_pmu_cpuhp_state; + +static inline struct dsu_pmu *to_dsu_pmu(struct pmu *pmu) +{ + return container_of(pmu, struct dsu_pmu, pmu); +} + +static ssize_t dsu_pmu_sysfs_event_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "event=0x%lx\n", + (unsigned long)eattr->var); +} + +static ssize_t dsu_pmu_sysfs_format_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var); +} + +static ssize_t dsu_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu); + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr); + unsigned long mask_id = (unsigned long)eattr->var; + const cpumask_t *cpumask; + + switch (mask_id) { + case DSU_ACTIVE_CPU_MASK: + cpumask = &dsu_pmu->active_cpu; + break; + case DSU_ASSOCIATED_CPU_MASK: + cpumask = &dsu_pmu->associated_cpus; + break; + default: + return 0; + } + return cpumap_print_to_pagebuf(true, buf, cpumask); +} + +static struct attribute *dsu_pmu_format_attrs[] = { + DSU_FORMAT_ATTR(event, "config:0-31"), + NULL, +}; + +static const struct attribute_group dsu_pmu_format_attr_group = { + .name = "format", + .attrs = dsu_pmu_format_attrs, +}; + +static struct attribute *dsu_pmu_event_attrs[] = { + DSU_EVENT_ATTR(cycles, 0x11), + DSU_EVENT_ATTR(bus_access, 0x19), + DSU_EVENT_ATTR(memory_error, 0x1a), + DSU_EVENT_ATTR(bus_cycles, 0x1d), + DSU_EVENT_ATTR(l3d_cache_allocate, 0x29), + DSU_EVENT_ATTR(l3d_cache_refill, 0x2a), + DSU_EVENT_ATTR(l3d_cache, 0x2b), + DSU_EVENT_ATTR(l3d_cache_wb, 0x2c), + NULL, +}; + +static umode_t +dsu_pmu_event_attr_is_visible(struct kobject *kobj, struct attribute *attr, + int unused) +{ + struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj)); + struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu); + struct dev_ext_attribute *eattr = container_of(attr, + struct dev_ext_attribute, attr.attr); + unsigned long evt = (unsigned long)eattr->var; + + return test_bit(evt, dsu_pmu->cpmceid_bitmap) ? 
attr->mode : 0; +} + +static const struct attribute_group dsu_pmu_events_attr_group = { + .name = "events", + .attrs = dsu_pmu_event_attrs, + .is_visible = dsu_pmu_event_attr_is_visible, +}; + +static struct attribute *dsu_pmu_cpumask_attrs[] = { + DSU_CPUMASK_ATTR(cpumask, DSU_ACTIVE_CPU_MASK), + DSU_CPUMASK_ATTR(associated_cpus, DSU_ASSOCIATED_CPU_MASK), + NULL, +}; + +static const struct attribute_group dsu_pmu_cpumask_attr_group = { + .attrs = dsu_pmu_cpumask_attrs, +}; + +static const struct attribute_group *dsu_pmu_attr_groups[] = { + &dsu_pmu_cpumask_attr_group, + &dsu_pmu_events_attr_group, + &dsu_pmu_format_attr_group, + NULL, +}; + +static int dsu_pmu_get_online_cpu_any_but(struct dsu_pmu *dsu_pmu, int cpu) +{ + struct cpumask online_supported; + + cpumask_and(&online_supported, + &dsu_pmu->associated_cpus, cpu_online_mask); + return cpumask_any_but(&online_supported, cpu); +} + +static inline bool dsu_pmu_counter_valid(struct dsu_pmu *dsu_pmu, u32 idx) +{ + return (idx < dsu_pmu->num_counters) || + (idx == DSU_PMU_IDX_CYCLE_COUNTER); +} + +static inline u64 dsu_pmu_read_counter(struct perf_event *event) +{ + u64 val; + unsigned long flags; + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + int idx = event->hw.idx; + + if (WARN_ON(!cpumask_test_cpu(smp_processor_id(), + &dsu_pmu->associated_cpus))) + return 0; + + if (!dsu_pmu_counter_valid(dsu_pmu, idx)) { + dev_err(event->pmu->dev, + "Trying reading invalid counter %d\n", idx); + return 0; + } + + raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags); + if (idx == DSU_PMU_IDX_CYCLE_COUNTER) + val = __dsu_pmu_read_pmccntr(); + else + val = __dsu_pmu_read_counter(idx); + raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags); + + return val; +} + +static void dsu_pmu_write_counter(struct perf_event *event, u64 val) +{ + unsigned long flags; + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + int idx = event->hw.idx; + + if (WARN_ON(!cpumask_test_cpu(smp_processor_id(), + &dsu_pmu->associated_cpus))) + return; + + if (!dsu_pmu_counter_valid(dsu_pmu, idx)) { + dev_err(event->pmu->dev, + "writing to invalid counter %d\n", idx); + return; + } + + raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags); + if (idx == DSU_PMU_IDX_CYCLE_COUNTER) + __dsu_pmu_write_pmccntr(val); + else + __dsu_pmu_write_counter(idx, val); + raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags); +} + +static int dsu_pmu_get_event_idx(struct dsu_hw_events *hw_events, + struct perf_event *event) +{ + int idx; + unsigned long evtype = event->attr.config; + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + unsigned long *used_mask = hw_events->used_mask; + + if (evtype == DSU_PMU_EVT_CYCLES) { + if (test_and_set_bit(DSU_PMU_IDX_CYCLE_COUNTER, used_mask)) + return -EAGAIN; + return DSU_PMU_IDX_CYCLE_COUNTER; + } + + idx = find_first_zero_bit(used_mask, dsu_pmu->num_counters); + if (idx >= dsu_pmu->num_counters) + return -EAGAIN; + set_bit(idx, hw_events->used_mask); + return idx; +} + +static void dsu_pmu_enable_counter(struct dsu_pmu *dsu_pmu, int idx) +{ + __dsu_pmu_counter_interrupt_enable(idx); + __dsu_pmu_enable_counter(idx); +} + +static void dsu_pmu_disable_counter(struct dsu_pmu *dsu_pmu, int idx) +{ + __dsu_pmu_disable_counter(idx); + __dsu_pmu_counter_interrupt_disable(idx); +} + +static inline void dsu_pmu_set_event(struct dsu_pmu *dsu_pmu, + struct perf_event *event) +{ + int idx = event->hw.idx; + unsigned long flags; + + if (!dsu_pmu_counter_valid(dsu_pmu, idx)) { + dev_err(event->pmu->dev, + "Trying to set invalid counter %d\n", idx); + return; + } + + 
raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags); + __dsu_pmu_set_event(idx, event->hw.config_base); + raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags); +} + +static void dsu_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_count, new_count; + + do { + /* We may also be called from the irq handler */ + prev_count = local64_read(&hwc->prev_count); + new_count = dsu_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_count, new_count) != + prev_count); + delta = (new_count - prev_count) & DSU_PMU_COUNTER_MASK(hwc->idx); + local64_add(delta, &event->count); +} + +static void dsu_pmu_read(struct perf_event *event) +{ + dsu_pmu_event_update(event); +} + +static inline u32 dsu_pmu_get_reset_overflow(void) +{ + return __dsu_pmu_get_reset_overflow(); +} + +/** + * dsu_pmu_set_event_period: Set the period for the counter. + * + * All DSU PMU event counters, except the cycle counter are 32bit + * counters. To handle cases of extreme interrupt latency, we program + * the counter with half of the max count for the counters. + */ +static void dsu_pmu_set_event_period(struct perf_event *event) +{ + int idx = event->hw.idx; + u64 val = DSU_PMU_COUNTER_MASK(idx) >> 1; + + local64_set(&event->hw.prev_count, val); + dsu_pmu_write_counter(event, val); +} + +static irqreturn_t dsu_pmu_handle_irq(int irq_num, void *dev) +{ + int i; + bool handled = false; + struct dsu_pmu *dsu_pmu = dev; + struct dsu_hw_events *hw_events = &dsu_pmu->hw_events; + unsigned long overflow; + + overflow = dsu_pmu_get_reset_overflow(); + if (!overflow) + return IRQ_NONE; + + for_each_set_bit(i, &overflow, DSU_PMU_MAX_HW_CNTRS) { + struct perf_event *event = hw_events->events[i]; + + if (!event) + continue; + dsu_pmu_event_update(event); + dsu_pmu_set_event_period(event); + handled = true; + } + + return IRQ_RETVAL(handled); +} + +static void dsu_pmu_start(struct perf_event *event, int pmu_flags) +{ + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + + /* We always reprogram the counter */ + if (pmu_flags & PERF_EF_RELOAD) + WARN_ON(!(event->hw.state & PERF_HES_UPTODATE)); + dsu_pmu_set_event_period(event); + if (event->hw.idx != DSU_PMU_IDX_CYCLE_COUNTER) + dsu_pmu_set_event(dsu_pmu, event); + event->hw.state = 0; + dsu_pmu_enable_counter(dsu_pmu, event->hw.idx); +} + +static void dsu_pmu_stop(struct perf_event *event, int pmu_flags) +{ + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + + if (event->hw.state & PERF_HES_STOPPED) + return; + dsu_pmu_disable_counter(dsu_pmu, event->hw.idx); + dsu_pmu_event_update(event); + event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int dsu_pmu_add(struct perf_event *event, int flags) +{ + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + struct dsu_hw_events *hw_events = &dsu_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx; + + if (WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), + &dsu_pmu->associated_cpus))) + return -ENOENT; + + idx = dsu_pmu_get_event_idx(hw_events, event); + if (idx < 0) + return idx; + + hwc->idx = idx; + hw_events->events[idx] = event; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (flags & PERF_EF_START) + dsu_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + return 0; +} + +static void dsu_pmu_del(struct perf_event *event, int flags) +{ + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + struct dsu_hw_events *hw_events = &dsu_pmu->hw_events; + struct hw_perf_event *hwc = &event->hw; + int idx = 
hwc->idx; + + dsu_pmu_stop(event, PERF_EF_UPDATE); + hw_events->events[idx] = NULL; + clear_bit(idx, hw_events->used_mask); + perf_event_update_userpage(event); +} + +static void dsu_pmu_enable(struct pmu *pmu) +{ + u32 pmcr; + unsigned long flags; + struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu); + + /* If no counters are added, skip enabling the PMU */ + if (bitmap_empty(dsu_pmu->hw_events.used_mask, DSU_PMU_MAX_HW_CNTRS)) + return; + + raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags); + pmcr = __dsu_pmu_read_pmcr(); + pmcr |= CLUSTERPMCR_E; + __dsu_pmu_write_pmcr(pmcr); + raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags); +} + +static void dsu_pmu_disable(struct pmu *pmu) +{ + u32 pmcr; + unsigned long flags; + struct dsu_pmu *dsu_pmu = to_dsu_pmu(pmu); + + raw_spin_lock_irqsave(&dsu_pmu->pmu_lock, flags); + pmcr = __dsu_pmu_read_pmcr(); + pmcr &= ~CLUSTERPMCR_E; + __dsu_pmu_write_pmcr(pmcr); + raw_spin_unlock_irqrestore(&dsu_pmu->pmu_lock, flags); +} + +static bool dsu_pmu_validate_event(struct pmu *pmu, + struct dsu_hw_events *hw_events, + struct perf_event *event) +{ + if (is_software_event(event)) + return true; + /* Reject groups spanning multiple HW PMUs. */ + if (event->pmu != pmu) + return false; + return dsu_pmu_get_event_idx(hw_events, event) >= 0; +} + +/* + * Make sure the group of events can be scheduled at once + * on the PMU. + */ +static bool dsu_pmu_validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct dsu_hw_events fake_hw; + + if (event->group_leader == event) + return true; + + memset(fake_hw.used_mask, 0, sizeof(fake_hw.used_mask)); + if (!dsu_pmu_validate_event(event->pmu, &fake_hw, leader)) + return false; + for_each_sibling_event(sibling, leader) { + if (!dsu_pmu_validate_event(event->pmu, &fake_hw, sibling)) + return false; + } + return dsu_pmu_validate_event(event->pmu, &fake_hw, event); +} + +static int dsu_pmu_event_init(struct perf_event *event) +{ + struct dsu_pmu *dsu_pmu = to_dsu_pmu(event->pmu); + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* We don't support sampling */ + if (is_sampling_event(event)) { + dev_dbg(dsu_pmu->pmu.dev, "Can't support sampling events\n"); + return -EOPNOTSUPP; + } + + /* We cannot support task bound events */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) { + dev_dbg(dsu_pmu->pmu.dev, "Can't support per-task counters\n"); + return -EINVAL; + } + + if (has_branch_stack(event)) { + dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n"); + return -EINVAL; + } + + if (!cpumask_test_cpu(event->cpu, &dsu_pmu->associated_cpus)) { + dev_dbg(dsu_pmu->pmu.dev, + "Requested cpu is not associated with the DSU\n"); + return -EINVAL; + } + /* + * Choose the current active CPU to read the events. We don't want + * to migrate the event contexts, irq handling etc to the requested + * CPU. As long as the requested CPU is within the same DSU, we + * are fine. + */ + event->cpu = cpumask_first(&dsu_pmu->active_cpu); + if (event->cpu >= nr_cpu_ids) + return -EINVAL; + if (!dsu_pmu_validate_group(event)) + return -EINVAL; + + event->hw.config_base = event->attr.config; + return 0; +} + +static struct dsu_pmu *dsu_pmu_alloc(struct platform_device *pdev) +{ + struct dsu_pmu *dsu_pmu; + + dsu_pmu = devm_kzalloc(&pdev->dev, sizeof(*dsu_pmu), GFP_KERNEL); + if (!dsu_pmu) + return ERR_PTR(-ENOMEM); + + raw_spin_lock_init(&dsu_pmu->pmu_lock); + /* + * Initialise the number of counters to -1, until we probe + * the real number on a connected CPU. 
+ */ + dsu_pmu->num_counters = -1; + return dsu_pmu; +} + +/** + * dsu_pmu_dt_get_cpus: Get the list of CPUs in the cluster + * from device tree. + */ +static int dsu_pmu_dt_get_cpus(struct device *dev, cpumask_t *mask) +{ + int i = 0, n, cpu; + struct device_node *cpu_node; + + n = of_count_phandle_with_args(dev->of_node, "cpus", NULL); + if (n <= 0) + return -ENODEV; + for (; i < n; i++) { + cpu_node = of_parse_phandle(dev->of_node, "cpus", i); + if (!cpu_node) + break; + cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); + /* + * We have to ignore the failures here and continue scanning + * the list to handle cases where the nr_cpus could be capped + * in the running kernel. + */ + if (cpu < 0) + continue; + cpumask_set_cpu(cpu, mask); + } + return 0; +} + +/** + * dsu_pmu_acpi_get_cpus: Get the list of CPUs in the cluster + * from ACPI. + */ +static int dsu_pmu_acpi_get_cpus(struct device *dev, cpumask_t *mask) +{ +#ifdef CONFIG_ACPI + int cpu; + + /* + * A dsu pmu node is inside a cluster parent node along with cpu nodes. + * We need to find out all cpus that have the same parent with this pmu. + */ + for_each_possible_cpu(cpu) { + struct acpi_device *acpi_dev; + struct device *cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) + continue; + + acpi_dev = ACPI_COMPANION(cpu_dev); + if (acpi_dev && + acpi_dev->parent == ACPI_COMPANION(dev)->parent) + cpumask_set_cpu(cpu, mask); + } +#endif + + return 0; +} + +/* + * dsu_pmu_probe_pmu: Probe the PMU details on a CPU in the cluster. + */ +static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu) +{ + u64 num_counters; + u32 cpmceid[2]; + + num_counters = (__dsu_pmu_read_pmcr() >> CLUSTERPMCR_N_SHIFT) & + CLUSTERPMCR_N_MASK; + /* We can only support up to 31 independent counters */ + if (WARN_ON(num_counters > 31)) + num_counters = 31; + dsu_pmu->num_counters = num_counters; + if (!dsu_pmu->num_counters) + return; + cpmceid[0] = __dsu_pmu_read_pmceid(0); + cpmceid[1] = __dsu_pmu_read_pmceid(1); + bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid, + DSU_PMU_MAX_COMMON_EVENTS); +} + +static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu) +{ + cpumask_set_cpu(cpu, &dsu_pmu->active_cpu); + if (irq_set_affinity_hint(dsu_pmu->irq, &dsu_pmu->active_cpu)) + pr_warn("Failed to set irq affinity to %d\n", cpu); +} + +/* + * dsu_pmu_init_pmu: Initialise the DSU PMU configurations if + * we haven't done it already. 
+ */ +static void dsu_pmu_init_pmu(struct dsu_pmu *dsu_pmu) +{ + if (dsu_pmu->num_counters == -1) + dsu_pmu_probe_pmu(dsu_pmu); + /* Reset the interrupt overflow mask */ + dsu_pmu_get_reset_overflow(); +} + +static int dsu_pmu_device_probe(struct platform_device *pdev) +{ + int irq, rc; + struct dsu_pmu *dsu_pmu; + struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev); + char *name; + static atomic_t pmu_idx = ATOMIC_INIT(-1); + + dsu_pmu = dsu_pmu_alloc(pdev); + if (IS_ERR(dsu_pmu)) + return PTR_ERR(dsu_pmu); + + if (IS_ERR_OR_NULL(fwnode)) + return -ENOENT; + + if (is_of_node(fwnode)) + rc = dsu_pmu_dt_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus); + else if (is_acpi_device_node(fwnode)) + rc = dsu_pmu_acpi_get_cpus(&pdev->dev, &dsu_pmu->associated_cpus); + else + return -ENOENT; + + if (rc) { + dev_warn(&pdev->dev, "Failed to parse the CPUs\n"); + return rc; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", + PMUNAME, atomic_inc_return(&pmu_idx)); + if (!name) + return -ENOMEM; + rc = devm_request_irq(&pdev->dev, irq, dsu_pmu_handle_irq, + IRQF_NOBALANCING, name, dsu_pmu); + if (rc) { + dev_warn(&pdev->dev, "Failed to request IRQ %d\n", irq); + return rc; + } + + dsu_pmu->irq = irq; + platform_set_drvdata(pdev, dsu_pmu); + rc = cpuhp_state_add_instance(dsu_pmu_cpuhp_state, + &dsu_pmu->cpuhp_node); + if (rc) + return rc; + + dsu_pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + .module = THIS_MODULE, + .pmu_enable = dsu_pmu_enable, + .pmu_disable = dsu_pmu_disable, + .event_init = dsu_pmu_event_init, + .add = dsu_pmu_add, + .del = dsu_pmu_del, + .start = dsu_pmu_start, + .stop = dsu_pmu_stop, + .read = dsu_pmu_read, + + .attr_groups = dsu_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + rc = perf_pmu_register(&dsu_pmu->pmu, name, -1); + if (rc) { + cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, + &dsu_pmu->cpuhp_node); + irq_set_affinity_hint(dsu_pmu->irq, NULL); + } + + return rc; +} + +static int dsu_pmu_device_remove(struct platform_device *pdev) +{ + struct dsu_pmu *dsu_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&dsu_pmu->pmu); + cpuhp_state_remove_instance(dsu_pmu_cpuhp_state, &dsu_pmu->cpuhp_node); + irq_set_affinity_hint(dsu_pmu->irq, NULL); + + return 0; +} + +static const struct of_device_id dsu_pmu_of_match[] = { + { .compatible = "arm,dsu-pmu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, dsu_pmu_of_match); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id dsu_pmu_acpi_match[] = { + { "ARMHD500", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, dsu_pmu_acpi_match); +#endif + +static struct platform_driver dsu_pmu_driver = { + .driver = { + .name = DRVNAME, + .of_match_table = of_match_ptr(dsu_pmu_of_match), + .acpi_match_table = ACPI_PTR(dsu_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = dsu_pmu_device_probe, + .remove = dsu_pmu_device_remove, +}; + +static int dsu_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct dsu_pmu, + cpuhp_node); + + if (!cpumask_test_cpu(cpu, &dsu_pmu->associated_cpus)) + return 0; + + /* If the PMU is already managed, there is nothing to do */ + if (!cpumask_empty(&dsu_pmu->active_cpu)) + return 0; + + dsu_pmu_init_pmu(dsu_pmu); + dsu_pmu_set_active_cpu(cpu, dsu_pmu); + + return 0; +} + +static int dsu_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node) +{ + int dst; + struct dsu_pmu *dsu_pmu = hlist_entry_safe(node, struct 
dsu_pmu, + cpuhp_node); + + if (!cpumask_test_and_clear_cpu(cpu, &dsu_pmu->active_cpu)) + return 0; + + dst = dsu_pmu_get_online_cpu_any_but(dsu_pmu, cpu); + /* If there are no active CPUs in the DSU, leave IRQ disabled */ + if (dst >= nr_cpu_ids) { + irq_set_affinity_hint(dsu_pmu->irq, NULL); + return 0; + } + + perf_pmu_migrate_context(&dsu_pmu->pmu, cpu, dst); + dsu_pmu_set_active_cpu(dst, dsu_pmu); + + return 0; +} + +static int __init dsu_pmu_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + DRVNAME, + dsu_pmu_cpu_online, + dsu_pmu_cpu_teardown); + if (ret < 0) + return ret; + dsu_pmu_cpuhp_state = ret; + ret = platform_driver_register(&dsu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(dsu_pmu_cpuhp_state); + + return ret; +} + +static void __exit dsu_pmu_exit(void) +{ + platform_driver_unregister(&dsu_pmu_driver); + cpuhp_remove_multi_state(dsu_pmu_cpuhp_state); +} + +module_init(dsu_pmu_init); +module_exit(dsu_pmu_exit); + +MODULE_DESCRIPTION("Perf driver for ARM DynamIQ Shared Unit"); +MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c new file mode 100644 index 000000000..7fd11ef5c --- /dev/null +++ b/drivers/perf/arm_pmu.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0-only +#undef DEBUG + +/* + * ARM performance counter support. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> + * + * This code is based on the sparc64 perf event code, which is in turn based + * on the x86 code. + */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include <linux/bitmap.h> +#include <linux/cpumask.h> +#include <linux/cpu_pm.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/perf/arm_pmu.h> +#include <linux/slab.h> +#include <linux/sched/clock.h> +#include <linux/spinlock.h> +#include <linux/irq.h> +#include <linux/irqdesc.h> + +#include <asm/irq_regs.h> + +static int armpmu_count_irq_users(const int irq); + +struct pmu_irq_ops { + void (*enable_pmuirq)(unsigned int irq); + void (*disable_pmuirq)(unsigned int irq); + void (*free_pmuirq)(unsigned int irq, int cpu, void __percpu *devid); +}; + +static void armpmu_free_pmuirq(unsigned int irq, int cpu, void __percpu *devid) +{ + free_irq(irq, per_cpu_ptr(devid, cpu)); +} + +static const struct pmu_irq_ops pmuirq_ops = { + .enable_pmuirq = enable_irq, + .disable_pmuirq = disable_irq_nosync, + .free_pmuirq = armpmu_free_pmuirq +}; + +static void armpmu_free_pmunmi(unsigned int irq, int cpu, void __percpu *devid) +{ + free_nmi(irq, per_cpu_ptr(devid, cpu)); +} + +static const struct pmu_irq_ops pmunmi_ops = { + .enable_pmuirq = enable_nmi, + .disable_pmuirq = disable_nmi_nosync, + .free_pmuirq = armpmu_free_pmunmi +}; + +static void armpmu_enable_percpu_pmuirq(unsigned int irq) +{ + enable_percpu_irq(irq, IRQ_TYPE_NONE); +} + +static void armpmu_free_percpu_pmuirq(unsigned int irq, int cpu, + void __percpu *devid) +{ + if (armpmu_count_irq_users(irq) == 1) + free_percpu_irq(irq, devid); +} + +static const struct pmu_irq_ops percpu_pmuirq_ops = { + .enable_pmuirq = armpmu_enable_percpu_pmuirq, + .disable_pmuirq = disable_percpu_irq, + .free_pmuirq = armpmu_free_percpu_pmuirq +}; + +static void armpmu_enable_percpu_pmunmi(unsigned int irq) +{ + if (!prepare_percpu_nmi(irq)) + enable_percpu_nmi(irq, IRQ_TYPE_NONE); +} + +static void armpmu_disable_percpu_pmunmi(unsigned int irq) +{ + disable_percpu_nmi(irq); + 
teardown_percpu_nmi(irq); +} + +static void armpmu_free_percpu_pmunmi(unsigned int irq, int cpu, + void __percpu *devid) +{ + if (armpmu_count_irq_users(irq) == 1) + free_percpu_nmi(irq, devid); +} + +static const struct pmu_irq_ops percpu_pmunmi_ops = { + .enable_pmuirq = armpmu_enable_percpu_pmunmi, + .disable_pmuirq = armpmu_disable_percpu_pmunmi, + .free_pmuirq = armpmu_free_percpu_pmunmi +}; + +static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu); +static DEFINE_PER_CPU(int, cpu_irq); +static DEFINE_PER_CPU(const struct pmu_irq_ops *, cpu_irq_ops); + +static bool has_nmi; + +static inline u64 arm_pmu_event_max_period(struct perf_event *event) +{ + if (event->hw.flags & ARMPMU_EVT_64BIT) + return GENMASK_ULL(63, 0); + else + return GENMASK_ULL(31, 0); +} + +static int +armpmu_map_cache_event(const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + if (!cache_map) + return -ENOENT; + + ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +static int +armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) +{ + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + if (!event_map) + return -ENOENT; + + mapping = (*event_map)[config]; + return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; +} + +static int +armpmu_map_raw_event(u32 raw_event_mask, u64 config) +{ + return (int)(config & raw_event_mask); +} + +int +armpmu_map_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask) +{ + u64 config = event->attr.config; + int type = event->attr.type; + + if (type == event->pmu->type) + return armpmu_map_raw_event(raw_event_mask, config); + + switch (type) { + case PERF_TYPE_HARDWARE: + return armpmu_map_hw_event(event_map, config); + case PERF_TYPE_HW_CACHE: + return armpmu_map_cache_event(cache_map, config); + case PERF_TYPE_RAW: + return armpmu_map_raw_event(raw_event_mask, config); + } + + return -ENOENT; +} + +int armpmu_event_set_period(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + u64 max_period; + int ret = 0; + + max_period = arm_pmu_event_max_period(event); + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + /* + * Limit the maximum period to prevent the counter value + * from overtaking the one we are about to program. In + * effect we are reducing max_period to account for + * interrupt latency (and we are being very conservative). 
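+ *
+ * For example, with a 32-bit counter max_period is 0xffffffff, so left
+ * is clamped to 0x7fffffff and the counter is programmed at least half
+ * of its range away from the next overflow.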
+ */ + if (left > (max_period >> 1)) + left = (max_period >> 1); + + local64_set(&hwc->prev_count, (u64)-left); + + armpmu->write_counter(event, (u64)(-left) & max_period); + + perf_event_update_userpage(event); + + return ret; +} + +u64 armpmu_event_update(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + u64 max_period = arm_pmu_event_max_period(event); + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = armpmu->read_counter(event); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - prev_raw_count) & max_period; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void +armpmu_read(struct perf_event *event) +{ + armpmu_event_update(event); +} + +static void +armpmu_stop(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * ARM pmu always has to update the counter, so ignore + * PERF_EF_UPDATE, see comments in armpmu_start(). + */ + if (!(hwc->state & PERF_HES_STOPPED)) { + armpmu->disable(event); + armpmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static void armpmu_start(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * ARM pmu always has to reprogram the period, so ignore + * PERF_EF_RELOAD, see the comment below. + */ + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + /* + * Set the period again. Some counters can't be stopped, so when we + * were stopped we simply disabled the IRQ source and the counter + * may have been left counting. If we don't do this step then we may + * get an interrupt too soon or *way* too late if the overflow has + * happened since disabling. + */ + armpmu_event_set_period(event); + armpmu->enable(event); +} + +static void +armpmu_del(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + armpmu_stop(event, PERF_EF_UPDATE); + hw_events->events[idx] = NULL; + armpmu->clear_event_idx(hw_events, event); + perf_event_update_userpage(event); + /* Clear the allocated counter */ + hwc->idx = -1; +} + +static int +armpmu_add(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + + /* An event following a process won't be stopped earlier */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return -ENOENT; + + /* If we don't have a space for the counter then finish early. */ + idx = armpmu->get_event_idx(hw_events, event); + if (idx < 0) + return idx; + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. + */ + event->hw.idx = idx; + armpmu->disable(event); + hw_events->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + armpmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + + return 0; +} + +static int +validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, + struct perf_event *event) +{ + struct arm_pmu *armpmu; + + if (is_software_event(event)) + return 1; + + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). + */ + if (event->pmu != pmu) + return 0; + + if (event->state < PERF_EVENT_STATE_OFF) + return 1; + + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) + return 1; + + armpmu = to_arm_pmu(event->pmu); + return armpmu->get_event_idx(hw_events, event) >= 0; +} + +static int +validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct pmu_hw_events fake_pmu; + + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. + */ + memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask)); + + if (!validate_event(event->pmu, &fake_pmu, leader)) + return -EINVAL; + + if (event == leader) + return 0; + + for_each_sibling_event(sibling, leader) { + if (!validate_event(event->pmu, &fake_pmu, sibling)) + return -EINVAL; + } + + if (!validate_event(event->pmu, &fake_pmu, event)) + return -EINVAL; + + return 0; +} + +static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) +{ + struct arm_pmu *armpmu; + int ret; + u64 start_clock, finish_clock; + + /* + * we request the IRQ with a (possibly percpu) struct arm_pmu**, but + * the handlers expect a struct arm_pmu*. The percpu_irq framework will + * do any necessary shifting, we just need to perform the first + * dereference. + */ + armpmu = *(void **)dev; + if (WARN_ON_ONCE(!armpmu)) + return IRQ_NONE; + + start_clock = sched_clock(); + ret = armpmu->handle_irq(armpmu); + finish_clock = sched_clock(); + + perf_sample_event_took(finish_clock - start_clock); + return ret; +} + +static int +__hw_perf_event_init(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int mapping; + + hwc->flags = 0; + mapping = armpmu->map_event(event); + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. For SMP systems, each core has it's own PMU so we can't do any + * clever allocation or constraints checking at this point. + */ + hwc->idx = -1; + hwc->config_base = 0; + hwc->config = 0; + hwc->event_base = 0; + + /* + * Check whether we need to exclude the counter from certain modes. + */ + if (armpmu->set_event_filter && + armpmu->set_event_filter(hwc, &event->attr)) { + pr_debug("ARM performance counters do not support " + "mode exclusion\n"); + return -EOPNOTSUPP; + } + + /* + * Store the event encoding into the config_base field. + */ + hwc->config_base |= (unsigned long)mapping; + + if (!is_sampling_event(event)) { + /* + * For non-sampling runs, limit the sample_period to half + * of the counter width. That way, the new counter value + * is far less likely to overtake the previous one unless + * you have some serious IRQ latency issues. 
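+ *
+ * For an event on a 32-bit counter that default period is 0x7fffffff,
+ * so the (new - prev) & max_period delta in armpmu_event_update() has
+ * roughly half the counter range of headroom before a wrap could be
+ * misread.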
+ */ + hwc->sample_period = arm_pmu_event_max_period(event) >> 1; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + return validate_group(event); +} + +static int armpmu_event_init(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + + /* + * Reject CPU-affine events for CPUs that are of a different class to + * that which this PMU handles. Process-following events (where + * event->cpu == -1) can be migrated between CPUs, and thus we have to + * reject them later (in armpmu_add) if they're scheduled on a + * different class of CPU. + */ + if (event->cpu != -1 && + !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus)) + return -ENOENT; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + if (armpmu->map_event(event) == -ENOENT) + return -ENOENT; + + return __hw_perf_event_init(event); +} + +static void armpmu_enable(struct pmu *pmu) +{ + struct arm_pmu *armpmu = to_arm_pmu(pmu); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); + + /* For task-bound events we may be called on other CPUs */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return; + + if (enabled) + armpmu->start(armpmu); +} + +static void armpmu_disable(struct pmu *pmu) +{ + struct arm_pmu *armpmu = to_arm_pmu(pmu); + + /* For task-bound events we may be called on other CPUs */ + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return; + + armpmu->stop(armpmu); +} + +/* + * In heterogeneous systems, events are specific to a particular + * microarchitecture, and aren't suitable for another. Thus, only match CPUs of + * the same microarchitecture. + */ +static int armpmu_filter_match(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + unsigned int cpu = smp_processor_id(); + int ret; + + ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus); + if (ret && armpmu->filter_match) + return armpmu->filter_match(event); + + return ret; +} + +static ssize_t armpmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev)); + return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus); +} + +static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL); + +static struct attribute *armpmu_common_attrs[] = { + &dev_attr_cpus.attr, + NULL, +}; + +static struct attribute_group armpmu_common_attr_group = { + .attrs = armpmu_common_attrs, +}; + +/* Set at runtime when we know what CPU type we are. */ +static struct arm_pmu *__oprofile_cpu_pmu; + +/* + * Despite the names, these two functions are CPU-specific and are used + * by the OProfile/perf code. 
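+ * Both helpers report only the first CPU PMU that was registered
+ * (__oprofile_cpu_pmu, set at the end of armpmu_register()).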
+ */ +const char *perf_pmu_name(void) +{ + if (!__oprofile_cpu_pmu) + return NULL; + + return __oprofile_cpu_pmu->name; +} +EXPORT_SYMBOL_GPL(perf_pmu_name); + +int perf_num_counters(void) +{ + int max_events = 0; + + if (__oprofile_cpu_pmu != NULL) + max_events = __oprofile_cpu_pmu->num_events; + + return max_events; +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +static int armpmu_count_irq_users(const int irq) +{ + int cpu, count = 0; + + for_each_possible_cpu(cpu) { + if (per_cpu(cpu_irq, cpu) == irq) + count++; + } + + return count; +} + +static const struct pmu_irq_ops *armpmu_find_irq_ops(int irq) +{ + const struct pmu_irq_ops *ops = NULL; + int cpu; + + for_each_possible_cpu(cpu) { + if (per_cpu(cpu_irq, cpu) != irq) + continue; + + ops = per_cpu(cpu_irq_ops, cpu); + if (ops) + break; + } + + return ops; +} + +void armpmu_free_irq(int irq, int cpu) +{ + if (per_cpu(cpu_irq, cpu) == 0) + return; + if (WARN_ON(irq != per_cpu(cpu_irq, cpu))) + return; + + per_cpu(cpu_irq_ops, cpu)->free_pmuirq(irq, cpu, &cpu_armpmu); + + per_cpu(cpu_irq, cpu) = 0; + per_cpu(cpu_irq_ops, cpu) = NULL; +} + +int armpmu_request_irq(int irq, int cpu) +{ + int err = 0; + const irq_handler_t handler = armpmu_dispatch_irq; + const struct pmu_irq_ops *irq_ops; + + if (!irq) + return 0; + + if (!irq_is_percpu_devid(irq)) { + unsigned long irq_flags; + + err = irq_force_affinity(irq, cpumask_of(cpu)); + + if (err && num_possible_cpus() > 1) { + pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + goto err_out; + } + + irq_flags = IRQF_PERCPU | + IRQF_NOBALANCING | + IRQF_NO_THREAD; + + irq_set_status_flags(irq, IRQ_NOAUTOEN); + + err = request_nmi(irq, handler, irq_flags, "arm-pmu", + per_cpu_ptr(&cpu_armpmu, cpu)); + + /* If cannot get an NMI, get a normal interrupt */ + if (err) { + err = request_irq(irq, handler, irq_flags, "arm-pmu", + per_cpu_ptr(&cpu_armpmu, cpu)); + irq_ops = &pmuirq_ops; + } else { + has_nmi = true; + irq_ops = &pmunmi_ops; + } + } else if (armpmu_count_irq_users(irq) == 0) { + err = request_percpu_nmi(irq, handler, "arm-pmu", &cpu_armpmu); + + /* If cannot get an NMI, get a normal interrupt */ + if (err) { + err = request_percpu_irq(irq, handler, "arm-pmu", + &cpu_armpmu); + irq_ops = &percpu_pmuirq_ops; + } else { + has_nmi= true; + irq_ops = &percpu_pmunmi_ops; + } + } else { + /* Per cpudevid irq was already requested by another CPU */ + irq_ops = armpmu_find_irq_ops(irq); + + if (WARN_ON(!irq_ops)) + err = -EINVAL; + } + + if (err) + goto err_out; + + per_cpu(cpu_irq, cpu) = irq; + per_cpu(cpu_irq_ops, cpu) = irq_ops; + return 0; + +err_out: + pr_err("unable to request IRQ%d for ARM PMU counters\n", irq); + return err; +} + +static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu) +{ + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + return per_cpu(hw_events->irq, cpu); +} + +/* + * PMU hardware loses all context when a CPU goes offline. + * When a CPU is hotplugged back in, since some hardware registers are + * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading + * junk values out of them. 
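+ * That is why arm_perf_starting_cpu() below calls pmu->reset() before
+ * re-enabling the CPU's PMU interrupt.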
+ */ +static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); + int irq; + + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + return 0; + if (pmu->reset) + pmu->reset(pmu); + + per_cpu(cpu_armpmu, cpu) = pmu; + + irq = armpmu_get_cpu_irq(pmu, cpu); + if (irq) + per_cpu(cpu_irq_ops, cpu)->enable_pmuirq(irq); + + return 0; +} + +static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node); + int irq; + + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + return 0; + + irq = armpmu_get_cpu_irq(pmu, cpu); + if (irq) + per_cpu(cpu_irq_ops, cpu)->disable_pmuirq(irq); + + per_cpu(cpu_armpmu, cpu) = NULL; + + return 0; +} + +#ifdef CONFIG_CPU_PM +static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd) +{ + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + struct perf_event *event; + int idx; + + for (idx = 0; idx < armpmu->num_events; idx++) { + event = hw_events->events[idx]; + if (!event) + continue; + + switch (cmd) { + case CPU_PM_ENTER: + /* + * Stop and update the counter + */ + armpmu_stop(event, PERF_EF_UPDATE); + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: + /* + * Restore and enable the counter. + * armpmu_start() indirectly calls + * + * perf_event_update_userpage() + * + * that requires RCU read locking to be functional, + * wrap the call within RCU_NONIDLE to make the + * RCU subsystem aware this cpu is not idle from + * an RCU perspective for the armpmu_start() call + * duration. + */ + RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD)); + break; + default: + break; + } + } +} + +static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, + void *v) +{ + struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb); + struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events); + int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); + + if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus)) + return NOTIFY_DONE; + + /* + * Always reset the PMU registers on power-up even if + * there are no events running. 
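+ * (PMU state may have been lost across the low-power transition, so
+ * this mirrors the reset done for hotplug in arm_perf_starting_cpu().)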
+ */ + if (cmd == CPU_PM_EXIT && armpmu->reset) + armpmu->reset(armpmu); + + if (!enabled) + return NOTIFY_OK; + + switch (cmd) { + case CPU_PM_ENTER: + armpmu->stop(armpmu); + cpu_pm_pmu_setup(armpmu, cmd); + break; + case CPU_PM_EXIT: + case CPU_PM_ENTER_FAILED: + cpu_pm_pmu_setup(armpmu, cmd); + armpmu->start(armpmu); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) +{ + cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify; + return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb); +} + +static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) +{ + cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb); +} +#else +static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; } +static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { } +#endif + +static int cpu_pmu_init(struct arm_pmu *cpu_pmu) +{ + int err; + + err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); + if (err) + goto out; + + err = cpu_pm_pmu_register(cpu_pmu); + if (err) + goto out_unregister; + + return 0; + +out_unregister: + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); +out: + return err; +} + +static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) +{ + cpu_pm_pmu_unregister(cpu_pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING, + &cpu_pmu->node); +} + +static struct arm_pmu *__armpmu_alloc(gfp_t flags) +{ + struct arm_pmu *pmu; + int cpu; + + pmu = kzalloc(sizeof(*pmu), flags); + if (!pmu) { + pr_info("failed to allocate PMU device!\n"); + goto out; + } + + pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags); + if (!pmu->hw_events) { + pr_info("failed to allocate per-cpu PMU data.\n"); + goto out_free_pmu; + } + + pmu->pmu = (struct pmu) { + .pmu_enable = armpmu_enable, + .pmu_disable = armpmu_disable, + .event_init = armpmu_event_init, + .add = armpmu_add, + .del = armpmu_del, + .start = armpmu_start, + .stop = armpmu_stop, + .read = armpmu_read, + .filter_match = armpmu_filter_match, + .attr_groups = pmu->attr_groups, + /* + * This is a CPU PMU potentially in a heterogeneous + * configuration (e.g. big.LITTLE). This is not an uncore PMU, + * and we have taken ctx sharing into account (e.g. with our + * pmu::filter_match callback and pmu::event_init group + * validation). + */ + .capabilities = PERF_PMU_CAP_HETEROGENEOUS_CPUS, + }; + + pmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] = + &armpmu_common_attr_group; + + for_each_possible_cpu(cpu) { + struct pmu_hw_events *events; + + events = per_cpu_ptr(pmu->hw_events, cpu); + raw_spin_lock_init(&events->pmu_lock); + events->percpu_pmu = pmu; + } + + return pmu; + +out_free_pmu: + kfree(pmu); +out: + return NULL; +} + +struct arm_pmu *armpmu_alloc(void) +{ + return __armpmu_alloc(GFP_KERNEL); +} + +struct arm_pmu *armpmu_alloc_atomic(void) +{ + return __armpmu_alloc(GFP_ATOMIC); +} + + +void armpmu_free(struct arm_pmu *pmu) +{ + free_percpu(pmu->hw_events); + kfree(pmu); +} + +int armpmu_register(struct arm_pmu *pmu) +{ + int ret; + + ret = cpu_pmu_init(pmu); + if (ret) + return ret; + + if (!pmu->set_event_filter) + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; + + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); + if (ret) + goto out_destroy; + + if (!__oprofile_cpu_pmu) + __oprofile_cpu_pmu = pmu; + + pr_info("enabled with %s PMU driver, %d counters available%s\n", + pmu->name, pmu->num_events, + has_nmi ? 
", using NMIs" : ""); + + return 0; + +out_destroy: + cpu_pmu_destroy(pmu); + return ret; +} + +static int arm_pmu_hp_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING, + "perf/arm/pmu:starting", + arm_perf_starting_cpu, + arm_perf_teardown_cpu); + if (ret) + pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", + ret); + return ret; +} +subsys_initcall(arm_pmu_hp_init); diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c new file mode 100644 index 000000000..f5c7a845c --- /dev/null +++ b/drivers/perf/arm_pmu_acpi.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ACPI probing code for ARM performance counters. + * + * Copyright (C) 2017 ARM Ltd. + */ + +#include <linux/acpi.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/irqdesc.h> +#include <linux/percpu.h> +#include <linux/perf/arm_pmu.h> + +#include <asm/cputype.h> + +static DEFINE_PER_CPU(struct arm_pmu *, probed_pmus); +static DEFINE_PER_CPU(int, pmu_irqs); + +static int arm_pmu_acpi_register_irq(int cpu) +{ + struct acpi_madt_generic_interrupt *gicc; + int gsi, trigger; + + gicc = acpi_cpu_get_madt_gicc(cpu); + + gsi = gicc->performance_interrupt; + + /* + * Per the ACPI spec, the MADT cannot describe a PMU that doesn't + * have an interrupt. QEMU advertises this by using a GSI of zero, + * which is not known to be valid on any hardware despite being + * valid per the spec. Take the pragmatic approach and reject a + * GSI of zero for now. + */ + if (!gsi) + return 0; + + if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) + trigger = ACPI_EDGE_SENSITIVE; + else + trigger = ACPI_LEVEL_SENSITIVE; + + /* + * Helpfully, the MADT GICC doesn't have a polarity flag for the + * "performance interrupt". Luckily, on compliant GICs the polarity is + * a fixed value in HW (for both SPIs and PPIs) that we cannot change + * from SW. + * + * Here we pass in ACPI_ACTIVE_HIGH to keep the core code happy. This + * may not match the real polarity, but that should not matter. + * + * Other interrupt controllers are not supported with ACPI. + */ + return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH); +} + +static void arm_pmu_acpi_unregister_irq(int cpu) +{ + struct acpi_madt_generic_interrupt *gicc; + int gsi; + + gicc = acpi_cpu_get_madt_gicc(cpu); + + gsi = gicc->performance_interrupt; + if (gsi) + acpi_unregister_gsi(gsi); +} + +#if IS_ENABLED(CONFIG_ARM_SPE_PMU) +static struct resource spe_resources[] = { + { + /* irq */ + .flags = IORESOURCE_IRQ, + } +}; + +static struct platform_device spe_dev = { + .name = ARMV8_SPE_PDEV_NAME, + .id = -1, + .resource = spe_resources, + .num_resources = ARRAY_SIZE(spe_resources) +}; + +/* + * For lack of a better place, hook the normal PMU MADT walk + * and create a SPE device if we detect a recent MADT with + * a homogeneous PPI mapping. + */ +static void arm_spe_acpi_register_device(void) +{ + int cpu, hetid, irq, ret; + bool first = true; + u16 gsi = 0; + + /* + * Sanity check all the GICC tables for the same interrupt number. + * For now, we only support homogeneous ACPI/SPE machines. 
+ */ + for_each_possible_cpu(cpu) { + struct acpi_madt_generic_interrupt *gicc; + + gicc = acpi_cpu_get_madt_gicc(cpu); + if (gicc->header.length < ACPI_MADT_GICC_SPE) + return; + + if (first) { + gsi = gicc->spe_interrupt; + if (!gsi) + return; + hetid = find_acpi_cpu_topology_hetero_id(cpu); + first = false; + } else if ((gsi != gicc->spe_interrupt) || + (hetid != find_acpi_cpu_topology_hetero_id(cpu))) { + pr_warn("ACPI: SPE must be homogeneous\n"); + return; + } + } + + irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, + ACPI_ACTIVE_HIGH); + if (irq < 0) { + pr_warn("ACPI: SPE Unable to register interrupt: %d\n", gsi); + return; + } + + spe_resources[0].start = irq; + ret = platform_device_register(&spe_dev); + if (ret < 0) { + pr_warn("ACPI: SPE: Unable to register device\n"); + acpi_unregister_gsi(gsi); + } +} +#else +static inline void arm_spe_acpi_register_device(void) +{ +} +#endif /* CONFIG_ARM_SPE_PMU */ + +static int arm_pmu_acpi_parse_irqs(void) +{ + int irq, cpu, irq_cpu, err; + + for_each_possible_cpu(cpu) { + irq = arm_pmu_acpi_register_irq(cpu); + if (irq < 0) { + err = irq; + pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n", + cpu, err); + goto out_err; + } else if (irq == 0) { + pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu); + } + + /* + * Log and request the IRQ so the core arm_pmu code can manage + * it. We'll have to sanity-check IRQs later when we associate + * them with their PMUs. + */ + per_cpu(pmu_irqs, cpu) = irq; + armpmu_request_irq(irq, cpu); + } + + return 0; + +out_err: + for_each_possible_cpu(cpu) { + irq = per_cpu(pmu_irqs, cpu); + if (!irq) + continue; + + arm_pmu_acpi_unregister_irq(cpu); + + /* + * Blat all copies of the IRQ so that we only unregister the + * corresponding GSI once (e.g. when we have PPIs). + */ + for_each_possible_cpu(irq_cpu) { + if (per_cpu(pmu_irqs, irq_cpu) == irq) + per_cpu(pmu_irqs, irq_cpu) = 0; + } + } + + return err; +} + +static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void) +{ + unsigned long cpuid = read_cpuid_id(); + struct arm_pmu *pmu; + int cpu; + + for_each_possible_cpu(cpu) { + pmu = per_cpu(probed_pmus, cpu); + if (!pmu || pmu->acpi_cpuid != cpuid) + continue; + + return pmu; + } + + pmu = armpmu_alloc_atomic(); + if (!pmu) { + pr_warn("Unable to allocate PMU for CPU%d\n", + smp_processor_id()); + return NULL; + } + + pmu->acpi_cpuid = cpuid; + + return pmu; +} + +/* + * Check whether the new IRQ is compatible with those already associated with + * the PMU (e.g. we don't have mismatched PPIs). + */ +static bool pmu_irq_matches(struct arm_pmu *pmu, int irq) +{ + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + int cpu; + + if (!irq) + return true; + + for_each_cpu(cpu, &pmu->supported_cpus) { + int other_irq = per_cpu(hw_events->irq, cpu); + if (!other_irq) + continue; + + if (irq == other_irq) + continue; + if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq)) + continue; + + pr_warn("mismatched PPIs detected\n"); + return false; + } + + return true; +} + +/* + * This must run before the common arm_pmu hotplug logic, so that we can + * associate a CPU and its interrupt before the common code tries to manage the + * affinity and so on. + * + * Note that hotplug events are serialized, so we cannot race with another CPU + * coming up. The perf core won't open events while a hotplug event is in + * progress. 
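+ * (The "before" ordering is provided by CPUHP_AP_PERF_ARM_ACPI_STARTING
+ * being an earlier CPU hotplug state than the CPUHP_AP_PERF_ARM_STARTING
+ * state used by the common arm_pmu code.)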
+ */ +static int arm_pmu_acpi_cpu_starting(unsigned int cpu) +{ + struct arm_pmu *pmu; + struct pmu_hw_events __percpu *hw_events; + int irq; + + /* If we've already probed this CPU, we have nothing to do */ + if (per_cpu(probed_pmus, cpu)) + return 0; + + irq = per_cpu(pmu_irqs, cpu); + + pmu = arm_pmu_acpi_find_alloc_pmu(); + if (!pmu) + return -ENOMEM; + + per_cpu(probed_pmus, cpu) = pmu; + + if (pmu_irq_matches(pmu, irq)) { + hw_events = pmu->hw_events; + per_cpu(hw_events->irq, cpu) = irq; + } + + cpumask_set_cpu(cpu, &pmu->supported_cpus); + + /* + * Ideally, we'd probe the PMU here when we find the first matching + * CPU. We can't do that for several reasons; see the comment in + * arm_pmu_acpi_init(). + * + * So for the time being, we're done. + */ + return 0; +} + +int arm_pmu_acpi_probe(armpmu_init_fn init_fn) +{ + int pmu_idx = 0; + int cpu, ret; + + /* + * Initialise and register the set of PMUs which we know about right + * now. Ideally we'd do this in arm_pmu_acpi_cpu_starting() so that we + * could handle late hotplug, but this may lead to deadlock since we + * might try to register a hotplug notifier instance from within a + * hotplug notifier. + * + * There's also the problem of having access to the right init_fn, + * without tying this too deeply into the "real" PMU driver. + * + * For the moment, as with the platform/DT case, we need at least one + * of a PMU's CPUs to be online at probe time. + */ + for_each_possible_cpu(cpu) { + struct arm_pmu *pmu = per_cpu(probed_pmus, cpu); + char *base_name; + + if (!pmu || pmu->name) + continue; + + ret = init_fn(pmu); + if (ret == -ENODEV) { + /* PMU not handled by this driver, or not present */ + continue; + } else if (ret) { + pr_warn("Unable to initialise PMU for CPU%d\n", cpu); + return ret; + } + + base_name = pmu->name; + pmu->name = kasprintf(GFP_KERNEL, "%s_%d", base_name, pmu_idx++); + if (!pmu->name) { + pr_warn("Unable to allocate PMU name for CPU%d\n", cpu); + return -ENOMEM; + } + + ret = armpmu_register(pmu); + if (ret) { + pr_warn("Failed to register PMU for CPU%d\n", cpu); + kfree(pmu->name); + return ret; + } + } + + return 0; +} + +static int arm_pmu_acpi_init(void) +{ + int ret; + + if (acpi_disabled) + return 0; + + arm_spe_acpi_register_device(); + + ret = arm_pmu_acpi_parse_irqs(); + if (ret) + return ret; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING, + "perf/arm/pmu_acpi:starting", + arm_pmu_acpi_cpu_starting, NULL); + + return ret; +} +subsys_initcall(arm_pmu_acpi_init) diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c new file mode 100644 index 000000000..2e1f3680d --- /dev/null +++ b/drivers/perf/arm_pmu_platform.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * platform_device probing code for ARM performance counters. 
+ * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> + */ +#define pr_fmt(fmt) "hw perfevents: " fmt +#define dev_fmt pr_fmt + +#include <linux/bug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/irq.h> +#include <linux/irqdesc.h> +#include <linux/kconfig.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/percpu.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> +#include <linux/printk.h> +#include <linux/smp.h> + +static int probe_current_pmu(struct arm_pmu *pmu, + const struct pmu_probe_info *info) +{ + int cpu = get_cpu(); + unsigned int cpuid = read_cpuid_id(); + int ret = -ENODEV; + + pr_info("probing PMU on CPU %d\n", cpu); + + for (; info->init != NULL; info++) { + if ((cpuid & info->mask) != info->cpuid) + continue; + ret = info->init(pmu); + break; + } + + put_cpu(); + return ret; +} + +static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq) +{ + int cpu, ret; + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + + ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus); + if (ret) + return ret; + + for_each_cpu(cpu, &pmu->supported_cpus) + per_cpu(hw_events->irq, cpu) = irq; + + return 0; +} + +static bool pmu_has_irq_affinity(struct device_node *node) +{ + return !!of_find_property(node, "interrupt-affinity", NULL); +} + +static int pmu_parse_irq_affinity(struct device_node *node, int i) +{ + struct device_node *dn; + int cpu; + + /* + * If we don't have an interrupt-affinity property, we guess irq + * affinity matches our logical CPU order, as we used to assume. + * This is fragile, so we'll warn in pmu_parse_irqs(). + */ + if (!pmu_has_irq_affinity(node)) + return i; + + dn = of_parse_phandle(node, "interrupt-affinity", i); + if (!dn) { + pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n", + i, node); + return -EINVAL; + } + + cpu = of_cpu_node_to_id(dn); + if (cpu < 0) { + pr_warn("failed to find logical CPU for %pOFn\n", dn); + cpu = nr_cpu_ids; + } + + of_node_put(dn); + + return cpu; +} + +static int pmu_parse_irqs(struct arm_pmu *pmu) +{ + int i = 0, num_irqs; + struct platform_device *pdev = pmu->plat_device; + struct pmu_hw_events __percpu *hw_events = pmu->hw_events; + + num_irqs = platform_irq_count(pdev); + if (num_irqs < 0) + return dev_err_probe(&pdev->dev, num_irqs, "unable to count PMU IRQs\n"); + + /* + * In this case we have no idea which CPUs are covered by the PMU. + * To match our prior behaviour, we assume all CPUs in this case. 
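+ *
+ * Without any interrupt the PMU also cannot support sampling, which is
+ * why PERF_PMU_CAP_NO_INTERRUPT is set just below.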
+ */ + if (num_irqs == 0) { + pr_warn("no irqs for PMU, sampling events not supported\n"); + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + cpumask_setall(&pmu->supported_cpus); + return 0; + } + + if (num_irqs == 1) { + int irq = platform_get_irq(pdev, 0); + if ((irq > 0) && irq_is_percpu_devid(irq)) + return pmu_parse_percpu_irq(pmu, irq); + } + + if (nr_cpu_ids != 1 && !pmu_has_irq_affinity(pdev->dev.of_node)) { + pr_warn("no interrupt-affinity property for %pOF, guessing.\n", + pdev->dev.of_node); + } + + for (i = 0; i < num_irqs; i++) { + int cpu, irq; + + irq = platform_get_irq(pdev, i); + if (WARN_ON(irq <= 0)) + continue; + + if (irq_is_percpu_devid(irq)) { + pr_warn("multiple PPIs or mismatched SPI/PPI detected\n"); + return -EINVAL; + } + + cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i); + if (cpu < 0) + return cpu; + if (cpu >= nr_cpu_ids) + continue; + + if (per_cpu(hw_events->irq, cpu)) { + pr_warn("multiple PMU IRQs for the same CPU detected\n"); + return -EINVAL; + } + + per_cpu(hw_events->irq, cpu) = irq; + cpumask_set_cpu(cpu, &pmu->supported_cpus); + } + + return 0; +} + +static int armpmu_request_irqs(struct arm_pmu *armpmu) +{ + struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; + int cpu, err = 0; + + for_each_cpu(cpu, &armpmu->supported_cpus) { + int irq = per_cpu(hw_events->irq, cpu); + if (!irq) + continue; + + err = armpmu_request_irq(irq, cpu); + if (err) + break; + } + + return err; +} + +static void armpmu_free_irqs(struct arm_pmu *armpmu) +{ + int cpu; + struct pmu_hw_events __percpu *hw_events = armpmu->hw_events; + + for_each_cpu(cpu, &armpmu->supported_cpus) { + int irq = per_cpu(hw_events->irq, cpu); + + armpmu_free_irq(irq, cpu); + } +} + +int arm_pmu_device_probe(struct platform_device *pdev, + const struct of_device_id *of_table, + const struct pmu_probe_info *probe_table) +{ + const struct of_device_id *of_id; + armpmu_init_fn init_fn; + struct device_node *node = pdev->dev.of_node; + struct arm_pmu *pmu; + int ret = -ENODEV; + + pmu = armpmu_alloc(); + if (!pmu) + return -ENOMEM; + + pmu->plat_device = pdev; + + ret = pmu_parse_irqs(pmu); + if (ret) + goto out_free; + + if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { + init_fn = of_id->data; + + pmu->secure_access = of_property_read_bool(pdev->dev.of_node, + "secure-reg-access"); + + /* arm64 systems boot only as non-secure */ + if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) { + pr_warn("ignoring \"secure-reg-access\" property for arm64\n"); + pmu->secure_access = false; + } + + ret = init_fn(pmu); + } else if (probe_table) { + cpumask_setall(&pmu->supported_cpus); + ret = probe_current_pmu(pmu, probe_table); + } + + if (ret) { + pr_info("%pOF: failed to probe PMU!\n", node); + goto out_free; + } + + ret = armpmu_request_irqs(pmu); + if (ret) + goto out_free_irqs; + + ret = armpmu_register(pmu); + if (ret) + goto out_free_irqs; + + return 0; + +out_free_irqs: + armpmu_free_irqs(pmu); +out_free: + pr_info("%pOF: failed to register PMU devices!\n", node); + armpmu_free(pmu); + return ret; +} diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c new file mode 100644 index 000000000..6ebe72b86 --- /dev/null +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -0,0 +1,945 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * This driver adds support for perf events to use the Performance + * Monitor Counter Groups (PMCG) associated with an SMMUv3 node + * to monitor that node. 
+ * + * SMMUv3 PMCG devices are named as smmuv3_pmcg_<phys_addr_page> where + * <phys_addr_page> is the physical page address of the SMMU PMCG wrapped + * to 4K boundary. For example, the PMCG at 0xff88840000 is named + * smmuv3_pmcg_ff88840 + * + * Filtering by stream id is done by specifying filtering parameters + * with the event. options are: + * filter_enable - 0 = no filtering, 1 = filtering enabled + * filter_span - 0 = exact match, 1 = pattern match + * filter_stream_id - pattern to filter against + * + * To match a partial StreamID where the X most-significant bits must match + * but the Y least-significant bits might differ, STREAMID is programmed + * with a value that contains: + * STREAMID[Y - 1] == 0. + * STREAMID[Y - 2:0] == 1 (where Y > 1). + * The remainder of implemented bits of STREAMID (X bits, from bit Y upwards) + * contain a value to match from the corresponding bits of event StreamID. + * + * Example: perf stat -e smmuv3_pmcg_ff88840/transaction,filter_enable=1, + * filter_span=1,filter_stream_id=0x42/ -a netperf + * Applies filter pattern 0x42 to transaction events, which means events + * matching stream ids 0x42 and 0x43 are counted. Further filtering + * information is available in the SMMU documentation. + * + * SMMU events are not attributable to a CPU, so task mode and sampling + * are not supported. + */ + +#include <linux/acpi.h> +#include <linux/acpi_iort.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/msi.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#define SMMU_PMCG_EVCNTR0 0x0 +#define SMMU_PMCG_EVCNTR(n, stride) (SMMU_PMCG_EVCNTR0 + (n) * (stride)) +#define SMMU_PMCG_EVTYPER0 0x400 +#define SMMU_PMCG_EVTYPER(n) (SMMU_PMCG_EVTYPER0 + (n) * 4) +#define SMMU_PMCG_SID_SPAN_SHIFT 29 +#define SMMU_PMCG_SMR0 0xA00 +#define SMMU_PMCG_SMR(n) (SMMU_PMCG_SMR0 + (n) * 4) +#define SMMU_PMCG_CNTENSET0 0xC00 +#define SMMU_PMCG_CNTENCLR0 0xC20 +#define SMMU_PMCG_INTENSET0 0xC40 +#define SMMU_PMCG_INTENCLR0 0xC60 +#define SMMU_PMCG_OVSCLR0 0xC80 +#define SMMU_PMCG_OVSSET0 0xCC0 +#define SMMU_PMCG_CFGR 0xE00 +#define SMMU_PMCG_CFGR_SID_FILTER_TYPE BIT(23) +#define SMMU_PMCG_CFGR_MSI BIT(21) +#define SMMU_PMCG_CFGR_RELOC_CTRS BIT(20) +#define SMMU_PMCG_CFGR_SIZE GENMASK(13, 8) +#define SMMU_PMCG_CFGR_NCTR GENMASK(5, 0) +#define SMMU_PMCG_CR 0xE04 +#define SMMU_PMCG_CR_ENABLE BIT(0) +#define SMMU_PMCG_CEID0 0xE20 +#define SMMU_PMCG_CEID1 0xE28 +#define SMMU_PMCG_IRQ_CTRL 0xE50 +#define SMMU_PMCG_IRQ_CTRL_IRQEN BIT(0) +#define SMMU_PMCG_IRQ_CFG0 0xE58 +#define SMMU_PMCG_IRQ_CFG1 0xE60 +#define SMMU_PMCG_IRQ_CFG2 0xE64 + +/* MSI config fields */ +#define MSI_CFG0_ADDR_MASK GENMASK_ULL(51, 2) +#define MSI_CFG2_MEMATTR_DEVICE_nGnRE 0x1 + +#define SMMU_PMCG_DEFAULT_FILTER_SPAN 1 +#define SMMU_PMCG_DEFAULT_FILTER_SID GENMASK(31, 0) + +#define SMMU_PMCG_MAX_COUNTERS 64 +#define SMMU_PMCG_ARCH_MAX_EVENTS 128 + +#define SMMU_PMCG_PA_SHIFT 12 + +#define SMMU_PMCG_EVCNTR_RDONLY BIT(0) +#define SMMU_PMCG_HARDEN_DISABLE BIT(1) + +static int cpuhp_state_num; + +struct smmu_pmu { + struct hlist_node node; + struct perf_event *events[SMMU_PMCG_MAX_COUNTERS]; + DECLARE_BITMAP(used_counters, SMMU_PMCG_MAX_COUNTERS); + 
DECLARE_BITMAP(supported_events, SMMU_PMCG_ARCH_MAX_EVENTS); + unsigned int irq; + unsigned int on_cpu; + struct pmu pmu; + unsigned int num_counters; + struct device *dev; + void __iomem *reg_base; + void __iomem *reloc_base; + u64 counter_mask; + u32 options; + bool global_filter; +}; + +#define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) + +#define SMMU_PMU_EVENT_ATTR_EXTRACTOR(_name, _config, _start, _end) \ + static inline u32 get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(_end, _start), \ + event->attr._config); \ + } \ + +SMMU_PMU_EVENT_ATTR_EXTRACTOR(event, config, 0, 15); +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_stream_id, config1, 0, 31); +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_span, config1, 32, 32); +SMMU_PMU_EVENT_ATTR_EXTRACTOR(filter_enable, config1, 33, 33); + +static inline void smmu_pmu_enable(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + + writel(SMMU_PMCG_IRQ_CTRL_IRQEN, + smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); + writel(SMMU_PMCG_CR_ENABLE, smmu_pmu->reg_base + SMMU_PMCG_CR); +} + +static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, + struct perf_event *event, int idx); + +static inline void smmu_pmu_enable_quirk_hip08_09(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + unsigned int idx; + + for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters) + smmu_pmu_apply_event_filter(smmu_pmu, smmu_pmu->events[idx], idx); + + smmu_pmu_enable(pmu); +} + +static inline void smmu_pmu_disable(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + + writel(0, smmu_pmu->reg_base + SMMU_PMCG_CR); + writel(0, smmu_pmu->reg_base + SMMU_PMCG_IRQ_CTRL); +} + +static inline void smmu_pmu_disable_quirk_hip08_09(struct pmu *pmu) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(pmu); + unsigned int idx; + + /* + * The global disable of PMU sometimes fail to stop the counting. + * Harden this by writing an invalid event type to each used counter + * to forcibly stop counting. 
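+ *
+ * (0xffff is used below simply as an event type the affected HiSilicon
+ * PMCGs are not expected to count; the quirk itself is selected via
+ * SMMU_PMCG_HARDEN_DISABLE in smmu_pmu_get_acpi_options().)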
+ */ + for_each_set_bit(idx, smmu_pmu->used_counters, smmu_pmu->num_counters) + writel(0xffff, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); + + smmu_pmu_disable(pmu); +} + +static inline void smmu_pmu_counter_set_value(struct smmu_pmu *smmu_pmu, + u32 idx, u64 value) +{ + if (smmu_pmu->counter_mask & BIT(32)) + writeq(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); + else + writel(value, smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); +} + +static inline u64 smmu_pmu_counter_get_value(struct smmu_pmu *smmu_pmu, u32 idx) +{ + u64 value; + + if (smmu_pmu->counter_mask & BIT(32)) + value = readq(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 8)); + else + value = readl(smmu_pmu->reloc_base + SMMU_PMCG_EVCNTR(idx, 4)); + + return value; +} + +static inline void smmu_pmu_counter_enable(struct smmu_pmu *smmu_pmu, u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENSET0); +} + +static inline void smmu_pmu_counter_disable(struct smmu_pmu *smmu_pmu, u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); +} + +static inline void smmu_pmu_interrupt_enable(struct smmu_pmu *smmu_pmu, u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENSET0); +} + +static inline void smmu_pmu_interrupt_disable(struct smmu_pmu *smmu_pmu, + u32 idx) +{ + writeq(BIT(idx), smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); +} + +static inline void smmu_pmu_set_evtyper(struct smmu_pmu *smmu_pmu, u32 idx, + u32 val) +{ + writel(val, smmu_pmu->reg_base + SMMU_PMCG_EVTYPER(idx)); +} + +static inline void smmu_pmu_set_smr(struct smmu_pmu *smmu_pmu, u32 idx, u32 val) +{ + writel(val, smmu_pmu->reg_base + SMMU_PMCG_SMR(idx)); +} + +static void smmu_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + u64 delta, prev, now; + u32 idx = hwc->idx; + + do { + prev = local64_read(&hwc->prev_count); + now = smmu_pmu_counter_get_value(smmu_pmu, idx); + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + /* handle overflow. */ + delta = now - prev; + delta &= smmu_pmu->counter_mask; + + local64_add(delta, &event->count); +} + +static void smmu_pmu_set_period(struct smmu_pmu *smmu_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 new; + + if (smmu_pmu->options & SMMU_PMCG_EVCNTR_RDONLY) { + /* + * On platforms that require this quirk, if the counter starts + * at < half_counter value and wraps, the current logic of + * handling the overflow may not work. It is expected that, + * those platforms will have full 64 counter bits implemented + * so that such a possibility is remote(eg: HiSilicon HIP08). + */ + new = smmu_pmu_counter_get_value(smmu_pmu, idx); + } else { + /* + * We limit the max period to half the max counter value + * of the counter size, so that even in the case of extreme + * interrupt latency the counter will (hopefully) not wrap + * past its initial value. 
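+ *
+ * e.g. with a 32-bit counter, counter_mask is 0xffffffff and the counter
+ * is reprogrammed to 0x7fffffff, leaving roughly 2^31 events before it
+ * even overflows and a full wrap before it could pass its start value.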
+ */ + new = smmu_pmu->counter_mask >> 1; + smmu_pmu_counter_set_value(smmu_pmu, idx, new); + } + + local64_set(&hwc->prev_count, new); +} + +static void smmu_pmu_set_event_filter(struct perf_event *event, + int idx, u32 span, u32 sid) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + u32 evtyper; + + evtyper = get_event(event) | span << SMMU_PMCG_SID_SPAN_SHIFT; + smmu_pmu_set_evtyper(smmu_pmu, idx, evtyper); + smmu_pmu_set_smr(smmu_pmu, idx, sid); +} + +static bool smmu_pmu_check_global_filter(struct perf_event *curr, + struct perf_event *new) +{ + if (get_filter_enable(new) != get_filter_enable(curr)) + return false; + + if (!get_filter_enable(new)) + return true; + + return get_filter_span(new) == get_filter_span(curr) && + get_filter_stream_id(new) == get_filter_stream_id(curr); +} + +static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, + struct perf_event *event, int idx) +{ + u32 span, sid; + unsigned int cur_idx, num_ctrs = smmu_pmu->num_counters; + bool filter_en = !!get_filter_enable(event); + + span = filter_en ? get_filter_span(event) : + SMMU_PMCG_DEFAULT_FILTER_SPAN; + sid = filter_en ? get_filter_stream_id(event) : + SMMU_PMCG_DEFAULT_FILTER_SID; + + cur_idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); + /* + * Per-counter filtering, or scheduling the first globally-filtered + * event into an empty PMU so idx == 0 and it works out equivalent. + */ + if (!smmu_pmu->global_filter || cur_idx == num_ctrs) { + smmu_pmu_set_event_filter(event, idx, span, sid); + return 0; + } + + /* Otherwise, must match whatever's currently scheduled */ + if (smmu_pmu_check_global_filter(smmu_pmu->events[cur_idx], event)) { + smmu_pmu_set_evtyper(smmu_pmu, idx, get_event(event)); + return 0; + } + + return -EAGAIN; +} + +static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, + struct perf_event *event) +{ + int idx, err; + unsigned int num_ctrs = smmu_pmu->num_counters; + + idx = find_first_zero_bit(smmu_pmu->used_counters, num_ctrs); + if (idx == num_ctrs) + /* The counters are all in use. */ + return -EAGAIN; + + err = smmu_pmu_apply_event_filter(smmu_pmu, event, idx); + if (err) + return err; + + set_bit(idx, smmu_pmu->used_counters); + + return idx; +} + +static bool smmu_pmu_events_compatible(struct perf_event *curr, + struct perf_event *new) +{ + if (new->pmu != curr->pmu) + return false; + + if (to_smmu_pmu(new->pmu)->global_filter && + !smmu_pmu_check_global_filter(curr, new)) + return false; + + return true; +} + +/* + * Implementation of abstract pmu functionality required by + * the core perf events code. 
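+ *
+ * These callbacks are wired into the struct pmu instance that
+ * smmu_pmu_probe() builds and registers with the perf core.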
+ */ + +static int smmu_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + struct device *dev = smmu_pmu->dev; + struct perf_event *sibling; + int group_num_events = 1; + u16 event_id; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (hwc->sample_period) { + dev_dbg(dev, "Sampling not supported\n"); + return -EOPNOTSUPP; + } + + if (event->cpu < 0) { + dev_dbg(dev, "Per-task mode not supported\n"); + return -EOPNOTSUPP; + } + + /* Verify specified event is supported on this PMU */ + event_id = get_event(event); + if (event_id < SMMU_PMCG_ARCH_MAX_EVENTS && + (!test_bit(event_id, smmu_pmu->supported_events))) { + dev_dbg(dev, "Invalid event %d for this PMU\n", event_id); + return -EINVAL; + } + + /* Don't allow groups with mixed PMUs, except for s/w events */ + if (!is_software_event(event->group_leader)) { + if (!smmu_pmu_events_compatible(event->group_leader, event)) + return -EINVAL; + + if (++group_num_events > smmu_pmu->num_counters) + return -EINVAL; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (!smmu_pmu_events_compatible(sibling, event)) + return -EINVAL; + + if (++group_num_events > smmu_pmu->num_counters) + return -EINVAL; + } + + hwc->idx = -1; + + /* + * Ensure all events are on the same cpu so all events are in the + * same cpu context, to avoid races on pmu_enable etc. + */ + event->cpu = smmu_pmu->on_cpu; + + return 0; +} + +static void smmu_pmu_event_start(struct perf_event *event, int flags) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + hwc->state = 0; + + smmu_pmu_set_period(smmu_pmu, hwc); + + smmu_pmu_counter_enable(smmu_pmu, idx); +} + +static void smmu_pmu_event_stop(struct perf_event *event, int flags) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (hwc->state & PERF_HES_STOPPED) + return; + + smmu_pmu_counter_disable(smmu_pmu, idx); + /* As the counter gets updated on _start, ignore PERF_EF_UPDATE */ + smmu_pmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int smmu_pmu_event_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + + idx = smmu_pmu_get_event_idx(smmu_pmu, event); + if (idx < 0) + return idx; + + hwc->idx = idx; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + smmu_pmu->events[idx] = event; + local64_set(&hwc->prev_count, 0); + + smmu_pmu_interrupt_enable(smmu_pmu, idx); + + if (flags & PERF_EF_START) + smmu_pmu_event_start(event, flags); + + /* Propagate changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + + return 0; +} + +static void smmu_pmu_event_del(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); + int idx = hwc->idx; + + smmu_pmu_event_stop(event, flags | PERF_EF_UPDATE); + smmu_pmu_interrupt_disable(smmu_pmu, idx); + smmu_pmu->events[idx] = NULL; + clear_bit(idx, smmu_pmu->used_counters); + + perf_event_update_userpage(event); +} + +static void smmu_pmu_event_read(struct perf_event *event) +{ + smmu_pmu_event_update(event); +} + +/* cpumask */ + +static ssize_t smmu_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(smmu_pmu->on_cpu)); +} + +static struct device_attribute smmu_pmu_cpumask_attr = + __ATTR(cpumask, 0444, smmu_pmu_cpumask_show, NULL); + +static struct attribute *smmu_pmu_cpumask_attrs[] = { + &smmu_pmu_cpumask_attr.attr, + NULL +}; + +static struct attribute_group smmu_pmu_cpumask_group = { + .attrs = smmu_pmu_cpumask_attrs, +}; + +/* Events */ + +static ssize_t smmu_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define SMMU_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR(name, smmu_event_attr_##name, \ + config, smmu_pmu_event_show) +SMMU_EVENT_ATTR(cycles, 0); +SMMU_EVENT_ATTR(transaction, 1); +SMMU_EVENT_ATTR(tlb_miss, 2); +SMMU_EVENT_ATTR(config_cache_miss, 3); +SMMU_EVENT_ATTR(trans_table_walk_access, 4); +SMMU_EVENT_ATTR(config_struct_access, 5); +SMMU_EVENT_ATTR(pcie_ats_trans_rq, 6); +SMMU_EVENT_ATTR(pcie_ats_trans_passed, 7); + +static struct attribute *smmu_pmu_events[] = { + &smmu_event_attr_cycles.attr.attr, + &smmu_event_attr_transaction.attr.attr, + &smmu_event_attr_tlb_miss.attr.attr, + &smmu_event_attr_config_cache_miss.attr.attr, + &smmu_event_attr_trans_table_walk_access.attr.attr, + &smmu_event_attr_config_struct_access.attr.attr, + &smmu_event_attr_pcie_ats_trans_rq.attr.attr, + &smmu_event_attr_pcie_ats_trans_passed.attr.attr, + NULL +}; + +static umode_t smmu_pmu_event_is_visible(struct kobject *kobj, + struct attribute *attr, int unused) +{ + struct device *dev = kobj_to_dev(kobj); + struct smmu_pmu *smmu_pmu = to_smmu_pmu(dev_get_drvdata(dev)); + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr); + + if (test_bit(pmu_attr->id, smmu_pmu->supported_events)) + return attr->mode; + + return 0; +} + +static struct attribute_group smmu_pmu_events_group = { + .name = "events", + .attrs = smmu_pmu_events, + .is_visible = smmu_pmu_event_is_visible, +}; + +/* Formats */ +PMU_FORMAT_ATTR(event, "config:0-15"); +PMU_FORMAT_ATTR(filter_stream_id, "config1:0-31"); +PMU_FORMAT_ATTR(filter_span, "config1:32"); +PMU_FORMAT_ATTR(filter_enable, "config1:33"); + +static struct attribute *smmu_pmu_formats[] = { + &format_attr_event.attr, + &format_attr_filter_stream_id.attr, + &format_attr_filter_span.attr, + &format_attr_filter_enable.attr, + NULL +}; + +static struct attribute_group smmu_pmu_format_group = { + .name = "format", + .attrs = smmu_pmu_formats, +}; + +static const struct attribute_group *smmu_pmu_attr_grps[] = { + &smmu_pmu_cpumask_group, + &smmu_pmu_events_group, + &smmu_pmu_format_group, + NULL +}; + 
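+/*
+ * The attribute groups above are what the perf core exposes under
+ * /sys/bus/event_source/devices/smmuv3_pmcg_<phys_addr_page>/ once the
+ * PMU is registered: named events under events/, the config field
+ * layout under format/, and the servicing CPU in cpumask. That sysfs
+ * layout is what lets command lines like the perf stat example in the
+ * header comment resolve event names and filter parameters.
+ */
+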
+/* + * Generic device handlers + */ + +static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct smmu_pmu *smmu_pmu; + unsigned int target; + + smmu_pmu = hlist_entry_safe(node, struct smmu_pmu, node); + if (cpu != smmu_pmu->on_cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&smmu_pmu->pmu, cpu, target); + smmu_pmu->on_cpu = target; + WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, cpumask_of(target))); + + return 0; +} + +static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data) +{ + struct smmu_pmu *smmu_pmu = data; + u64 ovsr; + unsigned int idx; + + ovsr = readq(smmu_pmu->reloc_base + SMMU_PMCG_OVSSET0); + if (!ovsr) + return IRQ_NONE; + + writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); + + for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) { + struct perf_event *event = smmu_pmu->events[idx]; + struct hw_perf_event *hwc; + + if (WARN_ON_ONCE(!event)) + continue; + + smmu_pmu_event_update(event); + hwc = &event->hw; + + smmu_pmu_set_period(smmu_pmu, hwc); + } + + return IRQ_HANDLED; +} + +static void smmu_pmu_free_msis(void *data) +{ + struct device *dev = data; + + platform_msi_domain_free_irqs(dev); +} + +static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) +{ + phys_addr_t doorbell; + struct device *dev = msi_desc_to_dev(desc); + struct smmu_pmu *pmu = dev_get_drvdata(dev); + + doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo; + doorbell &= MSI_CFG0_ADDR_MASK; + + writeq_relaxed(doorbell, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); + writel_relaxed(msg->data, pmu->reg_base + SMMU_PMCG_IRQ_CFG1); + writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, + pmu->reg_base + SMMU_PMCG_IRQ_CFG2); +} + +static void smmu_pmu_setup_msi(struct smmu_pmu *pmu) +{ + struct msi_desc *desc; + struct device *dev = pmu->dev; + int ret; + + /* Clear MSI address reg */ + writeq_relaxed(0, pmu->reg_base + SMMU_PMCG_IRQ_CFG0); + + /* MSI supported or not */ + if (!(readl(pmu->reg_base + SMMU_PMCG_CFGR) & SMMU_PMCG_CFGR_MSI)) + return; + + ret = platform_msi_domain_alloc_irqs(dev, 1, smmu_pmu_write_msi_msg); + if (ret) { + dev_warn(dev, "failed to allocate MSIs\n"); + return; + } + + desc = first_msi_entry(dev); + if (desc) + pmu->irq = desc->irq; + + /* Add callback to free MSIs on teardown */ + devm_add_action(dev, smmu_pmu_free_msis, dev); +} + +static int smmu_pmu_setup_irq(struct smmu_pmu *pmu) +{ + unsigned long flags = IRQF_NOBALANCING | IRQF_SHARED | IRQF_NO_THREAD; + int irq, ret = -ENXIO; + + smmu_pmu_setup_msi(pmu); + + irq = pmu->irq; + if (irq) + ret = devm_request_irq(pmu->dev, irq, smmu_pmu_handle_irq, + flags, "smmuv3-pmu", pmu); + return ret; +} + +static void smmu_pmu_reset(struct smmu_pmu *smmu_pmu) +{ + u64 counter_present_mask = GENMASK_ULL(smmu_pmu->num_counters - 1, 0); + + smmu_pmu_disable(&smmu_pmu->pmu); + + /* Disable counter and interrupt */ + writeq_relaxed(counter_present_mask, + smmu_pmu->reg_base + SMMU_PMCG_CNTENCLR0); + writeq_relaxed(counter_present_mask, + smmu_pmu->reg_base + SMMU_PMCG_INTENCLR0); + writeq_relaxed(counter_present_mask, + smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0); +} + +static void smmu_pmu_get_acpi_options(struct smmu_pmu *smmu_pmu) +{ + u32 model; + + model = *(u32 *)dev_get_platdata(smmu_pmu->dev); + + switch (model) { + case IORT_SMMU_V3_PMCG_HISI_HIP08: + /* HiSilicon Erratum 162001800 */ + smmu_pmu->options |= SMMU_PMCG_EVCNTR_RDONLY | SMMU_PMCG_HARDEN_DISABLE; + break; + case 
IORT_SMMU_V3_PMCG_HISI_HIP09: + smmu_pmu->options |= SMMU_PMCG_HARDEN_DISABLE; + break; + } + + dev_notice(smmu_pmu->dev, "option mask 0x%x\n", smmu_pmu->options); +} + +static int smmu_pmu_probe(struct platform_device *pdev) +{ + struct smmu_pmu *smmu_pmu; + struct resource *res_0; + u32 cfgr, reg_size; + u64 ceid_64[2]; + int irq, err; + char *name; + struct device *dev = &pdev->dev; + + smmu_pmu = devm_kzalloc(dev, sizeof(*smmu_pmu), GFP_KERNEL); + if (!smmu_pmu) + return -ENOMEM; + + smmu_pmu->dev = dev; + platform_set_drvdata(pdev, smmu_pmu); + + smmu_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = smmu_pmu_enable, + .pmu_disable = smmu_pmu_disable, + .event_init = smmu_pmu_event_init, + .add = smmu_pmu_event_add, + .del = smmu_pmu_event_del, + .start = smmu_pmu_event_start, + .stop = smmu_pmu_event_stop, + .read = smmu_pmu_event_read, + .attr_groups = smmu_pmu_attr_grps, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0); + if (IS_ERR(smmu_pmu->reg_base)) + return PTR_ERR(smmu_pmu->reg_base); + + cfgr = readl_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CFGR); + + /* Determine if page 1 is present */ + if (cfgr & SMMU_PMCG_CFGR_RELOC_CTRS) { + smmu_pmu->reloc_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(smmu_pmu->reloc_base)) + return PTR_ERR(smmu_pmu->reloc_base); + } else { + smmu_pmu->reloc_base = smmu_pmu->reg_base; + } + + irq = platform_get_irq_optional(pdev, 0); + if (irq > 0) + smmu_pmu->irq = irq; + + ceid_64[0] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID0); + ceid_64[1] = readq_relaxed(smmu_pmu->reg_base + SMMU_PMCG_CEID1); + bitmap_from_arr32(smmu_pmu->supported_events, (u32 *)ceid_64, + SMMU_PMCG_ARCH_MAX_EVENTS); + + smmu_pmu->num_counters = FIELD_GET(SMMU_PMCG_CFGR_NCTR, cfgr) + 1; + + smmu_pmu->global_filter = !!(cfgr & SMMU_PMCG_CFGR_SID_FILTER_TYPE); + + reg_size = FIELD_GET(SMMU_PMCG_CFGR_SIZE, cfgr); + smmu_pmu->counter_mask = GENMASK_ULL(reg_size, 0); + + smmu_pmu_reset(smmu_pmu); + + err = smmu_pmu_setup_irq(smmu_pmu); + if (err) { + dev_err(dev, "Setup irq failed, PMU @%pa\n", &res_0->start); + return err; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "smmuv3_pmcg_%llx", + (res_0->start) >> SMMU_PMCG_PA_SHIFT); + if (!name) { + dev_err(dev, "Create name failed, PMU @%pa\n", &res_0->start); + return -EINVAL; + } + + smmu_pmu_get_acpi_options(smmu_pmu); + + /* + * For platforms suffer this quirk, the PMU disable sometimes fails to + * stop the counters. This will leads to inaccurate or error counting. + * Forcibly disable the counters with these quirk handler. 
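+ * Both the HIP08 and HIP09 models matched above set
+ * SMMU_PMCG_HARDEN_DISABLE, so both take the quirk path below.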
+ */ + if (smmu_pmu->options & SMMU_PMCG_HARDEN_DISABLE) { + smmu_pmu->pmu.pmu_enable = smmu_pmu_enable_quirk_hip08_09; + smmu_pmu->pmu.pmu_disable = smmu_pmu_disable_quirk_hip08_09; + } + + /* Pick one CPU to be the preferred one to use */ + smmu_pmu->on_cpu = raw_smp_processor_id(); + WARN_ON(irq_set_affinity_hint(smmu_pmu->irq, + cpumask_of(smmu_pmu->on_cpu))); + + err = cpuhp_state_add_instance_nocalls(cpuhp_state_num, + &smmu_pmu->node); + if (err) { + dev_err(dev, "Error %d registering hotplug, PMU @%pa\n", + err, &res_0->start); + goto out_clear_affinity; + } + + err = perf_pmu_register(&smmu_pmu->pmu, name, -1); + if (err) { + dev_err(dev, "Error %d registering PMU @%pa\n", + err, &res_0->start); + goto out_unregister; + } + + dev_info(dev, "Registered PMU @ %pa using %d counters with %s filter settings\n", + &res_0->start, smmu_pmu->num_counters, + smmu_pmu->global_filter ? "Global(Counter0)" : + "Individual"); + + return 0; + +out_unregister: + cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); +out_clear_affinity: + irq_set_affinity_hint(smmu_pmu->irq, NULL); + return err; +} + +static int smmu_pmu_remove(struct platform_device *pdev) +{ + struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&smmu_pmu->pmu); + cpuhp_state_remove_instance_nocalls(cpuhp_state_num, &smmu_pmu->node); + irq_set_affinity_hint(smmu_pmu->irq, NULL); + + return 0; +} + +static void smmu_pmu_shutdown(struct platform_device *pdev) +{ + struct smmu_pmu *smmu_pmu = platform_get_drvdata(pdev); + + smmu_pmu_disable(&smmu_pmu->pmu); +} + +static struct platform_driver smmu_pmu_driver = { + .driver = { + .name = "arm-smmu-v3-pmcg", + .suppress_bind_attrs = true, + }, + .probe = smmu_pmu_probe, + .remove = smmu_pmu_remove, + .shutdown = smmu_pmu_shutdown, +}; + +static int __init arm_smmu_pmu_init(void) +{ + int ret; + + cpuhp_state_num = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/arm/pmcg:online", + NULL, + smmu_pmu_offline_cpu); + if (cpuhp_state_num < 0) + return cpuhp_state_num; + + ret = platform_driver_register(&smmu_pmu_driver); + if (ret) + cpuhp_remove_multi_state(cpuhp_state_num); + + return ret; +} +module_init(arm_smmu_pmu_init); + +static void __exit arm_smmu_pmu_exit(void) +{ + platform_driver_unregister(&smmu_pmu_driver); + cpuhp_remove_multi_state(cpuhp_state_num); +} + +module_exit(arm_smmu_pmu_exit); + +MODULE_DESCRIPTION("PMU driver for ARM SMMUv3 Performance Monitors Extension"); +MODULE_AUTHOR("Neil Leeder <nleeder@codeaurora.org>"); +MODULE_AUTHOR("Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c new file mode 100644 index 000000000..6fbfcab49 --- /dev/null +++ b/drivers/perf/arm_spe_pmu.c @@ -0,0 +1,1282 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Perf support for the Statistical Profiling Extension, introduced as + * part of ARMv8.2. 
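+ * Profiling records are written by the CPU straight into a memory buffer,
+ * which this driver points at the perf AUX area (see arm_spe_pmu_setup_aux()).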
+ * + * Copyright (C) 2016 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define PMUNAME "arm_spe" +#define DRVNAME PMUNAME "_pmu" +#define pr_fmt(fmt) DRVNAME ": " fmt + +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/capability.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/perf_event.h> +#include <linux/perf/arm_pmu.h> +#include <linux/platform_device.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/vmalloc.h> + +#include <asm/barrier.h> +#include <asm/cpufeature.h> +#include <asm/mmu.h> +#include <asm/sysreg.h> + +/* + * Cache if the event is allowed to trace Context information. + * This allows us to perform the check, i.e, perfmon_capable(), + * in the context of the event owner, once, during the event_init(). + */ +#define SPE_PMU_HW_FLAGS_CX BIT(0) + +static void set_spe_event_has_cx(struct perf_event *event) +{ + if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && perfmon_capable()) + event->hw.flags |= SPE_PMU_HW_FLAGS_CX; +} + +static bool get_spe_event_has_cx(struct perf_event *event) +{ + return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX); +} + +#define ARM_SPE_BUF_PAD_BYTE 0 + +struct arm_spe_pmu_buf { + int nr_pages; + bool snapshot; + void *base; +}; + +struct arm_spe_pmu { + struct pmu pmu; + struct platform_device *pdev; + cpumask_t supported_cpus; + struct hlist_node hotplug_node; + + int irq; /* PPI */ + + u16 min_period; + u16 counter_sz; + +#define SPE_PMU_FEAT_FILT_EVT (1UL << 0) +#define SPE_PMU_FEAT_FILT_TYP (1UL << 1) +#define SPE_PMU_FEAT_FILT_LAT (1UL << 2) +#define SPE_PMU_FEAT_ARCH_INST (1UL << 3) +#define SPE_PMU_FEAT_LDS (1UL << 4) +#define SPE_PMU_FEAT_ERND (1UL << 5) +#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63) + u64 features; + + u16 max_record_sz; + u16 align; + struct perf_output_handle __percpu *handle; +}; + +#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu)) + +/* Convert a free-running index from perf into an SPE buffer offset */ +#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT)) + +/* Keep track of our dynamic hotplug state */ +static enum cpuhp_state arm_spe_pmu_online; + +enum arm_spe_pmu_buf_fault_action { + SPE_PMU_BUF_FAULT_ACT_SPURIOUS, + SPE_PMU_BUF_FAULT_ACT_FATAL, + SPE_PMU_BUF_FAULT_ACT_OK, +}; + +/* This sysfs gunk was really good fun to write. 
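Each capability ends up as a read-only attribute in the PMU's "caps" sysfs group.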
*/ +enum arm_spe_pmu_capabilities { + SPE_PMU_CAP_ARCH_INST = 0, + SPE_PMU_CAP_ERND, + SPE_PMU_CAP_FEAT_MAX, + SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX, + SPE_PMU_CAP_MIN_IVAL, +}; + +static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = { + [SPE_PMU_CAP_ARCH_INST] = SPE_PMU_FEAT_ARCH_INST, + [SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND, +}; + +static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap) +{ + if (cap < SPE_PMU_CAP_FEAT_MAX) + return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]); + + switch (cap) { + case SPE_PMU_CAP_CNT_SZ: + return spe_pmu->counter_sz; + case SPE_PMU_CAP_MIN_IVAL: + return spe_pmu->min_period; + default: + WARN(1, "unknown cap %d\n", cap); + } + + return 0; +} + +static ssize_t arm_spe_pmu_cap_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev); + struct dev_ext_attribute *ea = + container_of(attr, struct dev_ext_attribute, attr); + int cap = (long)ea->var; + + return snprintf(buf, PAGE_SIZE, "%u\n", + arm_spe_pmu_cap_get(spe_pmu, cap)); +} + +#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \ + &((struct dev_ext_attribute[]) { \ + { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_var } \ + })[0].attr.attr + +#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \ + SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var) + +static struct attribute *arm_spe_pmu_cap_attr[] = { + SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST), + SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND), + SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ), + SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL), + NULL, +}; + +static struct attribute_group arm_spe_pmu_cap_group = { + .name = "caps", + .attrs = arm_spe_pmu_cap_attr, +}; + +/* User ABI */ +#define ATTR_CFG_FLD_ts_enable_CFG config /* PMSCR_EL1.TS */ +#define ATTR_CFG_FLD_ts_enable_LO 0 +#define ATTR_CFG_FLD_ts_enable_HI 0 +#define ATTR_CFG_FLD_pa_enable_CFG config /* PMSCR_EL1.PA */ +#define ATTR_CFG_FLD_pa_enable_LO 1 +#define ATTR_CFG_FLD_pa_enable_HI 1 +#define ATTR_CFG_FLD_pct_enable_CFG config /* PMSCR_EL1.PCT */ +#define ATTR_CFG_FLD_pct_enable_LO 2 +#define ATTR_CFG_FLD_pct_enable_HI 2 +#define ATTR_CFG_FLD_jitter_CFG config /* PMSIRR_EL1.RND */ +#define ATTR_CFG_FLD_jitter_LO 16 +#define ATTR_CFG_FLD_jitter_HI 16 +#define ATTR_CFG_FLD_branch_filter_CFG config /* PMSFCR_EL1.B */ +#define ATTR_CFG_FLD_branch_filter_LO 32 +#define ATTR_CFG_FLD_branch_filter_HI 32 +#define ATTR_CFG_FLD_load_filter_CFG config /* PMSFCR_EL1.LD */ +#define ATTR_CFG_FLD_load_filter_LO 33 +#define ATTR_CFG_FLD_load_filter_HI 33 +#define ATTR_CFG_FLD_store_filter_CFG config /* PMSFCR_EL1.ST */ +#define ATTR_CFG_FLD_store_filter_LO 34 +#define ATTR_CFG_FLD_store_filter_HI 34 + +#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */ +#define ATTR_CFG_FLD_event_filter_LO 0 +#define ATTR_CFG_FLD_event_filter_HI 63 + +#define ATTR_CFG_FLD_min_latency_CFG config2 /* PMSLATFR_EL1.MINLAT */ +#define ATTR_CFG_FLD_min_latency_LO 0 +#define ATTR_CFG_FLD_min_latency_HI 11 + +/* Why does everything I do descend into this? */ +#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + (lo) == (hi) ? 
#cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi + +#define _GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \ + __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) + +#define GEN_PMU_FORMAT_ATTR(name) \ + PMU_FORMAT_ATTR(name, \ + _GEN_PMU_FORMAT_ATTR(ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI)) + +#define _ATTR_CFG_GET_FLD(attr, cfg, lo, hi) \ + ((((attr)->cfg) >> lo) & GENMASK(hi - lo, 0)) + +#define ATTR_CFG_GET_FLD(attr, name) \ + _ATTR_CFG_GET_FLD(attr, \ + ATTR_CFG_FLD_##name##_CFG, \ + ATTR_CFG_FLD_##name##_LO, \ + ATTR_CFG_FLD_##name##_HI) + +GEN_PMU_FORMAT_ATTR(ts_enable); +GEN_PMU_FORMAT_ATTR(pa_enable); +GEN_PMU_FORMAT_ATTR(pct_enable); +GEN_PMU_FORMAT_ATTR(jitter); +GEN_PMU_FORMAT_ATTR(branch_filter); +GEN_PMU_FORMAT_ATTR(load_filter); +GEN_PMU_FORMAT_ATTR(store_filter); +GEN_PMU_FORMAT_ATTR(event_filter); +GEN_PMU_FORMAT_ATTR(min_latency); + +static struct attribute *arm_spe_pmu_formats_attr[] = { + &format_attr_ts_enable.attr, + &format_attr_pa_enable.attr, + &format_attr_pct_enable.attr, + &format_attr_jitter.attr, + &format_attr_branch_filter.attr, + &format_attr_load_filter.attr, + &format_attr_store_filter.attr, + &format_attr_event_filter.attr, + &format_attr_min_latency.attr, + NULL, +}; + +static struct attribute_group arm_spe_pmu_format_group = { + .name = "format", + .attrs = arm_spe_pmu_formats_attr, +}; + +static ssize_t arm_spe_pmu_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev); + + return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus); +} +static DEVICE_ATTR(cpumask, S_IRUGO, arm_spe_pmu_get_attr_cpumask, NULL); + +static struct attribute *arm_spe_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group arm_spe_pmu_group = { + .attrs = arm_spe_pmu_attrs, +}; + +static const struct attribute_group *arm_spe_pmu_attr_groups[] = { + &arm_spe_pmu_group, + &arm_spe_pmu_cap_group, + &arm_spe_pmu_format_group, + NULL, +}; + +/* Convert between user ABI and register values */ +static u64 arm_spe_event_to_pmscr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 reg = 0; + + reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT; + + if (!attr->exclude_user) + reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT); + + if (!attr->exclude_kernel) + reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT); + + if (get_spe_event_has_cx(event)) + reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT); + + return reg; +} + +static void arm_spe_event_sanitise_period(struct perf_event *event) +{ + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + u64 period = event->hw.sample_period; + u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK + << SYS_PMSIRR_EL1_INTERVAL_SHIFT; + + if (period < spe_pmu->min_period) + period = spe_pmu->min_period; + else if (period > max_period) + period = max_period; + else + period &= max_period; + + event->hw.sample_period = period; +} + +static u64 arm_spe_event_to_pmsirr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 reg = 0; + + arm_spe_event_sanitise_period(event); + + reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT; + reg |= event->hw.sample_period; + + return reg; +} + +static u64 arm_spe_event_to_pmsfcr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + u64 reg = 0; + + reg |= ATTR_CFG_GET_FLD(attr, 
load_filter) << SYS_PMSFCR_EL1_LD_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT; + reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT; + + if (reg) + reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT); + + if (ATTR_CFG_GET_FLD(attr, event_filter)) + reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT); + + if (ATTR_CFG_GET_FLD(attr, min_latency)) + reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT); + + return reg; +} + +static u64 arm_spe_event_to_pmsevfr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + return ATTR_CFG_GET_FLD(attr, event_filter); +} + +static u64 arm_spe_event_to_pmslatfr(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + return ATTR_CFG_GET_FLD(attr, min_latency) + << SYS_PMSLATFR_EL1_MINLAT_SHIFT; +} + +static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len); + if (!buf->snapshot) + perf_aux_output_skip(handle, len); +} + +static u64 arm_spe_pmu_next_snapshot_off(struct perf_output_handle *handle) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); + u64 head = PERF_IDX2OFF(handle->head, buf); + u64 limit = buf->nr_pages * PAGE_SIZE; + + /* + * The trace format isn't parseable in reverse, so clamp + * the limit to half of the buffer size in snapshot mode + * so that the worst case is half a buffer of records, as + * opposed to a single record. + */ + if (head < limit >> 1) + limit >>= 1; + + /* + * If we're within max_record_sz of the limit, we must + * pad, move the head index and recompute the limit. + */ + if (limit - head < spe_pmu->max_record_sz) { + arm_spe_pmu_pad_buf(handle, limit - head); + handle->head = PERF_IDX2OFF(limit, buf); + limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head; + } + + return limit; +} + +static u64 __arm_spe_pmu_next_off(struct perf_output_handle *handle) +{ + struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + const u64 bufsize = buf->nr_pages * PAGE_SIZE; + u64 limit = bufsize; + u64 head, tail, wakeup; + + /* + * The head can be misaligned for two reasons: + * + * 1. The hardware left PMBPTR pointing to the first byte after + * a record when generating a buffer management event. + * + * 2. We used perf_aux_output_skip to consume handle->size bytes + * and CIRC_SPACE was used to compute the size, which always + * leaves one entry free. + * + * Deal with this by padding to the next alignment boundary and + * moving the head index. If we run out of buffer space, we'll + * reduce handle->size to zero and end up reporting truncation. + */ + head = PERF_IDX2OFF(handle->head, buf); + if (!IS_ALIGNED(head, spe_pmu->align)) { + unsigned long delta = roundup(head, spe_pmu->align) - head; + + delta = min(delta, handle->size); + arm_spe_pmu_pad_buf(handle, delta); + head = PERF_IDX2OFF(handle->head, buf); + } + + /* If we've run out of free space, then nothing more to do */ + if (!handle->size) + goto no_space; + + /* Compute the tail and wakeup indices now that we've aligned head */ + tail = PERF_IDX2OFF(handle->head + handle->size, buf); + wakeup = PERF_IDX2OFF(handle->wakeup, buf); + + /* + * Avoid clobbering unconsumed data. We know we have space, so + * if we see head == tail we know that the buffer is empty. 
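(This is why only the head < tail case is clamped below.)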
If + * head > tail, then there's nothing to clobber prior to + * wrapping. + */ + if (head < tail) + limit = round_down(tail, PAGE_SIZE); + + /* + * Wakeup may be arbitrarily far into the future. If it's not in + * the current generation, either we'll wrap before hitting it, + * or it's in the past and has been handled already. + * + * If there's a wakeup before we wrap, arrange to be woken up by + * the page boundary following it. Keep the tail boundary if + * that's lower. + */ + if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) + limit = min(limit, round_up(wakeup, PAGE_SIZE)); + + if (limit > head) + return limit; + + arm_spe_pmu_pad_buf(handle, handle->size); +no_space: + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); + perf_aux_output_end(handle, 0); + return 0; +} + +static u64 arm_spe_pmu_next_off(struct perf_output_handle *handle) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); + u64 limit = __arm_spe_pmu_next_off(handle); + u64 head = PERF_IDX2OFF(handle->head, buf); + + /* + * If the head has come too close to the end of the buffer, + * then pad to the end and recompute the limit. + */ + if (limit && (limit - head < spe_pmu->max_record_sz)) { + arm_spe_pmu_pad_buf(handle, limit - head); + limit = __arm_spe_pmu_next_off(handle); + } + + return limit; +} + +static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle, + struct perf_event *event) +{ + u64 base, limit; + struct arm_spe_pmu_buf *buf; + + /* Start a new aux session */ + buf = perf_aux_output_begin(handle, event); + if (!buf) { + event->hw.state |= PERF_HES_STOPPED; + /* + * We still need to clear the limit pointer, since the + * profiler might only be disabled by virtue of a fault. + */ + limit = 0; + goto out_write_limit; + } + + limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) + : arm_spe_pmu_next_off(handle); + if (limit) + limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT); + + limit += (u64)buf->base; + base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); + write_sysreg_s(base, SYS_PMBPTR_EL1); + +out_write_limit: + write_sysreg_s(limit, SYS_PMBLIMITR_EL1); +} + +static void arm_spe_perf_aux_output_end(struct perf_output_handle *handle) +{ + struct arm_spe_pmu_buf *buf = perf_get_aux(handle); + u64 offset, size; + + offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base; + size = offset - PERF_IDX2OFF(handle->head, buf); + + if (buf->snapshot) + handle->head = offset; + + perf_aux_output_end(handle, size); +} + +static void arm_spe_pmu_disable_and_drain_local(void) +{ + /* Disable profiling at EL0 and EL1 */ + write_sysreg_s(0, SYS_PMSCR_EL1); + isb(); + + /* Drain any buffered data */ + psb_csync(); + dsb(nsh); + + /* Disable the profiling buffer */ + write_sysreg_s(0, SYS_PMBLIMITR_EL1); + isb(); +} + +/* IRQ handling */ +static enum arm_spe_pmu_buf_fault_action +arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle) +{ + const char *err_str; + u64 pmbsr; + enum arm_spe_pmu_buf_fault_action ret; + + /* + * Ensure new profiling data is visible to the CPU and any external + * aborts have been resolved. + */ + psb_csync(); + dsb(nsh); + + /* Ensure hardware updates to PMBPTR_EL1 are visible */ + isb(); + + /* Service required? */ + pmbsr = read_sysreg_s(SYS_PMBSR_EL1); + if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT))) + return SPE_PMU_BUF_FAULT_ACT_SPURIOUS; + + /* + * If we've lost data, disable profiling and also set the PARTIAL + * flag to indicate that the last record is corrupted. 
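+ * TRUNCATED is set as well so that userspace knows data was lost.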
+ */ + if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT)) + perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED | + PERF_AUX_FLAG_PARTIAL); + + /* Report collisions to userspace so that it can up the period */ + if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT)) + perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION); + + /* We only expect buffer management events */ + switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) { + case SYS_PMBSR_EL1_EC_BUF: + /* Handled below */ + break; + case SYS_PMBSR_EL1_EC_FAULT_S1: + case SYS_PMBSR_EL1_EC_FAULT_S2: + err_str = "Unexpected buffer fault"; + goto out_err; + default: + err_str = "Unknown error code"; + goto out_err; + } + + /* Buffer management event */ + switch (pmbsr & + (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) { + case SYS_PMBSR_EL1_BUF_BSC_FULL: + ret = SPE_PMU_BUF_FAULT_ACT_OK; + goto out_stop; + default: + err_str = "Unknown buffer status code"; + } + +out_err: + pr_err_ratelimited("%s on CPU %d [PMBSR=0x%016llx, PMBPTR=0x%016llx, PMBLIMITR=0x%016llx]\n", + err_str, smp_processor_id(), pmbsr, + read_sysreg_s(SYS_PMBPTR_EL1), + read_sysreg_s(SYS_PMBLIMITR_EL1)); + ret = SPE_PMU_BUF_FAULT_ACT_FATAL; + +out_stop: + arm_spe_perf_aux_output_end(handle); + return ret; +} + +static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev) +{ + struct perf_output_handle *handle = dev; + struct perf_event *event = handle->event; + enum arm_spe_pmu_buf_fault_action act; + + if (!perf_get_aux(handle)) + return IRQ_NONE; + + act = arm_spe_pmu_buf_get_fault_act(handle); + if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS) + return IRQ_NONE; + + /* + * Ensure perf callbacks have completed, which may disable the + * profiling buffer in response to a TRUNCATION flag. + */ + irq_work_run(); + + switch (act) { + case SPE_PMU_BUF_FAULT_ACT_FATAL: + /* + * If a fatal exception occurred then leaving the profiling + * buffer enabled is a recipe waiting to happen. Since + * fatal faults don't always imply truncation, make sure + * that the profiling buffer is disabled explicitly before + * clearing the syndrome register. + */ + arm_spe_pmu_disable_and_drain_local(); + break; + case SPE_PMU_BUF_FAULT_ACT_OK: + /* + * We handled the fault (the buffer was full), so resume + * profiling as long as we didn't detect truncation. + * PMBPTR might be misaligned, but we'll burn that bridge + * when we get to it. + */ + if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) { + arm_spe_perf_aux_output_begin(handle, event); + isb(); + } + break; + case SPE_PMU_BUF_FAULT_ACT_SPURIOUS: + /* We've seen you before, but GCC has the memory of a sieve. */ + break; + } + + /* The buffer pointers are now sane, so resume profiling. */ + write_sysreg_s(0, SYS_PMBSR_EL1); + return IRQ_HANDLED; +} + +/* Perf callbacks */ +static int arm_spe_pmu_event_init(struct perf_event *event) +{ + u64 reg; + struct perf_event_attr *attr = &event->attr; + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + + /* This is, of course, deeply driver-specific */ + if (attr->type != event->pmu->type) + return -ENOENT; + + if (event->cpu >= 0 && + !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus)) + return -ENOENT; + + if (arm_spe_event_to_pmsevfr(event) & SYS_PMSEVFR_EL1_RES0) + return -EOPNOTSUPP; + + if (attr->exclude_idle) + return -EOPNOTSUPP; + + /* + * Feedback-directed frequency throttling doesn't work when we + * have a buffer of samples. We'd need to manually count the + * samples in the buffer when it fills up and adjust the event + * count to reflect that. 
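(Counting them would require decoding the records in the kernel.)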
Instead, just force the user to specify + * a sample period. + */ + if (attr->freq) + return -EINVAL; + + reg = arm_spe_event_to_pmsfcr(event); + if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) && + !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) + return -EOPNOTSUPP; + + if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) && + !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) + return -EOPNOTSUPP; + + if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) && + !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) + return -EOPNOTSUPP; + + set_spe_event_has_cx(event); + reg = arm_spe_event_to_pmscr(event); + if (!perfmon_capable() && + (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) | + BIT(SYS_PMSCR_EL1_PCT_SHIFT)))) + return -EACCES; + + return 0; +} + +static void arm_spe_pmu_start(struct perf_event *event, int flags) +{ + u64 reg; + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); + + hwc->state = 0; + arm_spe_perf_aux_output_begin(handle, event); + if (hwc->state) + return; + + reg = arm_spe_event_to_pmsfcr(event); + write_sysreg_s(reg, SYS_PMSFCR_EL1); + + reg = arm_spe_event_to_pmsevfr(event); + write_sysreg_s(reg, SYS_PMSEVFR_EL1); + + reg = arm_spe_event_to_pmslatfr(event); + write_sysreg_s(reg, SYS_PMSLATFR_EL1); + + if (flags & PERF_EF_RELOAD) { + reg = arm_spe_event_to_pmsirr(event); + write_sysreg_s(reg, SYS_PMSIRR_EL1); + isb(); + reg = local64_read(&hwc->period_left); + write_sysreg_s(reg, SYS_PMSICR_EL1); + } + + reg = arm_spe_event_to_pmscr(event); + isb(); + write_sysreg_s(reg, SYS_PMSCR_EL1); +} + +static void arm_spe_pmu_stop(struct perf_event *event, int flags) +{ + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); + + /* If we're already stopped, then nothing to do */ + if (hwc->state & PERF_HES_STOPPED) + return; + + /* Stop all trace generation */ + arm_spe_pmu_disable_and_drain_local(); + + if (flags & PERF_EF_UPDATE) { + /* + * If there's a fault pending then ensure we contain it + * to this buffer, since we might be on the context-switch + * path. + */ + if (perf_get_aux(handle)) { + enum arm_spe_pmu_buf_fault_action act; + + act = arm_spe_pmu_buf_get_fault_act(handle); + if (act == SPE_PMU_BUF_FAULT_ACT_SPURIOUS) + arm_spe_perf_aux_output_end(handle); + else + write_sysreg_s(0, SYS_PMBSR_EL1); + } + + /* + * This may also contain ECOUNT, but nobody else should + * be looking at period_left, since we forbid frequency + * based sampling. + */ + local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1)); + hwc->state |= PERF_HES_UPTODATE; + } + + hwc->state |= PERF_HES_STOPPED; +} + +static int arm_spe_pmu_add(struct perf_event *event, int flags) +{ + int ret = 0; + struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int cpu = event->cpu == -1 ? 
smp_processor_id() : event->cpu; + + if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) + return -ENOENT; + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (flags & PERF_EF_START) { + arm_spe_pmu_start(event, PERF_EF_RELOAD); + if (hwc->state & PERF_HES_STOPPED) + ret = -EINVAL; + } + + return ret; +} + +static void arm_spe_pmu_del(struct perf_event *event, int flags) +{ + arm_spe_pmu_stop(event, PERF_EF_UPDATE); +} + +static void arm_spe_pmu_read(struct perf_event *event) +{ +} + +static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) +{ + int i, cpu = event->cpu; + struct page **pglist; + struct arm_spe_pmu_buf *buf; + + /* We need at least two pages for this to work. */ + if (nr_pages < 2) + return NULL; + + /* + * We require an even number of pages for snapshot mode, so that + * we can effectively treat the buffer as consisting of two equal + * parts and give userspace a fighting chance of getting some + * useful data out of it. + */ + if (snapshot && (nr_pages & 1)) + return NULL; + + if (cpu == -1) + cpu = raw_smp_processor_id(); + + buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, cpu_to_node(cpu)); + if (!buf) + return NULL; + + pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL); + if (!pglist) + goto out_free_buf; + + for (i = 0; i < nr_pages; ++i) + pglist[i] = virt_to_page(pages[i]); + + buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); + if (!buf->base) + goto out_free_pglist; + + buf->nr_pages = nr_pages; + buf->snapshot = snapshot; + + kfree(pglist); + return buf; + +out_free_pglist: + kfree(pglist); +out_free_buf: + kfree(buf); + return NULL; +} + +static void arm_spe_pmu_free_aux(void *aux) +{ + struct arm_spe_pmu_buf *buf = aux; + + vunmap(buf->base); + kfree(buf); +} + +/* Initialisation and teardown functions */ +static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu) +{ + static atomic_t pmu_idx = ATOMIC_INIT(-1); + + int idx; + char *name; + struct device *dev = &spe_pmu->pdev->dev; + + spe_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE, + .attr_groups = arm_spe_pmu_attr_groups, + /* + * We hitch a ride on the software context here, so that + * we can support per-task profiling (which is not possible + * with the invalid context as it doesn't get sched callbacks). + * This requires that userspace either uses a dummy event for + * perf_event_open, since the aux buffer is not setup until + * a subsequent mmap, or creates the profiling event in a + * disabled state and explicitly PERF_EVENT_IOC_ENABLEs it + * once the buffer has been created. 
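+ * The perf tool already follows this mmap-then-enable ordering; it mainly
+ * matters for programs driving perf_event_open() directly.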
+ */ + .task_ctx_nr = perf_sw_context, + .event_init = arm_spe_pmu_event_init, + .add = arm_spe_pmu_add, + .del = arm_spe_pmu_del, + .start = arm_spe_pmu_start, + .stop = arm_spe_pmu_stop, + .read = arm_spe_pmu_read, + .setup_aux = arm_spe_pmu_setup_aux, + .free_aux = arm_spe_pmu_free_aux, + }; + + idx = atomic_inc_return(&pmu_idx); + name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx); + if (!name) { + dev_err(dev, "failed to allocate name for pmu %d\n", idx); + return -ENOMEM; + } + + return perf_pmu_register(&spe_pmu->pmu, name, -1); +} + +static void arm_spe_pmu_perf_destroy(struct arm_spe_pmu *spe_pmu) +{ + perf_pmu_unregister(&spe_pmu->pmu); +} + +static void __arm_spe_pmu_dev_probe(void *info) +{ + int fld; + u64 reg; + struct arm_spe_pmu *spe_pmu = info; + struct device *dev = &spe_pmu->pdev->dev; + + fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64DFR0_EL1), + ID_AA64DFR0_PMSVER_SHIFT); + if (!fld) { + dev_err(dev, + "unsupported ID_AA64DFR0_EL1.PMSVer [%d] on CPU %d\n", + fld, smp_processor_id()); + return; + } + + /* Read PMBIDR first to determine whether or not we have access */ + reg = read_sysreg_s(SYS_PMBIDR_EL1); + if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) { + dev_err(dev, + "profiling buffer owned by higher exception level\n"); + return; + } + + /* Minimum alignment. If it's out-of-range, then fail the probe */ + fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK; + spe_pmu->align = 1 << fld; + if (spe_pmu->align > SZ_2K) { + dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n", + fld, smp_processor_id()); + return; + } + + /* It's now safe to read PMSIDR and figure out what we've got */ + reg = read_sysreg_s(SYS_PMSIDR_EL1); + if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; + + if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; + + if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; + + if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; + + if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_LDS; + + if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT)) + spe_pmu->features |= SPE_PMU_FEAT_ERND; + + /* This field has a spaced out encoding, so just use a look-up */ + fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK; + switch (fld) { + case 0: + spe_pmu->min_period = 256; + break; + case 2: + spe_pmu->min_period = 512; + break; + case 3: + spe_pmu->min_period = 768; + break; + case 4: + spe_pmu->min_period = 1024; + break; + case 5: + spe_pmu->min_period = 1536; + break; + case 6: + spe_pmu->min_period = 2048; + break; + case 7: + spe_pmu->min_period = 3072; + break; + default: + dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n", + fld); + fallthrough; + case 8: + spe_pmu->min_period = 4096; + } + + /* Maximum record size. 
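(PMSIDR_EL1.MaxSize encodes the size as a power-of-two exponent.)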
If it's out-of-range, then fail the probe */ + fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK; + spe_pmu->max_record_sz = 1 << fld; + if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { + dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n", + fld, smp_processor_id()); + return; + } + + fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK; + switch (fld) { + default: + dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n", + fld); + fallthrough; + case 2: + spe_pmu->counter_sz = 12; + } + + dev_info(dev, + "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n", + cpumask_pr_args(&spe_pmu->supported_cpus), + spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features); + + spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED; + return; +} + +static void __arm_spe_pmu_reset_local(void) +{ + /* + * This is probably overkill, as we have no idea where we're + * draining any buffered data to... + */ + arm_spe_pmu_disable_and_drain_local(); + + /* Reset the buffer base pointer */ + write_sysreg_s(0, SYS_PMBPTR_EL1); + isb(); + + /* Clear any pending management interrupts */ + write_sysreg_s(0, SYS_PMBSR_EL1); + isb(); +} + +static void __arm_spe_pmu_setup_one(void *info) +{ + struct arm_spe_pmu *spe_pmu = info; + + __arm_spe_pmu_reset_local(); + enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE); +} + +static void __arm_spe_pmu_stop_one(void *info) +{ + struct arm_spe_pmu *spe_pmu = info; + + disable_percpu_irq(spe_pmu->irq); + __arm_spe_pmu_reset_local(); +} + +static int arm_spe_pmu_cpu_startup(unsigned int cpu, struct hlist_node *node) +{ + struct arm_spe_pmu *spe_pmu; + + spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node); + if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) + return 0; + + __arm_spe_pmu_setup_one(spe_pmu); + return 0; +} + +static int arm_spe_pmu_cpu_teardown(unsigned int cpu, struct hlist_node *node) +{ + struct arm_spe_pmu *spe_pmu; + + spe_pmu = hlist_entry_safe(node, struct arm_spe_pmu, hotplug_node); + if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) + return 0; + + __arm_spe_pmu_stop_one(spe_pmu); + return 0; +} + +static int arm_spe_pmu_dev_init(struct arm_spe_pmu *spe_pmu) +{ + int ret; + cpumask_t *mask = &spe_pmu->supported_cpus; + + /* Make sure we probe the hardware on a relevant CPU */ + ret = smp_call_function_any(mask, __arm_spe_pmu_dev_probe, spe_pmu, 1); + if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED)) + return -ENXIO; + + /* Request our PPIs (note that the IRQ is still disabled) */ + ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME, + spe_pmu->handle); + if (ret) + return ret; + + /* + * Register our hotplug notifier now so we don't miss any events. + * This will enable the IRQ for any supported CPUs that are already + * up. 
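+ * (cpuhp_state_add_instance() runs the startup callback on each online
+ * CPU, and arm_spe_pmu_cpu_startup() enables the PPI on supported ones.)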
+ */ + ret = cpuhp_state_add_instance(arm_spe_pmu_online, + &spe_pmu->hotplug_node); + if (ret) + free_percpu_irq(spe_pmu->irq, spe_pmu->handle); + + return ret; +} + +static void arm_spe_pmu_dev_teardown(struct arm_spe_pmu *spe_pmu) +{ + cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node); + free_percpu_irq(spe_pmu->irq, spe_pmu->handle); +} + +/* Driver and device probing */ +static int arm_spe_pmu_irq_probe(struct arm_spe_pmu *spe_pmu) +{ + struct platform_device *pdev = spe_pmu->pdev; + int irq = platform_get_irq(pdev, 0); + + if (irq < 0) + return -ENXIO; + + if (!irq_is_percpu(irq)) { + dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq); + return -EINVAL; + } + + if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) { + dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq); + return -EINVAL; + } + + spe_pmu->irq = irq; + return 0; +} + +static const struct of_device_id arm_spe_pmu_of_match[] = { + { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match); + +static const struct platform_device_id arm_spe_match[] = { + { ARMV8_SPE_PDEV_NAME, 0}, + { } +}; +MODULE_DEVICE_TABLE(platform, arm_spe_match); + +static int arm_spe_pmu_device_probe(struct platform_device *pdev) +{ + int ret; + struct arm_spe_pmu *spe_pmu; + struct device *dev = &pdev->dev; + + /* + * If kernelspace is unmapped when running at EL0, then the SPE + * buffer will fault and prematurely terminate the AUX session. + */ + if (arm64_kernel_unmapped_at_el0()) { + dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command line\n"); + return -EPERM; + } + + spe_pmu = devm_kzalloc(dev, sizeof(*spe_pmu), GFP_KERNEL); + if (!spe_pmu) { + dev_err(dev, "failed to allocate spe_pmu\n"); + return -ENOMEM; + } + + spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle)); + if (!spe_pmu->handle) + return -ENOMEM; + + spe_pmu->pdev = pdev; + platform_set_drvdata(pdev, spe_pmu); + + ret = arm_spe_pmu_irq_probe(spe_pmu); + if (ret) + goto out_free_handle; + + ret = arm_spe_pmu_dev_init(spe_pmu); + if (ret) + goto out_free_handle; + + ret = arm_spe_pmu_perf_init(spe_pmu); + if (ret) + goto out_teardown_dev; + + return 0; + +out_teardown_dev: + arm_spe_pmu_dev_teardown(spe_pmu); +out_free_handle: + free_percpu(spe_pmu->handle); + return ret; +} + +static int arm_spe_pmu_device_remove(struct platform_device *pdev) +{ + struct arm_spe_pmu *spe_pmu = platform_get_drvdata(pdev); + + arm_spe_pmu_perf_destroy(spe_pmu); + arm_spe_pmu_dev_teardown(spe_pmu); + free_percpu(spe_pmu->handle); + return 0; +} + +static struct platform_driver arm_spe_pmu_driver = { + .id_table = arm_spe_match, + .driver = { + .name = DRVNAME, + .of_match_table = of_match_ptr(arm_spe_pmu_of_match), + .suppress_bind_attrs = true, + }, + .probe = arm_spe_pmu_device_probe, + .remove = arm_spe_pmu_device_remove, +}; + +static int __init arm_spe_pmu_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME, + arm_spe_pmu_cpu_startup, + arm_spe_pmu_cpu_teardown); + if (ret < 0) + return ret; + arm_spe_pmu_online = ret; + + ret = platform_driver_register(&arm_spe_pmu_driver); + if (ret) + cpuhp_remove_multi_state(arm_spe_pmu_online); + + return ret; +} + +static void __exit arm_spe_pmu_exit(void) +{ + platform_driver_unregister(&arm_spe_pmu_driver); + cpuhp_remove_multi_state(arm_spe_pmu_online); +} + +module_init(arm_spe_pmu_init); +module_exit(arm_spe_pmu_exit); + 
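+/*
+ * Example usage (illustrative; instances are named arm_spe_<n>):
+ *
+ *   perf record -e arm_spe_0/ts_enable=1,load_filter=1,store_filter=1,
+ *       min_latency=64/ -- <workload>
+ *
+ * The format attributes above translate these options into the PMSCR_EL1,
+ * PMSFCR_EL1, PMSEVFR_EL1 and PMSLATFR_EL1 programming done at event start.
+ */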
+MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension"); +MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c new file mode 100644 index 000000000..8dfb67530 --- /dev/null +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -0,0 +1,713 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2017 NXP + * Copyright 2016 Freescale Semiconductor, Inc. + */ + +#include <linux/bitfield.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/perf_event.h> +#include <linux/slab.h> + +#define COUNTER_CNTL 0x0 +#define COUNTER_READ 0x20 + +#define COUNTER_DPCR1 0x30 + +#define CNTL_OVER 0x1 +#define CNTL_CLEAR 0x2 +#define CNTL_EN 0x4 +#define CNTL_EN_MASK 0xFFFFFFFB +#define CNTL_CLEAR_MASK 0xFFFFFFFD +#define CNTL_OVER_MASK 0xFFFFFFFE + +#define CNTL_CSV_SHIFT 24 +#define CNTL_CSV_MASK (0xFFU << CNTL_CSV_SHIFT) + +#define EVENT_CYCLES_ID 0 +#define EVENT_CYCLES_COUNTER 0 +#define NUM_COUNTERS 4 + +#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */ + +#define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) + +#define DDR_PERF_DEV_NAME "imx8_ddr" +#define DDR_CPUHP_CB_NAME DDR_PERF_DEV_NAME "_perf_pmu" + +static DEFINE_IDA(ddr_ida); + +/* DDR Perf hardware feature */ +#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ +#define DDR_CAP_AXI_ID_FILTER_ENHANCED 0x3 /* support enhanced AXI ID filter */ + +struct fsl_ddr_devtype_data { + unsigned int quirks; /* quirks needed for different DDR Perf core */ +}; + +static const struct fsl_ddr_devtype_data imx8_devtype_data; + +static const struct fsl_ddr_devtype_data imx8m_devtype_data = { + .quirks = DDR_CAP_AXI_ID_FILTER, +}; + +static const struct fsl_ddr_devtype_data imx8mp_devtype_data = { + .quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED, +}; + +static const struct of_device_id imx_ddr_pmu_dt_ids[] = { + { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, + { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, + { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); + +struct ddr_pmu { + struct pmu pmu; + void __iomem *base; + unsigned int cpu; + struct hlist_node node; + struct device *dev; + struct perf_event *events[NUM_COUNTERS]; + int active_events; + enum cpuhp_state cpuhp_state; + const struct fsl_ddr_devtype_data *devtype_data; + int irq; + int id; + int active_counter; +}; + +enum ddr_perf_filter_capabilities { + PERF_CAP_AXI_ID_FILTER = 0, + PERF_CAP_AXI_ID_FILTER_ENHANCED, + PERF_CAP_AXI_ID_FEAT_MAX, +}; + +static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap) +{ + u32 quirks = pmu->devtype_data->quirks; + + switch (cap) { + case PERF_CAP_AXI_ID_FILTER: + return !!(quirks & DDR_CAP_AXI_ID_FILTER); + case PERF_CAP_AXI_ID_FILTER_ENHANCED: + quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED; + return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED; + default: + WARN(1, "unknown filter cap %d\n", cap); + } + + return 0; +} + +static ssize_t ddr_perf_filter_cap_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ddr_pmu *pmu = dev_get_drvdata(dev); + struct dev_ext_attribute *ea = + container_of(attr, struct dev_ext_attribute, attr); + int cap = (long)ea->var; + + 
return snprintf(buf, PAGE_SIZE, "%u\n", + ddr_perf_filter_cap_get(pmu, cap)); +} + +#define PERF_EXT_ATTR_ENTRY(_name, _func, _var) \ + (&((struct dev_ext_attribute) { \ + __ATTR(_name, 0444, _func, NULL), (void *)_var \ + }).attr.attr) + +#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var) \ + PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var) + +static struct attribute *ddr_perf_filter_cap_attr[] = { + PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER), + PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED), + NULL, +}; + +static struct attribute_group ddr_perf_filter_cap_attr_group = { + .name = "caps", + .attrs = ddr_perf_filter_cap_attr, +}; + +static ssize_t ddr_perf_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ddr_pmu *pmu = dev_get_drvdata(dev); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu)); +} + +static struct device_attribute ddr_perf_cpumask_attr = + __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL); + +static struct attribute *ddr_perf_cpumask_attrs[] = { + &ddr_perf_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group ddr_perf_cpumask_attr_group = { + .attrs = ddr_perf_cpumask_attrs, +}; + +static ssize_t +ddr_pmu_event_show(struct device *dev, struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\ + .id = _id, } \ + })[0].attr.attr) + +static struct attribute *ddr_perf_events_attrs[] = { + IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID), + IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01), + IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04), + IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05), + IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08), + IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09), + IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10), + IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11), + IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12), + IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20), + IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21), + IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22), + IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23), + IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24), + IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25), + IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26), + IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27), + IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29), + IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a), + IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b), + IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30), + IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31), + IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32), + IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33), + IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34), + IMX8_DDR_PMU_EVENT_ATTR(read, 0x35), + IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36), + IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37), + IMX8_DDR_PMU_EVENT_ATTR(write, 0x38), + IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39), + IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41), + IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42), + NULL, +}; + +static struct attribute_group ddr_perf_events_attr_group = { + .name = "events", + .attrs = ddr_perf_events_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(axi_id, "config1:0-15"); +PMU_FORMAT_ATTR(axi_mask, "config1:16-31"); + +static struct 
attribute *ddr_perf_format_attrs[] = { + &format_attr_event.attr, + &format_attr_axi_id.attr, + &format_attr_axi_mask.attr, + NULL, +}; + +static struct attribute_group ddr_perf_format_attr_group = { + .name = "format", + .attrs = ddr_perf_format_attrs, +}; + +static const struct attribute_group *attr_groups[] = { + &ddr_perf_events_attr_group, + &ddr_perf_format_attr_group, + &ddr_perf_cpumask_attr_group, + &ddr_perf_filter_cap_attr_group, + NULL, +}; + +static bool ddr_perf_is_filtered(struct perf_event *event) +{ + return event->attr.config == 0x41 || event->attr.config == 0x42; +} + +static u32 ddr_perf_filter_val(struct perf_event *event) +{ + return event->attr.config1; +} + +static bool ddr_perf_filters_compatible(struct perf_event *a, + struct perf_event *b) +{ + if (!ddr_perf_is_filtered(a)) + return true; + if (!ddr_perf_is_filtered(b)) + return true; + return ddr_perf_filter_val(a) == ddr_perf_filter_val(b); +} + +static bool ddr_perf_is_enhanced_filtered(struct perf_event *event) +{ + unsigned int filt; + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + + filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED; + return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) && + ddr_perf_is_filtered(event); +} + +static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event) +{ + int i; + + /* + * Always map cycle event to counter 0 + * Cycles counter is dedicated for cycle event + * can't used for the other events + */ + if (event == EVENT_CYCLES_ID) { + if (pmu->events[EVENT_CYCLES_COUNTER] == NULL) + return EVENT_CYCLES_COUNTER; + else + return -ENOENT; + } + + for (i = 1; i < NUM_COUNTERS; i++) { + if (pmu->events[i] == NULL) + return i; + } + + return -ENOENT; +} + +static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter) +{ + pmu->events[counter] = NULL; +} + +static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) +{ + struct perf_event *event = pmu->events[counter]; + void __iomem *base = pmu->base; + + /* + * return bytes instead of bursts from ddr transaction for + * axid-read and axid-write event if PMU core supports enhanced + * filter. + */ + base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 : + COUNTER_READ; + return readl_relaxed(base + counter * 4); +} + +static int ddr_perf_event_init(struct perf_event *event) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct perf_event *sibling; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + if (event->cpu < 0) { + dev_warn(pmu->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable (for example to create a CCN group + * periodically read when a hrtimer aka cpu-clock leader triggers). 
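+ *
+ * Filter-capable events (axid-read/axid-write) additionally take the AXI
+ * ID and mask through config1, e.g. (illustrative):
+ *
+ *   perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x12/ cmd
+ *
+ * On filter-capable hardware every filtered event in a group must use the
+ * same config1 value, which the checks below enforce.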
+ */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { + if (!ddr_perf_filters_compatible(event, event->group_leader)) + return -EINVAL; + for_each_sibling_event(sibling, event->group_leader) { + if (!ddr_perf_filters_compatible(event, sibling)) + return -EINVAL; + } + } + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; + } + + event->cpu = pmu->cpu; + hwc->idx = -1; + + return 0; +} + + +static void ddr_perf_event_update(struct perf_event *event) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + int counter = hwc->idx; + + do { + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = ddr_perf_read_counter(pmu, counter); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + + delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF; + + local64_add(delta, &event->count); +} + +static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config, + int counter, bool enable) +{ + u8 reg = counter * 4 + COUNTER_CNTL; + int val; + + if (enable) { + /* + * cycle counter is special which should firstly write 0 then + * write 1 into CLEAR bit to clear it. Other counters only + * need write 0 into CLEAR bit and it turns out to be 1 by + * hardware. Below enable flow is harmless for all counters. + */ + writel(0, pmu->base + reg); + val = CNTL_EN | CNTL_CLEAR; + val |= FIELD_PREP(CNTL_CSV_MASK, config); + writel(val, pmu->base + reg); + } else { + /* Disable counter */ + val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK; + writel(val, pmu->base + reg); + } +} + +static void ddr_perf_event_start(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + local64_set(&hwc->prev_count, 0); + + ddr_perf_counter_enable(pmu, event->attr.config, counter, true); + + if (!pmu->active_counter++) + ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, + EVENT_CYCLES_COUNTER, true); + + hwc->state = 0; +} + +static int ddr_perf_event_add(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter; + int cfg = event->attr.config; + int cfg1 = event->attr.config1; + + if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { + int i; + + for (i = 1; i < NUM_COUNTERS; i++) { + if (pmu->events[i] && + !ddr_perf_filters_compatible(event, pmu->events[i])) + return -EINVAL; + } + + if (ddr_perf_is_filtered(event)) { + /* revert axi id masking(axi_mask) value */ + cfg1 ^= AXI_MASKING_REVERT; + writel(cfg1, pmu->base + COUNTER_DPCR1); + } + } + + counter = ddr_perf_alloc_counter(pmu, cfg); + if (counter < 0) { + dev_dbg(pmu->dev, "There are not enough counters\n"); + return -EOPNOTSUPP; + } + + pmu->events[counter] = event; + pmu->active_events++; + hwc->idx = counter; + + hwc->state |= PERF_HES_STOPPED; + + if (flags & PERF_EF_START) + ddr_perf_event_start(event, flags); + + return 0; +} + +static void ddr_perf_event_stop(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + ddr_perf_counter_enable(pmu, event->attr.config, counter, false); + ddr_perf_event_update(event); + + if 
(!--pmu->active_counter) + ddr_perf_counter_enable(pmu, EVENT_CYCLES_ID, + EVENT_CYCLES_COUNTER, false); + + hwc->state |= PERF_HES_STOPPED; +} + +static void ddr_perf_event_del(struct perf_event *event, int flags) +{ + struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int counter = hwc->idx; + + ddr_perf_event_stop(event, PERF_EF_UPDATE); + + ddr_perf_free_counter(pmu, counter); + pmu->active_events--; + hwc->idx = -1; +} + +static void ddr_perf_pmu_enable(struct pmu *pmu) +{ +} + +static void ddr_perf_pmu_disable(struct pmu *pmu) +{ +} + +static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base, + struct device *dev) +{ + *pmu = (struct ddr_pmu) { + .pmu = (struct pmu) { + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .attr_groups = attr_groups, + .event_init = ddr_perf_event_init, + .add = ddr_perf_event_add, + .del = ddr_perf_event_del, + .start = ddr_perf_event_start, + .stop = ddr_perf_event_stop, + .read = ddr_perf_event_update, + .pmu_enable = ddr_perf_pmu_enable, + .pmu_disable = ddr_perf_pmu_disable, + }, + .base = base, + .dev = dev, + }; + + pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL); + return pmu->id; +} + +static irqreturn_t ddr_perf_irq_handler(int irq, void *p) +{ + int i; + struct ddr_pmu *pmu = (struct ddr_pmu *) p; + struct perf_event *event, *cycle_event = NULL; + + /* all counter will stop if cycle counter disabled */ + ddr_perf_counter_enable(pmu, + EVENT_CYCLES_ID, + EVENT_CYCLES_COUNTER, + false); + /* + * When the cycle counter overflows, all counters are stopped, + * and an IRQ is raised. If any other counter overflows, it + * continues counting, and no IRQ is raised. + * + * Cycles occur at least 4 times as often as other events, so we + * can update all events on a cycle counter overflow and not + * lose events. 
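The overflow handler above can afford to update every event from the cycle-counter interrupt because ddr_perf_event_update(), shown earlier, reduces deltas modulo 2^32, so a single wrap of a 32-bit hardware counter is still accounted correctly. A minimal demonstration of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* same reduction as ddr_perf_event_update(): delta modulo 2^32 */
static uint64_t delta32(uint64_t prev, uint64_t now)
{
    return (now - prev) & 0xFFFFFFFFULL;
}

int main(void)
{
    /* the 32-bit hardware counter wrapped from 0xFFFFFFF0 to 0x00000010 */
    printf("delta = 0x%llx\n",
           (unsigned long long)delta32(0xFFFFFFF0ULL, 0x00000010ULL)); /* 0x20 */
    return 0;
}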
+ * + */ + for (i = 0; i < NUM_COUNTERS; i++) { + + if (!pmu->events[i]) + continue; + + event = pmu->events[i]; + + ddr_perf_event_update(event); + + if (event->hw.idx == EVENT_CYCLES_COUNTER) + cycle_event = event; + } + + ddr_perf_counter_enable(pmu, + EVENT_CYCLES_ID, + EVENT_CYCLES_COUNTER, + true); + if (cycle_event) + ddr_perf_event_update(cycle_event); + + return IRQ_HANDLED; +} + +static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node); + int target; + + if (cpu != pmu->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&pmu->pmu, cpu, target); + pmu->cpu = target; + + WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu))); + + return 0; +} + +static int ddr_perf_probe(struct platform_device *pdev) +{ + struct ddr_pmu *pmu; + struct device_node *np; + void __iomem *base; + char *name; + int num; + int ret; + int irq; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + np = pdev->dev.of_node; + + pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); + if (!pmu) + return -ENOMEM; + + num = ddr_perf_init(pmu, base, &pdev->dev); + + platform_set_drvdata(pdev, pmu); + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d", + num); + if (!name) { + ret = -ENOMEM; + goto cpuhp_state_err; + } + + pmu->devtype_data = of_device_get_match_data(&pdev->dev); + + pmu->cpu = raw_smp_processor_id(); + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + DDR_CPUHP_CB_NAME, + NULL, + ddr_perf_offline_cpu); + + if (ret < 0) { + dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); + goto cpuhp_state_err; + } + + pmu->cpuhp_state = ret; + + /* Register the pmu instance for cpu hotplug */ + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } + + /* Request irq */ + irq = of_irq_get(np, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get irq: %d", irq); + ret = irq; + goto ddr_perf_err; + } + + ret = devm_request_irq(&pdev->dev, irq, + ddr_perf_irq_handler, + IRQF_NOBALANCING | IRQF_NO_THREAD, + DDR_CPUHP_CB_NAME, + pmu); + if (ret < 0) { + dev_err(&pdev->dev, "Request irq failed: %d", ret); + goto ddr_perf_err; + } + + pmu->irq = irq; + ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)); + if (ret) { + dev_err(pmu->dev, "Failed to set interrupt affinity!\n"); + goto ddr_perf_err; + } + + ret = perf_pmu_register(&pmu->pmu, name, -1); + if (ret) + goto ddr_perf_err; + + return 0; + +ddr_perf_err: + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: + ida_simple_remove(&ddr_ida, pmu->id); + dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); + return ret; +} + +static int ddr_perf_remove(struct platform_device *pdev) +{ + struct ddr_pmu *pmu = platform_get_drvdata(pdev); + + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); + irq_set_affinity_hint(pmu->irq, NULL); + + perf_pmu_unregister(&pmu->pmu); + + ida_simple_remove(&ddr_ida, pmu->id); + return 0; +} + +static struct platform_driver imx_ddr_pmu_driver = { + .driver = { + .name = "imx-ddr-pmu", + .of_match_table = imx_ddr_pmu_dt_ids, + .suppress_bind_attrs = true, + }, + .probe = 
ddr_perf_probe, + .remove = ddr_perf_remove, +}; + +module_platform_driver(imx_ddr_pmu_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig new file mode 100644 index 000000000..c5d1b7019 --- /dev/null +++ b/drivers/perf/hisilicon/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config HISI_PMU + tristate "HiSilicon SoC PMU drivers" + depends on ARM64 && ACPI + help + Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home + Agent performance monitor and DDR Controller performance monitor. diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile new file mode 100644 index 000000000..e83770618 --- /dev/null +++ b/drivers/perf/hisilicon/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ + hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c new file mode 100644 index 000000000..5e3645c96 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -0,0 +1,460 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC DDRC uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> + * Anurup M <anurup.m@huawei.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* DDRC register definition */ +#define DDRC_PERF_CTRL 0x010 +#define DDRC_FLUX_WR 0x380 +#define DDRC_FLUX_RD 0x384 +#define DDRC_FLUX_WCMD 0x388 +#define DDRC_FLUX_RCMD 0x38c +#define DDRC_PRE_CMD 0x3c0 +#define DDRC_ACT_CMD 0x3c4 +#define DDRC_RNK_CHG 0x3cc +#define DDRC_RW_CHG 0x3d0 +#define DDRC_EVENT_CTRL 0x6C0 +#define DDRC_INT_MASK 0x6c8 +#define DDRC_INT_STATUS 0x6cc +#define DDRC_INT_CLEAR 0x6d0 + +/* DDRC has 8-counters */ +#define DDRC_NR_COUNTERS 0x8 +#define DDRC_PERF_CTRL_EN 0x2 + +/* + * For DDRC PMU, there are eight-events and every event has been mapped + * to fixed-purpose counters which register offset is not consistent. + * Therefore there is no write event type and we assume that event + * code (0 to 7) is equal to counter index in PMU driver. + */ +#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7) + +static const u32 ddrc_reg_off[] = { + DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG +}; + +/* + * Select the counter register offset using the counter index. + * In DDRC there are no programmable counter, the count + * is readed form the statistics counter register itself. 
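As the comment above notes, the DDRC event code doubles as the counter index and each index selects a fixed register offset. A small sketch of that mapping, with the offsets copied from the ddrc_reg_off[] table:

#include <stdio.h>

/* offsets copied from the ddrc_reg_off[] table above */
static const unsigned int ddrc_reg_off[] = {
    0x380, 0x384, 0x388, 0x38c,   /* flux_wr, flux_rd, flux_wcmd, flux_rcmd */
    0x3c0, 0x3c4, 0x3cc, 0x3d0,   /* pre_cmd, act_cmd, rnk_chg, rw_chg */
};

int main(void)
{
    unsigned int config;

    for (config = 0; config < 8; config++) {
        unsigned int idx = config & 0x7;   /* GET_DDRC_EVENTID() */

        printf("event 0x%02x -> counter %u, register offset 0x%03x\n",
               config, idx, ddrc_reg_off[idx]);
    }
    return 0;
}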
+ */ +static u32 hisi_ddrc_pmu_get_counter_offset(int cntr_idx) +{ + return ddrc_reg_off[cntr_idx]; +} + +static u64 hisi_ddrc_pmu_read_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + /* Use event code as counter index */ + u32 idx = GET_DDRC_EVENTID(hwc); + + if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) { + dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + return readl(ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx)); +} + +static void hisi_ddrc_pmu_write_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = GET_DDRC_EVENTID(hwc); + + if (!hisi_uncore_pmu_counter_valid(ddrc_pmu, idx)) { + dev_err(ddrc_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + writel((u32)val, + ddrc_pmu->base + hisi_ddrc_pmu_get_counter_offset(idx)); +} + +/* + * For DDRC PMU, event has been mapped to fixed-purpose counter by hardware, + * so there is no need to write event type. + */ +static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, + u32 type) +{ +} + +static void hisi_ddrc_pmu_start_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + /* Set perf_enable in DDRC_PERF_CTRL to start event counting */ + val = readl(ddrc_pmu->base + DDRC_PERF_CTRL); + val |= DDRC_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_PERF_CTRL); +} + +static void hisi_ddrc_pmu_stop_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */ + val = readl(ddrc_pmu->base + DDRC_PERF_CTRL); + val &= ~DDRC_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_PERF_CTRL); +} + +static void hisi_ddrc_pmu_enable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Set counter index(event code) in DDRC_EVENT_CTRL register */ + val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL); + val |= (1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL); +} + +static void hisi_ddrc_pmu_disable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index(event code) in DDRC_EVENT_CTRL register */ + val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL); + val &= ~(1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL); +} + +static int hisi_ddrc_pmu_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask; + struct hw_perf_event *hwc = &event->hw; + /* For DDRC PMU, we use event code as counter index */ + int idx = GET_DDRC_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static void hisi_ddrc_pmu_enable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(ddrc_pmu->base + DDRC_INT_MASK); + val &= ~(1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_INT_MASK); +} + +static void hisi_ddrc_pmu_disable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(ddrc_pmu->base + DDRC_INT_MASK); + val |= (1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_INT_MASK); +} + +static irqreturn_t hisi_ddrc_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *ddrc_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read the DDRC_INT_STATUS register */ + overflown = readl(ddrc_pmu->base + 
DDRC_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it + */ + for_each_set_bit(idx, &overflown, DDRC_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), ddrc_pmu->base + DDRC_INT_CLEAR); + + /* Get the corresponding event struct */ + event = ddrc_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), ddrc_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + ddrc_pmu->irq = irq; + + return 0; +} + +static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = { + { "HISI0233", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match); + +static int hisi_ddrc_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *ddrc_pmu) +{ + /* + * Use the SCCL_ID and DDRC channel ID to identify the + * DDRC PMU, while SCCL_ID is in MPIDR[aff2]. + */ + if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id", + &ddrc_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read ddrc channel-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &ddrc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n"); + return -EINVAL; + } + /* DDRC PMUs only share the same SCCL */ + ddrc_pmu->ccl_id = -1; + + ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ddrc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n"); + return PTR_ERR(ddrc_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_ddrc_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-4"), + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_format_group = { + .name = "format", + .attrs = hisi_ddrc_pmu_format_attr, +}; + +static struct attribute *hisi_ddrc_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(flux_wr, 0x00), + HISI_PMU_EVENT_ATTR(flux_rd, 0x01), + HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02), + HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03), + HISI_PMU_EVENT_ATTR(pre_cmd, 0x04), + HISI_PMU_EVENT_ATTR(act_cmd, 0x05), + HISI_PMU_EVENT_ATTR(rnk_chg, 0x06), + HISI_PMU_EVENT_ATTR(rw_chg, 0x07), + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_events_group = { + .name = "events", + .attrs = hisi_ddrc_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = { + .attrs = hisi_ddrc_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_ddrc_pmu_attr_groups[] = { + &hisi_ddrc_pmu_format_group, + &hisi_ddrc_pmu_events_group, + &hisi_ddrc_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_ddrc_ops = { + .write_evtype = hisi_ddrc_pmu_write_evtype, + .get_event_idx = hisi_ddrc_pmu_get_event_idx, + .start_counters = hisi_ddrc_pmu_start_counters, + .stop_counters = hisi_ddrc_pmu_stop_counters, + .enable_counter = hisi_ddrc_pmu_enable_counter, + 
.disable_counter = hisi_ddrc_pmu_disable_counter, + .enable_counter_int = hisi_ddrc_pmu_enable_counter_int, + .disable_counter_int = hisi_ddrc_pmu_disable_counter_int, + .write_counter = hisi_ddrc_pmu_write_counter, + .read_counter = hisi_ddrc_pmu_read_counter, +}; + +static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *ddrc_pmu) +{ + int ret; + + ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu); + if (ret) + return ret; + + ret = hisi_ddrc_pmu_init_irq(ddrc_pmu, pdev); + if (ret) + return ret; + + ddrc_pmu->num_counters = DDRC_NR_COUNTERS; + ddrc_pmu->counter_bits = 32; + ddrc_pmu->ops = &hisi_uncore_ddrc_ops; + ddrc_pmu->dev = &pdev->dev; + ddrc_pmu->on_cpu = -1; + ddrc_pmu->check_event = 7; + + return 0; +} + +static int hisi_ddrc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *ddrc_pmu; + char *name; + int ret; + + ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL); + if (!ddrc_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, ddrc_pmu); + + ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_ddrc%u", + ddrc_pmu->sccl_id, ddrc_pmu->index_id); + ddrc_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = hisi_ddrc_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); + if (ret) { + dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node); + irq_set_affinity_hint(ddrc_pmu->irq, NULL); + } + + return ret; +} + +static int hisi_ddrc_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&ddrc_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + irq_set_affinity_hint(ddrc_pmu->irq, NULL); + + return 0; +} + +static struct platform_driver hisi_ddrc_pmu_driver = { + .driver = { + .name = "hisi_ddrc_pmu", + .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_ddrc_pmu_probe, + .remove = hisi_ddrc_pmu_remove, +}; + +static int __init hisi_ddrc_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + "AP_PERF_ARM_HISI_DDRC_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_ddrc_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE); + + return ret; +} +module_init(hisi_ddrc_pmu_module_init); + +static void __exit hisi_ddrc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_ddrc_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE); + +} +module_exit(hisi_ddrc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver"); 
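Once perf_pmu_register() succeeds, the instance appears under /sys/bus/event_source/devices/ with the hisi_sccl<N>_ddrc<M> name built above, and the event aliases listed earlier (flux_rd is config 0x01, and so on) can be used from the perf tool or via perf_event_open(2). A hypothetical userspace sketch: the instance name hisi_sccl3_ddrc0 is made up, the real name depends on the SCCL and channel IDs read at probe time, and the program needs sufficient perf privileges to open a system-wide event.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct perf_event_attr attr;
    unsigned int type;
    uint64_t count;
    FILE *f;
    int fd;

    /* hypothetical instance name; the real one depends on the probed IDs */
    f = fopen("/sys/bus/event_source/devices/hisi_sccl3_ddrc0/type", "r");
    if (!f || fscanf(f, "%u", &type) != 1)
        return 1;
    fclose(f);

    memset(&attr, 0, sizeof(attr));
    attr.type = type;
    attr.size = sizeof(attr);
    attr.config = 0x01;               /* flux_rd, per the event list above */

    /* uncore events need pid = -1 and a valid CPU, plus perf privileges */
    fd = syscall(SYS_perf_event_open, &attr, -1, 0, -1, 0);
    if (fd < 0)
        return 1;

    sleep(1);
    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("flux_rd: %llu\n", (unsigned long long)count);
    return 0;
}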
+MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); +MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c new file mode 100644 index 000000000..5eb816802 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC HHA uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> + * Anurup M <anurup.m@huawei.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* HHA register definition */ +#define HHA_INT_MASK 0x0804 +#define HHA_INT_STATUS 0x0808 +#define HHA_INT_CLEAR 0x080C +#define HHA_PERF_CTRL 0x1E00 +#define HHA_EVENT_CTRL 0x1E04 +#define HHA_EVENT_TYPE0 0x1E80 +/* + * Each counter is 48-bits and [48:63] are reserved + * which are Read-As-Zero and Writes-Ignored. + */ +#define HHA_CNT0_LOWER 0x1F00 + +/* HHA has 16-counters */ +#define HHA_NR_COUNTERS 0x10 + +#define HHA_PERF_CTRL_EN 0x1 +#define HHA_EVTYPE_NONE 0xff + +/* + * Select the counter register offset using the counter index + * each counter is 48-bits. + */ +static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx) +{ + return (HHA_CNT0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) { + dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + /* Read 64 bits and like L3C, top 16 bits are RAZ */ + return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx)); +} + +static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(hha_pmu, idx)) { + dev_err(hha_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + /* Write 64 bits and like L3C, top 16 bits are WI */ + writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(idx)); +} + +static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(HHA_EVENT_TYPEx). + * There are 4 event select registers for the 16 hardware counters. + * Event code is 8-bits and for the first 4 hardware counters, + * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters, + * HHA_EVENT_TYPE1 is chosen and so on. + */ + reg = HHA_EVENT_TYPE0 + 4 * (idx / 4); + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to HHA_EVENT_TYPEx register */ + val = readl(hha_pmu->base + reg); + val &= ~(HHA_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, hha_pmu->base + reg); +} + +static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in HHA_PERF_CTRL to start event + * counting for all enabled counters. 
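hisi_hha_pmu_write_evtype() above packs one 8-bit event code per counter into four HHA_EVENT_TYPEx registers. A quick sketch of the register and bit-field layout it computes for the 16 HHA counters:

#include <stdio.h>

#define HHA_EVENT_TYPE0 0x1E80

int main(void)
{
    int idx;

    for (idx = 0; idx < 16; idx++) {
        unsigned int reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
        unsigned int shift = 8 * (idx % 4);

        printf("counter %2d -> reg 0x%04x, bits [%u:%u]\n",
               idx, reg, shift + 7, shift);
    }
    return 0;
}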
+ */ + val = readl(hha_pmu->base + HHA_PERF_CTRL); + val |= HHA_PERF_CTRL_EN; + writel(val, hha_pmu->base + HHA_PERF_CTRL); +} + +static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in HHA_PERF_CTRL to stop event + * counting for all enabled counters. + */ + val = readl(hha_pmu->base + HHA_PERF_CTRL); + val &= ~(HHA_PERF_CTRL_EN); + writel(val, hha_pmu->base + HHA_PERF_CTRL); +} + +static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in HHA_EVENT_CTRL register */ + val = readl(hha_pmu->base + HHA_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, hha_pmu->base + HHA_EVENT_CTRL); +} + +static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in HHA_EVENT_CTRL register */ + val = readl(hha_pmu->base + HHA_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, hha_pmu->base + HHA_EVENT_CTRL); +} + +static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(hha_pmu->base + HHA_INT_MASK); + val &= ~(1 << hwc->idx); + writel(val, hha_pmu->base + HHA_INT_MASK); +} + +static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(hha_pmu->base + HHA_INT_MASK); + val |= (1 << hwc->idx); + writel(val, hha_pmu->base + HHA_INT_MASK); +} + +static irqreturn_t hisi_hha_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *hha_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read HHA_INT_STATUS register */ + overflown = readl(hha_pmu->base + HHA_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it + */ + for_each_set_bit(idx, &overflown, HHA_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), hha_pmu->base + HHA_INT_CLEAR); + + /* Get the corresponding event struct */ + event = hha_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), hha_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + hha_pmu->irq = irq; + + return 0; +} + +static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = { + { "HISI0243", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match); + +static int hisi_hha_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + unsigned long long id; + acpi_status status; + + status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + "_UID", NULL, &id); + if (ACPI_FAILURE(status)) + return -EINVAL; + + hha_pmu->index_id = id; + + /* + * Use SCCL_ID and UID to identify the HHA PMU, while + * SCCL_ID is in MPIDR[aff2]. 
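The overflow ISR above follows the usual uncore pattern: read the status register, acknowledge each overflowed counter with a write-1-to-clear, then update and re-arm that counter's event. A userspace model of the loop; the status value here is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define HHA_NR_COUNTERS 16

int main(void)
{
    uint32_t overflown = (1u << 2) | (1u << 9);   /* counters 2 and 9 */
    int idx;

    if (!overflown)
        return 0;   /* IRQ_NONE: this interrupt was not ours */

    for (idx = 0; idx < HHA_NR_COUNTERS; idx++) {
        if (!(overflown & (1u << idx)))
            continue;
        /* hardware equivalent: writel(1 << idx, base + HHA_INT_CLEAR) */
        printf("clear bit %d, update event, re-arm its period\n", idx);
    }
    return 0;   /* IRQ_HANDLED */
}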
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &hha_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read hha sccl-id!\n"); + return -EINVAL; + } + /* HHA PMUs only share the same SCCL */ + hha_pmu->ccl_id = -1; + + hha_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hha_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n"); + return PTR_ERR(hha_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_hha_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_format_group = { + .name = "format", + .attrs = hisi_hha_pmu_format_attr, +}; + +static struct attribute *hisi_hha_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00), + HISI_PMU_EVENT_ATTR(rx_outer, 0x01), + HISI_PMU_EVENT_ATTR(rx_sccl, 0x02), + HISI_PMU_EVENT_ATTR(rx_ccix, 0x03), + HISI_PMU_EVENT_ATTR(rx_wbi, 0x04), + HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), + HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), + HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), + HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), + HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), + HISI_PMU_EVENT_ATTR(spill_num, 0x20), + HISI_PMU_EVENT_ATTR(spill_success, 0x21), + HISI_PMU_EVENT_ATTR(bi_num, 0x23), + HISI_PMU_EVENT_ATTR(mediated_num, 0x32), + HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33), + HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34), + HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35), + HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38), + HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c), + HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40), + HISI_PMU_EVENT_ATTR(edir-lookup, 0x41), + HISI_PMU_EVENT_ATTR(sdir-hit, 0x42), + HISI_PMU_EVENT_ATTR(edir-hit, 0x43), + HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c), + HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d), + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_events_group = { + .name = "events", + .attrs = hisi_hha_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_hha_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = { + .attrs = hisi_hha_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_hha_pmu_attr_groups[] = { + &hisi_hha_pmu_format_group, + &hisi_hha_pmu_events_group, + &hisi_hha_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_hha_ops = { + .write_evtype = hisi_hha_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_hha_pmu_start_counters, + .stop_counters = hisi_hha_pmu_stop_counters, + .enable_counter = hisi_hha_pmu_enable_counter, + .disable_counter = hisi_hha_pmu_disable_counter, + .enable_counter_int = hisi_hha_pmu_enable_counter_int, + .disable_counter_int = hisi_hha_pmu_disable_counter_int, + .write_counter = hisi_hha_pmu_write_counter, + .read_counter = hisi_hha_pmu_read_counter, +}; + +static int hisi_hha_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + int ret; + + ret = hisi_hha_pmu_init_data(pdev, hha_pmu); + if (ret) + return ret; + + ret = hisi_hha_pmu_init_irq(hha_pmu, pdev); + if (ret) + return ret; + + hha_pmu->num_counters = HHA_NR_COUNTERS; + hha_pmu->counter_bits = 48; + hha_pmu->ops = &hisi_uncore_hha_ops; + hha_pmu->dev = &pdev->dev; + hha_pmu->on_cpu = -1; + hha_pmu->check_event = 0x65; + + return 0; +} + +static int hisi_hha_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu 
*hha_pmu; + char *name; + int ret; + + hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL); + if (!hha_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, hha_pmu); + + ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", + hha_pmu->sccl_id, hha_pmu->index_id); + hha_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = hisi_hha_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + ret = perf_pmu_register(&hha_pmu->pmu, name, -1); + if (ret) { + dev_err(hha_pmu->dev, "HHA PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); + irq_set_affinity_hint(hha_pmu->irq, NULL); + } + + return ret; +} + +static int hisi_hha_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&hha_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + irq_set_affinity_hint(hha_pmu->irq, NULL); + + return 0; +} + +static struct platform_driver hisi_hha_pmu_driver = { + .driver = { + .name = "hisi_hha_pmu", + .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_hha_pmu_probe, + .remove = hisi_hha_pmu_remove, +}; + +static int __init hisi_hha_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + "AP_PERF_ARM_HISI_HHA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_hha_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE); + + return ret; +} +module_init(hisi_hha_pmu_module_init); + +static void __exit hisi_hha_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_hha_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE); +} +module_exit(hisi_hha_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); +MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c new file mode 100644 index 000000000..3e8b5eab5 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC L3C uncore Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M <anurup.m@huawei.com> + * Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
+ */ +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* L3C register definition */ +#define L3C_PERF_CTRL 0x0408 +#define L3C_INT_MASK 0x0800 +#define L3C_INT_STATUS 0x0808 +#define L3C_INT_CLEAR 0x080c +#define L3C_EVENT_CTRL 0x1c00 +#define L3C_EVENT_TYPE0 0x1d00 +/* + * Each counter is 48-bits and [48:63] are reserved + * which are Read-As-Zero and Writes-Ignored. + */ +#define L3C_CNTR0_LOWER 0x1e00 + +/* L3C has 8-counters */ +#define L3C_NR_COUNTERS 0x8 + +#define L3C_PERF_CTRL_EN 0x10000 +#define L3C_EVTYPE_NONE 0xff + +/* + * Select the counter register offset using the counter index + */ +static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx) +{ + return (L3C_CNTR0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) { + dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx); + return 0; + } + + /* Read 64-bits and the upper 16 bits are RAZ */ + return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx)); +} + +static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc, u64 val) +{ + u32 idx = hwc->idx; + + if (!hisi_uncore_pmu_counter_valid(l3c_pmu, idx)) { + dev_err(l3c_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + /* Write 64-bits and the upper 16 bits are WI */ + writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(idx)); +} + +static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(L3C_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * L3C_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * L3C_EVENT_TYPE1 is chosen. + */ + reg = L3C_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to L3C_EVENT_TYPEx Register */ + val = readl(l3c_pmu->base + reg); + val &= ~(L3C_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, l3c_pmu->base + reg); +} + +static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in L3C_PERF_CTRL register to start counting + * for all enabled counters. + */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val |= L3C_PERF_CTRL_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); +} + +static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting + * for all enabled counters. 
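hisi_l3c_pmu_write_evtype() above updates one counter's 8-bit event field without disturbing the other counters that share the same L3C_EVENT_TYPEx register. The read-modify-write step in isolation, with arbitrary example values:

#include <stdio.h>

int main(void)
{
    unsigned int val = 0x44332211;   /* four event codes already programmed */
    int idx = 2;                     /* counter 2 -> bits [23:16] */
    unsigned int type = 0x59;        /* new event code to program */
    unsigned int shift = 8 * (idx % 4);

    val &= ~(0xffu << shift);        /* clear the old field (L3C_EVTYPE_NONE) */
    val |= type << shift;            /* insert the new event code */
    printf("new register value: 0x%08x\n", val);   /* 0x44592211 */
    return 0;
}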
+ */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val &= ~(L3C_PERF_CTRL_EN); + writel(val, l3c_pmu->base + L3C_PERF_CTRL); +} + +static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in L3C_EVENT_CTRL register */ + val = readl(l3c_pmu->base + L3C_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_EVENT_CTRL); +} + +static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in L3C_EVENT_CTRL register */ + val = readl(l3c_pmu->base + L3C_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_EVENT_CTRL); +} + +static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3c_pmu->base + L3C_INT_MASK); + /* Write 0 to enable interrupt */ + val &= ~(1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_INT_MASK); +} + +static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3c_pmu->base + L3C_INT_MASK); + /* Write 1 to mask interrupt */ + val |= (1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_INT_MASK); +} + +static irqreturn_t hisi_l3c_pmu_isr(int irq, void *dev_id) +{ + struct hisi_pmu *l3c_pmu = dev_id; + struct perf_event *event; + unsigned long overflown; + int idx; + + /* Read L3C_INT_STATUS register */ + overflown = readl(l3c_pmu->base + L3C_INT_STATUS); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it. + */ + for_each_set_bit(idx, &overflown, L3C_NR_COUNTERS) { + /* Write 1 to clear the IRQ status flag */ + writel((1 << idx), l3c_pmu->base + L3C_INT_CLEAR); + + /* Get the corresponding event struct */ + event = l3c_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + /* Read and init IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), l3c_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ:%d ret:%d\n", irq, ret); + return ret; + } + + l3c_pmu->irq = irq; + + return 0; +} + +static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = { + { "HISI0213", }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match); + +static int hisi_l3c_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + unsigned long long id; + acpi_status status; + + status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + "_UID", NULL, &id); + if (ACPI_FAILURE(status)) + return -EINVAL; + + l3c_pmu->index_id = id; + + /* + * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while + * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1]. 
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &l3c_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read l3c sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id", + &l3c_pmu->ccl_id)) { + dev_err(&pdev->dev, "Can not read l3c ccl-id!\n"); + return -EINVAL; + } + + l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(l3c_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n"); + return PTR_ERR(l3c_pmu->base); + } + + return 0; +} + +static struct attribute *hisi_l3c_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_format_group = { + .name = "format", + .attrs = hisi_l3c_pmu_format_attr, +}; + +static struct attribute *hisi_l3c_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00), + HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01), + HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02), + HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03), + HISI_PMU_EVENT_ATTR(victim_num, 0x04), + HISI_PMU_EVENT_ATTR(rd_spipe, 0x20), + HISI_PMU_EVENT_ATTR(wr_spipe, 0x21), + HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22), + HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23), + HISI_PMU_EVENT_ATTR(back_invalid, 0x29), + HISI_PMU_EVENT_ATTR(retry_cpu, 0x40), + HISI_PMU_EVENT_ATTR(retry_ring, 0x41), + HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42), + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_events_group = { + .name = "events", + .attrs = hisi_l3c_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = { + .attrs = hisi_l3c_pmu_cpumask_attrs, +}; + +static const struct attribute_group *hisi_l3c_pmu_attr_groups[] = { + &hisi_l3c_pmu_format_group, + &hisi_l3c_pmu_events_group, + &hisi_l3c_pmu_cpumask_attr_group, + NULL, +}; + +static const struct hisi_uncore_ops hisi_uncore_l3c_ops = { + .write_evtype = hisi_l3c_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_l3c_pmu_start_counters, + .stop_counters = hisi_l3c_pmu_stop_counters, + .enable_counter = hisi_l3c_pmu_enable_counter, + .disable_counter = hisi_l3c_pmu_disable_counter, + .enable_counter_int = hisi_l3c_pmu_enable_counter_int, + .disable_counter_int = hisi_l3c_pmu_disable_counter_int, + .write_counter = hisi_l3c_pmu_write_counter, + .read_counter = hisi_l3c_pmu_read_counter, +}; + +static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + int ret; + + ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu); + if (ret) + return ret; + + ret = hisi_l3c_pmu_init_irq(l3c_pmu, pdev); + if (ret) + return ret; + + l3c_pmu->num_counters = L3C_NR_COUNTERS; + l3c_pmu->counter_bits = 48; + l3c_pmu->ops = &hisi_uncore_l3c_ops; + l3c_pmu->dev = &pdev->dev; + l3c_pmu->on_cpu = -1; + l3c_pmu->check_event = 0x59; + + return 0; +} + +static int hisi_l3c_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *l3c_pmu; + char *name; + int ret; + + l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL); + if (!l3c_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, l3c_pmu); + + ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + name 
= devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", + l3c_pmu->sccl_id, l3c_pmu->index_id); + l3c_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = hisi_l3c_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); + if (ret) { + dev_err(l3c_pmu->dev, "L3C PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node); + irq_set_affinity_hint(l3c_pmu->irq, NULL); + } + + return ret; +} + +static int hisi_l3c_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&l3c_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + irq_set_affinity_hint(l3c_pmu->irq, NULL); + + return 0; +} + +static struct platform_driver hisi_l3c_pmu_driver = { + .driver = { + .name = "hisi_l3c_pmu", + .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_l3c_pmu_probe, + .remove = hisi_l3c_pmu_remove, +}; + +static int __init hisi_l3c_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + "AP_PERF_ARM_HISI_L3_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_l3c_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE); + + return ret; +} +module_init(hisi_l3c_pmu_module_init); + +static void __exit hisi_l3c_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_l3c_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE); +} +module_exit(hisi_l3c_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c new file mode 100644 index 000000000..97aff877a --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M <anurup.m@huawei.com> + * Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
+ */ +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> + +#include <asm/cputype.h> +#include <asm/local64.h> + +#include "hisi_uncore_pmu.h" + +#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) +#define HISI_MAX_PERIOD(nr) (BIT_ULL(nr) - 1) + +/* + * PMU format attributes + */ +ssize_t hisi_format_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(buf, "%s\n", (char *)eattr->var); +} +EXPORT_SYMBOL_GPL(hisi_format_sysfs_show); + +/* + * PMU event attributes + */ +ssize_t hisi_event_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var); +} +EXPORT_SYMBOL_GPL(hisi_event_sysfs_show); + +/* + * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show + */ +ssize_t hisi_cpumask_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev)); + + return sprintf(buf, "%d\n", hisi_pmu->on_cpu); +} +EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show); + +static bool hisi_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + /* Include count for the event */ + int counters = 1; + + if (!is_software_event(leader)) { + /* + * We must NOT create groups containing mixed PMUs, although + * software events are acceptable + */ + if (leader->pmu != event->pmu) + return false; + + /* Increment counter for the leader */ + if (leader != event) + counters++; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + if (sibling->pmu != event->pmu) + return false; + /* Increment counter for each sibling */ + counters++; + } + + /* The group can not count events more than the counters in the HW */ + return counters <= hisi_pmu->num_counters; +} + +int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx) +{ + return idx >= 0 && idx < hisi_pmu->num_counters; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_counter_valid); + +int hisi_uncore_pmu_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = hisi_pmu->pmu_events.used_mask; + u32 num_counters = hisi_pmu->num_counters; + int idx; + + idx = find_first_zero_bit(used_mask, num_counters); + if (idx == num_counters) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx); + +static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx) +{ + if (!hisi_uncore_pmu_counter_valid(hisi_pmu, idx)) { + dev_err(hisi_pmu->dev, "Unsupported event index:%d!\n", idx); + return; + } + + clear_bit(idx, hisi_pmu->pmu_events.used_mask); +} + +int hisi_uncore_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hisi_pmu *hisi_pmu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * We do not support sampling as the counters are all + * shared by all CPU cores in a CPU die(SCCL). 
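hisi_validate_event_group() above budgets one hardware counter per non-software event in a proposed group and rejects groups that need more counters than the PMU has. A simplified model of that count; it assumes the new event is not its own group leader (the driver also handles that case), and uses the counter totals from the drivers above (DDRC and L3C expose 8 counters, HHA 16).

#include <stdbool.h>
#include <stdio.h>

/* counters a new hardware event would need: itself, its hardware
 * leader, and the existing hardware siblings; software events are free */
static int counters_needed(bool leader_is_hw, int nr_hw_siblings)
{
    return 1 + (leader_is_hw ? 1 : 0) + nr_hw_siblings;
}

int main(void)
{
    int num_counters = 8;   /* DDRC and L3C have 8 counters, HHA has 16 */

    printf("leader + 6 siblings: %s\n",
           counters_needed(true, 6) <= num_counters ? "fits" : "too many");
    printf("leader + 7 siblings: %s\n",
           counters_needed(true, 7) <= num_counters ? "fits" : "too many");
    return 0;
}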
Also we + * do not support attach to a task(per-process mode) + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + /* + * The uncore counters not specific to any CPU, so cannot + * support per-task + */ + if (event->cpu < 0) + return -EINVAL; + + /* + * Validate if the events in group does not exceed the + * available counters in hardware. + */ + if (!hisi_validate_event_group(event)) + return -EINVAL; + + hisi_pmu = to_hisi_pmu(event->pmu); + if (event->attr.config > hisi_pmu->check_event) + return -EINVAL; + + if (hisi_pmu->on_cpu == -1) + return -EINVAL; + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. + */ + hwc->idx = -1; + hwc->config_base = event->attr.config; + + /* Enforce to use the same CPU for all events in this PMU */ + event->cpu = hisi_pmu->on_cpu; + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init); + +/* + * Set the counter to count the event that we're interested in, + * and enable interrupt and counter. + */ +static void hisi_uncore_pmu_enable_event(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx, + HISI_GET_EVENTID(event)); + + hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc); + hisi_pmu->ops->enable_counter(hisi_pmu, hwc); +} + +/* + * Disable counter and interrupt. + */ +static void hisi_uncore_pmu_disable_event(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pmu->ops->disable_counter(hisi_pmu, hwc); + hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc); +} + +void hisi_uncore_pmu_set_event_period(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * The HiSilicon PMU counters support 32 bits or 48 bits, depending on + * the PMU. We reduce it to 2^(counter_bits - 1) to account for the + * extreme interrupt latency. So we could hopefully handle the overflow + * interrupt before another 2^(counter_bits - 1) events occur and the + * counter overtakes its previous value. 
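To make the point of that comment concrete: the counter is started at half its range, so roughly 2^(counter_bits - 1) events can be absorbed before the next overflow interrupt, and hisi_uncore_pmu_event_update() reduces deltas modulo 2^counter_bits. The arithmetic for a 48-bit counter (32 bits for DDRC, 48 for HHA and L3C):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int counter_bits = 48;
    uint64_t start = 1ULL << (counter_bits - 1);        /* BIT_ULL(bits - 1) */
    uint64_t max_period = (1ULL << counter_bits) - 1;   /* HISI_MAX_PERIOD() */
    uint64_t prev = 0xFFFFFFFFFF00ULL;                  /* last value read */
    uint64_t now = 0x40ULL;                             /* wrapped past zero */

    printf("start value:            0x%llx\n", (unsigned long long)start);
    printf("events before overflow: %llu\n",
           (unsigned long long)(max_period - start + 1));
    printf("delta across the wrap:  0x%llx\n",
           (unsigned long long)((now - prev) & max_period));   /* 0x140 */
    return 0;
}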
+ */ + u64 val = BIT_ULL(hisi_pmu->counter_bits - 1); + + local64_set(&hwc->prev_count, val); + /* Write start value to the hardware event counter */ + hisi_pmu->ops->write_counter(hisi_pmu, hwc, val); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period); + +void hisi_uncore_pmu_event_update(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + + do { + /* Read the count from the counter register */ + new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc); + prev_raw_count = local64_read(&hwc->prev_count); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + /* + * compute the delta + */ + delta = (new_raw_count - prev_raw_count) & + HISI_MAX_PERIOD(hisi_pmu->counter_bits); + local64_add(delta, &event->count); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update); + +void hisi_uncore_pmu_start(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + hisi_uncore_pmu_set_event_period(event); + + if (flags & PERF_EF_RELOAD) { + u64 prev_raw_count = local64_read(&hwc->prev_count); + + hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count); + } + + hisi_uncore_pmu_enable_event(event); + perf_event_update_userpage(event); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start); + +void hisi_uncore_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hisi_uncore_pmu_disable_event(event); + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hisi_uncore_pmu_event_update(event); + hwc->state |= PERF_HES_UPTODATE; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop); + +int hisi_uncore_pmu_add(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Get an available counter index for counting */ + idx = hisi_pmu->ops->get_event_idx(event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + hisi_pmu->pmu_events.hw_events[idx] = event; + + if (flags & PERF_EF_START) + hisi_uncore_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add); + +void hisi_uncore_pmu_del(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_uncore_pmu_stop(event, PERF_EF_UPDATE); + hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx); + perf_event_update_userpage(event); + hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del); + +void hisi_uncore_pmu_read(struct perf_event *event) +{ + /* Read hardware counter and update the perf counter statistics */ + hisi_uncore_pmu_event_update(event); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read); + +void hisi_uncore_pmu_enable(struct pmu *pmu) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); + int enabled = bitmap_weight(hisi_pmu->pmu_events.used_mask, + hisi_pmu->num_counters); + + if (!enabled) + return; + + hisi_pmu->ops->start_counters(hisi_pmu); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable); + +void 
hisi_uncore_pmu_disable(struct pmu *pmu) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); + + hisi_pmu->ops->stop_counters(hisi_pmu); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable); + + +/* + * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] + */ +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) +{ + u64 mpidr = read_cpuid_mpidr(); + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; + } else { + sccl = aff2; + ccl = aff1; + } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; +} + +/* + * Check whether the CPU is associated with this uncore PMU + */ +static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu) +{ + int sccl_id, ccl_id; + + if (hisi_pmu->ccl_id == -1) { + /* If CCL_ID is -1, the PMU only shares the same SCCL */ + hisi_read_sccl_and_ccl_id(&sccl_id, NULL); + + return sccl_id == hisi_pmu->sccl_id; + } + + hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id); + + return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id; +} + +int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu, + node); + + if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) + return 0; + + cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus); + + /* If another CPU is already managing this PMU, simply return. 
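hisi_read_sccl_and_ccl_id() above extracts the cluster IDs from MPIDR_EL1 according to the three encodings listed in its comment. A userspace model of the same decode, fed a sample MPIDR value; the TSV110 part-number check is reduced to a boolean for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* MPIDR affinity fields: Aff0-2 are bytes 0-2, Aff3 is bits [39:32] */
static unsigned int aff(uint64_t mpidr, int level)
{
    int shift = (level == 3) ? 32 : 8 * level;

    return (mpidr >> shift) & 0xff;
}

static void read_sccl_ccl(uint64_t mpidr, bool is_tsv110, int *sccl, int *ccl)
{
    bool mt = mpidr & (1ULL << 24);   /* MPIDR_MT_BITMASK */

    if (mt && is_tsv110) {
        *sccl = aff(mpidr, 2) >> 3;
        *ccl = aff(mpidr, 2) & 0x7;
    } else if (mt) {
        *sccl = aff(mpidr, 3);
        *ccl = aff(mpidr, 2);
    } else {
        *sccl = aff(mpidr, 2);
        *ccl = aff(mpidr, 1);
    }
}

int main(void)
{
    int sccl, ccl;

    /* non-MT example: Aff2 = 0x03 (SCCL), Aff1 = 0x05 (CCL), Aff0 = core */
    read_sccl_ccl(0x0000000000030502ULL, false, &sccl, &ccl);
    printf("SCCL %d, CCL %d\n", sccl, ccl);
    return 0;
}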
*/ + if (hisi_pmu->on_cpu != -1) + return 0; + + /* Use this CPU in cpumask for event counting */ + hisi_pmu->on_cpu = cpu; + + /* Overflow interrupt also should use the same CPU */ + WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(cpu))); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu); + +int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu, + node); + cpumask_t pmu_online_cpus; + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus)) + return 0; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hisi_pmu->on_cpu != cpu) + return 0; + + /* Give up ownership of the PMU */ + hisi_pmu->on_cpu = -1; + + /* Choose a new CPU to migrate ownership of the PMU to */ + cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus, + cpu_online_mask); + target = cpumask_any_but(&pmu_online_cpus, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target); + /* Use this CPU for event counting */ + hisi_pmu->on_cpu = target; + WARN_ON(irq_set_affinity_hint(hisi_pmu->irq, cpumask_of(target))); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h new file mode 100644 index 000000000..b59ec2216 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HiSilicon SoC Hardware event counters support + * + * Copyright (C) 2017 Hisilicon Limited + * Author: Anurup M <anurup.m@huawei.com> + * Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
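When the owning CPU goes offline, hisi_uncore_pmu_offline_cpu() above hands the PMU to any other online CPU still associated with it and migrates the perf context there. A plain-bitmask model of that target selection; the CPU masks are made-up example values standing in for cpumask_t.

#include <stdio.h>

int main(void)
{
    unsigned int associated = 0x0f;   /* CPUs 0-3 share this PMU (example) */
    unsigned int online = 0x0d;       /* CPU 1 is already offline */
    unsigned int dying = 0;           /* CPU 0 is going down now */
    unsigned int candidates = associated & online & ~(1u << dying);
    int target = -1, cpu;

    /* cpumask_any_but(): pick any remaining candidate */
    for (cpu = 0; cpu < 32; cpu++) {
        if (candidates & (1u << cpu)) {
            target = cpu;
            break;
        }
    }
    printf("migrate PMU context from CPU %u to CPU %d\n", dying, target);
    return 0;
}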
+ */ +#ifndef __HISI_UNCORE_PMU_H__ +#define __HISI_UNCORE_PMU_H__ + +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/perf_event.h> +#include <linux/types.h> + +#undef pr_fmt +#define pr_fmt(fmt) "hisi_pmu: " fmt + +#define HISI_MAX_COUNTERS 0x10 +#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu)) + +#define HISI_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HISI_PMU_FORMAT_ATTR(_name, _config) \ + HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config) +#define HISI_PMU_EVENT_ATTR(_name, _config) \ + HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config) + +struct hisi_pmu; + +struct hisi_uncore_ops { + void (*write_evtype)(struct hisi_pmu *, int, u32); + int (*get_event_idx)(struct perf_event *); + u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64); + void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*start_counters)(struct hisi_pmu *); + void (*stop_counters)(struct hisi_pmu *); +}; + +struct hisi_pmu_hwevents { + struct perf_event *hw_events[HISI_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS); +}; + +/* Generic pmu struct for different pmu types */ +struct hisi_pmu { + struct pmu pmu; + const struct hisi_uncore_ops *ops; + struct hisi_pmu_hwevents pmu_events; + /* associated_cpus: All CPUs associated with the PMU */ + cpumask_t associated_cpus; + /* CPU used for counting */ + int on_cpu; + int irq; + struct device *dev; + struct hlist_node node; + int sccl_id; + int ccl_id; + void __iomem *base; + /* the ID of the PMU modules */ + u32 index_id; + int num_counters; + int counter_bits; + /* check event code range */ + int check_event; +}; + +int hisi_uncore_pmu_counter_valid(struct hisi_pmu *hisi_pmu, int idx); +int hisi_uncore_pmu_get_event_idx(struct perf_event *event); +void hisi_uncore_pmu_read(struct perf_event *event); +int hisi_uncore_pmu_add(struct perf_event *event, int flags); +void hisi_uncore_pmu_del(struct perf_event *event, int flags); +void hisi_uncore_pmu_start(struct perf_event *event, int flags); +void hisi_uncore_pmu_stop(struct perf_event *event, int flags); +void hisi_uncore_pmu_set_event_period(struct perf_event *event); +void hisi_uncore_pmu_event_update(struct perf_event *event); +int hisi_uncore_pmu_event_init(struct perf_event *event); +void hisi_uncore_pmu_enable(struct pmu *pmu); +void hisi_uncore_pmu_disable(struct pmu *pmu); +ssize_t hisi_event_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hisi_format_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hisi_cpumask_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node); +int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node); +#endif /* __HISI_UNCORE_PMU_H__ */ diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c new file mode 100644 index 000000000..d58810f53 --- /dev/null +++ b/drivers/perf/qcom_l2_pmu.c @@ -0,0 +1,1008 @@ +// SPDX-License-Identifier: 
GPL-2.0-only +/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved. + */ +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/percpu.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#include <asm/barrier.h> +#include <asm/local64.h> +#include <asm/sysreg.h> +#include <soc/qcom/kryo-l2-accessors.h> + +#define MAX_L2_CTRS 9 + +#define L2PMCR_NUM_EV_SHIFT 11 +#define L2PMCR_NUM_EV_MASK 0x1F + +#define L2PMCR 0x400 +#define L2PMCNTENCLR 0x403 +#define L2PMCNTENSET 0x404 +#define L2PMINTENCLR 0x405 +#define L2PMINTENSET 0x406 +#define L2PMOVSCLR 0x407 +#define L2PMOVSSET 0x408 +#define L2PMCCNTCR 0x409 +#define L2PMCCNTR 0x40A +#define L2PMCCNTSR 0x40C +#define L2PMRESR 0x410 +#define IA_L2PMXEVCNTCR_BASE 0x420 +#define IA_L2PMXEVCNTR_BASE 0x421 +#define IA_L2PMXEVFILTER_BASE 0x423 +#define IA_L2PMXEVTYPER_BASE 0x424 + +#define IA_L2_REG_OFFSET 0x10 + +#define L2PMXEVFILTER_SUFILTER_ALL 0x000E0000 +#define L2PMXEVFILTER_ORGFILTER_IDINDEP 0x00000004 +#define L2PMXEVFILTER_ORGFILTER_ALL 0x00000003 + +#define L2EVTYPER_REG_SHIFT 3 + +#define L2PMRESR_GROUP_BITS 8 +#define L2PMRESR_GROUP_MASK GENMASK(7, 0) + +#define L2CYCLE_CTR_BIT 31 +#define L2CYCLE_CTR_RAW_CODE 0xFE + +#define L2PMCR_RESET_ALL 0x6 +#define L2PMCR_COUNTERS_ENABLE 0x1 +#define L2PMCR_COUNTERS_DISABLE 0x0 + +#define L2PMRESR_EN BIT_ULL(63) + +#define L2_EVT_MASK 0x00000FFF +#define L2_EVT_CODE_MASK 0x00000FF0 +#define L2_EVT_GRP_MASK 0x0000000F +#define L2_EVT_CODE_SHIFT 4 +#define L2_EVT_GRP_SHIFT 0 + +#define L2_EVT_CODE(event) (((event) & L2_EVT_CODE_MASK) >> L2_EVT_CODE_SHIFT) +#define L2_EVT_GROUP(event) (((event) & L2_EVT_GRP_MASK) >> L2_EVT_GRP_SHIFT) + +#define L2_EVT_GROUP_MAX 7 + +#define L2_COUNTER_RELOAD BIT_ULL(31) +#define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63) + + +#define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) + +/* + * Events + */ +#define L2_EVENT_CYCLES 0xfe +#define L2_EVENT_DCACHE_OPS 0x400 +#define L2_EVENT_ICACHE_OPS 0x401 +#define L2_EVENT_TLBI 0x402 +#define L2_EVENT_BARRIERS 0x403 +#define L2_EVENT_TOTAL_READS 0x405 +#define L2_EVENT_TOTAL_WRITES 0x406 +#define L2_EVENT_TOTAL_REQUESTS 0x407 +#define L2_EVENT_LDREX 0x420 +#define L2_EVENT_STREX 0x421 +#define L2_EVENT_CLREX 0x422 + + + +struct cluster_pmu; + +/* + * Aggregate PMU. Implements the core pmu functions and manages + * the hardware PMUs. + */ +struct l2cache_pmu { + struct hlist_node node; + u32 num_pmus; + struct pmu pmu; + int num_counters; + cpumask_t cpumask; + struct platform_device *pdev; + struct cluster_pmu * __percpu *pmu_cluster; + struct list_head clusters; +}; + +/* + * The cache is made up of one or more clusters, each cluster has its own PMU. + * Each cluster is associated with one or more CPUs. + * This structure represents one of the hardware PMUs. + * + * Events can be envisioned as a 2-dimensional array. Each column represents + * a group of events. There are 8 groups. Only one entry from each + * group can be in use at a time. + * + * Events are specified as 0xCCG, where CC is 2 hex digits specifying + * the code (array row) and G specifies the group (column). 
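+ * For example, icache-ops (0x401) encodes event code 0x40 in group 1,
+ * while dcache-ops (0x400) encodes the same code in group 0.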
+ * + * In addition there is a cycle counter event specified by L2CYCLE_CTR_RAW_CODE + * which is outside the above scheme. + */ +struct cluster_pmu { + struct list_head next; + struct perf_event *events[MAX_L2_CTRS]; + struct l2cache_pmu *l2cache_pmu; + DECLARE_BITMAP(used_counters, MAX_L2_CTRS); + DECLARE_BITMAP(used_groups, L2_EVT_GROUP_MAX + 1); + int irq; + int cluster_id; + /* The CPU that is used for collecting events on this cluster */ + int on_cpu; + /* All the CPUs associated with this cluster */ + cpumask_t cluster_cpus; + spinlock_t pmu_lock; +}; + +#define to_l2cache_pmu(p) (container_of(p, struct l2cache_pmu, pmu)) + +static u32 l2_cycle_ctr_idx; +static u32 l2_counter_present_mask; + +static inline u32 idx_to_reg_bit(u32 idx) +{ + if (idx == l2_cycle_ctr_idx) + return BIT(L2CYCLE_CTR_BIT); + + return BIT(idx); +} + +static inline struct cluster_pmu *get_cluster_pmu( + struct l2cache_pmu *l2cache_pmu, int cpu) +{ + return *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu); +} + +static void cluster_pmu_reset(void) +{ + /* Reset all counters */ + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); + kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); +} + +static inline void cluster_pmu_enable(void) +{ + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); +} + +static inline void cluster_pmu_disable(void) +{ + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); +} + +static inline void cluster_pmu_counter_set_value(u32 idx, u64 value) +{ + if (idx == l2_cycle_ctr_idx) + kryo_l2_set_indirect_reg(L2PMCCNTR, value); + else + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); +} + +static inline u64 cluster_pmu_counter_get_value(u32 idx) +{ + u64 value; + + if (idx == l2_cycle_ctr_idx) + value = kryo_l2_get_indirect_reg(L2PMCCNTR); + else + value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); + + return value; +} + +static inline void cluster_pmu_counter_enable(u32 idx) +{ + kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); +} + +static inline void cluster_pmu_counter_disable(u32 idx) +{ + kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); +} + +static inline void cluster_pmu_counter_enable_interrupt(u32 idx) +{ + kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); +} + +static inline void cluster_pmu_counter_disable_interrupt(u32 idx) +{ + kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); +} + +static inline void cluster_pmu_set_evccntcr(u32 val) +{ + kryo_l2_set_indirect_reg(L2PMCCNTCR, val); +} + +static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val) +{ + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); +} + +static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val) +{ + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); +} + +static void cluster_pmu_set_resr(struct cluster_pmu *cluster, + u32 event_group, u32 event_cc) +{ + u64 field; + u64 resr_val; + u32 shift; + unsigned long flags; + + shift = L2PMRESR_GROUP_BITS * event_group; + field = ((u64)(event_cc & L2PMRESR_GROUP_MASK) << shift); + + spin_lock_irqsave(&cluster->pmu_lock, flags); + + resr_val = kryo_l2_get_indirect_reg(L2PMRESR); + resr_val &= ~(L2PMRESR_GROUP_MASK << shift); + resr_val |= field; + resr_val |= L2PMRESR_EN; + kryo_l2_set_indirect_reg(L2PMRESR, resr_val); + + spin_unlock_irqrestore(&cluster->pmu_lock, flags); +} + +/* + * Hardware allows filtering of events 
based on the originating + * CPU. Turn this off by setting filter bits to allow events from + * all CPUS, subunits and ID independent events in this cluster. + */ +static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr) +{ + u32 val = L2PMXEVFILTER_SUFILTER_ALL | + L2PMXEVFILTER_ORGFILTER_IDINDEP | + L2PMXEVFILTER_ORGFILTER_ALL; + + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); +} + +static inline u32 cluster_pmu_getreset_ovsr(void) +{ + u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET); + + kryo_l2_set_indirect_reg(L2PMOVSCLR, result); + return result; +} + +static inline bool cluster_pmu_has_overflowed(u32 ovsr) +{ + return !!(ovsr & l2_counter_present_mask); +} + +static inline bool cluster_pmu_counter_has_overflowed(u32 ovsr, u32 idx) +{ + return !!(ovsr & idx_to_reg_bit(idx)); +} + +static void l2_cache_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev, now; + u32 idx = hwc->idx; + + do { + prev = local64_read(&hwc->prev_count); + now = cluster_pmu_counter_get_value(idx); + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + /* + * The cycle counter is 64-bit, but all other counters are + * 32-bit, and we must handle 32-bit overflow explicitly. + */ + delta = now - prev; + if (idx != l2_cycle_ctr_idx) + delta &= 0xffffffff; + + local64_add(delta, &event->count); +} + +static void l2_cache_cluster_set_period(struct cluster_pmu *cluster, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 new; + + /* + * We limit the max period to half the max counter value so + * that even in the case of extreme interrupt latency the + * counter will (hopefully) not wrap past its initial value. + */ + if (idx == l2_cycle_ctr_idx) + new = L2_CYCLE_COUNTER_RELOAD; + else + new = L2_COUNTER_RELOAD; + + local64_set(&hwc->prev_count, new); + cluster_pmu_counter_set_value(idx, new); +} + +static int l2_cache_get_event_idx(struct cluster_pmu *cluster, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx; + int num_ctrs = cluster->l2cache_pmu->num_counters - 1; + unsigned int group; + + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { + if (test_and_set_bit(l2_cycle_ctr_idx, cluster->used_counters)) + return -EAGAIN; + + return l2_cycle_ctr_idx; + } + + idx = find_first_zero_bit(cluster->used_counters, num_ctrs); + if (idx == num_ctrs) + /* The counters are all in use. */ + return -EAGAIN; + + /* + * Check for column exclusion: event column already in use by another + * event. This is for events which are not in the same group. + * Conflicting events in the same group are detected in event_init. 
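+ * For example, dcache-ops (0x400) and ldrex (0x420) both live in event
+ * column 0, so whichever is scheduled second on a cluster gets -EAGAIN here.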
+ */ + group = L2_EVT_GROUP(hwc->config_base); + if (test_bit(group, cluster->used_groups)) + return -EAGAIN; + + set_bit(idx, cluster->used_counters); + set_bit(group, cluster->used_groups); + + return idx; +} + +static void l2_cache_clear_event_idx(struct cluster_pmu *cluster, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + clear_bit(idx, cluster->used_counters); + if (hwc->config_base != L2CYCLE_CTR_RAW_CODE) + clear_bit(L2_EVT_GROUP(hwc->config_base), cluster->used_groups); +} + +static irqreturn_t l2_cache_handle_irq(int irq_num, void *data) +{ + struct cluster_pmu *cluster = data; + int num_counters = cluster->l2cache_pmu->num_counters; + u32 ovsr; + int idx; + + ovsr = cluster_pmu_getreset_ovsr(); + if (!cluster_pmu_has_overflowed(ovsr)) + return IRQ_NONE; + + for_each_set_bit(idx, cluster->used_counters, num_counters) { + struct perf_event *event = cluster->events[idx]; + struct hw_perf_event *hwc; + + if (WARN_ON_ONCE(!event)) + continue; + + if (!cluster_pmu_counter_has_overflowed(ovsr, idx)) + continue; + + l2_cache_event_update(event); + hwc = &event->hw; + + l2_cache_cluster_set_period(cluster, hwc); + } + + return IRQ_HANDLED; +} + +/* + * Implementation of abstract pmu functionality required by + * the core perf events code. + */ + +static void l2_cache_pmu_enable(struct pmu *pmu) +{ + /* + * Although there is only one PMU (per socket) controlling multiple + * physical PMUs (per cluster), because we do not support per-task mode + * each event is associated with a CPU. Each event has pmu_enable + * called on its CPU, so here it is only necessary to enable the + * counters for the current CPU. + */ + + cluster_pmu_enable(); +} + +static void l2_cache_pmu_disable(struct pmu *pmu) +{ + cluster_pmu_disable(); +} + +static int l2_cache_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct cluster_pmu *cluster; + struct perf_event *sibling; + struct l2cache_pmu *l2cache_pmu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + l2cache_pmu = to_l2cache_pmu(event->pmu); + + if (hwc->sample_period) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Sampling not supported\n"); + return -EOPNOTSUPP; + } + + if (event->cpu < 0) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Per-task mode not supported\n"); + return -EOPNOTSUPP; + } + + if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) || + ((event->attr.config & ~L2_EVT_MASK) != 0)) && + (event->attr.config != L2CYCLE_CTR_RAW_CODE)) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Invalid config %llx\n", + event->attr.config); + return -EINVAL; + } + + /* Don't allow groups with mixed PMUs, except for s/w events */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Can't create mixed PMU group\n"); + return -EINVAL; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Can't create mixed PMU group\n"); + return -EINVAL; + } + } + + cluster = get_cluster_pmu(l2cache_pmu, event->cpu); + if (!cluster) { + /* CPU has not been initialised */ + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "CPU%d not associated with L2 cluster\n", event->cpu); + return -EINVAL; + } + + /* Ensure all events in a group are on the same cpu */ + if ((event->group_leader != event) && + (cluster->on_cpu != 
event->group_leader->cpu)) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Can't create group on CPUs %d and %d", + event->cpu, event->group_leader->cpu); + return -EINVAL; + } + + if ((event != event->group_leader) && + !is_software_event(event->group_leader) && + (L2_EVT_GROUP(event->group_leader->attr.config) == + L2_EVT_GROUP(event->attr.config))) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Column exclusion: conflicting events %llx %llx\n", + event->group_leader->attr.config, + event->attr.config); + return -EINVAL; + } + + for_each_sibling_event(sibling, event->group_leader) { + if ((sibling != event) && + !is_software_event(sibling) && + (L2_EVT_GROUP(sibling->attr.config) == + L2_EVT_GROUP(event->attr.config))) { + dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, + "Column exclusion: conflicting events %llx %llx\n", + sibling->attr.config, + event->attr.config); + return -EINVAL; + } + } + + hwc->idx = -1; + hwc->config_base = event->attr.config; + + /* + * Ensure all events are on the same cpu so all events are in the + * same cpu context, to avoid races on pmu_enable etc. + */ + event->cpu = cluster->on_cpu; + + return 0; +} + +static void l2_cache_event_start(struct perf_event *event, int flags) +{ + struct cluster_pmu *cluster; + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + u32 config; + u32 event_cc, event_group; + + hwc->state = 0; + + cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); + + l2_cache_cluster_set_period(cluster, hwc); + + if (hwc->config_base == L2CYCLE_CTR_RAW_CODE) { + cluster_pmu_set_evccntcr(0); + } else { + config = hwc->config_base; + event_cc = L2_EVT_CODE(config); + event_group = L2_EVT_GROUP(config); + + cluster_pmu_set_evcntcr(idx, 0); + cluster_pmu_set_evtyper(idx, event_group); + cluster_pmu_set_resr(cluster, event_group, event_cc); + cluster_pmu_set_evfilter_sys_mode(idx); + } + + cluster_pmu_counter_enable_interrupt(idx); + cluster_pmu_counter_enable(idx); +} + +static void l2_cache_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (hwc->state & PERF_HES_STOPPED) + return; + + cluster_pmu_counter_disable_interrupt(idx); + cluster_pmu_counter_disable(idx); + + if (flags & PERF_EF_UPDATE) + l2_cache_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int l2_cache_event_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + struct cluster_pmu *cluster; + + cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); + + idx = l2_cache_get_event_idx(cluster, event); + if (idx < 0) + return idx; + + hwc->idx = idx; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + cluster->events[idx] = event; + local64_set(&hwc->prev_count, 0); + + if (flags & PERF_EF_START) + l2_cache_event_start(event, flags); + + /* Propagate changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + + return err; +} + +static void l2_cache_event_del(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct cluster_pmu *cluster; + int idx = hwc->idx; + + cluster = get_cluster_pmu(to_l2cache_pmu(event->pmu), event->cpu); + + l2_cache_event_stop(event, flags | PERF_EF_UPDATE); + cluster->events[idx] = NULL; + l2_cache_clear_event_idx(cluster, event); + + perf_event_update_userpage(event); +} + +static void l2_cache_event_read(struct perf_event *event) +{ + l2_cache_event_update(event); +} + +static ssize_t l2_cache_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct l2cache_pmu *l2cache_pmu = to_l2cache_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &l2cache_pmu->cpumask); +} + +static struct device_attribute l2_cache_pmu_cpumask_attr = + __ATTR(cpumask, S_IRUGO, l2_cache_pmu_cpumask_show, NULL); + +static struct attribute *l2_cache_pmu_cpumask_attrs[] = { + &l2_cache_pmu_cpumask_attr.attr, + NULL, +}; + +static struct attribute_group l2_cache_pmu_cpumask_group = { + .attrs = l2_cache_pmu_cpumask_attrs, +}; + +/* CCG format for perf RAW codes. */ +PMU_FORMAT_ATTR(l2_code, "config:4-11"); +PMU_FORMAT_ATTR(l2_group, "config:0-3"); +PMU_FORMAT_ATTR(event, "config:0-11"); + +static struct attribute *l2_cache_pmu_formats[] = { + &format_attr_l2_code.attr, + &format_attr_l2_group.attr, + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group l2_cache_pmu_format_group = { + .name = "format", + .attrs = l2_cache_pmu_formats, +}; + +static ssize_t l2cache_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define L2CACHE_EVENT_ATTR(_name, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, l2cache_pmu_event_show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + +static struct attribute *l2_cache_pmu_events[] = { + L2CACHE_EVENT_ATTR(cycles, L2_EVENT_CYCLES), + L2CACHE_EVENT_ATTR(dcache-ops, L2_EVENT_DCACHE_OPS), + L2CACHE_EVENT_ATTR(icache-ops, L2_EVENT_ICACHE_OPS), + L2CACHE_EVENT_ATTR(tlbi, L2_EVENT_TLBI), + L2CACHE_EVENT_ATTR(barriers, L2_EVENT_BARRIERS), + L2CACHE_EVENT_ATTR(total-reads, L2_EVENT_TOTAL_READS), + L2CACHE_EVENT_ATTR(total-writes, L2_EVENT_TOTAL_WRITES), + L2CACHE_EVENT_ATTR(total-requests, L2_EVENT_TOTAL_REQUESTS), + L2CACHE_EVENT_ATTR(ldrex, L2_EVENT_LDREX), + L2CACHE_EVENT_ATTR(strex, L2_EVENT_STREX), + L2CACHE_EVENT_ATTR(clrex, L2_EVENT_CLREX), + NULL +}; + +static struct attribute_group l2_cache_pmu_events_group = { + .name = "events", + .attrs = l2_cache_pmu_events, +}; + +static const struct attribute_group *l2_cache_pmu_attr_grps[] = { + &l2_cache_pmu_format_group, + &l2_cache_pmu_cpumask_group, + &l2_cache_pmu_events_group, + NULL, +}; + +/* + * Generic device handlers + */ + +static const struct acpi_device_id l2_cache_pmu_acpi_match[] = { + { "QCOM8130", }, + { } +}; + +static int get_num_counters(void) +{ + int val; + + val = kryo_l2_get_indirect_reg(L2PMCR); + + /* + * Read number of counters from L2PMCR and add 1 + * for the cycle counter. 
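+ * The cycle counter is always assigned the highest index (num_counters - 1).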
+ */ + return ((val >> L2PMCR_NUM_EV_SHIFT) & L2PMCR_NUM_EV_MASK) + 1; +} + +static struct cluster_pmu *l2_cache_associate_cpu_with_cluster( + struct l2cache_pmu *l2cache_pmu, int cpu) +{ + u64 mpidr; + int cpu_cluster_id; + struct cluster_pmu *cluster; + + /* + * This assumes that the cluster_id is in MPIDR[aff1] for + * single-threaded cores, and MPIDR[aff2] for multi-threaded + * cores. This logic will have to be updated if this changes. + */ + mpidr = read_cpuid_mpidr(); + if (mpidr & MPIDR_MT_BITMASK) + cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); + else + cpu_cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + + list_for_each_entry(cluster, &l2cache_pmu->clusters, next) { + if (cluster->cluster_id != cpu_cluster_id) + continue; + + dev_info(&l2cache_pmu->pdev->dev, + "CPU%d associated with cluster %d\n", cpu, + cluster->cluster_id); + cpumask_set_cpu(cpu, &cluster->cluster_cpus); + *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster; + return cluster; + } + + return NULL; +} + +static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct cluster_pmu *cluster; + struct l2cache_pmu *l2cache_pmu; + + l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); + cluster = get_cluster_pmu(l2cache_pmu, cpu); + if (!cluster) { + /* First time this CPU has come online */ + cluster = l2_cache_associate_cpu_with_cluster(l2cache_pmu, cpu); + if (!cluster) { + /* Only if broken firmware doesn't list every cluster */ + WARN_ONCE(1, "No L2 cache cluster for CPU%d\n", cpu); + return 0; + } + } + + /* If another CPU is managing this cluster, we're done */ + if (cluster->on_cpu != -1) + return 0; + + /* + * All CPUs on this cluster were down, use this one. + * Reset to put it into sane state. + */ + cluster->on_cpu = cpu; + cpumask_set_cpu(cpu, &l2cache_pmu->cpumask); + cluster_pmu_reset(); + + WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(cpu))); + enable_irq(cluster->irq); + + return 0; +} + +static int l2cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct cluster_pmu *cluster; + struct l2cache_pmu *l2cache_pmu; + cpumask_t cluster_online_cpus; + unsigned int target; + + l2cache_pmu = hlist_entry_safe(node, struct l2cache_pmu, node); + cluster = get_cluster_pmu(l2cache_pmu, cpu); + if (!cluster) + return 0; + + /* If this CPU is not managing the cluster, we're done */ + if (cluster->on_cpu != cpu) + return 0; + + /* Give up ownership of cluster */ + cpumask_clear_cpu(cpu, &l2cache_pmu->cpumask); + cluster->on_cpu = -1; + + /* Any other CPU for this cluster which is still online */ + cpumask_and(&cluster_online_cpus, &cluster->cluster_cpus, + cpu_online_mask); + target = cpumask_any_but(&cluster_online_cpus, cpu); + if (target >= nr_cpu_ids) { + disable_irq(cluster->irq); + return 0; + } + + perf_pmu_migrate_context(&l2cache_pmu->pmu, cpu, target); + cluster->on_cpu = target; + cpumask_set_cpu(target, &l2cache_pmu->cpumask); + WARN_ON(irq_set_affinity(cluster->irq, cpumask_of(target))); + + return 0; +} + +static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev->parent); + struct platform_device *sdev = to_platform_device(dev); + struct l2cache_pmu *l2cache_pmu = data; + struct cluster_pmu *cluster; + struct acpi_device *device; + unsigned long fw_cluster_id; + int err; + int irq; + + if (acpi_bus_get_device(ACPI_HANDLE(dev), &device)) + return -ENODEV; + + if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) { + dev_err(&pdev->dev, "unable to read ACPI 
uid\n"); + return -ENODEV; + } + + cluster = devm_kzalloc(&pdev->dev, sizeof(*cluster), GFP_KERNEL); + if (!cluster) + return -ENOMEM; + + INIT_LIST_HEAD(&cluster->next); + list_add(&cluster->next, &l2cache_pmu->clusters); + cluster->cluster_id = fw_cluster_id; + + irq = platform_get_irq(sdev, 0); + if (irq < 0) + return irq; + irq_set_status_flags(irq, IRQ_NOAUTOEN); + cluster->irq = irq; + + cluster->l2cache_pmu = l2cache_pmu; + cluster->on_cpu = -1; + + err = devm_request_irq(&pdev->dev, irq, l2_cache_handle_irq, + IRQF_NOBALANCING | IRQF_NO_THREAD, + "l2-cache-pmu", cluster); + if (err) { + dev_err(&pdev->dev, + "Unable to request IRQ%d for L2 PMU counters\n", irq); + return err; + } + + dev_info(&pdev->dev, + "Registered L2 cache PMU cluster %ld\n", fw_cluster_id); + + spin_lock_init(&cluster->pmu_lock); + + l2cache_pmu->num_pmus++; + + return 0; +} + +static int l2_cache_pmu_probe(struct platform_device *pdev) +{ + int err; + struct l2cache_pmu *l2cache_pmu; + + l2cache_pmu = + devm_kzalloc(&pdev->dev, sizeof(*l2cache_pmu), GFP_KERNEL); + if (!l2cache_pmu) + return -ENOMEM; + + INIT_LIST_HEAD(&l2cache_pmu->clusters); + + platform_set_drvdata(pdev, l2cache_pmu); + l2cache_pmu->pmu = (struct pmu) { + /* suffix is instance id for future use with multiple sockets */ + .name = "l2cache_0", + .task_ctx_nr = perf_invalid_context, + .pmu_enable = l2_cache_pmu_enable, + .pmu_disable = l2_cache_pmu_disable, + .event_init = l2_cache_event_init, + .add = l2_cache_event_add, + .del = l2_cache_event_del, + .start = l2_cache_event_start, + .stop = l2_cache_event_stop, + .read = l2_cache_event_read, + .attr_groups = l2_cache_pmu_attr_grps, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + l2cache_pmu->num_counters = get_num_counters(); + l2cache_pmu->pdev = pdev; + l2cache_pmu->pmu_cluster = devm_alloc_percpu(&pdev->dev, + struct cluster_pmu *); + if (!l2cache_pmu->pmu_cluster) + return -ENOMEM; + + l2_cycle_ctr_idx = l2cache_pmu->num_counters - 1; + l2_counter_present_mask = GENMASK(l2cache_pmu->num_counters - 2, 0) | + BIT(L2CYCLE_CTR_BIT); + + cpumask_clear(&l2cache_pmu->cpumask); + + /* Read cluster info and initialize each cluster */ + err = device_for_each_child(&pdev->dev, l2cache_pmu, + l2_cache_pmu_probe_cluster); + if (err) + return err; + + if (l2cache_pmu->num_pmus == 0) { + dev_err(&pdev->dev, "No hardware L2 cache PMUs found\n"); + return -ENODEV; + } + + err = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + &l2cache_pmu->node); + if (err) { + dev_err(&pdev->dev, "Error %d registering hotplug", err); + return err; + } + + err = perf_pmu_register(&l2cache_pmu->pmu, l2cache_pmu->pmu.name, -1); + if (err) { + dev_err(&pdev->dev, "Error %d registering L2 cache PMU\n", err); + goto out_unregister; + } + + dev_info(&pdev->dev, "Registered L2 cache PMU using %d HW PMUs\n", + l2cache_pmu->num_pmus); + + return err; + +out_unregister: + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + &l2cache_pmu->node); + return err; +} + +static int l2_cache_pmu_remove(struct platform_device *pdev) +{ + struct l2cache_pmu *l2cache_pmu = + to_l2cache_pmu(platform_get_drvdata(pdev)); + + perf_pmu_unregister(&l2cache_pmu->pmu); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + &l2cache_pmu->node); + return 0; +} + +static struct platform_driver l2_cache_pmu_driver = { + .driver = { + .name = "qcom-l2cache-pmu", + .acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = l2_cache_pmu_probe, + .remove = l2_cache_pmu_remove, 
+}; + +static int __init register_l2_cache_pmu_driver(void) +{ + int err; + + err = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, + "AP_PERF_ARM_QCOM_L2_ONLINE", + l2cache_pmu_online_cpu, + l2cache_pmu_offline_cpu); + if (err) + return err; + + return platform_driver_register(&l2_cache_pmu_driver); +} +device_initcall(register_l2_cache_pmu_driver); diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c new file mode 100644 index 000000000..9ddb577c5 --- /dev/null +++ b/drivers/perf/qcom_l3_pmu.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for the L3 cache PMUs in Qualcomm Technologies chips. + * + * The driver supports a distributed cache architecture where the overall + * cache for a socket is comprised of multiple slices each with its own PMU. + * Access to each individual PMU is provided even though all CPUs share all + * the slices. User space needs to aggregate to individual counts to provide + * a global picture. + * + * See Documentation/admin-guide/perf/qcom_l3_pmu.rst for more details. + * + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + */ + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +/* + * General constants + */ + +/* Number of counters on each PMU */ +#define L3_NUM_COUNTERS 8 +/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */ +#define L3_EVTYPE_MASK 0xFF +/* + * Bit position of the 'long counter' flag within perf_event_attr.config. + * Reserve some space between the event type and this flag to allow expansion + * in the event type field. + */ +#define L3_EVENT_LC_BIT 32 + +/* + * Register offsets + */ + +/* Perfmon registers */ +#define L3_HML3_PM_CR 0x000 +#define L3_HML3_PM_EVCNTR(__cntr) (0x420 + ((__cntr) & 0x7) * 8) +#define L3_HML3_PM_CNTCTL(__cntr) (0x120 + ((__cntr) & 0x7) * 8) +#define L3_HML3_PM_EVTYPE(__cntr) (0x220 + ((__cntr) & 0x7) * 8) +#define L3_HML3_PM_FILTRA 0x300 +#define L3_HML3_PM_FILTRB 0x308 +#define L3_HML3_PM_FILTRC 0x310 +#define L3_HML3_PM_FILTRAM 0x304 +#define L3_HML3_PM_FILTRBM 0x30C +#define L3_HML3_PM_FILTRCM 0x314 + +/* Basic counter registers */ +#define L3_M_BC_CR 0x500 +#define L3_M_BC_SATROLL_CR 0x504 +#define L3_M_BC_CNTENSET 0x508 +#define L3_M_BC_CNTENCLR 0x50C +#define L3_M_BC_INTENSET 0x510 +#define L3_M_BC_INTENCLR 0x514 +#define L3_M_BC_GANG 0x718 +#define L3_M_BC_OVSR 0x740 +#define L3_M_BC_IRQCTL 0x96C + +/* + * Bit field definitions + */ + +/* L3_HML3_PM_CR */ +#define PM_CR_RESET (0) + +/* L3_HML3_PM_XCNTCTL/L3_HML3_PM_CNTCTLx */ +#define PMCNT_RESET (0) + +/* L3_HML3_PM_EVTYPEx */ +#define EVSEL(__val) ((__val) & L3_EVTYPE_MASK) + +/* Reset value for all the filter registers */ +#define PM_FLTR_RESET (0) + +/* L3_M_BC_CR */ +#define BC_RESET (1UL << 1) +#define BC_ENABLE (1UL << 0) + +/* L3_M_BC_SATROLL_CR */ +#define BC_SATROLL_CR_RESET (0) + +/* L3_M_BC_CNTENSET */ +#define PMCNTENSET(__cntr) (1UL << ((__cntr) & 0x7)) + +/* L3_M_BC_CNTENCLR */ +#define PMCNTENCLR(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_CNTENCLR_RESET (0xFF) + +/* L3_M_BC_INTENSET */ +#define PMINTENSET(__cntr) (1UL << ((__cntr) & 0x7)) + +/* L3_M_BC_INTENCLR */ +#define PMINTENCLR(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_INTENCLR_RESET (0xFF) + +/* L3_M_BC_GANG */ +#define GANG_EN(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_GANG_RESET (0) + +/* 
L3_M_BC_OVSR */ +#define PMOVSRCLR(__cntr) (1UL << ((__cntr) & 0x7)) +#define PMOVSRCLR_RESET (0xFF) + +/* L3_M_BC_IRQCTL */ +#define PMIRQONMSBEN(__cntr) (1UL << ((__cntr) & 0x7)) +#define BC_IRQCTL_RESET (0x0) + +/* + * Events + */ + +#define L3_EVENT_CYCLES 0x01 +#define L3_EVENT_READ_HIT 0x20 +#define L3_EVENT_READ_MISS 0x21 +#define L3_EVENT_READ_HIT_D 0x22 +#define L3_EVENT_READ_MISS_D 0x23 +#define L3_EVENT_WRITE_HIT 0x24 +#define L3_EVENT_WRITE_MISS 0x25 + +/* + * Decoding of settings from perf_event_attr + * + * The config format for perf events is: + * - config: bits 0-7: event type + * bit 32: HW counter size requested, 0: 32 bits, 1: 64 bits + */ + +static inline u32 get_event_type(struct perf_event *event) +{ + return (event->attr.config) & L3_EVTYPE_MASK; +} + +static inline bool event_uses_long_counter(struct perf_event *event) +{ + return !!(event->attr.config & BIT_ULL(L3_EVENT_LC_BIT)); +} + +static inline int event_num_counters(struct perf_event *event) +{ + return event_uses_long_counter(event) ? 2 : 1; +} + +/* + * Main PMU, inherits from the core perf PMU type + */ +struct l3cache_pmu { + struct pmu pmu; + struct hlist_node node; + void __iomem *regs; + struct perf_event *events[L3_NUM_COUNTERS]; + unsigned long used_mask[BITS_TO_LONGS(L3_NUM_COUNTERS)]; + cpumask_t cpumask; +}; + +#define to_l3cache_pmu(p) (container_of(p, struct l3cache_pmu, pmu)) + +/* + * Type used to group hardware counter operations + * + * Used to implement two types of hardware counters, standard (32bits) and + * long (64bits). The hardware supports counter chaining which we use to + * implement long counters. This support is exposed via the 'lc' flag field + * in perf_event_attr.config. + */ +struct l3cache_event_ops { + /* Called to start event monitoring */ + void (*start)(struct perf_event *event); + /* Called to stop event monitoring */ + void (*stop)(struct perf_event *event, int flags); + /* Called to update the perf_event */ + void (*update)(struct perf_event *event); +}; + +/* + * Implementation of long counter operations + * + * 64bit counters are implemented by chaining two of the 32bit physical + * counters. The PMU only supports chaining of adjacent even/odd pairs + * and for simplicity the driver always configures the odd counter to + * count the overflows of the lower-numbered even counter. Note that since + * the resulting hardware counter is 64bits no IRQs are required to maintain + * the software counter which is also 64bits. 
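+ *
+ * For example, a long event placed at even index 2 also claims index 3;
+ * counter 3 is ganged to count the overflows of counter 2 and its event
+ * type is set to zero, so the pair behaves as a single 64-bit counter.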
+ */ + +static void qcom_l3_cache__64bit_counter_start(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 evsel = get_event_type(event); + u32 gang; + + /* Set the odd counter to count the overflows of the even counter */ + gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); + gang |= GANG_EN(idx + 1); + writel_relaxed(gang, l3pmu->regs + L3_M_BC_GANG); + + /* Initialize the hardware counters and reset prev_count*/ + local64_set(&event->hw.prev_count, 0); + writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); + writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + + /* + * Set the event types, the upper half must use zero and the lower + * half the actual event type + */ + writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(idx + 1)); + writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); + + /* Finally, enable the counters */ + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx + 1)); + writel_relaxed(PMCNTENSET(idx + 1), l3pmu->regs + L3_M_BC_CNTENSET); + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); + writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); +} + +static void qcom_l3_cache__64bit_counter_stop(struct perf_event *event, + int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 gang = readl_relaxed(l3pmu->regs + L3_M_BC_GANG); + + /* Disable the counters */ + writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); + writel_relaxed(PMCNTENCLR(idx + 1), l3pmu->regs + L3_M_BC_CNTENCLR); + + /* Disable chaining */ + writel_relaxed(gang & ~GANG_EN(idx + 1), l3pmu->regs + L3_M_BC_GANG); +} + +static void qcom_l3_cache__64bit_counter_update(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 hi, lo; + u64 prev, new; + + do { + prev = local64_read(&event->hw.prev_count); + do { + hi = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1)); + lo = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + } while (hi != readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx + 1))); + new = ((u64)hi << 32) | lo; + } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); + + local64_add(new - prev, &event->count); +} + +static const struct l3cache_event_ops event_ops_long = { + .start = qcom_l3_cache__64bit_counter_start, + .stop = qcom_l3_cache__64bit_counter_stop, + .update = qcom_l3_cache__64bit_counter_update, +}; + +/* + * Implementation of standard counter operations + * + * 32bit counters use a single physical counter and a hardware feature that + * asserts the overflow IRQ on the toggling of the most significant bit in + * the counter. This feature allows the counters to be left free-running + * without needing the usual reprogramming required to properly handle races + * during concurrent calls to update. 
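+ *
+ * The overflow interrupt handler therefore only needs to call the update
+ * callback, which folds the current 32-bit value into the 64-bit perf count.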
+ */ + +static void qcom_l3_cache__32bit_counter_start(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 evsel = get_event_type(event); + u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); + + /* Set the counter to assert the overflow IRQ on MSB toggling */ + writel_relaxed(irqctl | PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); + + /* Initialize the hardware counter and reset prev_count*/ + local64_set(&event->hw.prev_count, 0); + writel_relaxed(0, l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + + /* Set the event type */ + writel_relaxed(EVSEL(evsel), l3pmu->regs + L3_HML3_PM_EVTYPE(idx)); + + /* Enable interrupt generation by this counter */ + writel_relaxed(PMINTENSET(idx), l3pmu->regs + L3_M_BC_INTENSET); + + /* Finally, enable the counter */ + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(idx)); + writel_relaxed(PMCNTENSET(idx), l3pmu->regs + L3_M_BC_CNTENSET); +} + +static void qcom_l3_cache__32bit_counter_stop(struct perf_event *event, + int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 irqctl = readl_relaxed(l3pmu->regs + L3_M_BC_IRQCTL); + + /* Disable the counter */ + writel_relaxed(PMCNTENCLR(idx), l3pmu->regs + L3_M_BC_CNTENCLR); + + /* Disable interrupt generation by this counter */ + writel_relaxed(PMINTENCLR(idx), l3pmu->regs + L3_M_BC_INTENCLR); + + /* Set the counter to not assert the overflow IRQ on MSB toggling */ + writel_relaxed(irqctl & ~PMIRQONMSBEN(idx), l3pmu->regs + L3_M_BC_IRQCTL); +} + +static void qcom_l3_cache__32bit_counter_update(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + int idx = event->hw.idx; + u32 prev, new; + + do { + prev = local64_read(&event->hw.prev_count); + new = readl_relaxed(l3pmu->regs + L3_HML3_PM_EVCNTR(idx)); + } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev); + + local64_add(new - prev, &event->count); +} + +static const struct l3cache_event_ops event_ops_std = { + .start = qcom_l3_cache__32bit_counter_start, + .stop = qcom_l3_cache__32bit_counter_stop, + .update = qcom_l3_cache__32bit_counter_update, +}; + +/* Retrieve the appropriate operations for the given event */ +static +const struct l3cache_event_ops *l3cache_event_get_ops(struct perf_event *event) +{ + if (event_uses_long_counter(event)) + return &event_ops_long; + else + return &event_ops_std; +} + +/* + * Top level PMU functions. 
+ */ + +static inline void qcom_l3_cache__init(struct l3cache_pmu *l3pmu) +{ + int i; + + writel_relaxed(BC_RESET, l3pmu->regs + L3_M_BC_CR); + + /* + * Use writel for the first programming command to ensure the basic + * counter unit is stopped before proceeding + */ + writel(BC_SATROLL_CR_RESET, l3pmu->regs + L3_M_BC_SATROLL_CR); + + writel_relaxed(BC_CNTENCLR_RESET, l3pmu->regs + L3_M_BC_CNTENCLR); + writel_relaxed(BC_INTENCLR_RESET, l3pmu->regs + L3_M_BC_INTENCLR); + writel_relaxed(PMOVSRCLR_RESET, l3pmu->regs + L3_M_BC_OVSR); + writel_relaxed(BC_GANG_RESET, l3pmu->regs + L3_M_BC_GANG); + writel_relaxed(BC_IRQCTL_RESET, l3pmu->regs + L3_M_BC_IRQCTL); + writel_relaxed(PM_CR_RESET, l3pmu->regs + L3_HML3_PM_CR); + + for (i = 0; i < L3_NUM_COUNTERS; ++i) { + writel_relaxed(PMCNT_RESET, l3pmu->regs + L3_HML3_PM_CNTCTL(i)); + writel_relaxed(EVSEL(0), l3pmu->regs + L3_HML3_PM_EVTYPE(i)); + } + + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRA); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRAM); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRB); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRBM); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRC); + writel_relaxed(PM_FLTR_RESET, l3pmu->regs + L3_HML3_PM_FILTRCM); + + /* + * Use writel here to ensure all programming commands are done + * before proceeding + */ + writel(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); +} + +static irqreturn_t qcom_l3_cache__handle_irq(int irq_num, void *data) +{ + struct l3cache_pmu *l3pmu = data; + /* Read the overflow status register */ + long status = readl_relaxed(l3pmu->regs + L3_M_BC_OVSR); + int idx; + + if (status == 0) + return IRQ_NONE; + + /* Clear the bits we read on the overflow status register */ + writel_relaxed(status, l3pmu->regs + L3_M_BC_OVSR); + + for_each_set_bit(idx, &status, L3_NUM_COUNTERS) { + struct perf_event *event; + const struct l3cache_event_ops *ops; + + event = l3pmu->events[idx]; + if (!event) + continue; + + /* + * Since the IRQ is not enabled for events using long counters + * we should never see one of those here, however, be consistent + * and use the ops indirections like in the other operations. + */ + + ops = l3cache_event_get_ops(event); + ops->update(event); + } + + return IRQ_HANDLED; +} + +/* + * Implementation of abstract pmu functionality required by + * the core perf events code. + */ + +static void qcom_l3_cache__pmu_enable(struct pmu *pmu) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu); + + /* Ensure the other programming commands are observed before enabling */ + wmb(); + + writel_relaxed(BC_ENABLE, l3pmu->regs + L3_M_BC_CR); +} + +static void qcom_l3_cache__pmu_disable(struct pmu *pmu) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(pmu); + + writel_relaxed(0, l3pmu->regs + L3_M_BC_CR); + + /* Ensure the basic counter unit is stopped before proceeding */ + wmb(); +} + +/* + * We must NOT create groups containing events from multiple hardware PMUs, + * although mixing different software and hardware PMUs is allowed. 
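+ *
+ * A group that needs more counters than the hardware provides (counting
+ * long events as two) can never be scheduled and is rejected up front.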
+ */ +static bool qcom_l3_cache__validate_event_group(struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct perf_event *sibling; + int counters = 0; + + if (leader->pmu != event->pmu && !is_software_event(leader)) + return false; + + counters = event_num_counters(event); + counters += event_num_counters(leader); + + for_each_sibling_event(sibling, leader) { + if (is_software_event(sibling)) + continue; + if (sibling->pmu != event->pmu) + return false; + counters += event_num_counters(sibling); + } + + /* + * If the group requires more counters than the HW has, it + * cannot ever be scheduled. + */ + return counters <= L3_NUM_COUNTERS; +} + +static int qcom_l3_cache__event_init(struct perf_event *event) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * Is the event for this PMU? + */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * Sampling not supported since these events are not core-attributable. + */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Task mode not available, we run the counters as socket counters, + * not attributable to any CPU and therefore cannot attribute per-task. + */ + if (event->cpu < 0) + return -EINVAL; + + /* Validate the group */ + if (!qcom_l3_cache__validate_event_group(event)) + return -EINVAL; + + hwc->idx = -1; + + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, like this one, where + * each event could be theoretically assigned to a different CPU. + * To mitigate this, we enforce CPU assignment to one designated + * processor (the one described in the "cpumask" attribute exported + * by the PMU device). perf user space tools honor this and avoid + * opening more than one copy of the events. + */ + event->cpu = cpumask_first(&l3pmu->cpumask); + + return 0; +} + +static void qcom_l3_cache__event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); + + hwc->state = 0; + ops->start(event); +} + +static void qcom_l3_cache__event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); + + if (hwc->state & PERF_HES_STOPPED) + return; + + ops->stop(event, flags); + if (flags & PERF_EF_UPDATE) + ops->update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int qcom_l3_cache__event_add(struct perf_event *event, int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int order = event_uses_long_counter(event) ? 1 : 0; + int idx; + + /* + * Try to allocate a counter. + */ + idx = bitmap_find_free_region(l3pmu->used_mask, L3_NUM_COUNTERS, order); + if (idx < 0) + /* The counters are all in use. */ + return -EAGAIN; + + hwc->idx = idx; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + l3pmu->events[idx] = event; + + if (flags & PERF_EF_START) + qcom_l3_cache__event_start(event, 0); + + /* Propagate changes to the userspace mapping. 
*/ + perf_event_update_userpage(event); + + return 0; +} + +static void qcom_l3_cache__event_del(struct perf_event *event, int flags) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int order = event_uses_long_counter(event) ? 1 : 0; + + /* Stop and clean up */ + qcom_l3_cache__event_stop(event, flags | PERF_EF_UPDATE); + l3pmu->events[hwc->idx] = NULL; + bitmap_release_region(l3pmu->used_mask, hwc->idx, order); + + /* Propagate changes to the userspace mapping. */ + perf_event_update_userpage(event); +} + +static void qcom_l3_cache__event_read(struct perf_event *event) +{ + const struct l3cache_event_ops *ops = l3cache_event_get_ops(event); + + ops->update(event); +} + +/* + * Add sysfs attributes + * + * We export: + * - formats, used by perf user space and other tools to configure events + * - events, used by perf user space and other tools to create events + * symbolically, e.g.: + * perf stat -a -e l3cache_0_0/event=read-miss/ ls + * perf stat -a -e l3cache_0_0/event=0x21/ ls + * - cpumask, used by perf user space and other tools to know on which CPUs + * to open the events + */ + +/* formats */ + +static ssize_t l3cache_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "%s\n", (char *) eattr->var); +} + +#define L3CACHE_PMU_FORMAT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, 0444, l3cache_pmu_format_show, NULL), \ + .var = (void *) _config, } \ + })[0].attr.attr) + +static struct attribute *qcom_l3_cache_pmu_formats[] = { + L3CACHE_PMU_FORMAT_ATTR(event, "config:0-7"), + L3CACHE_PMU_FORMAT_ATTR(lc, "config:" __stringify(L3_EVENT_LC_BIT)), + NULL, +}; + +static struct attribute_group qcom_l3_cache_pmu_format_group = { + .name = "format", + .attrs = qcom_l3_cache_pmu_formats, +}; + +/* events */ + +static ssize_t l3cache_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%02llx\n", pmu_attr->id); +} + +#define L3CACHE_EVENT_ATTR(_name, _id) \ + (&((struct perf_pmu_events_attr[]) { \ + { .attr = __ATTR(_name, 0444, l3cache_pmu_event_show, NULL), \ + .id = _id, } \ + })[0].attr.attr) + +static struct attribute *qcom_l3_cache_pmu_events[] = { + L3CACHE_EVENT_ATTR(cycles, L3_EVENT_CYCLES), + L3CACHE_EVENT_ATTR(read-hit, L3_EVENT_READ_HIT), + L3CACHE_EVENT_ATTR(read-miss, L3_EVENT_READ_MISS), + L3CACHE_EVENT_ATTR(read-hit-d-side, L3_EVENT_READ_HIT_D), + L3CACHE_EVENT_ATTR(read-miss-d-side, L3_EVENT_READ_MISS_D), + L3CACHE_EVENT_ATTR(write-hit, L3_EVENT_WRITE_HIT), + L3CACHE_EVENT_ATTR(write-miss, L3_EVENT_WRITE_MISS), + NULL +}; + +static struct attribute_group qcom_l3_cache_pmu_events_group = { + .name = "events", + .attrs = qcom_l3_cache_pmu_events, +}; + +/* cpumask */ + +static ssize_t qcom_l3_cache_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct l3cache_pmu *l3pmu = to_l3cache_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &l3pmu->cpumask); +} + +static DEVICE_ATTR(cpumask, 0444, qcom_l3_cache_pmu_cpumask_show, NULL); + +static struct attribute *qcom_l3_cache_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group qcom_l3_cache_pmu_cpumask_attr_group = { + 
.attrs = qcom_l3_cache_pmu_cpumask_attrs, +}; + +/* + * Per PMU device attribute groups + */ +static const struct attribute_group *qcom_l3_cache_pmu_attr_grps[] = { + &qcom_l3_cache_pmu_format_group, + &qcom_l3_cache_pmu_events_group, + &qcom_l3_cache_pmu_cpumask_attr_group, + NULL, +}; + +/* + * Probing functions and data. + */ + +static int qcom_l3_cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node); + + /* If there is not a CPU/PMU association pick this CPU */ + if (cpumask_empty(&l3pmu->cpumask)) + cpumask_set_cpu(cpu, &l3pmu->cpumask); + + return 0; +} + +static int qcom_l3_cache_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct l3cache_pmu *l3pmu = hlist_entry_safe(node, struct l3cache_pmu, node); + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &l3pmu->cpumask)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + perf_pmu_migrate_context(&l3pmu->pmu, cpu, target); + cpumask_set_cpu(target, &l3pmu->cpumask); + return 0; +} + +static int qcom_l3_cache_pmu_probe(struct platform_device *pdev) +{ + struct l3cache_pmu *l3pmu; + struct acpi_device *acpi_dev; + struct resource *memrc; + int ret; + char *name; + + /* Initialize the PMU data structures */ + + acpi_dev = ACPI_COMPANION(&pdev->dev); + if (!acpi_dev) + return -ENODEV; + + l3pmu = devm_kzalloc(&pdev->dev, sizeof(*l3pmu), GFP_KERNEL); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "l3cache_%s_%s", + acpi_dev->parent->pnp.unique_id, acpi_dev->pnp.unique_id); + if (!l3pmu || !name) + return -ENOMEM; + + l3pmu->pmu = (struct pmu) { + .task_ctx_nr = perf_invalid_context, + + .pmu_enable = qcom_l3_cache__pmu_enable, + .pmu_disable = qcom_l3_cache__pmu_disable, + .event_init = qcom_l3_cache__event_init, + .add = qcom_l3_cache__event_add, + .del = qcom_l3_cache__event_del, + .start = qcom_l3_cache__event_start, + .stop = qcom_l3_cache__event_stop, + .read = qcom_l3_cache__event_read, + + .attr_groups = qcom_l3_cache_pmu_attr_grps, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); + l3pmu->regs = devm_ioremap_resource(&pdev->dev, memrc); + if (IS_ERR(l3pmu->regs)) { + dev_err(&pdev->dev, "Can't map PMU @%pa\n", &memrc->start); + return PTR_ERR(l3pmu->regs); + } + + qcom_l3_cache__init(l3pmu); + + ret = platform_get_irq(pdev, 0); + if (ret <= 0) + return ret; + + ret = devm_request_irq(&pdev->dev, ret, qcom_l3_cache__handle_irq, 0, + name, l3pmu); + if (ret) { + dev_err(&pdev->dev, "Request for IRQ failed for slice @%pa\n", + &memrc->start); + return ret; + } + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, &l3pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug", ret); + return ret; + } + + ret = perf_pmu_register(&l3pmu->pmu, name, -1); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register L3 cache PMU (%d)\n", ret); + return ret; + } + + dev_info(&pdev->dev, "Registered %s, type: %d\n", name, l3pmu->pmu.type); + + return 0; +} + +static const struct acpi_device_id qcom_l3_cache_pmu_acpi_match[] = { + { "QCOM8081", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, qcom_l3_cache_pmu_acpi_match); + +static struct platform_driver qcom_l3_cache_pmu_driver = { + .driver = { + .name = "qcom-l3cache-pmu", + .acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + 
.probe = qcom_l3_cache_pmu_probe, +}; + +static int __init register_qcom_l3_cache_pmu_driver(void) +{ + int ret; + + /* Install a hook to update the reader CPU in case it goes offline */ + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, + "perf/qcom/l3cache:online", + qcom_l3_cache_pmu_online_cpu, + qcom_l3_cache_pmu_offline_cpu); + if (ret) + return ret; + + return platform_driver_register(&qcom_l3_cache_pmu_driver); +} +device_initcall(register_qcom_l3_cache_pmu_driver); diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c new file mode 100644 index 000000000..e116815fa --- /dev/null +++ b/drivers/perf/thunderx2_pmu.c @@ -0,0 +1,1058 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CAVIUM THUNDERX2 SoC PMU UNCORE + * Copyright (C) 2018 Cavium Inc. + * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com> + */ + +#include <linux/acpi.h> +#include <linux/cpuhotplug.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> + +/* Each ThunderX2(TX2) Socket has a L3C and DMC UNCORE PMU device. + * Each UNCORE PMU device consists of 4 independent programmable counters. + * Counters are 32 bit and do not support overflow interrupt, + * they need to be sampled before overflow(i.e, at every 2 seconds). + */ + +#define TX2_PMU_DMC_L3C_MAX_COUNTERS 4 +#define TX2_PMU_CCPI2_MAX_COUNTERS 8 +#define TX2_PMU_MAX_COUNTERS TX2_PMU_CCPI2_MAX_COUNTERS + + +#define TX2_PMU_DMC_CHANNELS 8 +#define TX2_PMU_L3_TILES 16 + +#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC) +#define GET_EVENTID(ev, mask) ((ev->hw.config) & mask) +#define GET_COUNTERID(ev, mask) ((ev->hw.idx) & mask) + /* 1 byte per counter(4 counters). + * Event id is encoded in bits [5:1] of a byte, + */ +#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1)) + +/* bits[3:0] to select counters, are indexed from 8 to 15. */ +#define CCPI2_COUNTER_OFFSET 8 + +#define L3C_COUNTER_CTL 0xA8 +#define L3C_COUNTER_DATA 0xAC +#define DMC_COUNTER_CTL 0x234 +#define DMC_COUNTER_DATA 0x240 + +#define CCPI2_PERF_CTL 0x108 +#define CCPI2_COUNTER_CTL 0x10C +#define CCPI2_COUNTER_SEL 0x12c +#define CCPI2_COUNTER_DATA_L 0x130 +#define CCPI2_COUNTER_DATA_H 0x134 + +/* L3C event IDs */ +#define L3_EVENT_READ_REQ 0xD +#define L3_EVENT_WRITEBACK_REQ 0xE +#define L3_EVENT_INV_N_WRITE_REQ 0xF +#define L3_EVENT_INV_REQ 0x10 +#define L3_EVENT_EVICT_REQ 0x13 +#define L3_EVENT_INV_N_WRITE_HIT 0x14 +#define L3_EVENT_INV_HIT 0x15 +#define L3_EVENT_READ_HIT 0x17 +#define L3_EVENT_MAX 0x18 + +/* DMC event IDs */ +#define DMC_EVENT_COUNT_CYCLES 0x1 +#define DMC_EVENT_WRITE_TXNS 0xB +#define DMC_EVENT_DATA_TRANSFERS 0xD +#define DMC_EVENT_READ_TXNS 0xF +#define DMC_EVENT_MAX 0x10 + +#define CCPI2_EVENT_REQ_PKT_SENT 0x3D +#define CCPI2_EVENT_SNOOP_PKT_SENT 0x65 +#define CCPI2_EVENT_DATA_PKT_SENT 0x105 +#define CCPI2_EVENT_GIC_PKT_SENT 0x12D +#define CCPI2_EVENT_MAX 0x200 + +#define CCPI2_PERF_CTL_ENABLE BIT(0) +#define CCPI2_PERF_CTL_START BIT(1) +#define CCPI2_PERF_CTL_RESET BIT(4) +#define CCPI2_EVENT_LEVEL_RISING_EDGE BIT(10) +#define CCPI2_EVENT_TYPE_EDGE_SENSITIVE BIT(11) + +enum tx2_uncore_type { + PMU_TYPE_L3C, + PMU_TYPE_DMC, + PMU_TYPE_CCPI2, + PMU_TYPE_INVALID, +}; + +/* + * Each socket has 3 uncore devices associated with a PMU. The DMC and + * L3C have 4 32-bit counters and the CCPI2 has 8 64-bit counters. 
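+ *
+ * The 32-bit counters have no overflow interrupt, so a per-PMU hrtimer
+ * samples them every 2 seconds before they can wrap.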
+ */ +struct tx2_uncore_pmu { + struct hlist_node hpnode; + struct list_head entry; + struct pmu pmu; + char *name; + int node; + int cpu; + u32 max_counters; + u32 counters_mask; + u32 prorate_factor; + u32 max_events; + u32 events_mask; + u64 hrtimer_interval; + void __iomem *base; + DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS); + struct perf_event *events[TX2_PMU_MAX_COUNTERS]; + struct device *dev; + struct hrtimer hrtimer; + const struct attribute_group **attr_groups; + enum tx2_uncore_type type; + enum hrtimer_restart (*hrtimer_callback)(struct hrtimer *cb); + void (*init_cntr_base)(struct perf_event *event, + struct tx2_uncore_pmu *tx2_pmu); + void (*stop_event)(struct perf_event *event); + void (*start_event)(struct perf_event *event, int flags); +}; + +static LIST_HEAD(tx2_pmus); + +static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu) +{ + return container_of(pmu, struct tx2_uncore_pmu, pmu); +} + +#define TX2_PMU_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t \ +__tx2_pmu_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ + \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __tx2_pmu_##_var##_show, NULL) + +TX2_PMU_FORMAT_ATTR(event, event, "config:0-4"); +TX2_PMU_FORMAT_ATTR(event_ccpi2, event, "config:0-9"); + +static struct attribute *l3c_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute *dmc_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute *ccpi2_pmu_format_attrs[] = { + &format_attr_event_ccpi2.attr, + NULL, +}; + +static const struct attribute_group l3c_pmu_format_attr_group = { + .name = "format", + .attrs = l3c_pmu_format_attrs, +}; + +static const struct attribute_group dmc_pmu_format_attr_group = { + .name = "format", + .attrs = dmc_pmu_format_attrs, +}; + +static const struct attribute_group ccpi2_pmu_format_attr_group = { + .name = "format", + .attrs = ccpi2_pmu_format_attrs, +}; + +/* + * sysfs event attributes + */ +static ssize_t tx2_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var); +} + +#define TX2_EVENT_ATTR(name, config) \ + PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \ + config, tx2_pmu_event_show) + +TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ); +TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ); +TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ); +TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ); +TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ); +TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT); +TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT); +TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT); + +static struct attribute *l3c_pmu_events_attrs[] = { + &tx2_pmu_event_attr_read_request.attr.attr, + &tx2_pmu_event_attr_writeback_request.attr.attr, + &tx2_pmu_event_attr_inv_nwrite_request.attr.attr, + &tx2_pmu_event_attr_inv_request.attr.attr, + &tx2_pmu_event_attr_evict_request.attr.attr, + &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr, + &tx2_pmu_event_attr_inv_hit.attr.attr, + &tx2_pmu_event_attr_read_hit.attr.attr, + NULL, +}; + +TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES); +TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS); +TX2_EVENT_ATTR(data_transfers, 
DMC_EVENT_DATA_TRANSFERS); +TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS); + +static struct attribute *dmc_pmu_events_attrs[] = { + &tx2_pmu_event_attr_cnt_cycles.attr.attr, + &tx2_pmu_event_attr_write_txns.attr.attr, + &tx2_pmu_event_attr_data_transfers.attr.attr, + &tx2_pmu_event_attr_read_txns.attr.attr, + NULL, +}; + +TX2_EVENT_ATTR(req_pktsent, CCPI2_EVENT_REQ_PKT_SENT); +TX2_EVENT_ATTR(snoop_pktsent, CCPI2_EVENT_SNOOP_PKT_SENT); +TX2_EVENT_ATTR(data_pktsent, CCPI2_EVENT_DATA_PKT_SENT); +TX2_EVENT_ATTR(gic_pktsent, CCPI2_EVENT_GIC_PKT_SENT); + +static struct attribute *ccpi2_pmu_events_attrs[] = { + &tx2_pmu_event_attr_req_pktsent.attr.attr, + &tx2_pmu_event_attr_snoop_pktsent.attr.attr, + &tx2_pmu_event_attr_data_pktsent.attr.attr, + &tx2_pmu_event_attr_gic_pktsent.attr.attr, + NULL, +}; + +static const struct attribute_group l3c_pmu_events_attr_group = { + .name = "events", + .attrs = l3c_pmu_events_attrs, +}; + +static const struct attribute_group dmc_pmu_events_attr_group = { + .name = "events", + .attrs = dmc_pmu_events_attrs, +}; + +static const struct attribute_group ccpi2_pmu_events_attr_group = { + .name = "events", + .attrs = ccpi2_pmu_events_attrs, +}; + +/* + * sysfs cpumask attributes + */ +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tx2_uncore_pmu *tx2_pmu; + + tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev)); + return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *tx2_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group pmu_cpumask_attr_group = { + .attrs = tx2_pmu_cpumask_attrs, +}; + +/* + * Per PMU device attribute groups + */ +static const struct attribute_group *l3c_pmu_attr_groups[] = { + &l3c_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &l3c_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *dmc_pmu_attr_groups[] = { + &dmc_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &dmc_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *ccpi2_pmu_attr_groups[] = { + &ccpi2_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &ccpi2_pmu_events_attr_group, + NULL +}; + +static inline u32 reg_readl(unsigned long addr) +{ + return readl((void __iomem *)addr); +} + +static inline void reg_writel(u32 val, unsigned long addr) +{ + writel(val, (void __iomem *)addr); +} + +static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu) +{ + int counter; + + counter = find_first_zero_bit(tx2_pmu->active_counters, + tx2_pmu->max_counters); + if (counter == tx2_pmu->max_counters) + return -ENOSPC; + + set_bit(counter, tx2_pmu->active_counters); + return counter; +} + +static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter) +{ + clear_bit(counter, tx2_pmu->active_counters); +} + +static void init_cntr_base_l3c(struct perf_event *event, + struct tx2_uncore_pmu *tx2_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u32 cmask; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + cmask = tx2_pmu->counters_mask; + + /* counter ctrl/data reg offset at 8 */ + hwc->config_base = (unsigned long)tx2_pmu->base + + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event, cmask)); + hwc->event_base = (unsigned long)tx2_pmu->base + + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event, cmask)); +} + +static void init_cntr_base_dmc(struct perf_event *event, + struct tx2_uncore_pmu *tx2_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u32 cmask; + + tx2_pmu = 
pmu_to_tx2_pmu(event->pmu); + cmask = tx2_pmu->counters_mask; + + hwc->config_base = (unsigned long)tx2_pmu->base + + DMC_COUNTER_CTL; + /* counter data reg offset at 0xc */ + hwc->event_base = (unsigned long)tx2_pmu->base + + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event, cmask)); +} + +static void init_cntr_base_ccpi2(struct perf_event *event, + struct tx2_uncore_pmu *tx2_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u32 cmask; + + cmask = tx2_pmu->counters_mask; + + hwc->config_base = (unsigned long)tx2_pmu->base + + CCPI2_COUNTER_CTL + (4 * GET_COUNTERID(event, cmask)); + hwc->event_base = (unsigned long)tx2_pmu->base; +} + +static void uncore_start_event_l3c(struct perf_event *event, int flags) +{ + u32 val, emask; + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + emask = tx2_pmu->events_mask; + + /* event id encoded in bits [07:03] */ + val = GET_EVENTID(event, emask) << 3; + reg_writel(val, hwc->config_base); + local64_set(&hwc->prev_count, 0); + reg_writel(0, hwc->event_base); +} + +static inline void uncore_stop_event_l3c(struct perf_event *event) +{ + reg_writel(0, event->hw.config_base); +} + +static void uncore_start_event_dmc(struct perf_event *event, int flags) +{ + u32 val, cmask, emask; + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + int idx, event_id; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + cmask = tx2_pmu->counters_mask; + emask = tx2_pmu->events_mask; + + idx = GET_COUNTERID(event, cmask); + event_id = GET_EVENTID(event, emask); + + /* enable and start counters. + * 8 bits for each counter, bits[05:01] of a counter to set event type. + */ + val = reg_readl(hwc->config_base); + val &= ~DMC_EVENT_CFG(idx, 0x1f); + val |= DMC_EVENT_CFG(idx, event_id); + reg_writel(val, hwc->config_base); + local64_set(&hwc->prev_count, 0); + reg_writel(0, hwc->event_base); +} + +static void uncore_stop_event_dmc(struct perf_event *event) +{ + u32 val, cmask; + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + int idx; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + cmask = tx2_pmu->counters_mask; + idx = GET_COUNTERID(event, cmask); + + /* clear event type(bits[05:01]) to stop counter */ + val = reg_readl(hwc->config_base); + val &= ~DMC_EVENT_CFG(idx, 0x1f); + reg_writel(val, hwc->config_base); +} + +static void uncore_start_event_ccpi2(struct perf_event *event, int flags) +{ + u32 emask; + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + emask = tx2_pmu->events_mask; + + /* Bit [09:00] to set event id. + * Bits [10], set level to rising edge. + * Bits [11], set type to edge sensitive. 
+ */ + reg_writel((CCPI2_EVENT_TYPE_EDGE_SENSITIVE | + CCPI2_EVENT_LEVEL_RISING_EDGE | + GET_EVENTID(event, emask)), hwc->config_base); + + /* reset[4], enable[0] and start[1] counters */ + reg_writel(CCPI2_PERF_CTL_RESET | + CCPI2_PERF_CTL_START | + CCPI2_PERF_CTL_ENABLE, + hwc->event_base + CCPI2_PERF_CTL); + local64_set(&event->hw.prev_count, 0ULL); +} + +static void uncore_stop_event_ccpi2(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* disable and stop counter */ + reg_writel(0, hwc->event_base + CCPI2_PERF_CTL); +} + +static void tx2_uncore_event_update(struct perf_event *event) +{ + u64 prev, delta, new = 0; + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + enum tx2_uncore_type type; + u32 prorate_factor; + u32 cmask, emask; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + type = tx2_pmu->type; + cmask = tx2_pmu->counters_mask; + emask = tx2_pmu->events_mask; + prorate_factor = tx2_pmu->prorate_factor; + if (type == PMU_TYPE_CCPI2) { + reg_writel(CCPI2_COUNTER_OFFSET + + GET_COUNTERID(event, cmask), + hwc->event_base + CCPI2_COUNTER_SEL); + new = reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_H); + new = (new << 32) + + reg_readl(hwc->event_base + CCPI2_COUNTER_DATA_L); + prev = local64_xchg(&hwc->prev_count, new); + delta = new - prev; + } else { + new = reg_readl(hwc->event_base); + prev = local64_xchg(&hwc->prev_count, new); + /* handles rollover of 32 bit counter */ + delta = (u32)(((1UL << 32) - prev) + new); + } + + /* DMC event data_transfers granularity is 16 Bytes, convert it to 64 */ + if (type == PMU_TYPE_DMC && + GET_EVENTID(event, emask) == DMC_EVENT_DATA_TRANSFERS) + delta = delta/4; + + /* L3C and DMC has 16 and 8 interleave channels respectively. + * The sampled value is for channel 0 and multiplied with + * prorate_factor to get the count for a device. + */ + local64_add(delta * prorate_factor, &event->count); +} + +static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev) +{ + int i = 0; + struct acpi_tx2_pmu_device { + __u8 id[ACPI_ID_LEN]; + enum tx2_uncore_type type; + } devices[] = { + {"CAV901D", PMU_TYPE_L3C}, + {"CAV901F", PMU_TYPE_DMC}, + {"CAV901E", PMU_TYPE_CCPI2}, + {"", PMU_TYPE_INVALID} + }; + + while (devices[i].type != PMU_TYPE_INVALID) { + if (!strcmp(acpi_device_hid(adev), devices[i].id)) + break; + i++; + } + + return devices[i].type; +} + +static bool tx2_uncore_validate_event(struct pmu *pmu, + struct perf_event *event, int *counters) +{ + if (is_software_event(event)) + return true; + /* Reject groups spanning multiple HW PMUs. */ + if (event->pmu != pmu) + return false; + + *counters = *counters + 1; + return true; +} + +/* + * Make sure the group of events can be scheduled at once + * on the PMU. + */ +static bool tx2_uncore_validate_event_group(struct perf_event *event, + int max_counters) +{ + struct perf_event *sibling, *leader = event->group_leader; + int counters = 0; + + if (event->group_leader == event) + return true; + + if (!tx2_uncore_validate_event(event->pmu, leader, &counters)) + return false; + + for_each_sibling_event(sibling, leader) { + if (!tx2_uncore_validate_event(event->pmu, sibling, &counters)) + return false; + } + + if (!tx2_uncore_validate_event(event->pmu, event, &counters)) + return false; + + /* + * If the group requires more counters than the HW has, + * it cannot ever be scheduled. 
+ */ + return counters <= max_counters; +} + + +static int tx2_uncore_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + + /* Test the event attr type check for PMU enumeration */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * SOC PMU counters are shared across all cores. + * Therefore, it does not support per-process mode. + * Also, it does not support event sampling mode. + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + if (tx2_pmu->cpu >= nr_cpu_ids) + return -EINVAL; + event->cpu = tx2_pmu->cpu; + + if (event->attr.config >= tx2_pmu->max_events) + return -EINVAL; + + /* store event id */ + hwc->config = event->attr.config; + + /* Validate the group */ + if (!tx2_uncore_validate_event_group(event, tx2_pmu->max_counters)) + return -EINVAL; + + return 0; +} + +static void tx2_uncore_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + + hwc->state = 0; + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + + tx2_pmu->start_event(event, flags); + perf_event_update_userpage(event); + + /* No hrtimer needed for CCPI2, 64-bit counters */ + if (!tx2_pmu->hrtimer_callback) + return; + + /* Start timer for first event */ + if (bitmap_weight(tx2_pmu->active_counters, + tx2_pmu->max_counters) == 1) { + hrtimer_start(&tx2_pmu->hrtimer, + ns_to_ktime(tx2_pmu->hrtimer_interval), + HRTIMER_MODE_REL_PINNED); + } +} + +static void tx2_uncore_event_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + tx2_pmu->stop_event(event); + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + if (flags & PERF_EF_UPDATE) { + tx2_uncore_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } +} + +static int tx2_uncore_event_add(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct tx2_uncore_pmu *tx2_pmu; + + tx2_pmu = pmu_to_tx2_pmu(event->pmu); + + /* Allocate a free counter */ + hwc->idx = alloc_counter(tx2_pmu); + if (hwc->idx < 0) + return -EAGAIN; + + tx2_pmu->events[hwc->idx] = event; + /* set counter control and data registers base address */ + tx2_pmu->init_cntr_base(event, tx2_pmu); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (flags & PERF_EF_START) + tx2_uncore_event_start(event, flags); + + return 0; +} + +static void tx2_uncore_event_del(struct perf_event *event, int flags) +{ + struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u32 cmask; + + cmask = tx2_pmu->counters_mask; + tx2_uncore_event_stop(event, PERF_EF_UPDATE); + + /* clear the assigned counter */ + free_counter(tx2_pmu, GET_COUNTERID(event, cmask)); + + perf_event_update_userpage(event); + tx2_pmu->events[hwc->idx] = NULL; + hwc->idx = -1; + + if (!tx2_pmu->hrtimer_callback) + return; + + if (bitmap_empty(tx2_pmu->active_counters, tx2_pmu->max_counters)) + hrtimer_cancel(&tx2_pmu->hrtimer); +} + +static void tx2_uncore_event_read(struct perf_event *event) +{ + tx2_uncore_event_update(event); +} + +static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer) +{ + struct tx2_uncore_pmu *tx2_pmu; + int max_counters, idx; + + tx2_pmu = 
container_of(timer, struct tx2_uncore_pmu, hrtimer); + max_counters = tx2_pmu->max_counters; + + if (bitmap_empty(tx2_pmu->active_counters, max_counters)) + return HRTIMER_NORESTART; + + for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) { + struct perf_event *event = tx2_pmu->events[idx]; + + tx2_uncore_event_update(event); + } + hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval)); + return HRTIMER_RESTART; +} + +static int tx2_uncore_pmu_register( + struct tx2_uncore_pmu *tx2_pmu) +{ + struct device *dev = tx2_pmu->dev; + char *name = tx2_pmu->name; + + /* Perf event registration */ + tx2_pmu->pmu = (struct pmu) { + .module = THIS_MODULE, + .attr_groups = tx2_pmu->attr_groups, + .task_ctx_nr = perf_invalid_context, + .event_init = tx2_uncore_event_init, + .add = tx2_uncore_event_add, + .del = tx2_uncore_event_del, + .start = tx2_uncore_event_start, + .stop = tx2_uncore_event_stop, + .read = tx2_uncore_event_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL, + "%s", name); + + return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1); +} + +static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu) +{ + int ret, cpu; + + cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node), + cpu_online_mask); + + tx2_pmu->cpu = cpu; + + if (tx2_pmu->hrtimer_callback) { + hrtimer_init(&tx2_pmu->hrtimer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); + tx2_pmu->hrtimer.function = tx2_pmu->hrtimer_callback; + } + + ret = tx2_uncore_pmu_register(tx2_pmu); + if (ret) { + dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n", + tx2_pmu->name); + return -ENODEV; + } + + /* register hotplug callback for the pmu */ + ret = cpuhp_state_add_instance( + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, + &tx2_pmu->hpnode); + if (ret) { + dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret); + return ret; + } + + /* Add to list */ + list_add(&tx2_pmu->entry, &tx2_pmus); + + dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n", + tx2_pmu->pmu.name); + return ret; +} + +static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev, + acpi_handle handle, struct acpi_device *adev, u32 type) +{ + struct tx2_uncore_pmu *tx2_pmu; + void __iomem *base; + struct resource res; + struct resource_entry *rentry; + struct list_head list; + int ret; + + INIT_LIST_HEAD(&list); + ret = acpi_dev_get_resources(adev, &list, NULL, NULL); + if (ret <= 0) { + dev_err(dev, "failed to parse _CRS method, error %d\n", ret); + return NULL; + } + + list_for_each_entry(rentry, &list, node) { + if (resource_type(rentry->res) == IORESOURCE_MEM) { + res = *rentry->res; + rentry = NULL; + break; + } + } + acpi_dev_free_resource_list(&list); + + if (rentry) { + dev_err(dev, "PMU type %d: Fail to find resource\n", type); + return NULL; + } + + base = devm_ioremap_resource(dev, &res); + if (IS_ERR(base)) { + dev_err(dev, "PMU type %d: Fail to map resource\n", type); + return NULL; + } + + tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL); + if (!tx2_pmu) + return NULL; + + tx2_pmu->dev = dev; + tx2_pmu->type = type; + tx2_pmu->base = base; + tx2_pmu->node = dev_to_node(dev); + INIT_LIST_HEAD(&tx2_pmu->entry); + + switch (tx2_pmu->type) { + case PMU_TYPE_L3C: + tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS; + tx2_pmu->counters_mask = 0x3; + tx2_pmu->prorate_factor = TX2_PMU_L3_TILES; + tx2_pmu->max_events = L3_EVENT_MAX; + tx2_pmu->events_mask = 0x1f; + tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL; + tx2_pmu->hrtimer_callback = 
tx2_hrtimer_callback; + tx2_pmu->attr_groups = l3c_pmu_attr_groups; + tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL, + "uncore_l3c_%d", tx2_pmu->node); + tx2_pmu->init_cntr_base = init_cntr_base_l3c; + tx2_pmu->start_event = uncore_start_event_l3c; + tx2_pmu->stop_event = uncore_stop_event_l3c; + break; + case PMU_TYPE_DMC: + tx2_pmu->max_counters = TX2_PMU_DMC_L3C_MAX_COUNTERS; + tx2_pmu->counters_mask = 0x3; + tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS; + tx2_pmu->max_events = DMC_EVENT_MAX; + tx2_pmu->events_mask = 0x1f; + tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL; + tx2_pmu->hrtimer_callback = tx2_hrtimer_callback; + tx2_pmu->attr_groups = dmc_pmu_attr_groups; + tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL, + "uncore_dmc_%d", tx2_pmu->node); + tx2_pmu->init_cntr_base = init_cntr_base_dmc; + tx2_pmu->start_event = uncore_start_event_dmc; + tx2_pmu->stop_event = uncore_stop_event_dmc; + break; + case PMU_TYPE_CCPI2: + /* CCPI2 has 8 counters */ + tx2_pmu->max_counters = TX2_PMU_CCPI2_MAX_COUNTERS; + tx2_pmu->counters_mask = 0x7; + tx2_pmu->prorate_factor = 1; + tx2_pmu->max_events = CCPI2_EVENT_MAX; + tx2_pmu->events_mask = 0x1ff; + tx2_pmu->attr_groups = ccpi2_pmu_attr_groups; + tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL, + "uncore_ccpi2_%d", tx2_pmu->node); + tx2_pmu->init_cntr_base = init_cntr_base_ccpi2; + tx2_pmu->start_event = uncore_start_event_ccpi2; + tx2_pmu->stop_event = uncore_stop_event_ccpi2; + tx2_pmu->hrtimer_callback = NULL; + break; + case PMU_TYPE_INVALID: + devm_kfree(dev, tx2_pmu); + return NULL; + } + + return tx2_pmu; +} + +static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + struct tx2_uncore_pmu *tx2_pmu; + struct acpi_device *adev; + enum tx2_uncore_type type; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + if (acpi_bus_get_status(adev) || !adev->status.present) + return AE_OK; + + type = get_tx2_pmu_type(adev); + if (type == PMU_TYPE_INVALID) + return AE_OK; + + tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data, + handle, adev, type); + + if (!tx2_pmu) + return AE_ERROR; + + if (tx2_uncore_pmu_add_dev(tx2_pmu)) { + /* Can't add the PMU device, abort */ + return AE_ERROR; + } + return AE_OK; +} + +static int tx2_uncore_pmu_online_cpu(unsigned int cpu, + struct hlist_node *hpnode) +{ + struct tx2_uncore_pmu *tx2_pmu; + + tx2_pmu = hlist_entry_safe(hpnode, + struct tx2_uncore_pmu, hpnode); + + /* Pick this CPU, If there is no CPU/PMU association and both are + * from same node. 
+ */ + if ((tx2_pmu->cpu >= nr_cpu_ids) && + (tx2_pmu->node == cpu_to_node(cpu))) + tx2_pmu->cpu = cpu; + + return 0; +} + +static int tx2_uncore_pmu_offline_cpu(unsigned int cpu, + struct hlist_node *hpnode) +{ + int new_cpu; + struct tx2_uncore_pmu *tx2_pmu; + struct cpumask cpu_online_mask_temp; + + tx2_pmu = hlist_entry_safe(hpnode, + struct tx2_uncore_pmu, hpnode); + + if (cpu != tx2_pmu->cpu) + return 0; + + if (tx2_pmu->hrtimer_callback) + hrtimer_cancel(&tx2_pmu->hrtimer); + + cpumask_copy(&cpu_online_mask_temp, cpu_online_mask); + cpumask_clear_cpu(cpu, &cpu_online_mask_temp); + new_cpu = cpumask_any_and( + cpumask_of_node(tx2_pmu->node), + &cpu_online_mask_temp); + + tx2_pmu->cpu = new_cpu; + if (new_cpu >= nr_cpu_ids) + return 0; + perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu); + + return 0; +} + +static const struct acpi_device_id tx2_uncore_acpi_match[] = { + {"CAV901C", 0}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match); + +static int tx2_uncore_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + acpi_handle handle; + acpi_status status; + + set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev))); + + if (!has_acpi_companion(dev)) + return -ENODEV; + + handle = ACPI_HANDLE(dev); + if (!handle) + return -EINVAL; + + /* Walk through the tree for all PMU UNCORE devices */ + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + tx2_uncore_pmu_add, + NULL, dev, NULL); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to probe PMU devices\n"); + return_ACPI_STATUS(status); + } + + dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev)); + return 0; +} + +static int tx2_uncore_remove(struct platform_device *pdev) +{ + struct tx2_uncore_pmu *tx2_pmu, *temp; + struct device *dev = &pdev->dev; + + if (!list_empty(&tx2_pmus)) { + list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) { + if (tx2_pmu->node == dev_to_node(dev)) { + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, + &tx2_pmu->hpnode); + perf_pmu_unregister(&tx2_pmu->pmu); + list_del(&tx2_pmu->entry); + } + } + } + return 0; +} + +static struct platform_driver tx2_uncore_driver = { + .driver = { + .name = "tx2-uncore-pmu", + .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = tx2_uncore_probe, + .remove = tx2_uncore_remove, +}; + +static int __init tx2_uncore_driver_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, + "perf/tx2/uncore:online", + tx2_uncore_pmu_online_cpu, + tx2_uncore_pmu_offline_cpu); + if (ret) { + pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret); + return ret; + } + ret = platform_driver_register(&tx2_uncore_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE); + + return ret; +} +module_init(tx2_uncore_driver_init); + +static void __exit tx2_uncore_driver_exit(void) +{ + platform_driver_unregister(&tx2_uncore_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE); +} +module_exit(tx2_uncore_driver_exit); + +MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>"); diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c new file mode 100644 index 000000000..633cf07ba --- /dev/null +++ b/drivers/perf/xgene_pmu.c @@ -0,0 +1,1984 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * APM X-Gene SoC PMU (Performance Monitor Unit) + * + * Copyright (c) 2016, 
Applied Micro Circuits Corporation + * Author: Hoan Tran <hotran@apm.com> + * Tai Nguyen <ttnguyen@apm.com> + */ + +#include <linux/acpi.h> +#include <linux/clk.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define CSW_CSWCR 0x0000 +#define CSW_CSWCR_DUALMCB_MASK BIT(0) +#define CSW_CSWCR_MCB0_ROUTING(x) (((x) & 0x0C) >> 2) +#define CSW_CSWCR_MCB1_ROUTING(x) (((x) & 0x30) >> 4) +#define MCBADDRMR 0x0000 +#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2) + +#define PCPPMU_INTSTATUS_REG 0x000 +#define PCPPMU_INTMASK_REG 0x004 +#define PCPPMU_INTMASK 0x0000000F +#define PCPPMU_INTENMASK 0xFFFFFFFF +#define PCPPMU_INTCLRMASK 0xFFFFFFF0 +#define PCPPMU_INT_MCU BIT(0) +#define PCPPMU_INT_MCB BIT(1) +#define PCPPMU_INT_L3C BIT(2) +#define PCPPMU_INT_IOB BIT(3) + +#define PCPPMU_V3_INTMASK 0x00FF33FF +#define PCPPMU_V3_INTENMASK 0xFFFFFFFF +#define PCPPMU_V3_INTCLRMASK 0xFF00CC00 +#define PCPPMU_V3_INT_MCU 0x000000FF +#define PCPPMU_V3_INT_MCB 0x00000300 +#define PCPPMU_V3_INT_L3C 0x00FF0000 +#define PCPPMU_V3_INT_IOB 0x00003000 + +#define PMU_MAX_COUNTERS 4 +#define PMU_CNT_MAX_PERIOD 0xFFFFFFFFULL +#define PMU_V3_CNT_MAX_PERIOD 0xFFFFFFFFFFFFFFFFULL +#define PMU_OVERFLOW_MASK 0xF +#define PMU_PMCR_E BIT(0) +#define PMU_PMCR_P BIT(1) + +#define PMU_PMEVCNTR0 0x000 +#define PMU_PMEVCNTR1 0x004 +#define PMU_PMEVCNTR2 0x008 +#define PMU_PMEVCNTR3 0x00C +#define PMU_PMEVTYPER0 0x400 +#define PMU_PMEVTYPER1 0x404 +#define PMU_PMEVTYPER2 0x408 +#define PMU_PMEVTYPER3 0x40C +#define PMU_PMAMR0 0xA00 +#define PMU_PMAMR1 0xA04 +#define PMU_PMCNTENSET 0xC00 +#define PMU_PMCNTENCLR 0xC20 +#define PMU_PMINTENSET 0xC40 +#define PMU_PMINTENCLR 0xC60 +#define PMU_PMOVSR 0xC80 +#define PMU_PMCR 0xE04 + +/* PMU registers for V3 */ +#define PMU_PMOVSCLR 0xC80 +#define PMU_PMOVSSET 0xCC0 + +#define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu) +#define GET_CNTR(ev) (ev->hw.idx) +#define GET_EVENTID(ev) (ev->hw.config & 0xFFULL) +#define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL) +#define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL) + +struct hw_pmu_info { + u32 type; + u32 enable_mask; + void __iomem *csr; +}; + +struct xgene_pmu_dev { + struct hw_pmu_info *inf; + struct xgene_pmu *parent; + struct pmu pmu; + u8 max_counters; + DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS); + u64 max_period; + const struct attribute_group **attr_groups; + struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS]; +}; + +struct xgene_pmu_ops { + void (*mask_int)(struct xgene_pmu *pmu); + void (*unmask_int)(struct xgene_pmu *pmu); + u64 (*read_counter)(struct xgene_pmu_dev *pmu, int idx); + void (*write_counter)(struct xgene_pmu_dev *pmu, int idx, u64 val); + void (*write_evttype)(struct xgene_pmu_dev *pmu_dev, int idx, u32 val); + void (*write_agentmsk)(struct xgene_pmu_dev *pmu_dev, u32 val); + void (*write_agent1msk)(struct xgene_pmu_dev *pmu_dev, u32 val); + void (*enable_counter)(struct xgene_pmu_dev *pmu_dev, int idx); + void (*disable_counter)(struct xgene_pmu_dev *pmu_dev, int idx); + void (*enable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx); + void (*disable_counter_int)(struct xgene_pmu_dev *pmu_dev, int idx); + void 
(*reset_counters)(struct xgene_pmu_dev *pmu_dev); + void (*start_counters)(struct xgene_pmu_dev *pmu_dev); + void (*stop_counters)(struct xgene_pmu_dev *pmu_dev); +}; + +struct xgene_pmu { + struct device *dev; + struct hlist_node node; + int version; + void __iomem *pcppmu_csr; + u32 mcb_active_mask; + u32 mc_active_mask; + u32 l3c_active_mask; + cpumask_t cpu; + int irq; + raw_spinlock_t lock; + const struct xgene_pmu_ops *ops; + struct list_head l3cpmus; + struct list_head iobpmus; + struct list_head mcbpmus; + struct list_head mcpmus; +}; + +struct xgene_pmu_dev_ctx { + char *name; + struct list_head next; + struct xgene_pmu_dev *pmu_dev; + struct hw_pmu_info inf; +}; + +struct xgene_pmu_data { + int id; + u32 data; +}; + +enum xgene_pmu_version { + PCP_PMU_V1 = 1, + PCP_PMU_V2, + PCP_PMU_V3, +}; + +enum xgene_pmu_dev_type { + PMU_TYPE_L3C = 0, + PMU_TYPE_IOB, + PMU_TYPE_IOB_SLOW, + PMU_TYPE_MCB, + PMU_TYPE_MC, +}; + +/* + * sysfs format attributes + */ +static ssize_t xgene_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "%s\n", (char *) eattr->var); +} + +#define XGENE_PMU_FORMAT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \ + .var = (void *) _config, } \ + })[0].attr.attr) + +static struct attribute *l3c_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"), + XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"), + NULL, +}; + +static struct attribute *iob_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"), + XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"), + NULL, +}; + +static struct attribute *mcb_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"), + XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"), + NULL, +}; + +static struct attribute *mc_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"), + NULL, +}; + +static const struct attribute_group l3c_pmu_format_attr_group = { + .name = "format", + .attrs = l3c_pmu_format_attrs, +}; + +static const struct attribute_group iob_pmu_format_attr_group = { + .name = "format", + .attrs = iob_pmu_format_attrs, +}; + +static const struct attribute_group mcb_pmu_format_attr_group = { + .name = "format", + .attrs = mcb_pmu_format_attrs, +}; + +static const struct attribute_group mc_pmu_format_attr_group = { + .name = "format", + .attrs = mc_pmu_format_attrs, +}; + +static struct attribute *l3c_pmu_v3_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-39"), + NULL, +}; + +static struct attribute *iob_pmu_v3_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-47"), + NULL, +}; + +static struct attribute *iob_slow_pmu_v3_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(iob_slow_eventid, "config:0-16"), + NULL, +}; + +static struct attribute *mcb_pmu_v3_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-35"), + NULL, +}; + +static struct attribute *mc_pmu_v3_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-44"), + NULL, +}; + +static const struct attribute_group l3c_pmu_v3_format_attr_group = { + .name = "format", + .attrs = l3c_pmu_v3_format_attrs, +}; + +static const struct attribute_group iob_pmu_v3_format_attr_group = { + .name = "format", + .attrs = iob_pmu_v3_format_attrs, +}; + +static const struct attribute_group iob_slow_pmu_v3_format_attr_group = { + .name = 
"format", + .attrs = iob_slow_pmu_v3_format_attrs, +}; + +static const struct attribute_group mcb_pmu_v3_format_attr_group = { + .name = "format", + .attrs = mcb_pmu_v3_format_attrs, +}; + +static const struct attribute_group mc_pmu_v3_format_attr_group = { + .name = "format", + .attrs = mc_pmu_v3_format_attrs, +}; + +/* + * sysfs event attributes + */ +static ssize_t xgene_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var); +} + +#define XGENE_PMU_EVENT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \ + .var = (void *) _config, } \ + })[0].attr.attr) + +static struct attribute *l3c_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(read-hit, 0x02), + XGENE_PMU_EVENT_ATTR(read-miss, 0x03), + XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06), + XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07), + XGENE_PMU_EVENT_ATTR(tq-full, 0x08), + XGENE_PMU_EVENT_ATTR(ackq-full, 0x09), + XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a), + XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b), + XGENE_PMU_EVENT_ATTR(odb-full, 0x0c), + XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d), + XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e), + XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f), + NULL, +}; + +static struct attribute *iob_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(axi0-read, 0x02), + XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03), + XGENE_PMU_EVENT_ATTR(axi1-read, 0x04), + XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05), + XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06), + XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07), + XGENE_PMU_EVENT_ATTR(axi0-write, 0x10), + XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11), + XGENE_PMU_EVENT_ATTR(axi1-write, 0x13), + XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14), + XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16), + NULL, +}; + +static struct attribute *mcb_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(csw-read, 0x02), + XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03), + XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04), + XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05), + NULL, +}; + +static struct attribute *mc_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02), + XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03), + XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04), + XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05), + XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06), + XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07), + XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08), + XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09), + XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a), + XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b), + XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c), + XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d), + XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e), + XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f), + XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10), + XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11), + XGENE_PMU_EVENT_ATTR(mcu-request, 0x12), + XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13), + 
XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14), + XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17), + XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a), + XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b), + XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c), + NULL, +}; + +static const struct attribute_group l3c_pmu_events_attr_group = { + .name = "events", + .attrs = l3c_pmu_events_attrs, +}; + +static const struct attribute_group iob_pmu_events_attr_group = { + .name = "events", + .attrs = iob_pmu_events_attrs, +}; + +static const struct attribute_group mcb_pmu_events_attr_group = { + .name = "events", + .attrs = mcb_pmu_events_attrs, +}; + +static const struct attribute_group mc_pmu_events_attr_group = { + .name = "events", + .attrs = mc_pmu_events_attrs, +}; + +static struct attribute *l3c_pmu_v3_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(read-hit, 0x01), + XGENE_PMU_EVENT_ATTR(read-miss, 0x02), + XGENE_PMU_EVENT_ATTR(index-flush-eviction, 0x03), + XGENE_PMU_EVENT_ATTR(write-caused-replacement, 0x04), + XGENE_PMU_EVENT_ATTR(write-not-caused-replacement, 0x05), + XGENE_PMU_EVENT_ATTR(clean-eviction, 0x06), + XGENE_PMU_EVENT_ATTR(dirty-eviction, 0x07), + XGENE_PMU_EVENT_ATTR(read, 0x08), + XGENE_PMU_EVENT_ATTR(write, 0x09), + XGENE_PMU_EVENT_ATTR(request, 0x0a), + XGENE_PMU_EVENT_ATTR(tq-bank-conflict-issue-stall, 0x0b), + XGENE_PMU_EVENT_ATTR(tq-full, 0x0c), + XGENE_PMU_EVENT_ATTR(ackq-full, 0x0d), + XGENE_PMU_EVENT_ATTR(wdb-full, 0x0e), + XGENE_PMU_EVENT_ATTR(odb-full, 0x10), + XGENE_PMU_EVENT_ATTR(wbq-full, 0x11), + XGENE_PMU_EVENT_ATTR(input-req-async-fifo-stall, 0x12), + XGENE_PMU_EVENT_ATTR(output-req-async-fifo-stall, 0x13), + XGENE_PMU_EVENT_ATTR(output-data-async-fifo-stall, 0x14), + XGENE_PMU_EVENT_ATTR(total-insertion, 0x15), + XGENE_PMU_EVENT_ATTR(sip-insertions-r-set, 0x16), + XGENE_PMU_EVENT_ATTR(sip-insertions-r-clear, 0x17), + XGENE_PMU_EVENT_ATTR(dip-insertions-r-set, 0x18), + XGENE_PMU_EVENT_ATTR(dip-insertions-r-clear, 0x19), + XGENE_PMU_EVENT_ATTR(dip-insertions-force-r-set, 0x1a), + XGENE_PMU_EVENT_ATTR(egression, 0x1b), + XGENE_PMU_EVENT_ATTR(replacement, 0x1c), + XGENE_PMU_EVENT_ATTR(old-replacement, 0x1d), + XGENE_PMU_EVENT_ATTR(young-replacement, 0x1e), + XGENE_PMU_EVENT_ATTR(r-set-replacement, 0x1f), + XGENE_PMU_EVENT_ATTR(r-clear-replacement, 0x20), + XGENE_PMU_EVENT_ATTR(old-r-replacement, 0x21), + XGENE_PMU_EVENT_ATTR(old-nr-replacement, 0x22), + XGENE_PMU_EVENT_ATTR(young-r-replacement, 0x23), + XGENE_PMU_EVENT_ATTR(young-nr-replacement, 0x24), + XGENE_PMU_EVENT_ATTR(bloomfilter-clearing, 0x25), + XGENE_PMU_EVENT_ATTR(generation-flip, 0x26), + XGENE_PMU_EVENT_ATTR(vcc-droop-detected, 0x27), + NULL, +}; + +static struct attribute *iob_fast_pmu_v3_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-all, 0x01), + XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-rd, 0x02), + XGENE_PMU_EVENT_ATTR(pa-req-buf-alloc-wr, 0x03), + XGENE_PMU_EVENT_ATTR(pa-all-cp-req, 0x04), + XGENE_PMU_EVENT_ATTR(pa-cp-blk-req, 0x05), + XGENE_PMU_EVENT_ATTR(pa-cp-ptl-req, 0x06), + XGENE_PMU_EVENT_ATTR(pa-cp-rd-req, 0x07), + XGENE_PMU_EVENT_ATTR(pa-cp-wr-req, 0x08), + XGENE_PMU_EVENT_ATTR(ba-all-req, 0x09), + XGENE_PMU_EVENT_ATTR(ba-rd-req, 0x0a), + XGENE_PMU_EVENT_ATTR(ba-wr-req, 0x0b), + 
XGENE_PMU_EVENT_ATTR(pa-rd-shared-req-issued, 0x10), + XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-req-issued, 0x11), + XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-stashable, 0x12), + XGENE_PMU_EVENT_ATTR(pa-wr-invalidate-req-issued-nonstashable, 0x13), + XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-stashable, 0x14), + XGENE_PMU_EVENT_ATTR(pa-wr-back-req-issued-nonstashable, 0x15), + XGENE_PMU_EVENT_ATTR(pa-ptl-wr-req, 0x16), + XGENE_PMU_EVENT_ATTR(pa-ptl-rd-req, 0x17), + XGENE_PMU_EVENT_ATTR(pa-wr-back-clean-data, 0x18), + XGENE_PMU_EVENT_ATTR(pa-wr-back-cancelled-on-SS, 0x1b), + XGENE_PMU_EVENT_ATTR(pa-barrier-occurrence, 0x1c), + XGENE_PMU_EVENT_ATTR(pa-barrier-cycles, 0x1d), + XGENE_PMU_EVENT_ATTR(pa-total-cp-snoops, 0x20), + XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop, 0x21), + XGENE_PMU_EVENT_ATTR(pa-rd-shared-snoop-hit, 0x22), + XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop, 0x23), + XGENE_PMU_EVENT_ATTR(pa-rd-exclusive-snoop-hit, 0x24), + XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop, 0x25), + XGENE_PMU_EVENT_ATTR(pa-rd-wr-invalid-snoop-hit, 0x26), + XGENE_PMU_EVENT_ATTR(pa-req-buffer-full, 0x28), + XGENE_PMU_EVENT_ATTR(cswlf-outbound-req-fifo-full, 0x29), + XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a), + XGENE_PMU_EVENT_ATTR(cswlf-outbound-lack-fifo-full, 0x2b), + XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c), + XGENE_PMU_EVENT_ATTR(cswlf-outbound-data-fifo-full, 0x2d), + XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e), + XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure, 0x2f), + NULL, +}; + +static struct attribute *iob_slow_pmu_v3_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(pa-axi0-rd-req, 0x01), + XGENE_PMU_EVENT_ATTR(pa-axi0-wr-req, 0x02), + XGENE_PMU_EVENT_ATTR(pa-axi1-rd-req, 0x03), + XGENE_PMU_EVENT_ATTR(pa-axi1-wr-req, 0x04), + XGENE_PMU_EVENT_ATTR(ba-all-axi-req, 0x07), + XGENE_PMU_EVENT_ATTR(ba-axi-rd-req, 0x08), + XGENE_PMU_EVENT_ATTR(ba-axi-wr-req, 0x09), + XGENE_PMU_EVENT_ATTR(ba-free-list-empty, 0x10), + NULL, +}; + +static struct attribute *mcb_pmu_v3_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(req-receive, 0x01), + XGENE_PMU_EVENT_ATTR(rd-req-recv, 0x02), + XGENE_PMU_EVENT_ATTR(rd-req-recv-2, 0x03), + XGENE_PMU_EVENT_ATTR(wr-req-recv, 0x04), + XGENE_PMU_EVENT_ATTR(wr-req-recv-2, 0x05), + XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu, 0x06), + XGENE_PMU_EVENT_ATTR(rd-req-sent-to-mcu-2, 0x07), + XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu, 0x08), + XGENE_PMU_EVENT_ATTR(rd-req-sent-to-spec-mcu-2, 0x09), + XGENE_PMU_EVENT_ATTR(glbl-ack-recv-for-rd-sent-to-spec-mcu, 0x0a), + XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-for-rd-sent-to-spec-mcu, 0x0b), + XGENE_PMU_EVENT_ATTR(glbl-ack-nogo-recv-for-rd-sent-to-spec-mcu, 0x0c), + XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req, 0x0d), + XGENE_PMU_EVENT_ATTR(glbl-ack-go-recv-any-rd-req-2, 0x0e), + XGENE_PMU_EVENT_ATTR(wr-req-sent-to-mcu, 0x0f), + XGENE_PMU_EVENT_ATTR(gack-recv, 0x10), + XGENE_PMU_EVENT_ATTR(rd-gack-recv, 0x11), + XGENE_PMU_EVENT_ATTR(wr-gack-recv, 0x12), + XGENE_PMU_EVENT_ATTR(cancel-rd-gack, 0x13), + XGENE_PMU_EVENT_ATTR(cancel-wr-gack, 0x14), + XGENE_PMU_EVENT_ATTR(mcb-csw-req-stall, 0x15), + XGENE_PMU_EVENT_ATTR(mcu-req-intf-blocked, 0x16), + XGENE_PMU_EVENT_ATTR(mcb-mcu-rd-intf-stall, 0x17), + XGENE_PMU_EVENT_ATTR(csw-rd-intf-blocked, 0x18), + XGENE_PMU_EVENT_ATTR(csw-local-ack-intf-blocked, 0x19), + XGENE_PMU_EVENT_ATTR(mcu-req-table-full, 0x1a), + 
XGENE_PMU_EVENT_ATTR(mcu-stat-table-full, 0x1b), + XGENE_PMU_EVENT_ATTR(mcu-wr-table-full, 0x1c), + XGENE_PMU_EVENT_ATTR(mcu-rdreceipt-resp, 0x1d), + XGENE_PMU_EVENT_ATTR(mcu-wrcomplete-resp, 0x1e), + XGENE_PMU_EVENT_ATTR(mcu-retryack-resp, 0x1f), + XGENE_PMU_EVENT_ATTR(mcu-pcrdgrant-resp, 0x20), + XGENE_PMU_EVENT_ATTR(mcu-req-from-lastload, 0x21), + XGENE_PMU_EVENT_ATTR(mcu-req-from-bypass, 0x22), + XGENE_PMU_EVENT_ATTR(volt-droop-detect, 0x23), + NULL, +}; + +static struct attribute *mc_pmu_v3_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(act-sent, 0x01), + XGENE_PMU_EVENT_ATTR(pre-sent, 0x02), + XGENE_PMU_EVENT_ATTR(rd-sent, 0x03), + XGENE_PMU_EVENT_ATTR(rda-sent, 0x04), + XGENE_PMU_EVENT_ATTR(wr-sent, 0x05), + XGENE_PMU_EVENT_ATTR(wra-sent, 0x06), + XGENE_PMU_EVENT_ATTR(pd-entry-vld, 0x07), + XGENE_PMU_EVENT_ATTR(sref-entry-vld, 0x08), + XGENE_PMU_EVENT_ATTR(prea-sent, 0x09), + XGENE_PMU_EVENT_ATTR(ref-sent, 0x0a), + XGENE_PMU_EVENT_ATTR(rd-rda-sent, 0x0b), + XGENE_PMU_EVENT_ATTR(wr-wra-sent, 0x0c), + XGENE_PMU_EVENT_ATTR(raw-hazard, 0x0d), + XGENE_PMU_EVENT_ATTR(war-hazard, 0x0e), + XGENE_PMU_EVENT_ATTR(waw-hazard, 0x0f), + XGENE_PMU_EVENT_ATTR(rar-hazard, 0x10), + XGENE_PMU_EVENT_ATTR(raw-war-waw-hazard, 0x11), + XGENE_PMU_EVENT_ATTR(hprd-lprd-wr-req-vld, 0x12), + XGENE_PMU_EVENT_ATTR(lprd-req-vld, 0x13), + XGENE_PMU_EVENT_ATTR(hprd-req-vld, 0x14), + XGENE_PMU_EVENT_ATTR(hprd-lprd-req-vld, 0x15), + XGENE_PMU_EVENT_ATTR(wr-req-vld, 0x16), + XGENE_PMU_EVENT_ATTR(partial-wr-req-vld, 0x17), + XGENE_PMU_EVENT_ATTR(rd-retry, 0x18), + XGENE_PMU_EVENT_ATTR(wr-retry, 0x19), + XGENE_PMU_EVENT_ATTR(retry-gnt, 0x1a), + XGENE_PMU_EVENT_ATTR(rank-change, 0x1b), + XGENE_PMU_EVENT_ATTR(dir-change, 0x1c), + XGENE_PMU_EVENT_ATTR(rank-dir-change, 0x1d), + XGENE_PMU_EVENT_ATTR(rank-active, 0x1e), + XGENE_PMU_EVENT_ATTR(rank-idle, 0x1f), + XGENE_PMU_EVENT_ATTR(rank-pd, 0x20), + XGENE_PMU_EVENT_ATTR(rank-sref, 0x21), + XGENE_PMU_EVENT_ATTR(queue-fill-gt-thresh, 0x22), + XGENE_PMU_EVENT_ATTR(queue-rds-gt-thresh, 0x23), + XGENE_PMU_EVENT_ATTR(queue-wrs-gt-thresh, 0x24), + XGENE_PMU_EVENT_ATTR(phy-updt-complt, 0x25), + XGENE_PMU_EVENT_ATTR(tz-fail, 0x26), + XGENE_PMU_EVENT_ATTR(dram-errc, 0x27), + XGENE_PMU_EVENT_ATTR(dram-errd, 0x28), + XGENE_PMU_EVENT_ATTR(rd-enq, 0x29), + XGENE_PMU_EVENT_ATTR(wr-enq, 0x2a), + XGENE_PMU_EVENT_ATTR(tmac-limit-reached, 0x2b), + XGENE_PMU_EVENT_ATTR(tmaw-tracker-full, 0x2c), + NULL, +}; + +static const struct attribute_group l3c_pmu_v3_events_attr_group = { + .name = "events", + .attrs = l3c_pmu_v3_events_attrs, +}; + +static const struct attribute_group iob_fast_pmu_v3_events_attr_group = { + .name = "events", + .attrs = iob_fast_pmu_v3_events_attrs, +}; + +static const struct attribute_group iob_slow_pmu_v3_events_attr_group = { + .name = "events", + .attrs = iob_slow_pmu_v3_events_attrs, +}; + +static const struct attribute_group mcb_pmu_v3_events_attr_group = { + .name = "events", + .attrs = mcb_pmu_v3_events_attrs, +}; + +static const struct attribute_group mc_pmu_v3_events_attr_group = { + .name = "events", + .attrs = mc_pmu_v3_events_attrs, +}; + +/* + * sysfs cpumask attributes + */ +static ssize_t xgene_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu); +} + +static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL); + +static struct attribute 
*xgene_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group pmu_cpumask_attr_group = { + .attrs = xgene_pmu_cpumask_attrs, +}; + +/* + * Per PMU device attribute groups of PMU v1 and v2 + */ +static const struct attribute_group *l3c_pmu_attr_groups[] = { + &l3c_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &l3c_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *iob_pmu_attr_groups[] = { + &iob_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &iob_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *mcb_pmu_attr_groups[] = { + &mcb_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &mcb_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *mc_pmu_attr_groups[] = { + &mc_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &mc_pmu_events_attr_group, + NULL +}; + +/* + * Per PMU device attribute groups of PMU v3 + */ +static const struct attribute_group *l3c_pmu_v3_attr_groups[] = { + &l3c_pmu_v3_format_attr_group, + &pmu_cpumask_attr_group, + &l3c_pmu_v3_events_attr_group, + NULL +}; + +static const struct attribute_group *iob_fast_pmu_v3_attr_groups[] = { + &iob_pmu_v3_format_attr_group, + &pmu_cpumask_attr_group, + &iob_fast_pmu_v3_events_attr_group, + NULL +}; + +static const struct attribute_group *iob_slow_pmu_v3_attr_groups[] = { + &iob_slow_pmu_v3_format_attr_group, + &pmu_cpumask_attr_group, + &iob_slow_pmu_v3_events_attr_group, + NULL +}; + +static const struct attribute_group *mcb_pmu_v3_attr_groups[] = { + &mcb_pmu_v3_format_attr_group, + &pmu_cpumask_attr_group, + &mcb_pmu_v3_events_attr_group, + NULL +}; + +static const struct attribute_group *mc_pmu_v3_attr_groups[] = { + &mc_pmu_v3_format_attr_group, + &pmu_cpumask_attr_group, + &mc_pmu_v3_events_attr_group, + NULL +}; + +static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev) +{ + int cntr; + + cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask, + pmu_dev->max_counters); + if (cntr == pmu_dev->max_counters) + return -ENOSPC; + set_bit(cntr, pmu_dev->cntr_assign_mask); + + return cntr; +} + +static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr) +{ + clear_bit(cntr, pmu_dev->cntr_assign_mask); +} + +static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu) +{ + writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); +} + +static inline void xgene_pmu_v3_mask_int(struct xgene_pmu *xgene_pmu) +{ + writel(PCPPMU_V3_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); +} + +static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu) +{ + writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); +} + +static inline void xgene_pmu_v3_unmask_int(struct xgene_pmu *xgene_pmu) +{ + writel(PCPPMU_V3_INTCLRMASK, + xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); +} + +static inline u64 xgene_pmu_read_counter32(struct xgene_pmu_dev *pmu_dev, + int idx) +{ + return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx)); +} + +static inline u64 xgene_pmu_read_counter64(struct xgene_pmu_dev *pmu_dev, + int idx) +{ + u32 lo, hi; + + /* + * v3 has 64-bit counter registers composed by 2 32-bit registers + * This can be a problem if the counter increases and carries + * out of bit [31] between 2 reads. The extra reads would help + * to prevent this issue. 
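 * Concretely: read the high word, then the low word, then re-read the
 * high word and retry until the two high-word reads agree, so a carry
 * out of the low word between the two accesses cannot be missed.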
+ */ + do { + hi = xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1); + lo = xgene_pmu_read_counter32(pmu_dev, 2 * idx); + } while (hi != xgene_pmu_read_counter32(pmu_dev, 2 * idx + 1)); + + return (((u64)hi << 32) | lo); +} + +static inline void +xgene_pmu_write_counter32(struct xgene_pmu_dev *pmu_dev, int idx, u64 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx)); +} + +static inline void +xgene_pmu_write_counter64(struct xgene_pmu_dev *pmu_dev, int idx, u64 val) +{ + u32 cnt_lo, cnt_hi; + + cnt_hi = upper_32_bits(val); + cnt_lo = lower_32_bits(val); + + /* v3 has 64-bit counter registers composed by 2 32-bit registers */ + xgene_pmu_write_counter32(pmu_dev, 2 * idx, cnt_lo); + xgene_pmu_write_counter32(pmu_dev, 2 * idx + 1, cnt_hi); +} + +static inline void +xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx)); +} + +static inline void +xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMAMR0); +} + +static inline void +xgene_pmu_v3_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) { } + +static inline void +xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMAMR1); +} + +static inline void +xgene_pmu_v3_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) { } + +static inline void +xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET); +} + +static inline void +xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR); +} + +static inline void +xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMINTENSET); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMINTENSET); +} + +static inline void +xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR); +} + +static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCR); + val |= PMU_PMCR_P; + writel(val, pmu_dev->inf->csr + PMU_PMCR); +} + +static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCR); + val |= PMU_PMCR_E; + writel(val, pmu_dev->inf->csr + PMU_PMCR); +} + +static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCR); + val &= ~PMU_PMCR_E; + writel(val, pmu_dev->inf->csr + PMU_PMCR); +} + +static void xgene_perf_pmu_enable(struct pmu *pmu) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + int enabled = bitmap_weight(pmu_dev->cntr_assign_mask, + pmu_dev->max_counters); + + if (!enabled) + return; + + xgene_pmu->ops->start_counters(pmu_dev); +} + +static void xgene_perf_pmu_disable(struct pmu *pmu) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + + xgene_pmu->ops->stop_counters(pmu_dev); +} + +static int xgene_perf_event_init(struct perf_event *event) +{ + struct 
xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct perf_event *sibling; + + /* Test the event attr type check for PMU enumeration */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * SOC PMU counters are shared across all cores. + * Therefore, it does not support per-process mode. + * Also, it does not support event sampling mode. + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, where each + * event could be theoretically assigned to a different CPU. To + * mitigate this, we enforce CPU assignment to one, selected + * processor (the one described in the "cpumask" attribute). + */ + event->cpu = cpumask_first(&pmu_dev->parent->cpu); + + hw->config = event->attr.config; + /* + * Each bit of the config1 field represents an agent from which the + * request of the event come. The event is counted only if it's caused + * by a request of an agent has the bit cleared. + * By default, the event is counted for all agents. + */ + hw->config_base = event->attr.config1; + + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable + */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; + } + + return 0; +} + +static void xgene_perf_enable_event(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + + xgene_pmu->ops->write_evttype(pmu_dev, GET_CNTR(event), + GET_EVENTID(event)); + xgene_pmu->ops->write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event))); + if (pmu_dev->inf->type == PMU_TYPE_IOB) + xgene_pmu->ops->write_agent1msk(pmu_dev, + ~((u32)GET_AGENT1ID(event))); + + xgene_pmu->ops->enable_counter(pmu_dev, GET_CNTR(event)); + xgene_pmu->ops->enable_counter_int(pmu_dev, GET_CNTR(event)); +} + +static void xgene_perf_disable_event(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + + xgene_pmu->ops->disable_counter(pmu_dev, GET_CNTR(event)); + xgene_pmu->ops->disable_counter_int(pmu_dev, GET_CNTR(event)); +} + +static void xgene_perf_event_set_period(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + struct hw_perf_event *hw = &event->hw; + /* + * For 32 bit counter, it has a period of 2^32. To account for the + * possibility of extreme interrupt latency we program for a period of + * half that. Hopefully, we can handle the interrupt before another 2^31 + * events occur and the counter overtakes its previous value. + * For 64 bit counter, we don't expect it overflow. 
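 * Hence prev_count and the hardware counter are preset to 2^31 below,
 * leaving 2^31 further increments before the next overflow interrupt.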
+ */ + u64 val = 1ULL << 31; + + local64_set(&hw->prev_count, val); + xgene_pmu->ops->write_counter(pmu_dev, hw->idx, val); +} + +static void xgene_perf_event_update(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + struct hw_perf_event *hw = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + +again: + prev_raw_count = local64_read(&hw->prev_count); + new_raw_count = xgene_pmu->ops->read_counter(pmu_dev, GET_CNTR(event)); + + if (local64_cmpxchg(&hw->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period; + + local64_add(delta, &event->count); +} + +static void xgene_perf_read(struct perf_event *event) +{ + xgene_perf_event_update(event); +} + +static void xgene_perf_start(struct perf_event *event, int flags) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + struct hw_perf_event *hw = &event->hw; + + if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE)); + hw->state = 0; + + xgene_perf_event_set_period(event); + + if (flags & PERF_EF_RELOAD) { + u64 prev_raw_count = local64_read(&hw->prev_count); + + xgene_pmu->ops->write_counter(pmu_dev, GET_CNTR(event), + prev_raw_count); + } + + xgene_perf_enable_event(event); + perf_event_update_userpage(event); +} + +static void xgene_perf_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hw = &event->hw; + + if (hw->state & PERF_HES_UPTODATE) + return; + + xgene_perf_disable_event(event); + WARN_ON_ONCE(hw->state & PERF_HES_STOPPED); + hw->state |= PERF_HES_STOPPED; + + if (hw->state & PERF_HES_UPTODATE) + return; + + xgene_perf_read(event); + hw->state |= PERF_HES_UPTODATE; +} + +static int xgene_perf_add(struct perf_event *event, int flags) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + + hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + /* Allocate an event counter */ + hw->idx = get_next_avail_cntr(pmu_dev); + if (hw->idx < 0) + return -EAGAIN; + + /* Update counter event pointer for Interrupt handler */ + pmu_dev->pmu_counter_event[hw->idx] = event; + + if (flags & PERF_EF_START) + xgene_perf_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void xgene_perf_del(struct perf_event *event, int flags) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + + xgene_perf_stop(event, PERF_EF_UPDATE); + + /* clear the assigned counter */ + clear_avail_cntr(pmu_dev, GET_CNTR(event)); + + perf_event_update_userpage(event); + pmu_dev->pmu_counter_event[hw->idx] = NULL; +} + +static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name) +{ + struct xgene_pmu *xgene_pmu; + + if (pmu_dev->parent->version == PCP_PMU_V3) + pmu_dev->max_period = PMU_V3_CNT_MAX_PERIOD; + else + pmu_dev->max_period = PMU_CNT_MAX_PERIOD; + /* First version PMU supports only single event counter */ + xgene_pmu = pmu_dev->parent; + if (xgene_pmu->version == PCP_PMU_V1) + pmu_dev->max_counters = 1; + else + pmu_dev->max_counters = PMU_MAX_COUNTERS; + + /* Perf driver registration */ + pmu_dev->pmu = (struct pmu) { + .attr_groups = pmu_dev->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = xgene_perf_pmu_enable, + .pmu_disable = xgene_perf_pmu_disable, + .event_init = xgene_perf_event_init, + .add = xgene_perf_add, + .del = 
xgene_perf_del, + .start = xgene_perf_start, + .stop = xgene_perf_stop, + .read = xgene_perf_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* Hardware counter init */ + xgene_pmu->ops->stop_counters(pmu_dev); + xgene_pmu->ops->reset_counters(pmu_dev); + + return perf_pmu_register(&pmu_dev->pmu, name, -1); +} + +static int +xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx) +{ + struct device *dev = xgene_pmu->dev; + struct xgene_pmu_dev *pmu; + + pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL); + if (!pmu) + return -ENOMEM; + pmu->parent = xgene_pmu; + pmu->inf = &ctx->inf; + ctx->pmu_dev = pmu; + + switch (pmu->inf->type) { + case PMU_TYPE_L3C: + if (!(xgene_pmu->l3c_active_mask & pmu->inf->enable_mask)) + return -ENODEV; + if (xgene_pmu->version == PCP_PMU_V3) + pmu->attr_groups = l3c_pmu_v3_attr_groups; + else + pmu->attr_groups = l3c_pmu_attr_groups; + break; + case PMU_TYPE_IOB: + if (xgene_pmu->version == PCP_PMU_V3) + pmu->attr_groups = iob_fast_pmu_v3_attr_groups; + else + pmu->attr_groups = iob_pmu_attr_groups; + break; + case PMU_TYPE_IOB_SLOW: + if (xgene_pmu->version == PCP_PMU_V3) + pmu->attr_groups = iob_slow_pmu_v3_attr_groups; + break; + case PMU_TYPE_MCB: + if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask)) + return -ENODEV; + if (xgene_pmu->version == PCP_PMU_V3) + pmu->attr_groups = mcb_pmu_v3_attr_groups; + else + pmu->attr_groups = mcb_pmu_attr_groups; + break; + case PMU_TYPE_MC: + if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask)) + return -ENODEV; + if (xgene_pmu->version == PCP_PMU_V3) + pmu->attr_groups = mc_pmu_v3_attr_groups; + else + pmu->attr_groups = mc_pmu_attr_groups; + break; + default: + return -EINVAL; + } + + if (xgene_init_perf(pmu, ctx->name)) { + dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name); + return -ENODEV; + } + + dev_info(dev, "%s PMU registered\n", ctx->name); + + return 0; +} + +static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev) +{ + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + void __iomem *csr = pmu_dev->inf->csr; + u32 pmovsr; + int idx; + + xgene_pmu->ops->stop_counters(pmu_dev); + + if (xgene_pmu->version == PCP_PMU_V3) + pmovsr = readl(csr + PMU_PMOVSSET) & PMU_OVERFLOW_MASK; + else + pmovsr = readl(csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK; + + if (!pmovsr) + goto out; + + /* Clear interrupt flag */ + if (xgene_pmu->version == PCP_PMU_V1) + writel(0x0, csr + PMU_PMOVSR); + else if (xgene_pmu->version == PCP_PMU_V2) + writel(pmovsr, csr + PMU_PMOVSR); + else + writel(pmovsr, csr + PMU_PMOVSCLR); + + for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) { + struct perf_event *event = pmu_dev->pmu_counter_event[idx]; + int overflowed = pmovsr & BIT(idx); + + /* Ignore if we don't have an event. 
*/ + if (!event || !overflowed) + continue; + xgene_perf_event_update(event); + xgene_perf_event_set_period(event); + } + +out: + xgene_pmu->ops->start_counters(pmu_dev); +} + +static irqreturn_t xgene_pmu_isr(int irq, void *dev_id) +{ + u32 intr_mcu, intr_mcb, intr_l3c, intr_iob; + struct xgene_pmu_dev_ctx *ctx; + struct xgene_pmu *xgene_pmu = dev_id; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&xgene_pmu->lock, flags); + + /* Get Interrupt PMU source */ + val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG); + if (xgene_pmu->version == PCP_PMU_V3) { + intr_mcu = PCPPMU_V3_INT_MCU; + intr_mcb = PCPPMU_V3_INT_MCB; + intr_l3c = PCPPMU_V3_INT_L3C; + intr_iob = PCPPMU_V3_INT_IOB; + } else { + intr_mcu = PCPPMU_INT_MCU; + intr_mcb = PCPPMU_INT_MCB; + intr_l3c = PCPPMU_INT_L3C; + intr_iob = PCPPMU_INT_IOB; + } + if (val & intr_mcu) { + list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + if (val & intr_mcb) { + list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + if (val & intr_l3c) { + list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + if (val & intr_iob) { + list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + + raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags); + + return IRQ_HANDLED; +} + +static int acpi_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + void __iomem *csw_csr, *mcba_csr, *mcbb_csr; + unsigned int reg; + + csw_csr = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(csw_csr)) { + dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n"); + return PTR_ERR(csw_csr); + } + + mcba_csr = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(mcba_csr)) { + dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n"); + return PTR_ERR(mcba_csr); + } + + mcbb_csr = devm_platform_ioremap_resource(pdev, 3); + if (IS_ERR(mcbb_csr)) { + dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n"); + return PTR_ERR(mcbb_csr); + } + + xgene_pmu->l3c_active_mask = 0x1; + + reg = readl(csw_csr + CSW_CSWCR); + if (reg & CSW_CSWCR_DUALMCB_MASK) { + /* Dual MCB active */ + xgene_pmu->mcb_active_mask = 0x3; + /* Probe all active MC(s) */ + reg = readl(mcbb_csr + CSW_CSWCR); + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5; + } else { + /* Single MCB active */ + xgene_pmu->mcb_active_mask = 0x1; + /* Probe all active MC(s) */ + reg = readl(mcba_csr + CSW_CSWCR); + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 
0x3 : 0x1; + } + + return 0; +} + +static int acpi_pmu_v3_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + void __iomem *csw_csr; + unsigned int reg; + u32 mcb0routing; + u32 mcb1routing; + + csw_csr = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(csw_csr)) { + dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n"); + return PTR_ERR(csw_csr); + } + + reg = readl(csw_csr + CSW_CSWCR); + mcb0routing = CSW_CSWCR_MCB0_ROUTING(reg); + mcb1routing = CSW_CSWCR_MCB1_ROUTING(reg); + if (reg & CSW_CSWCR_DUALMCB_MASK) { + /* Dual MCB active */ + xgene_pmu->mcb_active_mask = 0x3; + /* Probe all active L3C(s), maximum is 8 */ + xgene_pmu->l3c_active_mask = 0xFF; + /* Probe all active MC(s), maximum is 8 */ + if ((mcb0routing == 0x2) && (mcb1routing == 0x2)) + xgene_pmu->mc_active_mask = 0xFF; + else if ((mcb0routing == 0x1) && (mcb1routing == 0x1)) + xgene_pmu->mc_active_mask = 0x33; + else + xgene_pmu->mc_active_mask = 0x11; + } else { + /* Single MCB active */ + xgene_pmu->mcb_active_mask = 0x1; + /* Probe all active L3C(s), maximum is 4 */ + xgene_pmu->l3c_active_mask = 0x0F; + /* Probe all active MC(s), maximum is 4 */ + if (mcb0routing == 0x2) + xgene_pmu->mc_active_mask = 0x0F; + else if (mcb0routing == 0x1) + xgene_pmu->mc_active_mask = 0x03; + else + xgene_pmu->mc_active_mask = 0x01; + } + + return 0; +} + +static int fdt_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + struct regmap *csw_map, *mcba_map, *mcbb_map; + struct device_node *np = pdev->dev.of_node; + unsigned int reg; + + csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw"); + if (IS_ERR(csw_map)) { + dev_err(&pdev->dev, "unable to get syscon regmap csw\n"); + return PTR_ERR(csw_map); + } + + mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba"); + if (IS_ERR(mcba_map)) { + dev_err(&pdev->dev, "unable to get syscon regmap mcba\n"); + return PTR_ERR(mcba_map); + } + + mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb"); + if (IS_ERR(mcbb_map)) { + dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n"); + return PTR_ERR(mcbb_map); + } + + xgene_pmu->l3c_active_mask = 0x1; + if (regmap_read(csw_map, CSW_CSWCR, &reg)) + return -EINVAL; + + if (reg & CSW_CSWCR_DUALMCB_MASK) { + /* Dual MCB active */ + xgene_pmu->mcb_active_mask = 0x3; + /* Probe all active MC(s) */ + if (regmap_read(mcbb_map, MCBADDRMR, &reg)) + return 0; + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5; + } else { + /* Single MCB active */ + xgene_pmu->mcb_active_mask = 0x1; + /* Probe all active MC(s) */ + if (regmap_read(mcba_map, MCBADDRMR, &reg)) + return 0; + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 
0x3 : 0x1; + } + + return 0; +} + +static int xgene_pmu_probe_active_mcb_mcu_l3c(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + if (has_acpi_companion(&pdev->dev)) { + if (xgene_pmu->version == PCP_PMU_V3) + return acpi_pmu_v3_probe_active_mcb_mcu_l3c(xgene_pmu, + pdev); + else + return acpi_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, + pdev); + } + return fdt_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev); +} + +static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) +{ + switch (type) { + case PMU_TYPE_L3C: + return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id); + case PMU_TYPE_IOB: + return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id); + case PMU_TYPE_IOB_SLOW: + return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id); + case PMU_TYPE_MCB: + return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id); + case PMU_TYPE_MC: + return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id); + default: + return devm_kasprintf(dev, GFP_KERNEL, "unknown"); + } +} + +#if defined(CONFIG_ACPI) +static struct +xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + struct acpi_device *adev, u32 type) +{ + struct device *dev = xgene_pmu->dev; + struct list_head resource_list; + struct xgene_pmu_dev_ctx *ctx; + const union acpi_object *obj; + struct hw_pmu_info *inf; + void __iomem *dev_csr; + struct resource res; + struct resource_entry *rentry; + int enable_bit; + int rc; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + INIT_LIST_HEAD(&resource_list); + rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); + if (rc <= 0) { + dev_err(dev, "PMU type %d: No resources found\n", type); + return NULL; + } + + list_for_each_entry(rentry, &resource_list, node) { + if (resource_type(rentry->res) == IORESOURCE_MEM) { + res = *rentry->res; + rentry = NULL; + break; + } + } + acpi_dev_free_resource_list(&resource_list); + + if (rentry) { + dev_err(dev, "PMU type %d: No memory resource found\n", type); + return NULL; + } + + dev_csr = devm_ioremap_resource(dev, &res); + if (IS_ERR(dev_csr)) { + dev_err(dev, "PMU type %d: Fail to map resource\n", type); + return NULL; + } + + /* A PMU device node without enable-bit-index is always enabled */ + rc = acpi_dev_get_property(adev, "enable-bit-index", + ACPI_TYPE_INTEGER, &obj); + if (rc < 0) + enable_bit = 0; + else + enable_bit = (int) obj->integer.value; + + ctx->name = xgene_pmu_dev_name(dev, type, enable_bit); + if (!ctx->name) { + dev_err(dev, "PMU type %d: Fail to get device name\n", type); + return NULL; + } + inf = &ctx->inf; + inf->type = type; + inf->csr = dev_csr; + inf->enable_mask = 1 << enable_bit; + + return ctx; +} + +static const struct acpi_device_id xgene_pmu_acpi_type_match[] = { + {"APMC0D5D", PMU_TYPE_L3C}, + {"APMC0D5E", PMU_TYPE_IOB}, + {"APMC0D5F", PMU_TYPE_MCB}, + {"APMC0D60", PMU_TYPE_MC}, + {"APMC0D84", PMU_TYPE_L3C}, + {"APMC0D85", PMU_TYPE_IOB}, + {"APMC0D86", PMU_TYPE_IOB_SLOW}, + {"APMC0D87", PMU_TYPE_MCB}, + {"APMC0D88", PMU_TYPE_MC}, + {}, +}; + +static const struct acpi_device_id *xgene_pmu_acpi_match_type( + const struct acpi_device_id *ids, + struct acpi_device *adev) +{ + const struct acpi_device_id *match_id = NULL; + const struct acpi_device_id *id; + + for (id = ids; id->id[0] || id->cls; id++) { + if (!acpi_match_device_ids(adev, id)) + match_id = id; + else if (match_id) + break; + } + + return match_id; +} + +static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + const struct acpi_device_id *acpi_id; + 
struct xgene_pmu *xgene_pmu = data; + struct xgene_pmu_dev_ctx *ctx; + struct acpi_device *adev; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + if (acpi_bus_get_status(adev) || !adev->status.present) + return AE_OK; + + acpi_id = xgene_pmu_acpi_match_type(xgene_pmu_acpi_type_match, adev); + if (!acpi_id) + return AE_OK; + + ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, (u32)acpi_id->driver_data); + if (!ctx) + return AE_OK; + + if (xgene_pmu_dev_add(xgene_pmu, ctx)) { + /* Can't add the PMU device, skip it */ + devm_kfree(xgene_pmu->dev, ctx); + return AE_OK; + } + + switch (ctx->inf.type) { + case PMU_TYPE_L3C: + list_add(&ctx->next, &xgene_pmu->l3cpmus); + break; + case PMU_TYPE_IOB: + list_add(&ctx->next, &xgene_pmu->iobpmus); + break; + case PMU_TYPE_IOB_SLOW: + list_add(&ctx->next, &xgene_pmu->iobpmus); + break; + case PMU_TYPE_MCB: + list_add(&ctx->next, &xgene_pmu->mcbpmus); + break; + case PMU_TYPE_MC: + list_add(&ctx->next, &xgene_pmu->mcpmus); + break; + } + return AE_OK; +} + +static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + struct device *dev = xgene_pmu->dev; + acpi_handle handle; + acpi_status status; + + handle = ACPI_HANDLE(dev); + if (!handle) + return -EINVAL; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_pmu_dev_add, NULL, xgene_pmu, NULL); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to probe PMU devices\n"); + return -ENODEV; + } + + return 0; +} +#else +static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + return 0; +} +#endif + +static struct +xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + struct device_node *np, u32 type) +{ + struct device *dev = xgene_pmu->dev; + struct xgene_pmu_dev_ctx *ctx; + struct hw_pmu_info *inf; + void __iomem *dev_csr; + struct resource res; + int enable_bit; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + if (of_address_to_resource(np, 0, &res) < 0) { + dev_err(dev, "PMU type %d: No resource address found\n", type); + return NULL; + } + + dev_csr = devm_ioremap_resource(dev, &res); + if (IS_ERR(dev_csr)) { + dev_err(dev, "PMU type %d: Fail to map resource\n", type); + return NULL; + } + + /* A PMU device node without enable-bit-index is always enabled */ + if (of_property_read_u32(np, "enable-bit-index", &enable_bit)) + enable_bit = 0; + + ctx->name = xgene_pmu_dev_name(dev, type, enable_bit); + if (!ctx->name) { + dev_err(dev, "PMU type %d: Fail to get device name\n", type); + return NULL; + } + + inf = &ctx->inf; + inf->type = type; + inf->csr = dev_csr; + inf->enable_mask = 1 << enable_bit; + + return ctx; +} + +static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + struct xgene_pmu_dev_ctx *ctx; + struct device_node *np; + + for_each_child_of_node(pdev->dev.of_node, np) { + if (!of_device_is_available(np)) + continue; + + if (of_device_is_compatible(np, "apm,xgene-pmu-l3c")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C); + else if (of_device_is_compatible(np, "apm,xgene-pmu-iob")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB); + else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB); + else if (of_device_is_compatible(np, "apm,xgene-pmu-mc")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC); + else + ctx = NULL; + + if (!ctx) + continue; + + if (xgene_pmu_dev_add(xgene_pmu, ctx)) { + /* Can't add the PMU 
device, skip it */ + devm_kfree(xgene_pmu->dev, ctx); + continue; + } + + switch (ctx->inf.type) { + case PMU_TYPE_L3C: + list_add(&ctx->next, &xgene_pmu->l3cpmus); + break; + case PMU_TYPE_IOB: + list_add(&ctx->next, &xgene_pmu->iobpmus); + break; + case PMU_TYPE_IOB_SLOW: + list_add(&ctx->next, &xgene_pmu->iobpmus); + break; + case PMU_TYPE_MCB: + list_add(&ctx->next, &xgene_pmu->mcbpmus); + break; + case PMU_TYPE_MC: + list_add(&ctx->next, &xgene_pmu->mcpmus); + break; + } + } + + return 0; +} + +static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + if (has_acpi_companion(&pdev->dev)) + return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev); + return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev); +} + +static const struct xgene_pmu_data xgene_pmu_data = { + .id = PCP_PMU_V1, +}; + +static const struct xgene_pmu_data xgene_pmu_v2_data = { + .id = PCP_PMU_V2, +}; + +static const struct xgene_pmu_ops xgene_pmu_ops = { + .mask_int = xgene_pmu_mask_int, + .unmask_int = xgene_pmu_unmask_int, + .read_counter = xgene_pmu_read_counter32, + .write_counter = xgene_pmu_write_counter32, + .write_evttype = xgene_pmu_write_evttype, + .write_agentmsk = xgene_pmu_write_agentmsk, + .write_agent1msk = xgene_pmu_write_agent1msk, + .enable_counter = xgene_pmu_enable_counter, + .disable_counter = xgene_pmu_disable_counter, + .enable_counter_int = xgene_pmu_enable_counter_int, + .disable_counter_int = xgene_pmu_disable_counter_int, + .reset_counters = xgene_pmu_reset_counters, + .start_counters = xgene_pmu_start_counters, + .stop_counters = xgene_pmu_stop_counters, +}; + +static const struct xgene_pmu_ops xgene_pmu_v3_ops = { + .mask_int = xgene_pmu_v3_mask_int, + .unmask_int = xgene_pmu_v3_unmask_int, + .read_counter = xgene_pmu_read_counter64, + .write_counter = xgene_pmu_write_counter64, + .write_evttype = xgene_pmu_write_evttype, + .write_agentmsk = xgene_pmu_v3_write_agentmsk, + .write_agent1msk = xgene_pmu_v3_write_agent1msk, + .enable_counter = xgene_pmu_enable_counter, + .disable_counter = xgene_pmu_disable_counter, + .enable_counter_int = xgene_pmu_enable_counter_int, + .disable_counter_int = xgene_pmu_disable_counter_int, + .reset_counters = xgene_pmu_reset_counters, + .start_counters = xgene_pmu_start_counters, + .stop_counters = xgene_pmu_stop_counters, +}; + +static const struct of_device_id xgene_pmu_of_match[] = { + { .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data }, + { .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_pmu_of_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_pmu_acpi_match[] = { + {"APMC0D5B", PCP_PMU_V1}, + {"APMC0D5C", PCP_PMU_V2}, + {"APMC0D83", PCP_PMU_V3}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match); +#endif + +static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu, + node); + + if (cpumask_empty(&xgene_pmu->cpu)) + cpumask_set_cpu(cpu, &xgene_pmu->cpu); + + /* Overflow interrupt also should use the same CPU */ + WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu)); + + return 0; +} + +static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu, + node); + struct xgene_pmu_dev_ctx *ctx; + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu)) + return 0; + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) 
+ return 0; + + list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) { + perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); + } + list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) { + perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); + } + list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) { + perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); + } + list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) { + perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target); + } + + cpumask_set_cpu(target, &xgene_pmu->cpu); + /* Overflow interrupt also should use the same CPU */ + WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu)); + + return 0; +} + +static int xgene_pmu_probe(struct platform_device *pdev) +{ + const struct xgene_pmu_data *dev_data; + const struct of_device_id *of_id; + struct xgene_pmu *xgene_pmu; + struct resource *res; + int irq, rc; + int version; + + /* Install a hook to update the reader CPU in case it goes offline */ + rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE", + xgene_pmu_online_cpu, + xgene_pmu_offline_cpu); + if (rc) + return rc; + + xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL); + if (!xgene_pmu) + return -ENOMEM; + xgene_pmu->dev = &pdev->dev; + platform_set_drvdata(pdev, xgene_pmu); + + version = -EINVAL; + of_id = of_match_device(xgene_pmu_of_match, &pdev->dev); + if (of_id) { + dev_data = (const struct xgene_pmu_data *) of_id->data; + version = dev_data->id; + } + +#ifdef CONFIG_ACPI + if (ACPI_COMPANION(&pdev->dev)) { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev); + if (acpi_id) + version = (int) acpi_id->driver_data; + } +#endif + if (version < 0) + return -ENODEV; + + if (version == PCP_PMU_V3) + xgene_pmu->ops = &xgene_pmu_v3_ops; + else + xgene_pmu->ops = &xgene_pmu_ops; + + INIT_LIST_HEAD(&xgene_pmu->l3cpmus); + INIT_LIST_HEAD(&xgene_pmu->iobpmus); + INIT_LIST_HEAD(&xgene_pmu->mcbpmus); + INIT_LIST_HEAD(&xgene_pmu->mcpmus); + + xgene_pmu->version = version; + dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xgene_pmu->pcppmu_csr)) { + dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n"); + return PTR_ERR(xgene_pmu->pcppmu_csr); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), xgene_pmu); + if (rc) { + dev_err(&pdev->dev, "Could not request IRQ %d\n", irq); + return rc; + } + + xgene_pmu->irq = irq; + + raw_spin_lock_init(&xgene_pmu->lock); + + /* Check for active MCBs and MCUs */ + rc = xgene_pmu_probe_active_mcb_mcu_l3c(xgene_pmu, pdev); + if (rc) { + dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n"); + xgene_pmu->mcb_active_mask = 0x1; + xgene_pmu->mc_active_mask = 0x1; + } + + /* Add this instance to the list used by the hotplug callback */ + rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + &xgene_pmu->node); + if (rc) { + dev_err(&pdev->dev, "Error %d registering hotplug", rc); + return rc; + } + + /* Walk through the tree for all PMU perf devices */ + rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev); + if (rc) { + dev_err(&pdev->dev, "No PMU perf devices found!\n"); + goto out_unregister; + } + + /* Enable interrupt */ + xgene_pmu->ops->unmask_int(xgene_pmu); 
+ + return 0; + +out_unregister: + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + &xgene_pmu->node); + return rc; +} + +static void +xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus) +{ + struct xgene_pmu_dev_ctx *ctx; + + list_for_each_entry(ctx, pmus, next) { + perf_pmu_unregister(&ctx->pmu_dev->pmu); + } +} + +static int xgene_pmu_remove(struct platform_device *pdev) +{ + struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev); + + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus); + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus); + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus); + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus); + cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + &xgene_pmu->node); + + return 0; +} + +static struct platform_driver xgene_pmu_driver = { + .probe = xgene_pmu_probe, + .remove = xgene_pmu_remove, + .driver = { + .name = "xgene-pmu", + .of_match_table = xgene_pmu_of_match, + .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + .suppress_bind_attrs = true, + }, +}; + +builtin_platform_driver(xgene_pmu_driver);
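
Editor's note: the event_init path above takes the raw event code in perf_event_attr.config and the agent mask in config1, and only accepts counting (non-sampling) events bound to the CPU the driver advertises. For readers who want to exercise one of these uncore PMUs from userspace, the following is a minimal sketch, not part of the driver: the PMU name "l3c0", the event code 0x04, and the assumption that the usual "type" and "cpumask" sysfs attributes exist under /sys/bus/event_source/devices/l3c0/ are all placeholders to be adapted to the target system.

/* Hypothetical userspace test: count one event on an X-Gene uncore PMU.
 * Build with: gcc -o xgene_count xgene_count.c
 * PMU name, event code and sysfs paths are assumptions; adjust as needed.
 */
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	/* There is no glibc wrapper for this syscall. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int read_sysfs_int(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%i", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, cpu, fd;

	/* Dynamic PMU type and reader CPU (assumed PMU name "l3c0"). */
	type = read_sysfs_int("/sys/bus/event_source/devices/l3c0/type");
	cpu = read_sysfs_int("/sys/bus/event_source/devices/l3c0/cpumask");
	if (type < 0 || cpu < 0) {
		fprintf(stderr, "l3c0 PMU not found\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x04;	/* assumed L3C event code */
	attr.config1 = 0x0;	/* agent mask: 0 counts requests from all agents */

	/* System-wide, per-CPU event: pid = -1, cpu = the PMU's reader CPU. */
	fd = perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("l3c0 event 0x04: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}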
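
The hi/lo/hi retry loop in xgene_pmu_read_counter32()/xgene_pmu_read_counter64() above is a general technique for reading a 64-bit counter that is only accessible as two 32-bit halves: keep re-reading the high word until it is stable, so a carry between the two reads cannot produce a torn value. Below is a standalone, illustration-only sketch of the same idea; counter_lo/counter_hi are stand-ins for the real PMEVCNTR register pair, not part of the driver.

/* Torn-read-safe 64-bit read from two 32-bit halves (illustrative only). */
#include <stdio.h>
#include <stdint.h>

static volatile uint32_t counter_lo, counter_hi;

static uint64_t read_split_counter64(void)
{
	uint32_t hi, lo;

	do {
		hi = counter_hi;
		lo = counter_lo;
		/* If the high word changed between its two reads, a carry
		 * from lo into hi may have raced with us; try again. */
	} while (hi != counter_hi);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* Simulate a counter value split across the two 32-bit halves. */
	counter_hi = 0x1;
	counter_lo = 0xFFFFFFF0;
	printf("counter = 0x%llx\n",
	       (unsigned long long)read_split_counter64());
	return 0;
}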
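
xgene_perf_event_update() relies on unsigned wraparound plus a mask of the counter width (pmu_dev->max_period) so that a counter that rolled over between two reads still yields the correct delta, and xgene_perf_event_set_period() starts 32-bit counters at 2^31 so the overflow interrupt fires with half the counter range of headroom. A small worked sketch of that delta arithmetic follows; it assumes a 32-bit counter mask mirroring PMU_CNT_MAX_PERIOD and is for illustration only.

/* Wraparound-safe delta between two raw counter reads, as in
 * xgene_perf_event_update(); CNT32_MASK is an assumed 32-bit counter width. */
#include <stdint.h>
#include <stdio.h>

#define CNT32_MASK 0xFFFFFFFFULL

static uint64_t counter_delta(uint64_t prev, uint64_t now)
{
	/* Unsigned subtraction wraps; the mask trims it to counter width. */
	return (now - prev) & CNT32_MASK;
}

int main(void)
{
	/* Counter programmed to 2^31, then overflowed and wrapped to 0x5:
	 * 0x5 - 0x80000000 wraps to 0xFFFFFFFF80000005, masked to 0x80000005,
	 * i.e. 2^31 + 5 events since the period was last set. */
	printf("delta = 0x%llx\n",
	       (unsigned long long)counter_delta(0x80000000ULL, 0x5ULL));
	return 0;
}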