author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit:    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree:      848558de17fb3008cdf4d861b01ac7781903ce39  /drivers/perf/hisilicon
parent:    Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/perf/hisilicon')
 drivers/perf/hisilicon/Kconfig                 |   26
 drivers/perf/hisilicon/Makefile                |    7
 drivers/perf/hisilicon/hisi_pcie_pmu.c         |  949
 drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c   |  409
 drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c  |  583
 drivers/perf/hisilicon/hisi_uncore_hha_pmu.c   |  585
 drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c   |  623
 drivers/perf/hisilicon/hisi_uncore_pa_pmu.c    |  479
 drivers/perf/hisilicon/hisi_uncore_pmu.c       |  552
 drivers/perf/hisilicon/hisi_uncore_pmu.h       |  126
 drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c  |  514
 drivers/perf/hisilicon/hns3_pmu.c              | 1671
12 files changed, 6524 insertions, 0 deletions
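
The Kconfig help text and driver comments in the patch below describe these PMUs being exposed through the perf events subsystem for monitoring latency and bandwidth. As a rough illustration of how such an uncore PMU is consumed once registered, here is a minimal userspace sketch (not part of the patch) that counts the rx_mrd_flux event (config = 0x0804) of a hisi_pcie PMU instance via perf_event_open(). The instance name hisi_pcie0_core0, the sysfs path, and the choice of CPU are assumptions; real tooling normally reads the PMU's cpumask attribute instead of hard-coding a CPU.

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc does not export perf_event_open() directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	FILE *f;
	int type, fd;

	/*
	 * Each registered PMU exports its dynamic type id in sysfs; the
	 * instance name "hisi_pcie0_core0" is an assumption and depends on
	 * the SICL/core ids the driver reads from HISI_PCIE_REG_INFO.
	 */
	f = fopen("/sys/bus/event_source/devices/hisi_pcie0_core0/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x0804;	/* rx_mrd_flux, see hisi_pcie_pmu_events_attr[] */

	/*
	 * Uncore PMU: count system-wide (pid = -1) on one CPU. Real tools
	 * use the PMU's "cpumask" attribute; cpu 0 is a simplification and
	 * the driver's event_init() redirects the event to its owning CPU.
	 */
	fd = perf_event_open(&attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("rx_mrd_flux: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}
```

Run as root on a system where the driver is bound; `perf stat -e hisi_pcie0_core0/rx_mrd_flux/` reaches the same counter through the sysfs event alias defined by the driver.
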
diff --git a/drivers/perf/hisilicon/Kconfig b/drivers/perf/hisilicon/Kconfig new file mode 100644 index 000000000..171bfc1b6 --- /dev/null +++ b/drivers/perf/hisilicon/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-only +config HISI_PMU + tristate "HiSilicon SoC PMU drivers" + depends on ARM64 && ACPI + help + Support for HiSilicon SoC L3 Cache performance monitor, Hydra Home + Agent performance monitor and DDR Controller performance monitor. + +config HISI_PCIE_PMU + tristate "HiSilicon PCIE PERF PMU" + depends on PCI && ARM64 + help + Provide support for HiSilicon PCIe performance monitoring unit (PMU) + RCiEP devices. + Adds the PCIe PMU into perf events system for monitoring latency, + bandwidth etc. + +config HNS3_PMU + tristate "HNS3 PERF PMU" + depends on ARM64 || COMPILE_TEST + depends on PCI + help + Provide support for HNS3 performance monitoring unit (PMU) RCiEP + devices. + Adds the HNS3 PMU into perf events system for monitoring latency, + bandwidth etc. diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile new file mode 100644 index 000000000..4d2c9abe3 --- /dev/null +++ b/drivers/perf/hisilicon/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \ + hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \ + hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o + +obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o +obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c new file mode 100644 index 000000000..c4c1cd269 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c @@ -0,0 +1,949 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This driver adds support for PCIe PMU RCiEP device. Related + * perf events are bandwidth, latency etc. 
+ * + * Copyright (C) 2021 HiSilicon Limited + * Author: Qi Liu <liuqi115@huawei.com> + */ +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/perf_event.h> + +#define DRV_NAME "hisi_pcie_pmu" +/* Define registers */ +#define HISI_PCIE_GLOBAL_CTRL 0x00 +#define HISI_PCIE_EVENT_CTRL 0x010 +#define HISI_PCIE_CNT 0x090 +#define HISI_PCIE_EXT_CNT 0x110 +#define HISI_PCIE_INT_STAT 0x150 +#define HISI_PCIE_INT_MASK 0x154 +#define HISI_PCIE_REG_BDF 0xfe0 +#define HISI_PCIE_REG_VERSION 0xfe4 +#define HISI_PCIE_REG_INFO 0xfe8 + +/* Define command in HISI_PCIE_GLOBAL_CTRL */ +#define HISI_PCIE_GLOBAL_EN 0x01 +#define HISI_PCIE_GLOBAL_NONE 0 + +/* Define command in HISI_PCIE_EVENT_CTRL */ +#define HISI_PCIE_EVENT_EN BIT_ULL(20) +#define HISI_PCIE_RESET_CNT BIT_ULL(22) +#define HISI_PCIE_INIT_SET BIT_ULL(34) +#define HISI_PCIE_THR_EN BIT_ULL(26) +#define HISI_PCIE_TARGET_EN BIT_ULL(32) +#define HISI_PCIE_TRIG_EN BIT_ULL(52) + +/* Define offsets in HISI_PCIE_EVENT_CTRL */ +#define HISI_PCIE_EVENT_M GENMASK_ULL(15, 0) +#define HISI_PCIE_THR_MODE_M GENMASK_ULL(27, 27) +#define HISI_PCIE_THR_M GENMASK_ULL(31, 28) +#define HISI_PCIE_TARGET_M GENMASK_ULL(52, 36) +#define HISI_PCIE_TRIG_MODE_M GENMASK_ULL(53, 53) +#define HISI_PCIE_TRIG_M GENMASK_ULL(59, 56) + +#define HISI_PCIE_MAX_COUNTERS 8 +#define HISI_PCIE_REG_STEP 8 +#define HISI_PCIE_THR_MAX_VAL 10 +#define HISI_PCIE_TRIG_MAX_VAL 10 +#define HISI_PCIE_MAX_PERIOD (GENMASK_ULL(63, 0)) +#define HISI_PCIE_INIT_VAL BIT_ULL(63) + +struct hisi_pcie_pmu { + struct perf_event *hw_events[HISI_PCIE_MAX_COUNTERS]; + struct hlist_node node; + struct pci_dev *pdev; + struct pmu pmu; + void __iomem *base; + int irq; + u32 identifier; + /* Minimum and maximum BDF of root ports monitored by PMU */ + u16 bdf_min; + u16 bdf_max; + int on_cpu; +}; + +struct hisi_pcie_reg_pair { + u16 lo; + u16 hi; +}; + +#define to_pcie_pmu(p) (container_of((p), struct hisi_pcie_pmu, pmu)) +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) + +#define HISI_PCIE_PMU_FILTER_ATTR(_name, _config, _hi, _lo) \ + static u64 hisi_pcie_get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK(_hi, _lo), event->attr._config); \ + } \ + +HISI_PCIE_PMU_FILTER_ATTR(event, config, 16, 0); +HISI_PCIE_PMU_FILTER_ATTR(thr_len, config1, 3, 0); +HISI_PCIE_PMU_FILTER_ATTR(thr_mode, config1, 4, 4); +HISI_PCIE_PMU_FILTER_ATTR(trig_len, config1, 8, 5); +HISI_PCIE_PMU_FILTER_ATTR(trig_mode, config1, 9, 9); +HISI_PCIE_PMU_FILTER_ATTR(port, config2, 15, 0); +HISI_PCIE_PMU_FILTER_ATTR(bdf, config2, 31, 16); + +static ssize_t hisi_pcie_format_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t hisi_pcie_event_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct perf_pmu_events_attr *pmu_attr = + container_of(attr, struct perf_pmu_events_attr, attr); + + return sysfs_emit(buf, "config=0x%llx\n", pmu_attr->id); +} + +#define HISI_PCIE_PMU_FORMAT_ATTR(_name, _format) \ + (&((struct dev_ext_attribute[]){ \ + { .attr = __ATTR(_name, 0444, hisi_pcie_format_sysfs_show, \ + NULL), \ + .var = (void *)_format } \ + })[0].attr.attr) + 
+#define HISI_PCIE_PMU_EVENT_ATTR(_name, _id) \ + PMU_EVENT_ATTR_ID(_name, hisi_pcie_event_sysfs_show, _id) + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static ssize_t identifier_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%#x\n", pcie_pmu->identifier); +} +static DEVICE_ATTR_RO(identifier); + +static ssize_t bus_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%#04x\n", PCI_BUS_NUM(pcie_pmu->bdf_min)); +} +static DEVICE_ATTR_RO(bus); + +static struct hisi_pcie_reg_pair +hisi_pcie_parse_reg_value(struct hisi_pcie_pmu *pcie_pmu, u32 reg_off) +{ + u32 val = readl_relaxed(pcie_pmu->base + reg_off); + struct hisi_pcie_reg_pair regs = { + .lo = val, + .hi = val >> 16, + }; + + return regs; +} + +/* + * Hardware counter and ext_counter work together for bandwidth, latency, bus + * utilization and buffer occupancy events. For example, RX memory write latency + * events(index = 0x0010), counter counts total delay cycles and ext_counter + * counts RX memory write PCIe packets number. + * + * As we don't want PMU driver to process these two data, "delay cycles" can + * be treated as an independent event(index = 0x0010), "RX memory write packets + * number" as another(index = 0x10010). BIT 16 is used to distinguish and 0-15 + * bits are "real" event index, which can be used to set HISI_PCIE_EVENT_CTRL. + */ +#define EXT_COUNTER_IS_USED(idx) ((idx) & BIT(16)) + +static u32 hisi_pcie_get_real_event(struct perf_event *event) +{ + return hisi_pcie_get_event(event) & GENMASK(15, 0); +} + +static u32 hisi_pcie_pmu_get_offset(u32 offset, u32 idx) +{ + return offset + HISI_PCIE_REG_STEP * idx; +} + +static u32 hisi_pcie_pmu_readl(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, + u32 idx) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + return readl_relaxed(pcie_pmu->base + offset); +} + +static void hisi_pcie_pmu_writel(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u32 val) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + writel_relaxed(val, pcie_pmu->base + offset); +} + +static u64 hisi_pcie_pmu_readq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + return readq_relaxed(pcie_pmu->base + offset); +} + +static void hisi_pcie_pmu_writeq(struct hisi_pcie_pmu *pcie_pmu, u32 reg_offset, u32 idx, u64 val) +{ + u32 offset = hisi_pcie_pmu_get_offset(reg_offset, idx); + + writeq_relaxed(val, pcie_pmu->base + offset); +} + +static void hisi_pcie_pmu_config_filter(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 reg = HISI_PCIE_INIT_SET; + u64 port, trig_len, thr_len; + + /* Config HISI_PCIE_EVENT_CTRL according to event. */ + reg |= FIELD_PREP(HISI_PCIE_EVENT_M, hisi_pcie_get_real_event(event)); + + /* Config HISI_PCIE_EVENT_CTRL according to root port or EP device. 
*/ + port = hisi_pcie_get_port(event); + if (port) + reg |= FIELD_PREP(HISI_PCIE_TARGET_M, port); + else + reg |= HISI_PCIE_TARGET_EN | + FIELD_PREP(HISI_PCIE_TARGET_M, hisi_pcie_get_bdf(event)); + + /* Config HISI_PCIE_EVENT_CTRL according to trigger condition. */ + trig_len = hisi_pcie_get_trig_len(event); + if (trig_len) { + reg |= FIELD_PREP(HISI_PCIE_TRIG_M, trig_len); + reg |= FIELD_PREP(HISI_PCIE_TRIG_MODE_M, hisi_pcie_get_trig_mode(event)); + reg |= HISI_PCIE_TRIG_EN; + } + + /* Config HISI_PCIE_EVENT_CTRL according to threshold condition. */ + thr_len = hisi_pcie_get_thr_len(event); + if (thr_len) { + reg |= FIELD_PREP(HISI_PCIE_THR_M, thr_len); + reg |= FIELD_PREP(HISI_PCIE_THR_MODE_M, hisi_pcie_get_thr_mode(event)); + reg |= HISI_PCIE_THR_EN; + } + + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, reg); +} + +static void hisi_pcie_pmu_clear_filter(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, hwc->idx, HISI_PCIE_INIT_SET); +} + +static bool hisi_pcie_pmu_valid_requester_id(struct hisi_pcie_pmu *pcie_pmu, u32 bdf) +{ + struct pci_dev *root_port, *pdev; + u16 rp_bdf; + + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pcie_pmu->pdev->bus), PCI_BUS_NUM(bdf), + GET_PCI_DEVFN(bdf)); + if (!pdev) + return false; + + root_port = pcie_find_root_port(pdev); + if (!root_port) { + pci_dev_put(pdev); + return false; + } + + pci_dev_put(pdev); + rp_bdf = pci_dev_id(root_port); + return rp_bdf >= pcie_pmu->bdf_min && rp_bdf <= pcie_pmu->bdf_max; +} + +static bool hisi_pcie_pmu_valid_filter(struct perf_event *event, + struct hisi_pcie_pmu *pcie_pmu) +{ + u32 requester_id = hisi_pcie_get_bdf(event); + + if (hisi_pcie_get_thr_len(event) > HISI_PCIE_THR_MAX_VAL) + return false; + + if (hisi_pcie_get_trig_len(event) > HISI_PCIE_TRIG_MAX_VAL) + return false; + + if (requester_id) { + if (!hisi_pcie_pmu_valid_requester_id(pcie_pmu, requester_id)) + return false; + } + + return true; +} + +static bool hisi_pcie_pmu_cmp_event(struct perf_event *target, + struct perf_event *event) +{ + return hisi_pcie_get_real_event(target) == hisi_pcie_get_real_event(event); +} + +static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct perf_event *event_group[HISI_PCIE_MAX_COUNTERS]; + int counters = 1; + int num; + + event_group[0] = leader; + if (!is_software_event(leader)) { + if (leader->pmu != event->pmu) + return false; + + if (leader != event && !hisi_pcie_pmu_cmp_event(leader, event)) + event_group[counters++] = event; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return false; + + for (num = 0; num < counters; num++) { + if (hisi_pcie_pmu_cmp_event(event_group[num], sibling)) + break; + } + + if (num == counters) + event_group[counters++] = sibling; + } + + return counters <= HISI_PCIE_MAX_COUNTERS; +} + +static int hisi_pcie_pmu_event_init(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* Check the type first before going on, otherwise it's not our event */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + event->cpu = pcie_pmu->on_cpu; + + if (EXT_COUNTER_IS_USED(hisi_pcie_get_event(event))) + hwc->event_base = HISI_PCIE_EXT_CNT; + else + hwc->event_base = 
HISI_PCIE_CNT; + + /* Sampling is not supported. */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + if (!hisi_pcie_pmu_valid_filter(event, pcie_pmu)) + return -EINVAL; + + if (!hisi_pcie_pmu_validate_event_group(event)) + return -EINVAL; + + return 0; +} + +static u64 hisi_pcie_pmu_read_counter(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + u32 idx = event->hw.idx; + + return hisi_pcie_pmu_readq(pcie_pmu, event->hw.event_base, idx); +} + +static int hisi_pcie_pmu_find_related_event(struct hisi_pcie_pmu *pcie_pmu, + struct perf_event *event) +{ + struct perf_event *sibling; + int idx; + + for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { + sibling = pcie_pmu->hw_events[idx]; + if (!sibling) + continue; + + if (!hisi_pcie_pmu_cmp_event(sibling, event)) + continue; + + /* Related events must be used in group */ + if (sibling->group_leader == event->group_leader) + return idx; + else + return -EINVAL; + } + + return idx; +} + +static int hisi_pcie_pmu_get_event_idx(struct hisi_pcie_pmu *pcie_pmu) +{ + int idx; + + for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { + if (!pcie_pmu->hw_events[idx]) + return idx; + } + + return -EINVAL; +} + +static void hisi_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 new_cnt, prev_cnt, delta; + + do { + prev_cnt = local64_read(&hwc->prev_count); + new_cnt = hisi_pcie_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, + new_cnt) != prev_cnt); + + delta = (new_cnt - prev_cnt) & HISI_PCIE_MAX_PERIOD; + local64_add(delta, &event->count); +} + +static void hisi_pcie_pmu_read(struct perf_event *event) +{ + hisi_pcie_pmu_event_update(event); +} + +static void hisi_pcie_pmu_set_period(struct perf_event *event) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + local64_set(&hwc->prev_count, HISI_PCIE_INIT_VAL); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_CNT, idx, HISI_PCIE_INIT_VAL); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EXT_CNT, idx, HISI_PCIE_INIT_VAL); +} + +static void hisi_pcie_pmu_enable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 val; + + val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx); + val |= HISI_PCIE_EVENT_EN; + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val); +} + +static void hisi_pcie_pmu_disable_counter(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u64 val; + + val = hisi_pcie_pmu_readq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx); + val &= ~HISI_PCIE_EVENT_EN; + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, val); +} + +static void hisi_pcie_pmu_enable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 0); +} + +static void hisi_pcie_pmu_disable_int(struct hisi_pcie_pmu *pcie_pmu, struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + + hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_MASK, idx, 1); +} + +static void hisi_pcie_pmu_reset_counter(struct hisi_pcie_pmu *pcie_pmu, int idx) +{ + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_RESET_CNT); + hisi_pcie_pmu_writeq(pcie_pmu, HISI_PCIE_EVENT_CTRL, idx, HISI_PCIE_INIT_SET); +} + +static void hisi_pcie_pmu_start(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = 
to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + u64 prev_cnt; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + + hisi_pcie_pmu_config_filter(event); + hisi_pcie_pmu_enable_counter(pcie_pmu, hwc); + hisi_pcie_pmu_enable_int(pcie_pmu, hwc); + hisi_pcie_pmu_set_period(event); + + if (flags & PERF_EF_RELOAD) { + prev_cnt = local64_read(&hwc->prev_count); + hisi_pcie_pmu_writeq(pcie_pmu, hwc->event_base, idx, prev_cnt); + } + + perf_event_update_userpage(event); +} + +static void hisi_pcie_pmu_stop(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pcie_pmu_event_update(event); + hisi_pcie_pmu_disable_int(pcie_pmu, hwc); + hisi_pcie_pmu_disable_counter(pcie_pmu, hwc); + hisi_pcie_pmu_clear_filter(event); + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + hwc->state |= PERF_HES_UPTODATE; +} + +static int hisi_pcie_pmu_add(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Check all working events to find a related event. */ + idx = hisi_pcie_pmu_find_related_event(pcie_pmu, event); + if (idx < 0) + return idx; + + /* Current event shares an enabled counter with the related event */ + if (idx < HISI_PCIE_MAX_COUNTERS) { + hwc->idx = idx; + goto start_count; + } + + idx = hisi_pcie_pmu_get_event_idx(pcie_pmu); + if (idx < 0) + return idx; + + hwc->idx = idx; + pcie_pmu->hw_events[idx] = event; + /* Reset Counter to avoid previous statistic interference. */ + hisi_pcie_pmu_reset_counter(pcie_pmu, idx); + +start_count: + if (flags & PERF_EF_START) + hisi_pcie_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void hisi_pcie_pmu_del(struct perf_event *event, int flags) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pcie_pmu_stop(event, PERF_EF_UPDATE); + pcie_pmu->hw_events[hwc->idx] = NULL; + perf_event_update_userpage(event); +} + +static void hisi_pcie_pmu_enable(struct pmu *pmu) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu); + int num; + + for (num = 0; num < HISI_PCIE_MAX_COUNTERS; num++) { + if (pcie_pmu->hw_events[num]) + break; + } + + if (num == HISI_PCIE_MAX_COUNTERS) + return; + + writel(HISI_PCIE_GLOBAL_EN, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL); +} + +static void hisi_pcie_pmu_disable(struct pmu *pmu) +{ + struct hisi_pcie_pmu *pcie_pmu = to_pcie_pmu(pmu); + + writel(HISI_PCIE_GLOBAL_NONE, pcie_pmu->base + HISI_PCIE_GLOBAL_CTRL); +} + +static irqreturn_t hisi_pcie_pmu_irq(int irq, void *data) +{ + struct hisi_pcie_pmu *pcie_pmu = data; + irqreturn_t ret = IRQ_NONE; + struct perf_event *event; + u32 overflown; + int idx; + + for (idx = 0; idx < HISI_PCIE_MAX_COUNTERS; idx++) { + overflown = hisi_pcie_pmu_readl(pcie_pmu, HISI_PCIE_INT_STAT, idx); + if (!overflown) + continue; + + /* Clear status of interrupt. 
*/ + hisi_pcie_pmu_writel(pcie_pmu, HISI_PCIE_INT_STAT, idx, 1); + event = pcie_pmu->hw_events[idx]; + if (!event) + continue; + + hisi_pcie_pmu_event_update(event); + hisi_pcie_pmu_set_period(event); + ret = IRQ_HANDLED; + } + + return ret; +} + +static int hisi_pcie_pmu_irq_register(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + int irq, ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + pci_err(pdev, "Failed to enable MSI vectors: %d\n", ret); + return ret; + } + + irq = pci_irq_vector(pdev, 0); + ret = request_irq(irq, hisi_pcie_pmu_irq, IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME, + pcie_pmu); + if (ret) { + pci_err(pdev, "Failed to register IRQ: %d\n", ret); + pci_free_irq_vectors(pdev); + return ret; + } + + pcie_pmu->irq = irq; + + return 0; +} + +static void hisi_pcie_pmu_irq_unregister(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + free_irq(pcie_pmu->irq, pcie_pmu); + pci_free_irq_vectors(pdev); +} + +static int hisi_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node); + + if (pcie_pmu->on_cpu == -1) { + pcie_pmu->on_cpu = cpu; + WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(cpu))); + } + + return 0; +} + +static int hisi_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pcie_pmu *pcie_pmu = hlist_entry_safe(node, struct hisi_pcie_pmu, node); + unsigned int target; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (pcie_pmu->on_cpu != cpu) + return 0; + + pcie_pmu->on_cpu = -1; + /* Choose a new CPU from all online cpus. */ + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) { + pci_err(pcie_pmu->pdev, "There is no CPU to set\n"); + return 0; + } + + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + /* Use this CPU for event counting */ + pcie_pmu->on_cpu = target; + WARN_ON(irq_set_affinity(pcie_pmu->irq, cpumask_of(target))); + + return 0; +} + +static struct attribute *hisi_pcie_pmu_events_attr[] = { + HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_latency, 0x0010), + HISI_PCIE_PMU_EVENT_ATTR(rx_mwr_cnt, 0x10010), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_latency, 0x0210), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_cnt, 0x10210), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_latency, 0x0011), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_cnt, 0x10011), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_flux, 0x0804), + HISI_PCIE_PMU_EVENT_ATTR(rx_mrd_time, 0x10804), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_flux, 0x0405), + HISI_PCIE_PMU_EVENT_ATTR(tx_mrd_time, 0x10405), + NULL +}; + +static struct attribute_group hisi_pcie_pmu_events_group = { + .name = "events", + .attrs = hisi_pcie_pmu_events_attr, +}; + +static struct attribute *hisi_pcie_pmu_format_attr[] = { + HISI_PCIE_PMU_FORMAT_ATTR(event, "config:0-16"), + HISI_PCIE_PMU_FORMAT_ATTR(thr_len, "config1:0-3"), + HISI_PCIE_PMU_FORMAT_ATTR(thr_mode, "config1:4"), + HISI_PCIE_PMU_FORMAT_ATTR(trig_len, "config1:5-8"), + HISI_PCIE_PMU_FORMAT_ATTR(trig_mode, "config1:9"), + HISI_PCIE_PMU_FORMAT_ATTR(port, "config2:0-15"), + HISI_PCIE_PMU_FORMAT_ATTR(bdf, "config2:16-31"), + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_format_group = { + .name = "format", + .attrs = hisi_pcie_pmu_format_attr, +}; + +static struct attribute *hisi_pcie_pmu_bus_attrs[] = { + &dev_attr_bus.attr, + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_bus_attr_group = { + .attrs = hisi_pcie_pmu_bus_attrs, +}; + +static struct attribute *hisi_pcie_pmu_cpumask_attrs[] = { + 
&dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_cpumask_attr_group = { + .attrs = hisi_pcie_pmu_cpumask_attrs, +}; + +static struct attribute *hisi_pcie_pmu_identifier_attrs[] = { + &dev_attr_identifier.attr, + NULL +}; + +static const struct attribute_group hisi_pcie_pmu_identifier_attr_group = { + .attrs = hisi_pcie_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_pcie_pmu_attr_groups[] = { + &hisi_pcie_pmu_events_group, + &hisi_pcie_pmu_format_group, + &hisi_pcie_pmu_bus_attr_group, + &hisi_pcie_pmu_cpumask_attr_group, + &hisi_pcie_pmu_identifier_attr_group, + NULL +}; + +static int hisi_pcie_alloc_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + struct hisi_pcie_reg_pair regs; + u16 sicl_id, core_id; + char *name; + + regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_BDF); + pcie_pmu->bdf_min = regs.lo; + pcie_pmu->bdf_max = regs.hi; + + regs = hisi_pcie_parse_reg_value(pcie_pmu, HISI_PCIE_REG_INFO); + sicl_id = regs.hi; + core_id = regs.lo; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_pcie%u_core%u", sicl_id, core_id); + if (!name) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->on_cpu = -1; + pcie_pmu->identifier = readl(pcie_pmu->base + HISI_PCIE_REG_VERSION); + pcie_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .event_init = hisi_pcie_pmu_event_init, + .pmu_enable = hisi_pcie_pmu_enable, + .pmu_disable = hisi_pcie_pmu_disable, + .add = hisi_pcie_pmu_add, + .del = hisi_pcie_pmu_del, + .start = hisi_pcie_pmu_start, + .stop = hisi_pcie_pmu_stop, + .read = hisi_pcie_pmu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = hisi_pcie_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + return 0; +} + +static int hisi_pcie_init_pmu(struct pci_dev *pdev, struct hisi_pcie_pmu *pcie_pmu) +{ + int ret; + + pcie_pmu->base = pci_ioremap_bar(pdev, 2); + if (!pcie_pmu->base) { + pci_err(pdev, "Ioremap failed for pcie_pmu resource\n"); + return -ENOMEM; + } + + ret = hisi_pcie_alloc_pmu(pdev, pcie_pmu); + if (ret) + goto err_iounmap; + + ret = hisi_pcie_pmu_irq_register(pdev, pcie_pmu); + if (ret) + goto err_iounmap; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node); + if (ret) { + pci_err(pdev, "Failed to register hotplug: %d\n", ret); + goto err_irq_unregister; + } + + ret = perf_pmu_register(&pcie_pmu->pmu, pcie_pmu->pmu.name, -1); + if (ret) { + pci_err(pdev, "Failed to register PCIe PMU: %d\n", ret); + goto err_hotplug_unregister; + } + + return ret; + +err_hotplug_unregister: + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node); + +err_irq_unregister: + hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu); + +err_iounmap: + iounmap(pcie_pmu->base); + + return ret; +} + +static void hisi_pcie_uninit_pmu(struct pci_dev *pdev) +{ + struct hisi_pcie_pmu *pcie_pmu = pci_get_drvdata(pdev); + + perf_pmu_unregister(&pcie_pmu->pmu); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, &pcie_pmu->node); + hisi_pcie_pmu_irq_unregister(pdev, pcie_pmu); + iounmap(pcie_pmu->base); +} + +static int hisi_pcie_init_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + pci_err(pdev, "Failed to enable PCI device: %d\n", ret); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(2), DRV_NAME); + if (ret < 0) { + pci_err(pdev, "Failed to request PCI mem regions: %d\n", ret); + return ret; + } + + pci_set_master(pdev); 
+ + return 0; +} + +static int hisi_pcie_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hisi_pcie_pmu *pcie_pmu; + int ret; + + pcie_pmu = devm_kzalloc(&pdev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + ret = hisi_pcie_init_dev(pdev); + if (ret) + return ret; + + ret = hisi_pcie_init_pmu(pdev, pcie_pmu); + if (ret) + return ret; + + pci_set_drvdata(pdev, pcie_pmu); + + return ret; +} + +static void hisi_pcie_pmu_remove(struct pci_dev *pdev) +{ + hisi_pcie_uninit_pmu(pdev); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id hisi_pcie_pmu_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa12d) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hisi_pcie_pmu_ids); + +static struct pci_driver hisi_pcie_pmu_driver = { + .name = DRV_NAME, + .id_table = hisi_pcie_pmu_ids, + .probe = hisi_pcie_pmu_probe, + .remove = hisi_pcie_pmu_remove, +}; + +static int __init hisi_pcie_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE, + "AP_PERF_ARM_HISI_PCIE_PMU_ONLINE", + hisi_pcie_pmu_online_cpu, + hisi_pcie_pmu_offline_cpu); + if (ret) { + pr_err("Failed to setup PCIe PMU hotplug: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&hisi_pcie_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE); + + return ret; +} +module_init(hisi_pcie_module_init); + +static void __exit hisi_pcie_module_exit(void) +{ + pci_unregister_driver(&hisi_pcie_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PCIE_PMU_ONLINE); +} +module_exit(hisi_pcie_module_exit); + +MODULE_DESCRIPTION("HiSilicon PCIe PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c new file mode 100644 index 000000000..a9bb73f76 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC CPA(Coherency Protocol Agent) hardware event counters support + * + * Copyright (C) 2022 HiSilicon Limited + * Author: Qi Liu <liuqi115@huawei.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
+ */ + +#define pr_fmt(fmt) "cpa pmu: " fmt +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* CPA register definition */ +#define CPA_PERF_CTRL 0x1c00 +#define CPA_EVENT_CTRL 0x1c04 +#define CPA_INT_MASK 0x1c70 +#define CPA_INT_STATUS 0x1c78 +#define CPA_INT_CLEAR 0x1c7c +#define CPA_EVENT_TYPE0 0x1c80 +#define CPA_VERSION 0x1cf0 +#define CPA_CNT0_LOWER 0x1d00 +#define CPA_CFG_REG 0x0534 + +/* CPA operation command */ +#define CPA_PERF_CTRL_EN BIT_ULL(0) +#define CPA_EVTYPE_MASK 0xffUL +#define CPA_PM_CTRL BIT_ULL(9) + +/* CPA has 8-counters */ +#define CPA_NR_COUNTERS 0x8 +#define CPA_COUNTER_BITS 64 +#define CPA_NR_EVENTS 0xff +#define CPA_REG_OFFSET 0x8 + +static u32 hisi_cpa_pmu_get_counter_offset(int idx) +{ + return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET); +} + +static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(CPA_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * CPA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * CPA_EVENT_TYPE1 is chosen. + */ + reg = CPA_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = CPA_REG_OFFSET * reg_idx; + + /* Write event code to CPA_EVENT_TYPEx Register */ + val = readl(cpa_pmu->base + reg); + val &= ~(CPA_EVTYPE_MASK << shift); + val |= type << shift; + writel(val, cpa_pmu->base + reg); +} + +static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_PERF_CTRL); + val |= CPA_PERF_CTRL_EN; + writel(val, cpa_pmu->base + CPA_PERF_CTRL); +} + +static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_PERF_CTRL); + val &= ~(CPA_PERF_CTRL_EN); + writel(val, cpa_pmu->base + CPA_PERF_CTRL); +} + +static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_CFG_REG); + val |= CPA_PM_CTRL; + writel(val, cpa_pmu->base + CPA_CFG_REG); +} + +static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu) +{ + u32 val; + + val = readl(cpa_pmu->base + CPA_CFG_REG); + val &= ~(CPA_PM_CTRL); + writel(val, cpa_pmu->base + CPA_CFG_REG); +} + +static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in CPA_EVENT_CTRL register */ + val = readl(cpa_pmu->base + CPA_EVENT_CTRL); + val |= 1 << hwc->idx; + writel(val, cpa_pmu->base + CPA_EVENT_CTRL); +} + +static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in CPA_EVENT_CTRL register */ + val = readl(cpa_pmu->base + CPA_EVENT_CTRL); + val &= ~(1UL << hwc->idx); + writel(val, cpa_pmu->base + CPA_EVENT_CTRL); +} + +static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* 
Write 0 to enable interrupt */ + val = readl(cpa_pmu->base + CPA_INT_MASK); + val &= ~(1UL << hwc->idx); + writel(val, cpa_pmu->base + CPA_INT_MASK); +} + +static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(cpa_pmu->base + CPA_INT_MASK); + val |= 1 << hwc->idx; + writel(val, cpa_pmu->base + CPA_INT_MASK); +} + +static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu) +{ + return readl(cpa_pmu->base + CPA_INT_STATUS); +} + +static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx) +{ + writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR); +} + +static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = { + { "HISI0281", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match); + +static int hisi_cpa_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *cpa_pmu) +{ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &cpa_pmu->sicl_id)) { + dev_err(&pdev->dev, "Can not read sicl-id\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id", + &cpa_pmu->index_id)) { + dev_err(&pdev->dev, "Cannot read idx-id\n"); + return -EINVAL; + } + + cpa_pmu->ccl_id = -1; + cpa_pmu->sccl_id = -1; + cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(cpa_pmu->base)) + return PTR_ERR(cpa_pmu->base); + + cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION); + + return 0; +} + +static struct attribute *hisi_cpa_pmu_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-15"), + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_format_group = { + .name = "format", + .attrs = hisi_cpa_pmu_format_attr, +}; + +static struct attribute *hisi_cpa_pmu_events_attr[] = { + HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00), + HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61), + HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62), + HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1), + HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2), + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_events_group = { + .name = "events", + .attrs = hisi_cpa_pmu_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = { + .attrs = hisi_cpa_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_cpa_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_cpa_pmu_identifier_attrs[] = { + &hisi_cpa_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_cpa_pmu_identifier_group = { + .attrs = hisi_cpa_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = { + &hisi_cpa_pmu_format_group, + &hisi_cpa_pmu_events_group, + &hisi_cpa_pmu_cpumask_attr_group, + &hisi_cpa_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = { + .write_evtype = hisi_cpa_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_cpa_pmu_start_counters, + .stop_counters = hisi_cpa_pmu_stop_counters, + .enable_counter = hisi_cpa_pmu_enable_counter, + .disable_counter = hisi_cpa_pmu_disable_counter, + .enable_counter_int = hisi_cpa_pmu_enable_counter_int, + .disable_counter_int = hisi_cpa_pmu_disable_counter_int, + .write_counter = hisi_cpa_pmu_write_counter, + 
.read_counter = hisi_cpa_pmu_read_counter, + .get_int_status = hisi_cpa_pmu_get_int_status, + .clear_int_status = hisi_cpa_pmu_clear_int_status, +}; + +static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *cpa_pmu) +{ + int ret; + + ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev); + if (ret) + return ret; + + cpa_pmu->counter_bits = CPA_COUNTER_BITS; + cpa_pmu->check_event = CPA_NR_EVENTS; + cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups; + cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops; + cpa_pmu->num_counters = CPA_NR_COUNTERS; + cpa_pmu->dev = &pdev->dev; + cpa_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_cpa_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *cpa_pmu; + char *name; + int ret; + + cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL); + if (!cpa_pmu) + return -ENOMEM; + + ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u", + cpa_pmu->sicl_id, cpa_pmu->index_id); + if (!name) + return -ENOMEM; + + cpa_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .task_ctx_nr = perf_invalid_context, + .event_init = hisi_uncore_pmu_event_init, + .pmu_enable = hisi_uncore_pmu_enable, + .pmu_disable = hisi_uncore_pmu_disable, + .add = hisi_uncore_pmu_add, + .del = hisi_uncore_pmu_del, + .start = hisi_uncore_pmu_start, + .stop = hisi_uncore_pmu_stop, + .read = hisi_uncore_pmu_read, + .attr_groups = cpa_pmu->pmu_events.attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + /* Power Management should be disabled before using CPA PMU. */ + hisi_cpa_pmu_disable_pm(cpa_pmu); + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + &cpa_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return ret; + } + + ret = perf_pmu_register(&cpa_pmu->pmu, name, -1); + if (ret) { + dev_err(cpa_pmu->dev, "PMU register failed\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return ret; + } + + platform_set_drvdata(pdev, cpa_pmu); + return ret; +} + +static int hisi_cpa_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&cpa_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + &cpa_pmu->node); + hisi_cpa_pmu_enable_pm(cpa_pmu); + return 0; +} + +static struct platform_driver hisi_cpa_pmu_driver = { + .driver = { + .name = "hisi_cpa_pmu", + .acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_cpa_pmu_probe, + .remove = hisi_cpa_pmu_remove, +}; + +static int __init hisi_cpa_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, + "AP_PERF_ARM_HISI_CPA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("setup hotplug failed: %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_cpa_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE); + + return ret; +} +module_init(hisi_cpa_pmu_module_init); + +static void __exit hisi_cpa_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_cpa_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE); +} 
+module_exit(hisi_cpa_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c new file mode 100644 index 000000000..50d0c0a2f --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC DDRC uncore Hardware event counters support + * + * Copyright (C) 2017 HiSilicon Limited + * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> + * Anurup M <anurup.m@huawei.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* DDRC register definition in v1 */ +#define DDRC_PERF_CTRL 0x010 +#define DDRC_FLUX_WR 0x380 +#define DDRC_FLUX_RD 0x384 +#define DDRC_FLUX_WCMD 0x388 +#define DDRC_FLUX_RCMD 0x38c +#define DDRC_PRE_CMD 0x3c0 +#define DDRC_ACT_CMD 0x3c4 +#define DDRC_RNK_CHG 0x3cc +#define DDRC_RW_CHG 0x3d0 +#define DDRC_EVENT_CTRL 0x6C0 +#define DDRC_INT_MASK 0x6c8 +#define DDRC_INT_STATUS 0x6cc +#define DDRC_INT_CLEAR 0x6d0 +#define DDRC_VERSION 0x710 + +/* DDRC register definition in v2 */ +#define DDRC_V2_INT_MASK 0x528 +#define DDRC_V2_INT_STATUS 0x52c +#define DDRC_V2_INT_CLEAR 0x530 +#define DDRC_V2_EVENT_CNT 0xe00 +#define DDRC_V2_EVENT_CTRL 0xe70 +#define DDRC_V2_EVENT_TYPE 0xe74 +#define DDRC_V2_PERF_CTRL 0xeA0 + +/* DDRC has 8-counters */ +#define DDRC_NR_COUNTERS 0x8 +#define DDRC_V1_PERF_CTRL_EN 0x2 +#define DDRC_V2_PERF_CTRL_EN 0x1 +#define DDRC_V1_NR_EVENTS 0x7 +#define DDRC_V2_NR_EVENTS 0x90 + +/* + * For PMU v1, there are eight-events and every event has been mapped + * to fixed-purpose counters which register offset is not consistent. + * Therefore there is no write event type and we assume that event + * code (0 to 7) is equal to counter index in PMU driver. + */ +#define GET_DDRC_EVENTID(hwc) (hwc->config_base & 0x7) + +static const u32 ddrc_reg_off[] = { + DDRC_FLUX_WR, DDRC_FLUX_RD, DDRC_FLUX_WCMD, DDRC_FLUX_RCMD, + DDRC_PRE_CMD, DDRC_ACT_CMD, DDRC_RNK_CHG, DDRC_RW_CHG +}; + +/* + * Select the counter register offset using the counter index. + * In PMU v1, there are no programmable counter, the count + * is read form the statistics counter register itself. 
+ */ +static u32 hisi_ddrc_pmu_v1_get_counter_offset(int cntr_idx) +{ + return ddrc_reg_off[cntr_idx]; +} + +static u32 hisi_ddrc_pmu_v2_get_counter_offset(int cntr_idx) +{ + return DDRC_V2_EVENT_CNT + cntr_idx * 8; +} + +static u64 hisi_ddrc_pmu_v1_read_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + return readl(ddrc_pmu->base + + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx)); +} + +static void hisi_ddrc_pmu_v1_write_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writel((u32)val, + ddrc_pmu->base + hisi_ddrc_pmu_v1_get_counter_offset(hwc->idx)); +} + +static u64 hisi_ddrc_pmu_v2_read_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + return readq(ddrc_pmu->base + + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx)); +} + +static void hisi_ddrc_pmu_v2_write_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, + ddrc_pmu->base + hisi_ddrc_pmu_v2_get_counter_offset(hwc->idx)); +} + +/* + * For DDRC PMU v1, event has been mapped to fixed-purpose counter by hardware, + * so there is no need to write event type, while it is programmable counter in + * PMU v2. + */ +static void hisi_ddrc_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, + u32 type) +{ + u32 offset; + + if (hha_pmu->identifier >= HISI_PMU_V2) { + offset = DDRC_V2_EVENT_TYPE + 4 * idx; + writel(type, hha_pmu->base + offset); + } +} + +static void hisi_ddrc_pmu_v1_start_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + /* Set perf_enable in DDRC_PERF_CTRL to start event counting */ + val = readl(ddrc_pmu->base + DDRC_PERF_CTRL); + val |= DDRC_V1_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_PERF_CTRL); +} + +static void hisi_ddrc_pmu_v1_stop_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + /* Clear perf_enable in DDRC_PERF_CTRL to stop event counting */ + val = readl(ddrc_pmu->base + DDRC_PERF_CTRL); + val &= ~DDRC_V1_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_PERF_CTRL); +} + +static void hisi_ddrc_pmu_v1_enable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Set counter index(event code) in DDRC_EVENT_CTRL register */ + val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL); + val |= (1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL); +} + +static void hisi_ddrc_pmu_v1_disable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index(event code) in DDRC_EVENT_CTRL register */ + val = readl(ddrc_pmu->base + DDRC_EVENT_CTRL); + val &= ~(1 << GET_DDRC_EVENTID(hwc)); + writel(val, ddrc_pmu->base + DDRC_EVENT_CTRL); +} + +static int hisi_ddrc_pmu_v1_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *ddrc_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = ddrc_pmu->pmu_events.used_mask; + struct hw_perf_event *hwc = &event->hw; + /* For DDRC PMU, we use event code as counter index */ + int idx = GET_DDRC_EVENTID(hwc); + + if (test_bit(idx, used_mask)) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} + +static int hisi_ddrc_pmu_v2_get_event_idx(struct perf_event *event) +{ + return hisi_uncore_pmu_get_event_idx(event); +} + +static void hisi_ddrc_pmu_v2_start_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + val = readl(ddrc_pmu->base + DDRC_V2_PERF_CTRL); + val |= DDRC_V2_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL); +} + +static void hisi_ddrc_pmu_v2_stop_counters(struct hisi_pmu *ddrc_pmu) +{ + u32 val; + + val = readl(ddrc_pmu->base + 
DDRC_V2_PERF_CTRL); + val &= ~DDRC_V2_PERF_CTRL_EN; + writel(val, ddrc_pmu->base + DDRC_V2_PERF_CTRL); +} + +static void hisi_ddrc_pmu_v2_enable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL); + val |= 1 << hwc->idx; + writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL); +} + +static void hisi_ddrc_pmu_v2_disable_counter(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(ddrc_pmu->base + DDRC_V2_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, ddrc_pmu->base + DDRC_V2_EVENT_CTRL); +} + +static void hisi_ddrc_pmu_v1_enable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(ddrc_pmu->base + DDRC_INT_MASK); + val &= ~(1 << hwc->idx); + writel(val, ddrc_pmu->base + DDRC_INT_MASK); +} + +static void hisi_ddrc_pmu_v1_disable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(ddrc_pmu->base + DDRC_INT_MASK); + val |= 1 << hwc->idx; + writel(val, ddrc_pmu->base + DDRC_INT_MASK); +} + +static void hisi_ddrc_pmu_v2_enable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK); + val &= ~(1 << hwc->idx); + writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK); +} + +static void hisi_ddrc_pmu_v2_disable_counter_int(struct hisi_pmu *ddrc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(ddrc_pmu->base + DDRC_V2_INT_MASK); + val |= 1 << hwc->idx; + writel(val, ddrc_pmu->base + DDRC_V2_INT_MASK); +} + +static u32 hisi_ddrc_pmu_v1_get_int_status(struct hisi_pmu *ddrc_pmu) +{ + return readl(ddrc_pmu->base + DDRC_INT_STATUS); +} + +static void hisi_ddrc_pmu_v1_clear_int_status(struct hisi_pmu *ddrc_pmu, + int idx) +{ + writel(1 << idx, ddrc_pmu->base + DDRC_INT_CLEAR); +} + +static u32 hisi_ddrc_pmu_v2_get_int_status(struct hisi_pmu *ddrc_pmu) +{ + return readl(ddrc_pmu->base + DDRC_V2_INT_STATUS); +} + +static void hisi_ddrc_pmu_v2_clear_int_status(struct hisi_pmu *ddrc_pmu, + int idx) +{ + writel(1 << idx, ddrc_pmu->base + DDRC_V2_INT_CLEAR); +} + +static const struct acpi_device_id hisi_ddrc_pmu_acpi_match[] = { + { "HISI0233", }, + { "HISI0234", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_ddrc_pmu_acpi_match); + +static int hisi_ddrc_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *ddrc_pmu) +{ + /* + * Use the SCCL_ID and DDRC channel ID to identify the + * DDRC PMU, while SCCL_ID is in MPIDR[aff2]. 
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,ch-id", + &ddrc_pmu->index_id)) { + dev_err(&pdev->dev, "Can not read ddrc channel-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &ddrc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read ddrc sccl-id!\n"); + return -EINVAL; + } + /* DDRC PMUs only share the same SCCL */ + ddrc_pmu->ccl_id = -1; + + ddrc_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ddrc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for ddrc_pmu resource\n"); + return PTR_ERR(ddrc_pmu->base); + } + + ddrc_pmu->identifier = readl(ddrc_pmu->base + DDRC_VERSION); + if (ddrc_pmu->identifier >= HISI_PMU_V2) { + if (device_property_read_u32(&pdev->dev, "hisilicon,sub-id", + &ddrc_pmu->sub_id)) { + dev_err(&pdev->dev, "Can not read sub-id!\n"); + return -EINVAL; + } + } + + return 0; +} + +static struct attribute *hisi_ddrc_pmu_v1_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-4"), + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_v1_format_group = { + .name = "format", + .attrs = hisi_ddrc_pmu_v1_format_attr, +}; + +static struct attribute *hisi_ddrc_pmu_v2_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL +}; + +static const struct attribute_group hisi_ddrc_pmu_v2_format_group = { + .name = "format", + .attrs = hisi_ddrc_pmu_v2_format_attr, +}; + +static struct attribute *hisi_ddrc_pmu_v1_events_attr[] = { + HISI_PMU_EVENT_ATTR(flux_wr, 0x00), + HISI_PMU_EVENT_ATTR(flux_rd, 0x01), + HISI_PMU_EVENT_ATTR(flux_wcmd, 0x02), + HISI_PMU_EVENT_ATTR(flux_rcmd, 0x03), + HISI_PMU_EVENT_ATTR(pre_cmd, 0x04), + HISI_PMU_EVENT_ATTR(act_cmd, 0x05), + HISI_PMU_EVENT_ATTR(rnk_chg, 0x06), + HISI_PMU_EVENT_ATTR(rw_chg, 0x07), + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_v1_events_group = { + .name = "events", + .attrs = hisi_ddrc_pmu_v1_events_attr, +}; + +static struct attribute *hisi_ddrc_pmu_v2_events_attr[] = { + HISI_PMU_EVENT_ATTR(cycles, 0x00), + HISI_PMU_EVENT_ATTR(flux_wr, 0x83), + HISI_PMU_EVENT_ATTR(flux_rd, 0x84), + NULL +}; + +static const struct attribute_group hisi_ddrc_pmu_v2_events_group = { + .name = "events", + .attrs = hisi_ddrc_pmu_v2_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_ddrc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_ddrc_pmu_cpumask_attr_group = { + .attrs = hisi_ddrc_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_ddrc_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_ddrc_pmu_identifier_attrs[] = { + &hisi_ddrc_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_ddrc_pmu_identifier_group = { + .attrs = hisi_ddrc_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_ddrc_pmu_v1_attr_groups[] = { + &hisi_ddrc_pmu_v1_format_group, + &hisi_ddrc_pmu_v1_events_group, + &hisi_ddrc_pmu_cpumask_attr_group, + &hisi_ddrc_pmu_identifier_group, + NULL, +}; + +static const struct attribute_group *hisi_ddrc_pmu_v2_attr_groups[] = { + &hisi_ddrc_pmu_v2_format_group, + &hisi_ddrc_pmu_v2_events_group, + &hisi_ddrc_pmu_cpumask_attr_group, + &hisi_ddrc_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_ddrc_v1_ops = { + .write_evtype = hisi_ddrc_pmu_write_evtype, + .get_event_idx = hisi_ddrc_pmu_v1_get_event_idx, + .start_counters = 
hisi_ddrc_pmu_v1_start_counters, + .stop_counters = hisi_ddrc_pmu_v1_stop_counters, + .enable_counter = hisi_ddrc_pmu_v1_enable_counter, + .disable_counter = hisi_ddrc_pmu_v1_disable_counter, + .enable_counter_int = hisi_ddrc_pmu_v1_enable_counter_int, + .disable_counter_int = hisi_ddrc_pmu_v1_disable_counter_int, + .write_counter = hisi_ddrc_pmu_v1_write_counter, + .read_counter = hisi_ddrc_pmu_v1_read_counter, + .get_int_status = hisi_ddrc_pmu_v1_get_int_status, + .clear_int_status = hisi_ddrc_pmu_v1_clear_int_status, +}; + +static const struct hisi_uncore_ops hisi_uncore_ddrc_v2_ops = { + .write_evtype = hisi_ddrc_pmu_write_evtype, + .get_event_idx = hisi_ddrc_pmu_v2_get_event_idx, + .start_counters = hisi_ddrc_pmu_v2_start_counters, + .stop_counters = hisi_ddrc_pmu_v2_stop_counters, + .enable_counter = hisi_ddrc_pmu_v2_enable_counter, + .disable_counter = hisi_ddrc_pmu_v2_disable_counter, + .enable_counter_int = hisi_ddrc_pmu_v2_enable_counter_int, + .disable_counter_int = hisi_ddrc_pmu_v2_disable_counter_int, + .write_counter = hisi_ddrc_pmu_v2_write_counter, + .read_counter = hisi_ddrc_pmu_v2_read_counter, + .get_int_status = hisi_ddrc_pmu_v2_get_int_status, + .clear_int_status = hisi_ddrc_pmu_v2_clear_int_status, +}; + +static int hisi_ddrc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *ddrc_pmu) +{ + int ret; + + ret = hisi_ddrc_pmu_init_data(pdev, ddrc_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(ddrc_pmu, pdev); + if (ret) + return ret; + + if (ddrc_pmu->identifier >= HISI_PMU_V2) { + ddrc_pmu->counter_bits = 48; + ddrc_pmu->check_event = DDRC_V2_NR_EVENTS; + ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v2_attr_groups; + ddrc_pmu->ops = &hisi_uncore_ddrc_v2_ops; + } else { + ddrc_pmu->counter_bits = 32; + ddrc_pmu->check_event = DDRC_V1_NR_EVENTS; + ddrc_pmu->pmu_events.attr_groups = hisi_ddrc_pmu_v1_attr_groups; + ddrc_pmu->ops = &hisi_uncore_ddrc_v1_ops; + } + + ddrc_pmu->num_counters = DDRC_NR_COUNTERS; + ddrc_pmu->dev = &pdev->dev; + ddrc_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_ddrc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *ddrc_pmu; + char *name; + int ret; + + ddrc_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddrc_pmu), GFP_KERNEL); + if (!ddrc_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, ddrc_pmu); + + ret = hisi_ddrc_pmu_dev_probe(pdev, ddrc_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug;\n", ret); + return ret; + } + + if (ddrc_pmu->identifier >= HISI_PMU_V2) + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "hisi_sccl%u_ddrc%u_%u", + ddrc_pmu->sccl_id, ddrc_pmu->index_id, + ddrc_pmu->sub_id); + else + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, + "hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id, + ddrc_pmu->index_id); + + hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE); + + ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); + if (ret) { + dev_err(ddrc_pmu->dev, "DDRC PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, &ddrc_pmu->node); + } + + return ret; +} + +static int hisi_ddrc_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *ddrc_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&ddrc_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + &ddrc_pmu->node); + return 0; +} + +static struct platform_driver 
hisi_ddrc_pmu_driver = { + .driver = { + .name = "hisi_ddrc_pmu", + .acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_ddrc_pmu_probe, + .remove = hisi_ddrc_pmu_remove, +}; + +static int __init hisi_ddrc_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE, + "AP_PERF_ARM_HISI_DDRC_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("DDRC PMU: setup hotplug, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_ddrc_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE); + + return ret; +} +module_init(hisi_ddrc_pmu_module_init); + +static void __exit hisi_ddrc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_ddrc_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE); + +} +module_exit(hisi_ddrc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC DDRC uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); +MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c new file mode 100644 index 000000000..13017b341 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC HHA uncore Hardware event counters support + * + * Copyright (C) 2017 HiSilicon Limited + * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> + * Anurup M <anurup.m@huawei.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* HHA register definition */ +#define HHA_INT_MASK 0x0804 +#define HHA_INT_STATUS 0x0808 +#define HHA_INT_CLEAR 0x080C +#define HHA_VERSION 0x1cf0 +#define HHA_PERF_CTRL 0x1E00 +#define HHA_EVENT_CTRL 0x1E04 +#define HHA_SRCID_CTRL 0x1E08 +#define HHA_DATSRC_CTRL 0x1BF0 +#define HHA_EVENT_TYPE0 0x1E80 +/* + * If the HW version only supports a 48-bit counter, then + * bits [63:48] are reserved, which are Read-As-Zero and + * Writes-Ignored. 
+ */ +#define HHA_CNT0_LOWER 0x1F00 + +/* HHA PMU v1 has 16 counters and v2 only has 8 counters */ +#define HHA_V1_NR_COUNTERS 0x10 +#define HHA_V2_NR_COUNTERS 0x8 + +#define HHA_PERF_CTRL_EN 0x1 +#define HHA_TRACETAG_EN BIT(31) +#define HHA_SRCID_EN BIT(2) +#define HHA_SRCID_CMD_SHIFT 6 +#define HHA_SRCID_MSK_SHIFT 20 +#define HHA_SRCID_CMD GENMASK(16, 6) +#define HHA_SRCID_MSK GENMASK(30, 20) +#define HHA_DATSRC_SKT_EN BIT(23) +#define HHA_EVTYPE_NONE 0xff +#define HHA_V1_NR_EVENT 0x65 +#define HHA_V2_NR_EVENT 0xCE + +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11); +HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 22, 22); +HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 23, 23); + +static void hisi_hha_pmu_enable_tracetag(struct perf_event *event) +{ + struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu); + u32 tt_en = hisi_get_tracetag_en(event); + + if (tt_en) { + u32 val; + + val = readl(hha_pmu->base + HHA_SRCID_CTRL); + val |= HHA_TRACETAG_EN; + writel(val, hha_pmu->base + HHA_SRCID_CTRL); + } +} + +static void hisi_hha_pmu_clear_tracetag(struct perf_event *event) +{ + struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu); + u32 val; + + val = readl(hha_pmu->base + HHA_SRCID_CTRL); + val &= ~HHA_TRACETAG_EN; + writel(val, hha_pmu->base + HHA_SRCID_CTRL); +} + +static void hisi_hha_pmu_config_ds(struct perf_event *event) +{ + struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu); + u32 ds_skt = hisi_get_datasrc_skt(event); + + if (ds_skt) { + u32 val; + + val = readl(hha_pmu->base + HHA_DATSRC_CTRL); + val |= HHA_DATSRC_SKT_EN; + writel(val, hha_pmu->base + HHA_DATSRC_CTRL); + } +} + +static void hisi_hha_pmu_clear_ds(struct perf_event *event) +{ + struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu); + u32 ds_skt = hisi_get_datasrc_skt(event); + + if (ds_skt) { + u32 val; + + val = readl(hha_pmu->base + HHA_DATSRC_CTRL); + val &= ~HHA_DATSRC_SKT_EN; + writel(val, hha_pmu->base + HHA_DATSRC_CTRL); + } +} + +static void hisi_hha_pmu_config_srcid(struct perf_event *event) +{ + struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_srcid_cmd(event); + + if (cmd) { + u32 val, msk; + + msk = hisi_get_srcid_msk(event); + val = readl(hha_pmu->base + HHA_SRCID_CTRL); + val |= HHA_SRCID_EN | (cmd << HHA_SRCID_CMD_SHIFT) | + (msk << HHA_SRCID_MSK_SHIFT); + writel(val, hha_pmu->base + HHA_SRCID_CTRL); + } +} + +static void hisi_hha_pmu_disable_srcid(struct perf_event *event) +{ + struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_srcid_cmd(event); + + if (cmd) { + u32 val; + + val = readl(hha_pmu->base + HHA_SRCID_CTRL); + val &= ~(HHA_SRCID_EN | HHA_SRCID_MSK | HHA_SRCID_CMD); + writel(val, hha_pmu->base + HHA_SRCID_CTRL); + } +} + +static void hisi_hha_pmu_enable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_hha_pmu_enable_tracetag(event); + hisi_hha_pmu_config_ds(event); + hisi_hha_pmu_config_srcid(event); + } +} + +static void hisi_hha_pmu_disable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_hha_pmu_disable_srcid(event); + hisi_hha_pmu_clear_ds(event); + hisi_hha_pmu_clear_tracetag(event); + } +} + +/* + * Select the counter register offset using the counter index + * each counter is 48-bits. 
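+ * Each counter occupies 8 bytes, so counter index N lives at
+ * HHA_CNT0_LOWER + N * 8 (e.g. counter 3 is at offset 0x1F18).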
+ */ +static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx) +{ + return (HHA_CNT0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + /* Read 64 bits and like L3C, top 16 bits are RAZ */ + return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc, u64 val) +{ + /* Write 64 bits and like L3C, top 16 bits are WI */ + writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(HHA_EVENT_TYPEx). + * There are 4 event select registers for the 16 hardware counters. + * Event code is 8-bits and for the first 4 hardware counters, + * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters, + * HHA_EVENT_TYPE1 is chosen and so on. + */ + reg = HHA_EVENT_TYPE0 + 4 * (idx / 4); + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to HHA_EVENT_TYPEx register */ + val = readl(hha_pmu->base + reg); + val &= ~(HHA_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, hha_pmu->base + reg); +} + +static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in HHA_PERF_CTRL to start event + * counting for all enabled counters. + */ + val = readl(hha_pmu->base + HHA_PERF_CTRL); + val |= HHA_PERF_CTRL_EN; + writel(val, hha_pmu->base + HHA_PERF_CTRL); +} + +static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in HHA_PERF_CTRL to stop event + * counting for all enabled counters. 
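+ * This is the hook behind pmu->pmu_disable(), so the whole HHA PMU
+ * is quiesced while the perf core manipulates individual events.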
+ */ + val = readl(hha_pmu->base + HHA_PERF_CTRL); + val &= ~(HHA_PERF_CTRL_EN); + writel(val, hha_pmu->base + HHA_PERF_CTRL); +} + +static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in HHA_EVENT_CTRL register */ + val = readl(hha_pmu->base + HHA_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, hha_pmu->base + HHA_EVENT_CTRL); +} + +static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in HHA_EVENT_CTRL register */ + val = readl(hha_pmu->base + HHA_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, hha_pmu->base + HHA_EVENT_CTRL); +} + +static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(hha_pmu->base + HHA_INT_MASK); + val &= ~(1 << hwc->idx); + writel(val, hha_pmu->base + HHA_INT_MASK); +} + +static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(hha_pmu->base + HHA_INT_MASK); + val |= (1 << hwc->idx); + writel(val, hha_pmu->base + HHA_INT_MASK); +} + +static u32 hisi_hha_pmu_get_int_status(struct hisi_pmu *hha_pmu) +{ + return readl(hha_pmu->base + HHA_INT_STATUS); +} + +static void hisi_hha_pmu_clear_int_status(struct hisi_pmu *hha_pmu, int idx) +{ + writel(1 << idx, hha_pmu->base + HHA_INT_CLEAR); +} + +static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = { + { "HISI0243", }, + { "HISI0244", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match); + +static int hisi_hha_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + unsigned long long id; + acpi_status status; + + /* + * Use SCCL_ID and UID to identify the HHA PMU, while + * SCCL_ID is in MPIDR[aff2]. + */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &hha_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read hha sccl-id!\n"); + return -EINVAL; + } + + /* + * Early versions of BIOS support _UID by mistake, so we support + * both "hisilicon, idx-id" as preference, if available. 
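+ * If the "hisilicon,idx-id" property is absent, fall back to
+ * evaluating the ACPI _UID object to obtain the index.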
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id", + &hha_pmu->index_id)) { + status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), + "_UID", NULL, &id); + if (ACPI_FAILURE(status)) { + dev_err(&pdev->dev, "Cannot read idx-id!\n"); + return -EINVAL; + } + + hha_pmu->index_id = id; + } + /* HHA PMUs only share the same SCCL */ + hha_pmu->ccl_id = -1; + + hha_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hha_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n"); + return PTR_ERR(hha_pmu->base); + } + + hha_pmu->identifier = readl(hha_pmu->base + HHA_VERSION); + + return 0; +} + +static struct attribute *hisi_hha_pmu_v1_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_v1_format_group = { + .name = "format", + .attrs = hisi_hha_pmu_v1_format_attr, +}; + +static struct attribute *hisi_hha_pmu_v2_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:0-10"), + HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:11-21"), + HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:22"), + HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:23"), + NULL +}; + +static const struct attribute_group hisi_hha_pmu_v2_format_group = { + .name = "format", + .attrs = hisi_hha_pmu_v2_format_attr, +}; + +static struct attribute *hisi_hha_pmu_v1_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00), + HISI_PMU_EVENT_ATTR(rx_outer, 0x01), + HISI_PMU_EVENT_ATTR(rx_sccl, 0x02), + HISI_PMU_EVENT_ATTR(rx_ccix, 0x03), + HISI_PMU_EVENT_ATTR(rx_wbi, 0x04), + HISI_PMU_EVENT_ATTR(rx_wbip, 0x05), + HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11), + HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c), + HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d), + HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e), + HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f), + HISI_PMU_EVENT_ATTR(spill_num, 0x20), + HISI_PMU_EVENT_ATTR(spill_success, 0x21), + HISI_PMU_EVENT_ATTR(bi_num, 0x23), + HISI_PMU_EVENT_ATTR(mediated_num, 0x32), + HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33), + HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34), + HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35), + HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38), + HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c), + HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40), + HISI_PMU_EVENT_ATTR(edir-lookup, 0x41), + HISI_PMU_EVENT_ATTR(sdir-hit, 0x42), + HISI_PMU_EVENT_ATTR(edir-hit, 0x43), + HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c), + HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d), + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_v1_events_group = { + .name = "events", + .attrs = hisi_hha_pmu_v1_events_attr, +}; + +static struct attribute *hisi_hha_pmu_v2_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00), + HISI_PMU_EVENT_ATTR(rx_outer, 0x01), + HISI_PMU_EVENT_ATTR(rx_sccl, 0x02), + HISI_PMU_EVENT_ATTR(hha_retry, 0x2e), + HISI_PMU_EVENT_ATTR(cycles, 0x55), + NULL +}; + +static const struct attribute_group hisi_hha_pmu_v2_events_group = { + .name = "events", + .attrs = hisi_hha_pmu_v2_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_hha_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = { + .attrs = hisi_hha_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_hha_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_hha_pmu_identifier_attrs[] = { + 
&hisi_hha_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_hha_pmu_identifier_group = { + .attrs = hisi_hha_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = { + &hisi_hha_pmu_v1_format_group, + &hisi_hha_pmu_v1_events_group, + &hisi_hha_pmu_cpumask_attr_group, + &hisi_hha_pmu_identifier_group, + NULL, +}; + +static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = { + &hisi_hha_pmu_v2_format_group, + &hisi_hha_pmu_v2_events_group, + &hisi_hha_pmu_cpumask_attr_group, + &hisi_hha_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_hha_ops = { + .write_evtype = hisi_hha_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_hha_pmu_start_counters, + .stop_counters = hisi_hha_pmu_stop_counters, + .enable_counter = hisi_hha_pmu_enable_counter, + .disable_counter = hisi_hha_pmu_disable_counter, + .enable_counter_int = hisi_hha_pmu_enable_counter_int, + .disable_counter_int = hisi_hha_pmu_disable_counter_int, + .write_counter = hisi_hha_pmu_write_counter, + .read_counter = hisi_hha_pmu_read_counter, + .get_int_status = hisi_hha_pmu_get_int_status, + .clear_int_status = hisi_hha_pmu_clear_int_status, + .enable_filter = hisi_hha_pmu_enable_filter, + .disable_filter = hisi_hha_pmu_disable_filter, +}; + +static int hisi_hha_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *hha_pmu) +{ + int ret; + + ret = hisi_hha_pmu_init_data(pdev, hha_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(hha_pmu, pdev); + if (ret) + return ret; + + if (hha_pmu->identifier >= HISI_PMU_V2) { + hha_pmu->counter_bits = 64; + hha_pmu->check_event = HHA_V2_NR_EVENT; + hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v2_attr_groups; + hha_pmu->num_counters = HHA_V2_NR_COUNTERS; + } else { + hha_pmu->counter_bits = 48; + hha_pmu->check_event = HHA_V1_NR_EVENT; + hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v1_attr_groups; + hha_pmu->num_counters = HHA_V1_NR_COUNTERS; + } + hha_pmu->ops = &hisi_uncore_hha_ops; + hha_pmu->dev = &pdev->dev; + hha_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_hha_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *hha_pmu; + char *name; + int ret; + + hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL); + if (!hha_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, hha_pmu); + + ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u", + hha_pmu->sccl_id, hha_pmu->index_id); + hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE); + + ret = perf_pmu_register(&hha_pmu->pmu, name, -1); + if (ret) { + dev_err(hha_pmu->dev, "HHA PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node); + } + + return ret; +} + +static int hisi_hha_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&hha_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + &hha_pmu->node); + return 0; +} + +static struct platform_driver hisi_hha_pmu_driver = { + .driver = { + .name = "hisi_hha_pmu", + .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match), + 
.suppress_bind_attrs = true, + }, + .probe = hisi_hha_pmu_probe, + .remove = hisi_hha_pmu_remove, +}; + +static int __init hisi_hha_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, + "AP_PERF_ARM_HISI_HHA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_hha_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE); + + return ret; +} +module_init(hisi_hha_pmu_module_init); + +static void __exit hisi_hha_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_hha_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE); +} +module_exit(hisi_hha_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); +MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c new file mode 100644 index 000000000..2995f3630 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC L3C uncore Hardware event counters support + * + * Copyright (C) 2017 HiSilicon Limited + * Author: Anurup M <anurup.m@huawei.com> + * Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ +#include <linux/acpi.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* L3C register definition */ +#define L3C_PERF_CTRL 0x0408 +#define L3C_INT_MASK 0x0800 +#define L3C_INT_STATUS 0x0808 +#define L3C_INT_CLEAR 0x080c +#define L3C_CORE_CTRL 0x1b04 +#define L3C_TRACETAG_CTRL 0x1b20 +#define L3C_DATSRC_TYPE 0x1b48 +#define L3C_DATSRC_CTRL 0x1bf0 +#define L3C_EVENT_CTRL 0x1c00 +#define L3C_VERSION 0x1cf0 +#define L3C_EVENT_TYPE0 0x1d00 +/* + * If the HW version only supports a 48-bit counter, then + * bits [63:48] are reserved, which are Read-As-Zero and + * Writes-Ignored. 
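+ * 64-bit readq()/writeq() accessors are used unconditionally; the
+ * reserved bits are simply ignored on 48-bit capable hardware.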
+ */ +#define L3C_CNTR0_LOWER 0x1e00 + +/* L3C has 8-counters */ +#define L3C_NR_COUNTERS 0x8 + +#define L3C_PERF_CTRL_EN 0x10000 +#define L3C_TRACETAG_EN BIT(31) +#define L3C_TRACETAG_REQ_SHIFT 7 +#define L3C_TRACETAG_MARK_EN BIT(0) +#define L3C_TRACETAG_REQ_EN (L3C_TRACETAG_MARK_EN | BIT(2)) +#define L3C_TRACETAG_CORE_EN (L3C_TRACETAG_MARK_EN | BIT(3)) +#define L3C_CORE_EN BIT(20) +#define L3C_COER_NONE 0x0 +#define L3C_DATSRC_MASK 0xFF +#define L3C_DATSRC_SKT_EN BIT(23) +#define L3C_DATSRC_NONE 0x0 +#define L3C_EVTYPE_NONE 0xff +#define L3C_V1_NR_EVENTS 0x59 +#define L3C_V2_NR_EVENTS 0xFF + +HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0); +HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8); +HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11); +HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16); + +static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + u32 tt_req = hisi_get_tt_req(event); + + if (tt_req) { + u32 val; + + /* Set request-type for tracetag */ + val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL); + val |= tt_req << L3C_TRACETAG_REQ_SHIFT; + val |= L3C_TRACETAG_REQ_EN; + writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL); + + /* Enable request-tracetag statistics */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val |= L3C_TRACETAG_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); + } +} + +static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + u32 tt_req = hisi_get_tt_req(event); + + if (tt_req) { + u32 val; + + /* Clear request-type */ + val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL); + val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT); + val &= ~L3C_TRACETAG_REQ_EN; + writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL); + + /* Disable request-tracetag statistics */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val &= ~L3C_TRACETAG_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); + } +} + +static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u32 reg, reg_idx, shift, val; + int idx = hwc->idx; + + /* + * Select the appropriate datasource register(L3C_DATSRC_TYPE0/1). + * There are 2 datasource ctrl register for the 8 hardware counters. + * Datasrc is 8-bits and for the former 4 hardware counters, + * L3C_DATSRC_TYPE0 is chosen. For the latter 4 hardware counters, + * L3C_DATSRC_TYPE1 is chosen. 
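+ * For example, counter index 5 selects L3C_DATSRC_TYPE + 4 with a
+ * field shift of 8 bits within that register.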
+ */ + reg = L3C_DATSRC_TYPE + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + val = readl(l3c_pmu->base + reg); + val &= ~(L3C_DATSRC_MASK << shift); + val |= ds_cfg << shift; + writel(val, l3c_pmu->base + reg); +} + +static void hisi_l3c_pmu_config_ds(struct perf_event *event) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + u32 ds_cfg = hisi_get_datasrc_cfg(event); + u32 ds_skt = hisi_get_datasrc_skt(event); + + if (ds_cfg) + hisi_l3c_pmu_write_ds(event, ds_cfg); + + if (ds_skt) { + u32 val; + + val = readl(l3c_pmu->base + L3C_DATSRC_CTRL); + val |= L3C_DATSRC_SKT_EN; + writel(val, l3c_pmu->base + L3C_DATSRC_CTRL); + } +} + +static void hisi_l3c_pmu_clear_ds(struct perf_event *event) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + u32 ds_cfg = hisi_get_datasrc_cfg(event); + u32 ds_skt = hisi_get_datasrc_skt(event); + + if (ds_cfg) + hisi_l3c_pmu_write_ds(event, L3C_DATSRC_NONE); + + if (ds_skt) { + u32 val; + + val = readl(l3c_pmu->base + L3C_DATSRC_CTRL); + val &= ~L3C_DATSRC_SKT_EN; + writel(val, l3c_pmu->base + L3C_DATSRC_CTRL); + } +} + +static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + u32 core = hisi_get_tt_core(event); + + if (core) { + u32 val; + + /* Config and enable core information */ + writel(core, l3c_pmu->base + L3C_CORE_CTRL); + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val |= L3C_CORE_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); + + /* Enable core-tracetag statistics */ + val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL); + val |= L3C_TRACETAG_CORE_EN; + writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL); + } +} + +static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event) +{ + struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu); + u32 core = hisi_get_tt_core(event); + + if (core) { + u32 val; + + /* Clear core information */ + writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL); + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val &= ~L3C_CORE_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); + + /* Disable core-tracetag statistics */ + val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL); + val &= ~L3C_TRACETAG_CORE_EN; + writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL); + } +} + +static void hisi_l3c_pmu_enable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_l3c_pmu_config_req_tracetag(event); + hisi_l3c_pmu_config_core_tracetag(event); + hisi_l3c_pmu_config_ds(event); + } +} + +static void hisi_l3c_pmu_disable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_l3c_pmu_clear_ds(event); + hisi_l3c_pmu_clear_core_tracetag(event); + hisi_l3c_pmu_clear_req_tracetag(event); + } +} + +/* + * Select the counter register offset using the counter index + */ +static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx) +{ + return (L3C_CNTR0_LOWER + (cntr_idx * 8)); +} + +static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(L3C_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. 
+ * Event code is 8-bits and for the former 4 hardware counters, + * L3C_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * L3C_EVENT_TYPE1 is chosen. + */ + reg = L3C_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to L3C_EVENT_TYPEx Register */ + val = readl(l3c_pmu->base + reg); + val &= ~(L3C_EVTYPE_NONE << shift); + val |= (type << shift); + writel(val, l3c_pmu->base + reg); +} + +static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu) +{ + u32 val; + + /* + * Set perf_enable bit in L3C_PERF_CTRL register to start counting + * for all enabled counters. + */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val |= L3C_PERF_CTRL_EN; + writel(val, l3c_pmu->base + L3C_PERF_CTRL); +} + +static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu) +{ + u32 val; + + /* + * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting + * for all enabled counters. + */ + val = readl(l3c_pmu->base + L3C_PERF_CTRL); + val &= ~(L3C_PERF_CTRL_EN); + writel(val, l3c_pmu->base + L3C_PERF_CTRL); +} + +static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in L3C_EVENT_CTRL register */ + val = readl(l3c_pmu->base + L3C_EVENT_CTRL); + val |= (1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_EVENT_CTRL); +} + +static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in L3C_EVENT_CTRL register */ + val = readl(l3c_pmu->base + L3C_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_EVENT_CTRL); +} + +static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3c_pmu->base + L3C_INT_MASK); + /* Write 0 to enable interrupt */ + val &= ~(1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_INT_MASK); +} + +static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(l3c_pmu->base + L3C_INT_MASK); + /* Write 1 to mask interrupt */ + val |= (1 << hwc->idx); + writel(val, l3c_pmu->base + L3C_INT_MASK); +} + +static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu) +{ + return readl(l3c_pmu->base + L3C_INT_STATUS); +} + +static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx) +{ + writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR); +} + +static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = { + { "HISI0213", }, + { "HISI0214", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match); + +static int hisi_l3c_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + /* + * Use the SCCL_ID and CCL_ID to identify the L3C PMU, while + * SCCL_ID is in MPIDR[aff2] and CCL_ID is in MPIDR[aff1]. 
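+ * Both IDs are provided as firmware device properties
+ * ("hisilicon,scl-id" / "hisilicon,ccl-id") and are later compared
+ * against each CPU's MPIDR-derived SCCL/CCL to decide which CPUs
+ * are associated with this PMU.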
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &l3c_pmu->sccl_id)) { + dev_err(&pdev->dev, "Can not read l3c sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,ccl-id", + &l3c_pmu->ccl_id)) { + dev_err(&pdev->dev, "Can not read l3c ccl-id!\n"); + return -EINVAL; + } + + l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(l3c_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n"); + return PTR_ERR(l3c_pmu->base); + } + + l3c_pmu->identifier = readl(l3c_pmu->base + L3C_VERSION); + + return 0; +} + +static struct attribute *hisi_l3c_pmu_v1_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_v1_format_group = { + .name = "format", + .attrs = hisi_l3c_pmu_v1_format_attr, +}; + +static struct attribute *hisi_l3c_pmu_v2_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"), + HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"), + HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"), + HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"), + NULL +}; + +static const struct attribute_group hisi_l3c_pmu_v2_format_group = { + .name = "format", + .attrs = hisi_l3c_pmu_v2_format_attr, +}; + +static struct attribute *hisi_l3c_pmu_v1_events_attr[] = { + HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00), + HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01), + HISI_PMU_EVENT_ATTR(rd_hit_cpipe, 0x02), + HISI_PMU_EVENT_ATTR(wr_hit_cpipe, 0x03), + HISI_PMU_EVENT_ATTR(victim_num, 0x04), + HISI_PMU_EVENT_ATTR(rd_spipe, 0x20), + HISI_PMU_EVENT_ATTR(wr_spipe, 0x21), + HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x22), + HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x23), + HISI_PMU_EVENT_ATTR(back_invalid, 0x29), + HISI_PMU_EVENT_ATTR(retry_cpu, 0x40), + HISI_PMU_EVENT_ATTR(retry_ring, 0x41), + HISI_PMU_EVENT_ATTR(prefetch_drop, 0x42), + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_v1_events_group = { + .name = "events", + .attrs = hisi_l3c_pmu_v1_events_attr, +}; + +static struct attribute *hisi_l3c_pmu_v2_events_attr[] = { + HISI_PMU_EVENT_ATTR(l3c_hit, 0x48), + HISI_PMU_EVENT_ATTR(cycles, 0x7f), + HISI_PMU_EVENT_ATTR(l3c_ref, 0xb8), + HISI_PMU_EVENT_ATTR(dat_access, 0xb9), + NULL +}; + +static const struct attribute_group hisi_l3c_pmu_v2_events_group = { + .name = "events", + .attrs = hisi_l3c_pmu_v2_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_l3c_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group hisi_l3c_pmu_cpumask_attr_group = { + .attrs = hisi_l3c_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_l3c_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_l3c_pmu_identifier_attrs[] = { + &hisi_l3c_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_l3c_pmu_identifier_group = { + .attrs = hisi_l3c_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = { + &hisi_l3c_pmu_v1_format_group, + &hisi_l3c_pmu_v1_events_group, + &hisi_l3c_pmu_cpumask_attr_group, + &hisi_l3c_pmu_identifier_group, + NULL, +}; + +static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = { + &hisi_l3c_pmu_v2_format_group, + &hisi_l3c_pmu_v2_events_group, + &hisi_l3c_pmu_cpumask_attr_group, + &hisi_l3c_pmu_identifier_group, + NULL +}; + +static const struct 
hisi_uncore_ops hisi_uncore_l3c_ops = { + .write_evtype = hisi_l3c_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_l3c_pmu_start_counters, + .stop_counters = hisi_l3c_pmu_stop_counters, + .enable_counter = hisi_l3c_pmu_enable_counter, + .disable_counter = hisi_l3c_pmu_disable_counter, + .enable_counter_int = hisi_l3c_pmu_enable_counter_int, + .disable_counter_int = hisi_l3c_pmu_disable_counter_int, + .write_counter = hisi_l3c_pmu_write_counter, + .read_counter = hisi_l3c_pmu_read_counter, + .get_int_status = hisi_l3c_pmu_get_int_status, + .clear_int_status = hisi_l3c_pmu_clear_int_status, + .enable_filter = hisi_l3c_pmu_enable_filter, + .disable_filter = hisi_l3c_pmu_disable_filter, +}; + +static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *l3c_pmu) +{ + int ret; + + ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(l3c_pmu, pdev); + if (ret) + return ret; + + if (l3c_pmu->identifier >= HISI_PMU_V2) { + l3c_pmu->counter_bits = 64; + l3c_pmu->check_event = L3C_V2_NR_EVENTS; + l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups; + } else { + l3c_pmu->counter_bits = 48; + l3c_pmu->check_event = L3C_V1_NR_EVENTS; + l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups; + } + + l3c_pmu->num_counters = L3C_NR_COUNTERS; + l3c_pmu->ops = &hisi_uncore_l3c_ops; + l3c_pmu->dev = &pdev->dev; + l3c_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_l3c_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *l3c_pmu; + char *name; + int ret; + + l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL); + if (!l3c_pmu) + return -ENOMEM; + + platform_set_drvdata(pdev, l3c_pmu); + + ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + /* + * CCL_ID is used to identify the L3C in the same SCCL which was + * used _UID by mistake. 
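+ * The registered PMU appears in perf with a name such as
+ * "hisi_sccl1_l3c0" (the actual indices depend on the SoC topology),
+ * so an event can be counted with, for example:
+ *   perf stat -e hisi_sccl1_l3c0/rd_hit_cpipe/ -a sleep 1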
+ */ + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u", + l3c_pmu->sccl_id, l3c_pmu->ccl_id); + hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE); + + ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); + if (ret) { + dev_err(l3c_pmu->dev, "L3C PMU register failed!\n"); + cpuhp_state_remove_instance_nocalls( + CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, &l3c_pmu->node); + } + + return ret; +} + +static int hisi_l3c_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *l3c_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&l3c_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + &l3c_pmu->node); + return 0; +} + +static struct platform_driver hisi_l3c_pmu_driver = { + .driver = { + .name = "hisi_l3c_pmu", + .acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match), + .suppress_bind_attrs = true, + }, + .probe = hisi_l3c_pmu_probe, + .remove = hisi_l3c_pmu_remove, +}; + +static int __init hisi_l3c_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE, + "AP_PERF_ARM_HISI_L3_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_l3c_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE); + + return ret; +} +module_init(hisi_l3c_pmu_module_init); + +static void __exit hisi_l3c_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_l3c_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE); +} +module_exit(hisi_l3c_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SoC L3C uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c new file mode 100644 index 000000000..d385234fa --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon PA uncore Hardware event counters support + * + * Copyright (C) 2020 HiSilicon Limited + * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
+ */ +#include <linux/acpi.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* PA register definition */ +#define PA_PERF_CTRL 0x1c00 +#define PA_EVENT_CTRL 0x1c04 +#define PA_TT_CTRL 0x1c08 +#define PA_TGTID_CTRL 0x1c14 +#define PA_SRCID_CTRL 0x1c18 +#define PA_INT_MASK 0x1c70 +#define PA_INT_STATUS 0x1c78 +#define PA_INT_CLEAR 0x1c7c +#define PA_EVENT_TYPE0 0x1c80 +#define PA_PMU_VERSION 0x1cf0 +#define PA_EVENT_CNT0_L 0x1d00 + +#define PA_EVTYPE_MASK 0xff +#define PA_NR_COUNTERS 0x8 +#define PA_PERF_CTRL_EN BIT(0) +#define PA_TRACETAG_EN BIT(4) +#define PA_TGTID_EN BIT(11) +#define PA_SRCID_EN BIT(11) +#define PA_TGTID_NONE 0 +#define PA_SRCID_NONE 0 +#define PA_TGTID_MSK_SHIFT 12 +#define PA_SRCID_MSK_SHIFT 12 + +HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_cmd, config1, 10, 0); +HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_msk, config1, 21, 11); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33); +HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44); + +static void hisi_pa_pmu_enable_tracetag(struct perf_event *event) +{ + struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); + u32 tt_en = hisi_get_tracetag_en(event); + + if (tt_en) { + u32 val; + + val = readl(pa_pmu->base + PA_TT_CTRL); + val |= PA_TRACETAG_EN; + writel(val, pa_pmu->base + PA_TT_CTRL); + } +} + +static void hisi_pa_pmu_clear_tracetag(struct perf_event *event) +{ + struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); + u32 tt_en = hisi_get_tracetag_en(event); + + if (tt_en) { + u32 val; + + val = readl(pa_pmu->base + PA_TT_CTRL); + val &= ~PA_TRACETAG_EN; + writel(val, pa_pmu->base + PA_TT_CTRL); + } +} + +static void hisi_pa_pmu_config_tgtid(struct perf_event *event) +{ + struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_tgtid_cmd(event); + + if (cmd) { + u32 msk = hisi_get_tgtid_msk(event); + u32 val = cmd | PA_TGTID_EN | (msk << PA_TGTID_MSK_SHIFT); + + writel(val, pa_pmu->base + PA_TGTID_CTRL); + } +} + +static void hisi_pa_pmu_clear_tgtid(struct perf_event *event) +{ + struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_tgtid_cmd(event); + + if (cmd) + writel(PA_TGTID_NONE, pa_pmu->base + PA_TGTID_CTRL); +} + +static void hisi_pa_pmu_config_srcid(struct perf_event *event) +{ + struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_srcid_cmd(event); + + if (cmd) { + u32 msk = hisi_get_srcid_msk(event); + u32 val = cmd | PA_SRCID_EN | (msk << PA_SRCID_MSK_SHIFT); + + writel(val, pa_pmu->base + PA_SRCID_CTRL); + } +} + +static void hisi_pa_pmu_clear_srcid(struct perf_event *event) +{ + struct hisi_pmu *pa_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_srcid_cmd(event); + + if (cmd) + writel(PA_SRCID_NONE, pa_pmu->base + PA_SRCID_CTRL); +} + +static void hisi_pa_pmu_enable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_pa_pmu_enable_tracetag(event); + hisi_pa_pmu_config_srcid(event); + hisi_pa_pmu_config_tgtid(event); + } +} + +static void hisi_pa_pmu_disable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_pa_pmu_clear_tgtid(event); + hisi_pa_pmu_clear_srcid(event); + hisi_pa_pmu_clear_tracetag(event); + } +} + +static u32 hisi_pa_pmu_get_counter_offset(int idx) +{ + return (PA_EVENT_CNT0_L + idx * 8); +} + +static u64 hisi_pa_pmu_read_counter(struct hisi_pmu *pa_pmu, + struct hw_perf_event *hwc) +{ + return 
readq(pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_pa_pmu_write_counter(struct hisi_pmu *pa_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, pa_pmu->base + hisi_pa_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_pa_pmu_write_evtype(struct hisi_pmu *pa_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(PA_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * PA_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * PA_EVENT_TYPE1 is chosen. + */ + reg = PA_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to pa_EVENT_TYPEx Register */ + val = readl(pa_pmu->base + reg); + val &= ~(PA_EVTYPE_MASK << shift); + val |= (type << shift); + writel(val, pa_pmu->base + reg); +} + +static void hisi_pa_pmu_start_counters(struct hisi_pmu *pa_pmu) +{ + u32 val; + + val = readl(pa_pmu->base + PA_PERF_CTRL); + val |= PA_PERF_CTRL_EN; + writel(val, pa_pmu->base + PA_PERF_CTRL); +} + +static void hisi_pa_pmu_stop_counters(struct hisi_pmu *pa_pmu) +{ + u32 val; + + val = readl(pa_pmu->base + PA_PERF_CTRL); + val &= ~(PA_PERF_CTRL_EN); + writel(val, pa_pmu->base + PA_PERF_CTRL); +} + +static void hisi_pa_pmu_enable_counter(struct hisi_pmu *pa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Enable counter index in PA_EVENT_CTRL register */ + val = readl(pa_pmu->base + PA_EVENT_CTRL); + val |= 1 << hwc->idx; + writel(val, pa_pmu->base + PA_EVENT_CTRL); +} + +static void hisi_pa_pmu_disable_counter(struct hisi_pmu *pa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Clear counter index in PA_EVENT_CTRL register */ + val = readl(pa_pmu->base + PA_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, pa_pmu->base + PA_EVENT_CTRL); +} + +static void hisi_pa_pmu_enable_counter_int(struct hisi_pmu *pa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 0 to enable interrupt */ + val = readl(pa_pmu->base + PA_INT_MASK); + val &= ~(1 << hwc->idx); + writel(val, pa_pmu->base + PA_INT_MASK); +} + +static void hisi_pa_pmu_disable_counter_int(struct hisi_pmu *pa_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + /* Write 1 to mask interrupt */ + val = readl(pa_pmu->base + PA_INT_MASK); + val |= 1 << hwc->idx; + writel(val, pa_pmu->base + PA_INT_MASK); +} + +static u32 hisi_pa_pmu_get_int_status(struct hisi_pmu *pa_pmu) +{ + return readl(pa_pmu->base + PA_INT_STATUS); +} + +static void hisi_pa_pmu_clear_int_status(struct hisi_pmu *pa_pmu, int idx) +{ + writel(1 << idx, pa_pmu->base + PA_INT_CLEAR); +} + +static const struct acpi_device_id hisi_pa_pmu_acpi_match[] = { + { "HISI0273", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_pa_pmu_acpi_match); + +static int hisi_pa_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *pa_pmu) +{ + /* + * As PA PMU is in a SICL, use the SICL_ID and the index ID + * to identify the PA PMU. 
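+ * A SICL has no CPU affinity, which is why sccl_id and ccl_id are
+ * both set to -1 below and any online CPU may be chosen to handle
+ * this PMU's events.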
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &pa_pmu->sicl_id)) { + dev_err(&pdev->dev, "Cannot read sicl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id", + &pa_pmu->index_id)) { + dev_err(&pdev->dev, "Cannot read idx-id!\n"); + return -EINVAL; + } + + pa_pmu->ccl_id = -1; + pa_pmu->sccl_id = -1; + + pa_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(pa_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for pa_pmu resource.\n"); + return PTR_ERR(pa_pmu->base); + } + + pa_pmu->identifier = readl(pa_pmu->base + PA_PMU_VERSION); + + return 0; +} + +static struct attribute *hisi_pa_pmu_v2_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + HISI_PMU_FORMAT_ATTR(tgtid_cmd, "config1:0-10"), + HISI_PMU_FORMAT_ATTR(tgtid_msk, "config1:11-21"), + HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"), + HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"), + HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"), + NULL, +}; + +static const struct attribute_group hisi_pa_pmu_v2_format_group = { + .name = "format", + .attrs = hisi_pa_pmu_v2_format_attr, +}; + +static struct attribute *hisi_pa_pmu_v2_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_req, 0x40), + HISI_PMU_EVENT_ATTR(tx_req, 0x5c), + HISI_PMU_EVENT_ATTR(cycle, 0x78), + NULL +}; + +static const struct attribute_group hisi_pa_pmu_v2_events_group = { + .name = "events", + .attrs = hisi_pa_pmu_v2_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_pa_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_pa_pmu_cpumask_attr_group = { + .attrs = hisi_pa_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_pa_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_pa_pmu_identifier_attrs[] = { + &hisi_pa_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_pa_pmu_identifier_group = { + .attrs = hisi_pa_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_pa_pmu_v2_attr_groups[] = { + &hisi_pa_pmu_v2_format_group, + &hisi_pa_pmu_v2_events_group, + &hisi_pa_pmu_cpumask_attr_group, + &hisi_pa_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_pa_ops = { + .write_evtype = hisi_pa_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_pa_pmu_start_counters, + .stop_counters = hisi_pa_pmu_stop_counters, + .enable_counter = hisi_pa_pmu_enable_counter, + .disable_counter = hisi_pa_pmu_disable_counter, + .enable_counter_int = hisi_pa_pmu_enable_counter_int, + .disable_counter_int = hisi_pa_pmu_disable_counter_int, + .write_counter = hisi_pa_pmu_write_counter, + .read_counter = hisi_pa_pmu_read_counter, + .get_int_status = hisi_pa_pmu_get_int_status, + .clear_int_status = hisi_pa_pmu_clear_int_status, + .enable_filter = hisi_pa_pmu_enable_filter, + .disable_filter = hisi_pa_pmu_disable_filter, +}; + +static int hisi_pa_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *pa_pmu) +{ + int ret; + + ret = hisi_pa_pmu_init_data(pdev, pa_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(pa_pmu, pdev); + if (ret) + return ret; + + pa_pmu->pmu_events.attr_groups = hisi_pa_pmu_v2_attr_groups; + pa_pmu->num_counters = PA_NR_COUNTERS; + pa_pmu->ops = &hisi_uncore_pa_ops; + pa_pmu->check_event = 0xB0; + pa_pmu->counter_bits = 64; + pa_pmu->dev = 
&pdev->dev; + pa_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_pa_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *pa_pmu; + char *name; + int ret; + + pa_pmu = devm_kzalloc(&pdev->dev, sizeof(*pa_pmu), GFP_KERNEL); + if (!pa_pmu) + return -ENOMEM; + + ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u", + pa_pmu->sicl_id, pa_pmu->index_id); + if (!name) + return -ENOMEM; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + &pa_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE); + ret = perf_pmu_register(&pa_pmu->pmu, name, -1); + if (ret) { + dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + &pa_pmu->node); + return ret; + } + + platform_set_drvdata(pdev, pa_pmu); + return ret; +} + +static int hisi_pa_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *pa_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&pa_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + &pa_pmu->node); + return 0; +} + +static struct platform_driver hisi_pa_pmu_driver = { + .driver = { + .name = "hisi_pa_pmu", + .acpi_match_table = hisi_pa_pmu_acpi_match, + .suppress_bind_attrs = true, + }, + .probe = hisi_pa_pmu_probe, + .remove = hisi_pa_pmu_remove, +}; + +static int __init hisi_pa_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE, + "AP_PERF_ARM_HISI_PA_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("PA PMU: cpuhp state setup failed, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_pa_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE); + + return ret; +} +module_init(hisi_pa_pmu_module_init); + +static void __exit hisi_pa_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_pa_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_PA_ONLINE); +} +module_exit(hisi_pa_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon Protocol Adapter uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); +MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c new file mode 100644 index 000000000..fbc8a93d5 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SoC Hardware event counters support + * + * Copyright (C) 2017 HiSilicon Limited + * Author: Anurup M <anurup.m@huawei.com> + * Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
+ */ +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> + +#include <asm/cputype.h> +#include <asm/local64.h> + +#include "hisi_uncore_pmu.h" + +#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff) +#define HISI_MAX_PERIOD(nr) (GENMASK_ULL((nr) - 1, 0)) + +/* + * PMU format attributes + */ +ssize_t hisi_format_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} +EXPORT_SYMBOL_GPL(hisi_format_sysfs_show); + +/* + * PMU event attributes + */ +ssize_t hisi_event_sysfs_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(page, "config=0x%lx\n", (unsigned long)eattr->var); +} +EXPORT_SYMBOL_GPL(hisi_event_sysfs_show); + +/* + * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show + */ +ssize_t hisi_cpumask_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%d\n", hisi_pmu->on_cpu); +} +EXPORT_SYMBOL_GPL(hisi_cpumask_sysfs_show); + +static bool hisi_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + /* Include count for the event */ + int counters = 1; + + if (!is_software_event(leader)) { + /* + * We must NOT create groups containing mixed PMUs, although + * software events are acceptable + */ + if (leader->pmu != event->pmu) + return false; + + /* Increment counter for the leader */ + if (leader != event) + counters++; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + if (sibling->pmu != event->pmu) + return false; + /* Increment counter for each sibling */ + counters++; + } + + /* The group can not count events more than the counters in the HW */ + return counters <= hisi_pmu->num_counters; +} + +int hisi_uncore_pmu_get_event_idx(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + unsigned long *used_mask = hisi_pmu->pmu_events.used_mask; + u32 num_counters = hisi_pmu->num_counters; + int idx; + + idx = find_first_zero_bit(used_mask, num_counters); + if (idx == num_counters) + return -EAGAIN; + + set_bit(idx, used_mask); + + return idx; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_get_event_idx); + +ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(page, "0x%08x\n", hisi_pmu->identifier); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_identifier_attr_show); + +static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx) +{ + clear_bit(idx, hisi_pmu->pmu_events.used_mask); +} + +static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data) +{ + struct hisi_pmu *hisi_pmu = data; + struct perf_event *event; + unsigned long overflown; + int idx; + + overflown = hisi_pmu->ops->get_int_status(hisi_pmu); + if (!overflown) + return IRQ_NONE; + + /* + * Find the counter index which overflowed if the bit was set + * and handle it. 
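+ * Handling means folding the overflowed count into the perf event
+ * and re-arming the counter at half its range for the next period.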
+ */ + for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) { + /* Write 1 to clear the IRQ status flag */ + hisi_pmu->ops->clear_int_status(hisi_pmu, idx); + /* Get the corresponding event struct */ + event = hisi_pmu->pmu_events.hw_events[idx]; + if (!event) + continue; + + hisi_uncore_pmu_event_update(event); + hisi_uncore_pmu_set_event_period(event); + } + + return IRQ_HANDLED; +} + +int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, + struct platform_device *pdev) +{ + int irq, ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), hisi_pmu); + if (ret < 0) { + dev_err(&pdev->dev, + "Fail to request IRQ: %d ret: %d.\n", irq, ret); + return ret; + } + + hisi_pmu->irq = irq; + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_init_irq); + +int hisi_uncore_pmu_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct hisi_pmu *hisi_pmu; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * We do not support sampling as the counters are all + * shared by all CPU cores in a CPU die(SCCL). Also we + * do not support attach to a task(per-process mode) + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + /* + * The uncore counters not specific to any CPU, so cannot + * support per-task + */ + if (event->cpu < 0) + return -EINVAL; + + /* + * Validate if the events in group does not exceed the + * available counters in hardware. + */ + if (!hisi_validate_event_group(event)) + return -EINVAL; + + hisi_pmu = to_hisi_pmu(event->pmu); + if (event->attr.config > hisi_pmu->check_event) + return -EINVAL; + + if (hisi_pmu->on_cpu == -1) + return -EINVAL; + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. + */ + hwc->idx = -1; + hwc->config_base = event->attr.config; + + /* Enforce to use the same CPU for all events in this PMU */ + event->cpu = hisi_pmu->on_cpu; + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_init); + +/* + * Set the counter to count the event that we're interested in, + * and enable interrupt and counter. + */ +static void hisi_uncore_pmu_enable_event(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pmu->ops->write_evtype(hisi_pmu, hwc->idx, + HISI_GET_EVENTID(event)); + + if (hisi_pmu->ops->enable_filter) + hisi_pmu->ops->enable_filter(event); + + hisi_pmu->ops->enable_counter_int(hisi_pmu, hwc); + hisi_pmu->ops->enable_counter(hisi_pmu, hwc); +} + +/* + * Disable counter and interrupt. + */ +static void hisi_uncore_pmu_disable_event(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_pmu->ops->disable_counter(hisi_pmu, hwc); + hisi_pmu->ops->disable_counter_int(hisi_pmu, hwc); + + if (hisi_pmu->ops->disable_filter) + hisi_pmu->ops->disable_filter(event); +} + +void hisi_uncore_pmu_set_event_period(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * The HiSilicon PMU counters support 32 bits or 48 bits, depending on + * the PMU. We reduce it to 2^(counter_bits - 1) to account for the + * extreme interrupt latency. 
So we could hopefully handle the overflow + * interrupt before another 2^(counter_bits - 1) events occur and the + * counter overtakes its previous value. + */ + u64 val = BIT_ULL(hisi_pmu->counter_bits - 1); + + local64_set(&hwc->prev_count, val); + /* Write start value to the hardware event counter */ + hisi_pmu->ops->write_counter(hisi_pmu, hwc, val); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_set_event_period); + +void hisi_uncore_pmu_event_update(struct perf_event *event) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + + do { + /* Read the count from the counter register */ + new_raw_count = hisi_pmu->ops->read_counter(hisi_pmu, hwc); + prev_raw_count = local64_read(&hwc->prev_count); + } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count); + /* + * compute the delta + */ + delta = (new_raw_count - prev_raw_count) & + HISI_MAX_PERIOD(hisi_pmu->counter_bits); + local64_add(delta, &event->count); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_event_update); + +void hisi_uncore_pmu_start(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + hisi_uncore_pmu_set_event_period(event); + + if (flags & PERF_EF_RELOAD) { + u64 prev_raw_count = local64_read(&hwc->prev_count); + + hisi_pmu->ops->write_counter(hisi_pmu, hwc, prev_raw_count); + } + + hisi_uncore_pmu_enable_event(event); + perf_event_update_userpage(event); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_start); + +void hisi_uncore_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + hisi_uncore_pmu_disable_event(event); + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hisi_uncore_pmu_event_update(event); + hwc->state |= PERF_HES_UPTODATE; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_stop); + +int hisi_uncore_pmu_add(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Get an available counter index for counting */ + idx = hisi_pmu->ops->get_event_idx(event); + if (idx < 0) + return idx; + + event->hw.idx = idx; + hisi_pmu->pmu_events.hw_events[idx] = event; + + if (flags & PERF_EF_START) + hisi_uncore_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_add); + +void hisi_uncore_pmu_del(struct perf_event *event, int flags) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hisi_uncore_pmu_stop(event, PERF_EF_UPDATE); + hisi_uncore_pmu_clear_event_idx(hisi_pmu, hwc->idx); + perf_event_update_userpage(event); + hisi_pmu->pmu_events.hw_events[hwc->idx] = NULL; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_del); + +void hisi_uncore_pmu_read(struct perf_event *event) +{ + /* Read hardware counter and update the perf counter statistics */ + hisi_uncore_pmu_event_update(event); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_read); + +void hisi_uncore_pmu_enable(struct pmu *pmu) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); + bool enabled = !bitmap_empty(hisi_pmu->pmu_events.used_mask, + 
hisi_pmu->num_counters); + + if (!enabled) + return; + + hisi_pmu->ops->start_counters(hisi_pmu); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_enable); + +void hisi_uncore_pmu_disable(struct pmu *pmu) +{ + struct hisi_pmu *hisi_pmu = to_hisi_pmu(pmu); + + hisi_pmu->ops->stop_counters(hisi_pmu); +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_disable); + + +/* + * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] + */ +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) +{ + u64 mpidr = read_cpuid_mpidr(); + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; + } else { + sccl = aff2; + ccl = aff1; + } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; +} + +/* + * Check whether the CPU is associated with this uncore PMU + */ +static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu) +{ + int sccl_id, ccl_id; + + /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */ + if (hisi_pmu->sccl_id == -1) + return true; + + if (hisi_pmu->ccl_id == -1) { + /* If CCL_ID is -1, the PMU only shares the same SCCL */ + hisi_read_sccl_and_ccl_id(&sccl_id, NULL); + + return sccl_id == hisi_pmu->sccl_id; + } + + hisi_read_sccl_and_ccl_id(&sccl_id, &ccl_id); + + return sccl_id == hisi_pmu->sccl_id && ccl_id == hisi_pmu->ccl_id; +} + +int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu, + node); + + if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) + return 0; + + cpumask_set_cpu(cpu, &hisi_pmu->associated_cpus); + + /* If another CPU is already managing this PMU, simply return. 
*/ + if (hisi_pmu->on_cpu != -1) + return 0; + + /* Use this CPU in cpumask for event counting */ + hisi_pmu->on_cpu = cpu; + + /* Overflow interrupt also should use the same CPU */ + WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu))); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_online_cpu); + +int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hisi_pmu *hisi_pmu = hlist_entry_safe(node, struct hisi_pmu, + node); + cpumask_t pmu_online_cpus; + unsigned int target; + + if (!cpumask_test_and_clear_cpu(cpu, &hisi_pmu->associated_cpus)) + return 0; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hisi_pmu->on_cpu != cpu) + return 0; + + /* Give up ownership of the PMU */ + hisi_pmu->on_cpu = -1; + + /* Choose a new CPU to migrate ownership of the PMU to */ + cpumask_and(&pmu_online_cpus, &hisi_pmu->associated_cpus, + cpu_online_mask); + target = cpumask_any_but(&pmu_online_cpus, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target); + /* Use this CPU for event counting */ + hisi_pmu->on_cpu = target; + WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target))); + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu); + +void hisi_pmu_init(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups, struct module *module) +{ + pmu->name = name; + pmu->module = module; + pmu->task_ctx_nr = perf_invalid_context; + pmu->event_init = hisi_uncore_pmu_event_init; + pmu->pmu_enable = hisi_uncore_pmu_enable; + pmu->pmu_disable = hisi_uncore_pmu_disable; + pmu->add = hisi_uncore_pmu_add; + pmu->del = hisi_uncore_pmu_del; + pmu->start = hisi_uncore_pmu_start; + pmu->stop = hisi_uncore_pmu_stop; + pmu->read = hisi_uncore_pmu_read; + pmu->attr_groups = attr_groups; +} +EXPORT_SYMBOL_GPL(hisi_pmu_init); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h new file mode 100644 index 000000000..b59de33cd --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HiSilicon SoC Hardware event counters support + * + * Copyright (C) 2017 HiSilicon Limited + * Author: Anurup M <anurup.m@huawei.com> + * Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. 
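+ *
+ * A typical uncore PMU driver is expected to use this framework roughly as
+ * follows (a sketch only; hisi_uncore_sllc_pmu.c below is a complete
+ * example):
+ *
+ *  1. fill a struct hisi_uncore_ops with the device specific register
+ *     accessors (write_evtype, read_counter, start_counters, ...);
+ *  2. call hisi_uncore_pmu_init_irq() to request the overflow interrupt;
+ *  3. add the instance to its CPU hotplug state so that
+ *     hisi_uncore_pmu_online_cpu()/hisi_uncore_pmu_offline_cpu() can pick
+ *     the owning CPU;
+ *  4. call hisi_pmu_init() to fill in the struct pmu callbacks and then
+ *     perf_pmu_register() to register the PMU.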
+ */ +#ifndef __HISI_UNCORE_PMU_H__ +#define __HISI_UNCORE_PMU_H__ + +#include <linux/bitfield.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/types.h> + +#undef pr_fmt +#define pr_fmt(fmt) "hisi_pmu: " fmt + +#define HISI_PMU_V2 0x30 +#define HISI_MAX_COUNTERS 0x10 +#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu)) + +#define HISI_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HISI_PMU_FORMAT_ATTR(_name, _config) \ + HISI_PMU_ATTR(_name, hisi_format_sysfs_show, (void *)_config) +#define HISI_PMU_EVENT_ATTR(_name, _config) \ + HISI_PMU_ATTR(_name, hisi_event_sysfs_show, (unsigned long)_config) + +#define HISI_PMU_EVENT_ATTR_EXTRACTOR(name, config, hi, lo) \ + static inline u32 hisi_get_##name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \ + } + +struct hisi_pmu; + +struct hisi_uncore_ops { + void (*write_evtype)(struct hisi_pmu *, int, u32); + int (*get_event_idx)(struct perf_event *); + u64 (*read_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*write_counter)(struct hisi_pmu *, struct hw_perf_event *, u64); + void (*enable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter)(struct hisi_pmu *, struct hw_perf_event *); + void (*enable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*disable_counter_int)(struct hisi_pmu *, struct hw_perf_event *); + void (*start_counters)(struct hisi_pmu *); + void (*stop_counters)(struct hisi_pmu *); + u32 (*get_int_status)(struct hisi_pmu *hisi_pmu); + void (*clear_int_status)(struct hisi_pmu *hisi_pmu, int idx); + void (*enable_filter)(struct perf_event *event); + void (*disable_filter)(struct perf_event *event); +}; + +struct hisi_pmu_hwevents { + struct perf_event *hw_events[HISI_MAX_COUNTERS]; + DECLARE_BITMAP(used_mask, HISI_MAX_COUNTERS); + const struct attribute_group **attr_groups; +}; + +/* Generic pmu struct for different pmu types */ +struct hisi_pmu { + struct pmu pmu; + const struct hisi_uncore_ops *ops; + struct hisi_pmu_hwevents pmu_events; + /* associated_cpus: All CPUs associated with the PMU */ + cpumask_t associated_cpus; + /* CPU used for counting */ + int on_cpu; + int irq; + struct device *dev; + struct hlist_node node; + int sccl_id; + int sicl_id; + int ccl_id; + void __iomem *base; + /* the ID of the PMU modules */ + u32 index_id; + /* For DDRC PMU v2: each DDRC has more than one DMC */ + u32 sub_id; + int num_counters; + int counter_bits; + /* check event code range */ + int check_event; + u32 identifier; +}; + +int hisi_uncore_pmu_get_event_idx(struct perf_event *event); +void hisi_uncore_pmu_read(struct perf_event *event); +int hisi_uncore_pmu_add(struct perf_event *event, int flags); +void hisi_uncore_pmu_del(struct perf_event *event, int flags); +void hisi_uncore_pmu_start(struct perf_event *event, int flags); +void hisi_uncore_pmu_stop(struct perf_event *event, int flags); +void hisi_uncore_pmu_set_event_period(struct perf_event *event); +void hisi_uncore_pmu_event_update(struct perf_event *event); +int hisi_uncore_pmu_event_init(struct perf_event *event); +void hisi_uncore_pmu_enable(struct pmu *pmu); +void hisi_uncore_pmu_disable(struct pmu *pmu); +ssize_t hisi_event_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); 
+ssize_t hisi_format_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hisi_cpumask_sysfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +int hisi_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *node); +int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node); + +ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev, + struct device_attribute *attr, + char *page); +int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu, + struct platform_device *pdev); + +void hisi_pmu_init(struct pmu *pmu, const char *name, + const struct attribute_group **attr_groups, struct module *module); +#endif /* __HISI_UNCORE_PMU_H__ */ diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c new file mode 100644 index 000000000..7d363d475 --- /dev/null +++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HiSilicon SLLC uncore Hardware event counters support + * + * Copyright (C) 2020 HiSilicon Limited + * Author: Shaokun Zhang <zhangshaokun@hisilicon.com> + * + * This code is based on the uncore PMUs like arm-cci and arm-ccn. + */ +#include <linux/acpi.h> +#include <linux/cpuhotplug.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/smp.h> + +#include "hisi_uncore_pmu.h" + +/* SLLC register definition */ +#define SLLC_INT_MASK 0x0814 +#define SLLC_INT_STATUS 0x0818 +#define SLLC_INT_CLEAR 0x081c +#define SLLC_PERF_CTRL 0x1c00 +#define SLLC_SRCID_CTRL 0x1c04 +#define SLLC_TGTID_CTRL 0x1c08 +#define SLLC_EVENT_CTRL 0x1c14 +#define SLLC_EVENT_TYPE0 0x1c18 +#define SLLC_VERSION 0x1cf0 +#define SLLC_EVENT_CNT0_L 0x1d00 + +#define SLLC_EVTYPE_MASK 0xff +#define SLLC_PERF_CTRL_EN BIT(0) +#define SLLC_FILT_EN BIT(1) +#define SLLC_TRACETAG_EN BIT(2) +#define SLLC_SRCID_EN BIT(4) +#define SLLC_SRCID_NONE 0x0 +#define SLLC_TGTID_EN BIT(5) +#define SLLC_TGTID_NONE 0x0 +#define SLLC_TGTID_MIN_SHIFT 1 +#define SLLC_TGTID_MAX_SHIFT 12 +#define SLLC_SRCID_CMD_SHIFT 1 +#define SLLC_SRCID_MSK_SHIFT 12 +#define SLLC_NR_EVENTS 0x80 + +HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_min, config1, 10, 0); +HISI_PMU_EVENT_ATTR_EXTRACTOR(tgtid_max, config1, 21, 11); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 32, 22); +HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 43, 33); +HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 44, 44); + +static bool tgtid_is_valid(u32 max, u32 min) +{ + return max > 0 && max >= min; +} + +static void hisi_sllc_pmu_enable_tracetag(struct perf_event *event) +{ + struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu); + u32 tt_en = hisi_get_tracetag_en(event); + + if (tt_en) { + u32 val; + + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val |= SLLC_TRACETAG_EN | SLLC_FILT_EN; + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); + } +} + +static void hisi_sllc_pmu_disable_tracetag(struct perf_event *event) +{ + struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu); + u32 tt_en = hisi_get_tracetag_en(event); + + if (tt_en) { + u32 val; + + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val &= ~(SLLC_TRACETAG_EN | SLLC_FILT_EN); + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); + } +} + +static void hisi_sllc_pmu_config_tgtid(struct perf_event *event) +{ + struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu); + u32 min = hisi_get_tgtid_min(event); + u32 max = hisi_get_tgtid_max(event); + + if (tgtid_is_valid(max, min)) { + u32 val = (max << SLLC_TGTID_MAX_SHIFT) | 
(min << SLLC_TGTID_MIN_SHIFT); + + writel(val, sllc_pmu->base + SLLC_TGTID_CTRL); + /* Enable the tgtid */ + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val |= SLLC_TGTID_EN | SLLC_FILT_EN; + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); + } +} + +static void hisi_sllc_pmu_clear_tgtid(struct perf_event *event) +{ + struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu); + u32 min = hisi_get_tgtid_min(event); + u32 max = hisi_get_tgtid_max(event); + + if (tgtid_is_valid(max, min)) { + u32 val; + + writel(SLLC_TGTID_NONE, sllc_pmu->base + SLLC_TGTID_CTRL); + /* Disable the tgtid */ + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val &= ~(SLLC_TGTID_EN | SLLC_FILT_EN); + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); + } +} + +static void hisi_sllc_pmu_config_srcid(struct perf_event *event) +{ + struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_srcid_cmd(event); + + if (cmd) { + u32 val, msk; + + msk = hisi_get_srcid_msk(event); + val = (cmd << SLLC_SRCID_CMD_SHIFT) | (msk << SLLC_SRCID_MSK_SHIFT); + writel(val, sllc_pmu->base + SLLC_SRCID_CTRL); + /* Enable the srcid */ + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val |= SLLC_SRCID_EN | SLLC_FILT_EN; + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); + } +} + +static void hisi_sllc_pmu_clear_srcid(struct perf_event *event) +{ + struct hisi_pmu *sllc_pmu = to_hisi_pmu(event->pmu); + u32 cmd = hisi_get_srcid_cmd(event); + + if (cmd) { + u32 val; + + writel(SLLC_SRCID_NONE, sllc_pmu->base + SLLC_SRCID_CTRL); + /* Disable the srcid */ + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val &= ~(SLLC_SRCID_EN | SLLC_FILT_EN); + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); + } +} + +static void hisi_sllc_pmu_enable_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_sllc_pmu_enable_tracetag(event); + hisi_sllc_pmu_config_srcid(event); + hisi_sllc_pmu_config_tgtid(event); + } +} + +static void hisi_sllc_pmu_clear_filter(struct perf_event *event) +{ + if (event->attr.config1 != 0x0) { + hisi_sllc_pmu_disable_tracetag(event); + hisi_sllc_pmu_clear_srcid(event); + hisi_sllc_pmu_clear_tgtid(event); + } +} + +static u32 hisi_sllc_pmu_get_counter_offset(int idx) +{ + return (SLLC_EVENT_CNT0_L + idx * 8); +} + +static u64 hisi_sllc_pmu_read_counter(struct hisi_pmu *sllc_pmu, + struct hw_perf_event *hwc) +{ + return readq(sllc_pmu->base + + hisi_sllc_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_sllc_pmu_write_counter(struct hisi_pmu *sllc_pmu, + struct hw_perf_event *hwc, u64 val) +{ + writeq(val, sllc_pmu->base + + hisi_sllc_pmu_get_counter_offset(hwc->idx)); +} + +static void hisi_sllc_pmu_write_evtype(struct hisi_pmu *sllc_pmu, int idx, + u32 type) +{ + u32 reg, reg_idx, shift, val; + + /* + * Select the appropriate event select register(SLLC_EVENT_TYPE0/1). + * There are 2 event select registers for the 8 hardware counters. + * Event code is 8-bits and for the former 4 hardware counters, + * SLLC_EVENT_TYPE0 is chosen. For the latter 4 hardware counters, + * SLLC_EVENT_TYPE1 is chosen. 
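+ * For example, the event code for counter 5 lands in bits 15:8 of the
+ * register at SLLC_EVENT_TYPE0 + 4 (i.e. SLLC_EVENT_TYPE1), since
+ * reg = SLLC_EVENT_TYPE0 + (5 / 4) * 4 and shift = 8 * (5 % 4) = 8.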
+ */ + reg = SLLC_EVENT_TYPE0 + (idx / 4) * 4; + reg_idx = idx % 4; + shift = 8 * reg_idx; + + /* Write event code to SLLC_EVENT_TYPEx Register */ + val = readl(sllc_pmu->base + reg); + val &= ~(SLLC_EVTYPE_MASK << shift); + val |= (type << shift); + writel(val, sllc_pmu->base + reg); +} + +static void hisi_sllc_pmu_start_counters(struct hisi_pmu *sllc_pmu) +{ + u32 val; + + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val |= SLLC_PERF_CTRL_EN; + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); +} + +static void hisi_sllc_pmu_stop_counters(struct hisi_pmu *sllc_pmu) +{ + u32 val; + + val = readl(sllc_pmu->base + SLLC_PERF_CTRL); + val &= ~(SLLC_PERF_CTRL_EN); + writel(val, sllc_pmu->base + SLLC_PERF_CTRL); +} + +static void hisi_sllc_pmu_enable_counter(struct hisi_pmu *sllc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(sllc_pmu->base + SLLC_EVENT_CTRL); + val |= 1 << hwc->idx; + writel(val, sllc_pmu->base + SLLC_EVENT_CTRL); +} + +static void hisi_sllc_pmu_disable_counter(struct hisi_pmu *sllc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(sllc_pmu->base + SLLC_EVENT_CTRL); + val &= ~(1 << hwc->idx); + writel(val, sllc_pmu->base + SLLC_EVENT_CTRL); +} + +static void hisi_sllc_pmu_enable_counter_int(struct hisi_pmu *sllc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(sllc_pmu->base + SLLC_INT_MASK); + /* Write 0 to enable interrupt */ + val &= ~(1 << hwc->idx); + writel(val, sllc_pmu->base + SLLC_INT_MASK); +} + +static void hisi_sllc_pmu_disable_counter_int(struct hisi_pmu *sllc_pmu, + struct hw_perf_event *hwc) +{ + u32 val; + + val = readl(sllc_pmu->base + SLLC_INT_MASK); + /* Write 1 to mask interrupt */ + val |= 1 << hwc->idx; + writel(val, sllc_pmu->base + SLLC_INT_MASK); +} + +static u32 hisi_sllc_pmu_get_int_status(struct hisi_pmu *sllc_pmu) +{ + return readl(sllc_pmu->base + SLLC_INT_STATUS); +} + +static void hisi_sllc_pmu_clear_int_status(struct hisi_pmu *sllc_pmu, int idx) +{ + writel(1 << idx, sllc_pmu->base + SLLC_INT_CLEAR); +} + +static const struct acpi_device_id hisi_sllc_pmu_acpi_match[] = { + { "HISI0263", }, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_sllc_pmu_acpi_match); + +static int hisi_sllc_pmu_init_data(struct platform_device *pdev, + struct hisi_pmu *sllc_pmu) +{ + /* + * Use the SCCL_ID and the index ID to identify the SLLC PMU, + * while SCCL_ID is from MPIDR_EL1 by CPU. 
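+ * The same two values form the name the PMU is registered under, e.g. a
+ * hypothetical hisi_sccl3_sllc0 for SCCL 3, index 0 (see
+ * hisi_sllc_pmu_probe() below).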
+ */ + if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id", + &sllc_pmu->sccl_id)) { + dev_err(&pdev->dev, "Cannot read sccl-id!\n"); + return -EINVAL; + } + + if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id", + &sllc_pmu->index_id)) { + dev_err(&pdev->dev, "Cannot read idx-id!\n"); + return -EINVAL; + } + + /* SLLC PMUs only share the same SCCL */ + sllc_pmu->ccl_id = -1; + + sllc_pmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(sllc_pmu->base)) { + dev_err(&pdev->dev, "ioremap failed for sllc_pmu resource.\n"); + return PTR_ERR(sllc_pmu->base); + } + + sllc_pmu->identifier = readl(sllc_pmu->base + SLLC_VERSION); + + return 0; +} + +static struct attribute *hisi_sllc_pmu_v2_format_attr[] = { + HISI_PMU_FORMAT_ATTR(event, "config:0-7"), + HISI_PMU_FORMAT_ATTR(tgtid_min, "config1:0-10"), + HISI_PMU_FORMAT_ATTR(tgtid_max, "config1:11-21"), + HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:22-32"), + HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:33-43"), + HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:44"), + NULL +}; + +static const struct attribute_group hisi_sllc_pmu_v2_format_group = { + .name = "format", + .attrs = hisi_sllc_pmu_v2_format_attr, +}; + +static struct attribute *hisi_sllc_pmu_v2_events_attr[] = { + HISI_PMU_EVENT_ATTR(rx_req, 0x30), + HISI_PMU_EVENT_ATTR(rx_data, 0x31), + HISI_PMU_EVENT_ATTR(tx_req, 0x34), + HISI_PMU_EVENT_ATTR(tx_data, 0x35), + HISI_PMU_EVENT_ATTR(cycles, 0x09), + NULL +}; + +static const struct attribute_group hisi_sllc_pmu_v2_events_group = { + .name = "events", + .attrs = hisi_sllc_pmu_v2_events_attr, +}; + +static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL); + +static struct attribute *hisi_sllc_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static const struct attribute_group hisi_sllc_pmu_cpumask_attr_group = { + .attrs = hisi_sllc_pmu_cpumask_attrs, +}; + +static struct device_attribute hisi_sllc_pmu_identifier_attr = + __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL); + +static struct attribute *hisi_sllc_pmu_identifier_attrs[] = { + &hisi_sllc_pmu_identifier_attr.attr, + NULL +}; + +static const struct attribute_group hisi_sllc_pmu_identifier_group = { + .attrs = hisi_sllc_pmu_identifier_attrs, +}; + +static const struct attribute_group *hisi_sllc_pmu_v2_attr_groups[] = { + &hisi_sllc_pmu_v2_format_group, + &hisi_sllc_pmu_v2_events_group, + &hisi_sllc_pmu_cpumask_attr_group, + &hisi_sllc_pmu_identifier_group, + NULL +}; + +static const struct hisi_uncore_ops hisi_uncore_sllc_ops = { + .write_evtype = hisi_sllc_pmu_write_evtype, + .get_event_idx = hisi_uncore_pmu_get_event_idx, + .start_counters = hisi_sllc_pmu_start_counters, + .stop_counters = hisi_sllc_pmu_stop_counters, + .enable_counter = hisi_sllc_pmu_enable_counter, + .disable_counter = hisi_sllc_pmu_disable_counter, + .enable_counter_int = hisi_sllc_pmu_enable_counter_int, + .disable_counter_int = hisi_sllc_pmu_disable_counter_int, + .write_counter = hisi_sllc_pmu_write_counter, + .read_counter = hisi_sllc_pmu_read_counter, + .get_int_status = hisi_sllc_pmu_get_int_status, + .clear_int_status = hisi_sllc_pmu_clear_int_status, + .enable_filter = hisi_sllc_pmu_enable_filter, + .disable_filter = hisi_sllc_pmu_clear_filter, +}; + +static int hisi_sllc_pmu_dev_probe(struct platform_device *pdev, + struct hisi_pmu *sllc_pmu) +{ + int ret; + + ret = hisi_sllc_pmu_init_data(pdev, sllc_pmu); + if (ret) + return ret; + + ret = hisi_uncore_pmu_init_irq(sllc_pmu, pdev); + if (ret) + return ret; + + sllc_pmu->pmu_events.attr_groups 
= hisi_sllc_pmu_v2_attr_groups; + sllc_pmu->ops = &hisi_uncore_sllc_ops; + sllc_pmu->check_event = SLLC_NR_EVENTS; + sllc_pmu->counter_bits = 64; + sllc_pmu->num_counters = 8; + sllc_pmu->dev = &pdev->dev; + sllc_pmu->on_cpu = -1; + + return 0; +} + +static int hisi_sllc_pmu_probe(struct platform_device *pdev) +{ + struct hisi_pmu *sllc_pmu; + char *name; + int ret; + + sllc_pmu = devm_kzalloc(&pdev->dev, sizeof(*sllc_pmu), GFP_KERNEL); + if (!sllc_pmu) + return -ENOMEM; + + ret = hisi_sllc_pmu_dev_probe(pdev, sllc_pmu); + if (ret) + return ret; + + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_sllc%u", + sllc_pmu->sccl_id, sllc_pmu->index_id); + if (!name) + return -ENOMEM; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, + &sllc_pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + return ret; + } + + hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE); + + ret = perf_pmu_register(&sllc_pmu->pmu, name, -1); + if (ret) { + dev_err(sllc_pmu->dev, "PMU register failed, ret = %d\n", ret); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, + &sllc_pmu->node); + return ret; + } + + platform_set_drvdata(pdev, sllc_pmu); + + return ret; +} + +static int hisi_sllc_pmu_remove(struct platform_device *pdev) +{ + struct hisi_pmu *sllc_pmu = platform_get_drvdata(pdev); + + perf_pmu_unregister(&sllc_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, + &sllc_pmu->node); + return 0; +} + +static struct platform_driver hisi_sllc_pmu_driver = { + .driver = { + .name = "hisi_sllc_pmu", + .acpi_match_table = hisi_sllc_pmu_acpi_match, + .suppress_bind_attrs = true, + }, + .probe = hisi_sllc_pmu_probe, + .remove = hisi_sllc_pmu_remove, +}; + +static int __init hisi_sllc_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE, + "AP_PERF_ARM_HISI_SLLC_ONLINE", + hisi_uncore_pmu_online_cpu, + hisi_uncore_pmu_offline_cpu); + if (ret) { + pr_err("SLLC PMU: cpuhp state setup failed, ret = %d\n", ret); + return ret; + } + + ret = platform_driver_register(&hisi_sllc_pmu_driver); + if (ret) + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE); + + return ret; +} +module_init(hisi_sllc_pmu_module_init); + +static void __exit hisi_sllc_pmu_module_exit(void) +{ + platform_driver_unregister(&hisi_sllc_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_SLLC_ONLINE); +} +module_exit(hisi_sllc_pmu_module_exit); + +MODULE_DESCRIPTION("HiSilicon SLLC uncore PMU driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>"); +MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>"); diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c new file mode 100644 index 000000000..16869bf5b --- /dev/null +++ b/drivers/perf/hisilicon/hns3_pmu.c @@ -0,0 +1,1671 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This driver adds support for HNS3 PMU iEP device. Related perf events are + * bandwidth, latency, packet rate, interrupt rate etc. 
+ * + * Copyright (C) 2022 HiSilicon Limited + */ +#include <linux/bitfield.h> +#include <linux/bitmap.h> +#include <linux/bug.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pci-epf.h> +#include <linux/perf_event.h> +#include <linux/smp.h> + +/* registers offset address */ +#define HNS3_PMU_REG_GLOBAL_CTRL 0x0000 +#define HNS3_PMU_REG_CLOCK_FREQ 0x0020 +#define HNS3_PMU_REG_BDF 0x0fe0 +#define HNS3_PMU_REG_VERSION 0x0fe4 +#define HNS3_PMU_REG_DEVICE_ID 0x0fe8 + +#define HNS3_PMU_REG_EVENT_OFFSET 0x1000 +#define HNS3_PMU_REG_EVENT_SIZE 0x1000 +#define HNS3_PMU_REG_EVENT_CTRL_LOW 0x00 +#define HNS3_PMU_REG_EVENT_CTRL_HIGH 0x04 +#define HNS3_PMU_REG_EVENT_INTR_STATUS 0x08 +#define HNS3_PMU_REG_EVENT_INTR_MASK 0x0c +#define HNS3_PMU_REG_EVENT_COUNTER 0x10 +#define HNS3_PMU_REG_EVENT_EXT_COUNTER 0x18 +#define HNS3_PMU_REG_EVENT_QID_CTRL 0x28 +#define HNS3_PMU_REG_EVENT_QID_PARA 0x2c + +#define HNS3_PMU_FILTER_SUPPORT_GLOBAL BIT(0) +#define HNS3_PMU_FILTER_SUPPORT_PORT BIT(1) +#define HNS3_PMU_FILTER_SUPPORT_PORT_TC BIT(2) +#define HNS3_PMU_FILTER_SUPPORT_FUNC BIT(3) +#define HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE BIT(4) +#define HNS3_PMU_FILTER_SUPPORT_FUNC_INTR BIT(5) + +#define HNS3_PMU_FILTER_ALL_TC 0xf +#define HNS3_PMU_FILTER_ALL_QUEUE 0xffff + +#define HNS3_PMU_CTRL_SUBEVENT_S 4 +#define HNS3_PMU_CTRL_FILTER_MODE_S 24 + +#define HNS3_PMU_GLOBAL_START BIT(0) + +#define HNS3_PMU_EVENT_STATUS_RESET BIT(11) +#define HNS3_PMU_EVENT_EN BIT(12) +#define HNS3_PMU_EVENT_OVERFLOW_RESTART BIT(15) + +#define HNS3_PMU_QID_PARA_FUNC_S 0 +#define HNS3_PMU_QID_PARA_QUEUE_S 16 + +#define HNS3_PMU_QID_CTRL_REQ_ENABLE BIT(0) +#define HNS3_PMU_QID_CTRL_DONE BIT(1) +#define HNS3_PMU_QID_CTRL_MISS BIT(2) + +#define HNS3_PMU_INTR_MASK_OVERFLOW BIT(1) + +#define HNS3_PMU_MAX_HW_EVENTS 8 + +/* + * Each hardware event contains two registers (counter and ext_counter) for + * bandwidth, packet rate, latency and interrupt rate. These two registers will + * be triggered to run at the same when a hardware event is enabled. The meaning + * of counter and ext_counter of different event type are different, their + * meaning show as follow: + * + * +----------------+------------------+---------------+ + * | event type | counter | ext_counter | + * +----------------+------------------+---------------+ + * | bandwidth | byte number | cycle number | + * +----------------+------------------+---------------+ + * | packet rate | packet number | cycle number | + * +----------------+------------------+---------------+ + * | latency | cycle number | packet number | + * +----------------+------------------+---------------+ + * | interrupt rate | interrupt number | cycle number | + * +----------------+------------------+---------------+ + * + * The cycle number indicates increment of counter of hardware timer, the + * frequency of hardware timer can be read from hw_clk_freq file. + * + * Performance of each hardware event is calculated by: counter / ext_counter. + * + * Since processing of data is preferred to be done in userspace, we expose + * ext_counter as a separate event for userspace and use bit 16 to indicate it. 
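+ * Userspace is then expected to combine the two values itself; e.g. a rough
+ * bandwidth figure in bytes per second is byte_num / time * hw_clk_freq,
+ * with both events read over the same interval.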
+ * For example, event 0x00001 and 0x10001 are actually one event for hardware + * because bit 0-15 are same. If the bit 16 of one event is 0 means to read + * counter register, otherwise means to read ext_counter register. + */ +/* bandwidth events */ +#define HNS3_PMU_EVT_BW_SSU_EGU_BYTE_NUM 0x00001 +#define HNS3_PMU_EVT_BW_SSU_EGU_TIME 0x10001 +#define HNS3_PMU_EVT_BW_SSU_RPU_BYTE_NUM 0x00002 +#define HNS3_PMU_EVT_BW_SSU_RPU_TIME 0x10002 +#define HNS3_PMU_EVT_BW_SSU_ROCE_BYTE_NUM 0x00003 +#define HNS3_PMU_EVT_BW_SSU_ROCE_TIME 0x10003 +#define HNS3_PMU_EVT_BW_ROCE_SSU_BYTE_NUM 0x00004 +#define HNS3_PMU_EVT_BW_ROCE_SSU_TIME 0x10004 +#define HNS3_PMU_EVT_BW_TPU_SSU_BYTE_NUM 0x00005 +#define HNS3_PMU_EVT_BW_TPU_SSU_TIME 0x10005 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_BYTE_NUM 0x00006 +#define HNS3_PMU_EVT_BW_RPU_RCBRX_TIME 0x10006 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_BYTE_NUM 0x00008 +#define HNS3_PMU_EVT_BW_RCBTX_TXSCH_TIME 0x10008 +#define HNS3_PMU_EVT_BW_WR_FBD_BYTE_NUM 0x00009 +#define HNS3_PMU_EVT_BW_WR_FBD_TIME 0x10009 +#define HNS3_PMU_EVT_BW_WR_EBD_BYTE_NUM 0x0000a +#define HNS3_PMU_EVT_BW_WR_EBD_TIME 0x1000a +#define HNS3_PMU_EVT_BW_RD_FBD_BYTE_NUM 0x0000b +#define HNS3_PMU_EVT_BW_RD_FBD_TIME 0x1000b +#define HNS3_PMU_EVT_BW_RD_EBD_BYTE_NUM 0x0000c +#define HNS3_PMU_EVT_BW_RD_EBD_TIME 0x1000c +#define HNS3_PMU_EVT_BW_RD_PAY_M0_BYTE_NUM 0x0000d +#define HNS3_PMU_EVT_BW_RD_PAY_M0_TIME 0x1000d +#define HNS3_PMU_EVT_BW_RD_PAY_M1_BYTE_NUM 0x0000e +#define HNS3_PMU_EVT_BW_RD_PAY_M1_TIME 0x1000e +#define HNS3_PMU_EVT_BW_WR_PAY_M0_BYTE_NUM 0x0000f +#define HNS3_PMU_EVT_BW_WR_PAY_M0_TIME 0x1000f +#define HNS3_PMU_EVT_BW_WR_PAY_M1_BYTE_NUM 0x00010 +#define HNS3_PMU_EVT_BW_WR_PAY_M1_TIME 0x10010 + +/* packet rate events */ +#define HNS3_PMU_EVT_PPS_IGU_SSU_PACKET_NUM 0x00100 +#define HNS3_PMU_EVT_PPS_IGU_SSU_TIME 0x10100 +#define HNS3_PMU_EVT_PPS_SSU_EGU_PACKET_NUM 0x00101 +#define HNS3_PMU_EVT_PPS_SSU_EGU_TIME 0x10101 +#define HNS3_PMU_EVT_PPS_SSU_RPU_PACKET_NUM 0x00102 +#define HNS3_PMU_EVT_PPS_SSU_RPU_TIME 0x10102 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_PACKET_NUM 0x00103 +#define HNS3_PMU_EVT_PPS_SSU_ROCE_TIME 0x10103 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_PACKET_NUM 0x00104 +#define HNS3_PMU_EVT_PPS_ROCE_SSU_TIME 0x10104 +#define HNS3_PMU_EVT_PPS_TPU_SSU_PACKET_NUM 0x00105 +#define HNS3_PMU_EVT_PPS_TPU_SSU_TIME 0x10105 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_PACKET_NUM 0x00106 +#define HNS3_PMU_EVT_PPS_RPU_RCBRX_TIME 0x10106 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_PACKET_NUM 0x00107 +#define HNS3_PMU_EVT_PPS_RCBTX_TPU_TIME 0x10107 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_PACKET_NUM 0x00108 +#define HNS3_PMU_EVT_PPS_RCBTX_TXSCH_TIME 0x10108 +#define HNS3_PMU_EVT_PPS_WR_FBD_PACKET_NUM 0x00109 +#define HNS3_PMU_EVT_PPS_WR_FBD_TIME 0x10109 +#define HNS3_PMU_EVT_PPS_WR_EBD_PACKET_NUM 0x0010a +#define HNS3_PMU_EVT_PPS_WR_EBD_TIME 0x1010a +#define HNS3_PMU_EVT_PPS_RD_FBD_PACKET_NUM 0x0010b +#define HNS3_PMU_EVT_PPS_RD_FBD_TIME 0x1010b +#define HNS3_PMU_EVT_PPS_RD_EBD_PACKET_NUM 0x0010c +#define HNS3_PMU_EVT_PPS_RD_EBD_TIME 0x1010c +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_PACKET_NUM 0x0010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M0_TIME 0x1010d +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_PACKET_NUM 0x0010e +#define HNS3_PMU_EVT_PPS_RD_PAY_M1_TIME 0x1010e +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_PACKET_NUM 0x0010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M0_TIME 0x1010f +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_PACKET_NUM 0x00110 +#define HNS3_PMU_EVT_PPS_WR_PAY_M1_TIME 0x10110 +#define HNS3_PMU_EVT_PPS_NICROH_TX_PRE_PACKET_NUM 0x00111 +#define 
HNS3_PMU_EVT_PPS_NICROH_TX_PRE_TIME 0x10111 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_PACKET_NUM 0x00112 +#define HNS3_PMU_EVT_PPS_NICROH_RX_PRE_TIME 0x10112 + +/* latency events */ +#define HNS3_PMU_EVT_DLY_TX_PUSH_TIME 0x00202 +#define HNS3_PMU_EVT_DLY_TX_PUSH_PACKET_NUM 0x10202 +#define HNS3_PMU_EVT_DLY_TX_TIME 0x00204 +#define HNS3_PMU_EVT_DLY_TX_PACKET_NUM 0x10204 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_TIME 0x00206 +#define HNS3_PMU_EVT_DLY_SSU_TX_NIC_PACKET_NUM 0x10206 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_TIME 0x00207 +#define HNS3_PMU_EVT_DLY_SSU_TX_ROCE_PACKET_NUM 0x10207 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_TIME 0x00208 +#define HNS3_PMU_EVT_DLY_SSU_RX_NIC_PACKET_NUM 0x10208 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_TIME 0x00209 +#define HNS3_PMU_EVT_DLY_SSU_RX_ROCE_PACKET_NUM 0x10209 +#define HNS3_PMU_EVT_DLY_RPU_TIME 0x0020e +#define HNS3_PMU_EVT_DLY_RPU_PACKET_NUM 0x1020e +#define HNS3_PMU_EVT_DLY_TPU_TIME 0x0020f +#define HNS3_PMU_EVT_DLY_TPU_PACKET_NUM 0x1020f +#define HNS3_PMU_EVT_DLY_RPE_TIME 0x00210 +#define HNS3_PMU_EVT_DLY_RPE_PACKET_NUM 0x10210 +#define HNS3_PMU_EVT_DLY_TPE_TIME 0x00211 +#define HNS3_PMU_EVT_DLY_TPE_PACKET_NUM 0x10211 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_TIME 0x00212 +#define HNS3_PMU_EVT_DLY_TPE_PUSH_PACKET_NUM 0x10212 +#define HNS3_PMU_EVT_DLY_WR_FBD_TIME 0x00213 +#define HNS3_PMU_EVT_DLY_WR_FBD_PACKET_NUM 0x10213 +#define HNS3_PMU_EVT_DLY_WR_EBD_TIME 0x00214 +#define HNS3_PMU_EVT_DLY_WR_EBD_PACKET_NUM 0x10214 +#define HNS3_PMU_EVT_DLY_RD_FBD_TIME 0x00215 +#define HNS3_PMU_EVT_DLY_RD_FBD_PACKET_NUM 0x10215 +#define HNS3_PMU_EVT_DLY_RD_EBD_TIME 0x00216 +#define HNS3_PMU_EVT_DLY_RD_EBD_PACKET_NUM 0x10216 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_TIME 0x00217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M0_PACKET_NUM 0x10217 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_TIME 0x00218 +#define HNS3_PMU_EVT_DLY_RD_PAY_M1_PACKET_NUM 0x10218 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_TIME 0x00219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M0_PACKET_NUM 0x10219 +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_TIME 0x0021a +#define HNS3_PMU_EVT_DLY_WR_PAY_M1_PACKET_NUM 0x1021a +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_TIME 0x0021c +#define HNS3_PMU_EVT_DLY_MSIX_WRITE_PACKET_NUM 0x1021c + +/* interrupt rate events */ +#define HNS3_PMU_EVT_PPS_MSIX_NIC_INTR_NUM 0x00300 +#define HNS3_PMU_EVT_PPS_MSIX_NIC_TIME 0x10300 + +/* filter mode supported by each bandwidth event */ +#define HNS3_PMU_FILTER_BW_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_BW_SSU_RPU 0x1f +#define HNS3_PMU_FILTER_BW_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_BW_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_BW_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_BW_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_BW_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_BW_WR_FBD 0x1b +#define HNS3_PMU_FILTER_BW_WR_EBD 0x11 +#define HNS3_PMU_FILTER_BW_RD_FBD 0x01 +#define HNS3_PMU_FILTER_BW_RD_EBD 0x1b +#define HNS3_PMU_FILTER_BW_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_BW_WR_PAY_M1 0x01 + +/* filter mode supported by each packet rate event */ +#define HNS3_PMU_FILTER_PPS_IGU_SSU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_EGU 0x07 +#define HNS3_PMU_FILTER_PPS_SSU_RPU 0x1f +#define HNS3_PMU_FILTER_PPS_SSU_ROCE 0x0f +#define HNS3_PMU_FILTER_PPS_ROCE_SSU 0x0f +#define HNS3_PMU_FILTER_PPS_TPU_SSU 0x1f +#define HNS3_PMU_FILTER_PPS_RPU_RCBRX 0x11 +#define HNS3_PMU_FILTER_PPS_RCBTX_TPU 0x1f +#define HNS3_PMU_FILTER_PPS_RCBTX_TXSCH 0x11 +#define HNS3_PMU_FILTER_PPS_WR_FBD 0x1b +#define HNS3_PMU_FILTER_PPS_WR_EBD 0x11 +#define 
HNS3_PMU_FILTER_PPS_RD_FBD 0x01 +#define HNS3_PMU_FILTER_PPS_RD_EBD 0x1b +#define HNS3_PMU_FILTER_PPS_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_PPS_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_TX_PRE 0x01 +#define HNS3_PMU_FILTER_PPS_NICROH_RX_PRE 0x01 + +/* filter mode supported by each latency event */ +#define HNS3_PMU_FILTER_DLY_TX_PUSH 0x01 +#define HNS3_PMU_FILTER_DLY_TX 0x01 +#define HNS3_PMU_FILTER_DLY_SSU_TX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_TX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_NIC 0x07 +#define HNS3_PMU_FILTER_DLY_SSU_RX_ROCE 0x07 +#define HNS3_PMU_FILTER_DLY_RPU 0x11 +#define HNS3_PMU_FILTER_DLY_TPU 0x1f +#define HNS3_PMU_FILTER_DLY_RPE 0x01 +#define HNS3_PMU_FILTER_DLY_TPE 0x0b +#define HNS3_PMU_FILTER_DLY_TPE_PUSH 0x1b +#define HNS3_PMU_FILTER_DLY_WR_FBD 0x1b +#define HNS3_PMU_FILTER_DLY_WR_EBD 0x11 +#define HNS3_PMU_FILTER_DLY_RD_FBD 0x01 +#define HNS3_PMU_FILTER_DLY_RD_EBD 0x1b +#define HNS3_PMU_FILTER_DLY_RD_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_RD_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M0 0x01 +#define HNS3_PMU_FILTER_DLY_WR_PAY_M1 0x01 +#define HNS3_PMU_FILTER_DLY_MSIX_WRITE 0x01 + +/* filter mode supported by each interrupt rate event */ +#define HNS3_PMU_FILTER_INTR_MSIX_NIC 0x01 + +enum hns3_pmu_hw_filter_mode { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +struct hns3_pmu_event_attr { + u32 event; + u16 filter_support; +}; + +struct hns3_pmu { + struct perf_event *hw_events[HNS3_PMU_MAX_HW_EVENTS]; + struct hlist_node node; + struct pci_dev *pdev; + struct pmu pmu; + void __iomem *base; + int irq; + int on_cpu; + u32 identifier; + u32 hw_clk_freq; /* hardware clock frequency of PMU */ + /* maximum and minimum bdf allowed by PMU */ + u16 bdf_min; + u16 bdf_max; +}; + +#define to_hns3_pmu(p) (container_of((p), struct hns3_pmu, pmu)) + +#define GET_PCI_DEVFN(bdf) ((bdf) & 0xff) + +#define FILTER_CONDITION_PORT(port) ((1 << (port)) & 0xff) +#define FILTER_CONDITION_PORT_TC(port, tc) (((port) << 3) | ((tc) & 0x07)) +#define FILTER_CONDITION_FUNC_INTR(func, intr) (((intr) << 8) | (func)) + +#define HNS3_PMU_FILTER_ATTR(_name, _config, _start, _end) \ + static inline u64 hns3_pmu_get_##_name(struct perf_event *event) \ + { \ + return FIELD_GET(GENMASK_ULL(_end, _start), \ + event->attr._config); \ + } + +HNS3_PMU_FILTER_ATTR(subevent, config, 0, 7); +HNS3_PMU_FILTER_ATTR(event_type, config, 8, 15); +HNS3_PMU_FILTER_ATTR(ext_counter_used, config, 16, 16); +HNS3_PMU_FILTER_ATTR(port, config1, 0, 3); +HNS3_PMU_FILTER_ATTR(tc, config1, 4, 7); +HNS3_PMU_FILTER_ATTR(bdf, config1, 8, 23); +HNS3_PMU_FILTER_ATTR(queue, config1, 24, 39); +HNS3_PMU_FILTER_ATTR(intr, config1, 40, 51); +HNS3_PMU_FILTER_ATTR(global, config1, 52, 52); + +#define HNS3_BW_EVT_BYTE_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_BYTE_NUM, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_BW_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_BW_##_name##_TIME, \ + HNS3_PMU_FILTER_BW_##_name}) +#define HNS3_PPS_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_PPS_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_PPS_##_name}) +#define HNS3_DLY_EVT_TIME(_name) (&(struct 
hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_TIME, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_DLY_EVT_PACKET_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_DLY_##_name##_PACKET_NUM, \ + HNS3_PMU_FILTER_DLY_##_name}) +#define HNS3_INTR_EVT_INTR_NUM(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_INTR_NUM, \ + HNS3_PMU_FILTER_INTR_##_name}) +#define HNS3_INTR_EVT_TIME(_name) (&(struct hns3_pmu_event_attr) {\ + HNS3_PMU_EVT_PPS_##_name##_TIME, \ + HNS3_PMU_FILTER_INTR_##_name}) + +static ssize_t hns3_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + + return sysfs_emit(buf, "%s\n", (char *)eattr->var); +} + +static ssize_t hns3_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + return sysfs_emit(buf, "config=0x%x\n", event->event); +} + +static ssize_t hns3_pmu_filter_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu_event_attr *event; + struct dev_ext_attribute *eattr; + int len; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + event = eattr->var; + + len = sysfs_emit_at(buf, 0, "filter mode supported: "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL) + len += sysfs_emit_at(buf, len, "global "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT) + len += sysfs_emit_at(buf, len, "port "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC) + len += sysfs_emit_at(buf, len, "port-tc "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC) + len += sysfs_emit_at(buf, len, "func "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE) + len += sysfs_emit_at(buf, len, "func-queue "); + if (event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR) + len += sysfs_emit_at(buf, len, "func-intr "); + + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} + +#define HNS3_PMU_ATTR(_name, _func, _config) \ + (&((struct dev_ext_attribute[]) { \ + { __ATTR(_name, 0444, _func, NULL), (void *)_config } \ + })[0].attr.attr) + +#define HNS3_PMU_FORMAT_ATTR(_name, _format) \ + HNS3_PMU_ATTR(_name, hns3_pmu_format_show, (void *)_format) +#define HNS3_PMU_EVENT_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_event_show, (void *)_event) +#define HNS3_PMU_FLT_MODE_ATTR(_name, _event) \ + HNS3_PMU_ATTR(_name, hns3_pmu_filter_mode_show, (void *)_event) + +#define HNS3_PMU_BW_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_EVT_PAIR(_name, _macro) \ + HNS3_PMU_EVENT_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_EVENT_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +#define HNS3_PMU_BW_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_byte_num, HNS3_BW_EVT_BYTE_NUM(_macro)), \ + 
HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_BW_EVT_TIME(_macro)) +#define HNS3_PMU_PPS_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_PPS_EVT_PACKET_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_PPS_EVT_TIME(_macro)) +#define HNS3_PMU_DLY_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_DLY_EVT_TIME(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_packet_num, HNS3_DLY_EVT_PACKET_NUM(_macro)) +#define HNS3_PMU_INTR_FLT_MODE_PAIR(_name, _macro) \ + HNS3_PMU_FLT_MODE_ATTR(_name##_intr_num, HNS3_INTR_EVT_INTR_NUM(_macro)), \ + HNS3_PMU_FLT_MODE_ATTR(_name##_time, HNS3_INTR_EVT_TIME(_macro)) + +static u8 hns3_pmu_hw_filter_modes[] = { + HNS3_PMU_HW_FILTER_GLOBAL, + HNS3_PMU_HW_FILTER_PORT, + HNS3_PMU_HW_FILTER_PORT_TC, + HNS3_PMU_HW_FILTER_FUNC, + HNS3_PMU_HW_FILTER_FUNC_QUEUE, + HNS3_PMU_HW_FILTER_FUNC_INTR, +}; + +#define HNS3_PMU_SET_HW_FILTER(_hwc, _mode) \ + ((_hwc)->addr_filters = (void *)&hns3_pmu_hw_filter_modes[(_mode)]) + +static ssize_t identifier_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "0x%x\n", hns3_pmu->identifier); +} +static DEVICE_ATTR_RO(identifier); + +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%d\n", hns3_pmu->on_cpu); +} +static DEVICE_ATTR_RO(cpumask); + +static ssize_t bdf_min_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_min; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_min); + +static ssize_t bdf_max_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + u16 bdf = hns3_pmu->bdf_max; + + return sysfs_emit(buf, "%02x:%02x.%x\n", PCI_BUS_NUM(bdf), + PCI_SLOT(bdf), PCI_FUNC(bdf)); +} +static DEVICE_ATTR_RO(bdf_max); + +static ssize_t hw_clk_freq_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(dev_get_drvdata(dev)); + + return sysfs_emit(buf, "%u\n", hns3_pmu->hw_clk_freq); +} +static DEVICE_ATTR_RO(hw_clk_freq); + +static struct attribute *hns3_pmu_events_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_EVT_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_EVT_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_EVT_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_EVT_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_EVT_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_EVT_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_EVT_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_EVT_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_EVT_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_EVT_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_EVT_PAIR(pps_tpu_ssu, TPU_SSU), + 
HNS3_PMU_PPS_EVT_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_EVT_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_PPS_EVT_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_EVT_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_EVT_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_EVT_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_EVT_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_EVT_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_EVT_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_EVT_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_EVT_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_EVT_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_EVT_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute *hns3_pmu_filter_mode_attr[] = { + /* bandwidth events */ + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_egu, SSU_EGU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_rpu, SSU_RPU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_ssu_roce, SSU_ROCE), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_roce_ssu, ROCE_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_tpu_ssu, TPU_SSU), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_fbd, WR_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_ebd, WR_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_fbd, RD_FBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_ebd, RD_EBD), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_BW_FLT_MODE_PAIR(bw_wr_pay_m1, WR_PAY_M1), + + /* packet rate events */ + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_igu_ssu, IGU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_egu, SSU_EGU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_rpu, SSU_RPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_ssu_roce, SSU_ROCE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_roce_ssu, ROCE_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_tpu_ssu, TPU_SSU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rpu_rcbrx, RPU_RCBRX), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_tpu, RCBTX_TPU), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rcbtx_txsch, RCBTX_TXSCH), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_fbd, WR_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_ebd, WR_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_fbd, RD_FBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_ebd, RD_EBD), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m0, WR_PAY_M0), + 
HNS3_PMU_PPS_FLT_MODE_PAIR(pps_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_tx_pre, NICROH_TX_PRE), + HNS3_PMU_PPS_FLT_MODE_PAIR(pps_intr_nicroh_rx_pre, NICROH_RX_PRE), + + /* latency events */ + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_push_to_mac, TX_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tx_normal_to_mac, TX), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_nic, SSU_TX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_tx_th_roce, SSU_TX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_nic, SSU_RX_NIC), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_ssu_rx_th_roce, SSU_RX_ROCE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpu, RPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpu, TPU), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rpe, RPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_normal, TPE), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_tpe_push, TPE_PUSH), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_fbd, WR_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_ebd, WR_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_fbd, RD_FBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_ebd, RD_EBD), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m0, RD_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_rd_pay_m1, RD_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m0, WR_PAY_M0), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_wr_pay_m1, WR_PAY_M1), + HNS3_PMU_DLY_FLT_MODE_PAIR(dly_msix_write, MSIX_WRITE), + + /* interrupt rate events */ + HNS3_PMU_INTR_FLT_MODE_PAIR(pps_intr_msix_nic, MSIX_NIC), + + NULL +}; + +static struct attribute_group hns3_pmu_events_group = { + .name = "events", + .attrs = hns3_pmu_events_attr, +}; + +static struct attribute_group hns3_pmu_filter_mode_group = { + .name = "filtermode", + .attrs = hns3_pmu_filter_mode_attr, +}; + +static struct attribute *hns3_pmu_format_attr[] = { + HNS3_PMU_FORMAT_ATTR(subevent, "config:0-7"), + HNS3_PMU_FORMAT_ATTR(event_type, "config:8-15"), + HNS3_PMU_FORMAT_ATTR(ext_counter_used, "config:16"), + HNS3_PMU_FORMAT_ATTR(port, "config1:0-3"), + HNS3_PMU_FORMAT_ATTR(tc, "config1:4-7"), + HNS3_PMU_FORMAT_ATTR(bdf, "config1:8-23"), + HNS3_PMU_FORMAT_ATTR(queue, "config1:24-39"), + HNS3_PMU_FORMAT_ATTR(intr, "config1:40-51"), + HNS3_PMU_FORMAT_ATTR(global, "config1:52"), + NULL +}; + +static struct attribute_group hns3_pmu_format_group = { + .name = "format", + .attrs = hns3_pmu_format_attr, +}; + +static struct attribute *hns3_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group hns3_pmu_cpumask_attr_group = { + .attrs = hns3_pmu_cpumask_attrs, +}; + +static struct attribute *hns3_pmu_identifier_attrs[] = { + &dev_attr_identifier.attr, + NULL +}; + +static struct attribute_group hns3_pmu_identifier_attr_group = { + .attrs = hns3_pmu_identifier_attrs, +}; + +static struct attribute *hns3_pmu_bdf_range_attrs[] = { + &dev_attr_bdf_min.attr, + &dev_attr_bdf_max.attr, + NULL +}; + +static struct attribute_group hns3_pmu_bdf_range_attr_group = { + .attrs = hns3_pmu_bdf_range_attrs, +}; + +static struct attribute *hns3_pmu_hw_clk_freq_attrs[] = { + &dev_attr_hw_clk_freq.attr, + NULL +}; + +static struct attribute_group hns3_pmu_hw_clk_freq_attr_group = { + .attrs = hns3_pmu_hw_clk_freq_attrs, +}; + +static const struct attribute_group *hns3_pmu_attr_groups[] = { + &hns3_pmu_events_group, + &hns3_pmu_filter_mode_group, + &hns3_pmu_format_group, + &hns3_pmu_cpumask_attr_group, + &hns3_pmu_identifier_attr_group, + &hns3_pmu_bdf_range_attr_group, + &hns3_pmu_hw_clk_freq_attr_group, + NULL +}; + +static u32 hns3_pmu_get_event(struct perf_event *event) +{ + return hns3_pmu_get_ext_counter_used(event) << 16 | + 
hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_real_event(struct perf_event *event) +{ + return hns3_pmu_get_event_type(event) << 8 | + hns3_pmu_get_subevent(event); +} + +static u32 hns3_pmu_get_offset(u32 offset, u32 idx) +{ + return offset + HNS3_PMU_REG_EVENT_OFFSET + + HNS3_PMU_REG_EVENT_SIZE * idx; +} + +static u32 hns3_pmu_readl(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readl(hns3_pmu->base + offset); +} + +static void hns3_pmu_writel(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u32 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writel(val, hns3_pmu->base + offset); +} + +static u64 hns3_pmu_readq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + return readq(hns3_pmu->base + offset); +} + +static void hns3_pmu_writeq(struct hns3_pmu *hns3_pmu, u32 reg_offset, u32 idx, + u64 val) +{ + u32 offset = hns3_pmu_get_offset(reg_offset, idx); + + writeq(val, hns3_pmu->base + offset); +} + +static bool hns3_pmu_cmp_event(struct perf_event *target, + struct perf_event *event) +{ + return hns3_pmu_get_real_event(target) == hns3_pmu_get_real_event(event); +} + +static int hns3_pmu_find_related_event_idx(struct hns3_pmu *hns3_pmu, + struct perf_event *event) +{ + struct perf_event *sibling; + int hw_event_used = 0; + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + sibling = hns3_pmu->hw_events[idx]; + if (!sibling) + continue; + + hw_event_used++; + + if (!hns3_pmu_cmp_event(sibling, event)) + continue; + + /* Related events is used in group */ + if (sibling->group_leader == event->group_leader) + return idx; + } + + /* No related event and all hardware events are used up */ + if (hw_event_used >= HNS3_PMU_MAX_HW_EVENTS) + return -EBUSY; + + /* No related event and there is extra hardware events can be use */ + return -ENOENT; +} + +static int hns3_pmu_get_event_idx(struct hns3_pmu *hns3_pmu) +{ + int idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + if (!hns3_pmu->hw_events[idx]) + return idx; + } + + return -EBUSY; +} + +static bool hns3_pmu_valid_bdf(struct hns3_pmu *hns3_pmu, u16 bdf) +{ + struct pci_dev *pdev; + + if (bdf < hns3_pmu->bdf_min || bdf > hns3_pmu->bdf_max) { + pci_err(hns3_pmu->pdev, "Invalid EP device: %#x!\n", bdf); + return false; + } + + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(hns3_pmu->pdev->bus), + PCI_BUS_NUM(bdf), + GET_PCI_DEVFN(bdf)); + if (!pdev) { + pci_err(hns3_pmu->pdev, "Nonexistent EP device: %#x!\n", bdf); + return false; + } + + pci_dev_put(pdev); + return true; +} + +static void hns3_pmu_set_qid_para(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + u32 val; + + val = GET_PCI_DEVFN(bdf); + val |= (u32)queue << HNS3_PMU_QID_PARA_QUEUE_S; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_PARA, idx, val); +} + +static bool hns3_pmu_qid_req_start(struct hns3_pmu *hns3_pmu, u32 idx) +{ + bool queue_id_valid = false; + u32 reg_qid_ctrl, val; + int err; + + /* enable queue id request */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, + HNS3_PMU_QID_CTRL_REQ_ENABLE); + + reg_qid_ctrl = hns3_pmu_get_offset(HNS3_PMU_REG_EVENT_QID_CTRL, idx); + err = readl_poll_timeout(hns3_pmu->base + reg_qid_ctrl, val, + val & HNS3_PMU_QID_CTRL_DONE, 1, 1000); + if (err == -ETIMEDOUT) { + pci_err(hns3_pmu->pdev, "QID request timeout!\n"); + goto out; + } + + queue_id_valid = !(val & 
HNS3_PMU_QID_CTRL_MISS); + +out: + /* disable qid request and clear status */ + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_QID_CTRL, idx, 0); + + return queue_id_valid; +} + +static bool hns3_pmu_valid_queue(struct hns3_pmu *hns3_pmu, u32 idx, u16 bdf, + u16 queue) +{ + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue); + + return hns3_pmu_qid_req_start(hns3_pmu, idx); +} + +static struct hns3_pmu_event_attr *hns3_pmu_get_pmu_event(u32 event) +{ + struct hns3_pmu_event_attr *pmu_event; + struct dev_ext_attribute *eattr; + struct device_attribute *dattr; + struct attribute *attr; + u32 i; + + for (i = 0; i < ARRAY_SIZE(hns3_pmu_events_attr) - 1; i++) { + attr = hns3_pmu_events_attr[i]; + dattr = container_of(attr, struct device_attribute, attr); + eattr = container_of(dattr, struct dev_ext_attribute, attr); + pmu_event = eattr->var; + + if (event == pmu_event->event) + return pmu_event; + } + + return NULL; +} + +static int hns3_pmu_set_func_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC); + + return 0; +} + +static int hns3_pmu_set_func_queue_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u16 bdf = hns3_pmu_get_bdf(event); + + if (!hns3_pmu_valid_bdf(hns3_pmu, bdf)) + return -ENOENT; + + if (!hns3_pmu_valid_queue(hns3_pmu, hwc->idx, bdf, queue_id)) { + pci_err(hns3_pmu->pdev, "Invalid queue: %u\n", queue_id); + return -ENOENT; + } + + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_QUEUE); + + return 0; +} + +static bool +hns3_pmu_is_enabled_global_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 global = hns3_pmu_get_global(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_GLOBAL)) + return false; + + return global; +} + +static bool hns3_pmu_is_enabled_func_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC)) + return false; + else if (queue_id != HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool +hns3_pmu_is_enabled_func_queue_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 queue_id = hns3_pmu_get_queue(event); + u16 bdf = hns3_pmu_get_bdf(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_QUEUE)) + return false; + else if (queue_id == HNS3_PMU_FILTER_ALL_QUEUE) + return false; + + return bdf; +} + +static bool hns3_pmu_is_enabled_port_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT)) + return false; + + return tc_id == HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_port_tc_mode(struct perf_event *event, + struct hns3_pmu_event_attr *pmu_event) +{ + u8 tc_id = hns3_pmu_get_tc(event); + + if (!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_PORT_TC)) + return false; + + return tc_id != HNS3_PMU_FILTER_ALL_TC; +} + +static bool +hns3_pmu_is_enabled_func_intr_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu, + struct hns3_pmu_event_attr *pmu_event) +{ + u16 bdf = hns3_pmu_get_bdf(event); + + if 
(!(pmu_event->filter_support & HNS3_PMU_FILTER_SUPPORT_FUNC_INTR)) + return false; + + return hns3_pmu_valid_bdf(hns3_pmu, bdf); +} + +static int hns3_pmu_select_filter_mode(struct perf_event *event, + struct hns3_pmu *hns3_pmu) +{ + u32 event_id = hns3_pmu_get_event(event); + struct hw_perf_event *hwc = &event->hw; + struct hns3_pmu_event_attr *pmu_event; + + pmu_event = hns3_pmu_get_pmu_event(event_id); + if (!pmu_event) { + pci_err(hns3_pmu->pdev, "Invalid pmu event\n"); + return -ENOENT; + } + + if (hns3_pmu_is_enabled_global_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_GLOBAL); + return 0; + } + + if (hns3_pmu_is_enabled_func_mode(event, pmu_event)) + return hns3_pmu_set_func_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_func_queue_mode(event, pmu_event)) + return hns3_pmu_set_func_queue_mode(event, hns3_pmu); + + if (hns3_pmu_is_enabled_port_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT); + return 0; + } + + if (hns3_pmu_is_enabled_port_tc_mode(event, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_PORT_TC); + return 0; + } + + if (hns3_pmu_is_enabled_func_intr_mode(event, hns3_pmu, pmu_event)) { + HNS3_PMU_SET_HW_FILTER(hwc, HNS3_PMU_HW_FILTER_FUNC_INTR); + return 0; + } + + return -ENOENT; +} + +static bool hns3_pmu_validate_event_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct perf_event *event_group[HNS3_PMU_MAX_HW_EVENTS]; + int counters = 1; + int num; + + event_group[0] = leader; + if (!is_software_event(leader)) { + if (leader->pmu != event->pmu) + return false; + + if (leader != event && !hns3_pmu_cmp_event(leader, event)) + event_group[counters++] = event; + } + + for_each_sibling_event(sibling, event->group_leader) { + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return false; + + for (num = 0; num < counters; num++) { + if (hns3_pmu_cmp_event(event_group[num], sibling)) + break; + } + + if (num == counters) + event_group[counters++] = sibling; + } + + return counters <= HNS3_PMU_MAX_HW_EVENTS; +} + +static u32 hns3_pmu_get_filter_condition(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u16 intr_id = hns3_pmu_get_intr(event); + u8 port_id = hns3_pmu_get_port(event); + u16 bdf = hns3_pmu_get_bdf(event); + u8 tc_id = hns3_pmu_get_tc(event); + u8 filter_mode; + + filter_mode = *(u8 *)hwc->addr_filters; + switch (filter_mode) { + case HNS3_PMU_HW_FILTER_PORT: + return FILTER_CONDITION_PORT(port_id); + case HNS3_PMU_HW_FILTER_PORT_TC: + return FILTER_CONDITION_PORT_TC(port_id, tc_id); + case HNS3_PMU_HW_FILTER_FUNC: + case HNS3_PMU_HW_FILTER_FUNC_QUEUE: + return GET_PCI_DEVFN(bdf); + case HNS3_PMU_HW_FILTER_FUNC_INTR: + return FILTER_CONDITION_FUNC_INTR(GET_PCI_DEVFN(bdf), intr_id); + default: + break; + } + + return 0; +} + +static void hns3_pmu_config_filter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u8 event_type = hns3_pmu_get_event_type(event); + u8 subevent_id = hns3_pmu_get_subevent(event); + u16 queue_id = hns3_pmu_get_queue(event); + struct hw_perf_event *hwc = &event->hw; + u8 filter_mode = *(u8 *)hwc->addr_filters; + u16 bdf = hns3_pmu_get_bdf(event); + u32 idx = hwc->idx; + u32 val; + + val = event_type; + val |= subevent_id << HNS3_PMU_CTRL_SUBEVENT_S; + val |= filter_mode << HNS3_PMU_CTRL_FILTER_MODE_S; + val |= HNS3_PMU_EVENT_OVERFLOW_RESTART; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = 
hns3_pmu_get_filter_condition(event); + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_HIGH, idx, val); + + if (filter_mode == HNS3_PMU_HW_FILTER_FUNC_QUEUE) + hns3_pmu_set_qid_para(hns3_pmu, idx, bdf, queue_id); +} + +static void hns3_pmu_enable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_disable_counter(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_EN; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static void hns3_pmu_enable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val &= ~HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_disable_intr(struct hns3_pmu *hns3_pmu, + struct hw_perf_event *hwc) +{ + u32 idx = hwc->idx; + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx); + val |= HNS3_PMU_INTR_MASK_OVERFLOW; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_INTR_MASK, idx, val); +} + +static void hns3_pmu_clear_intr_status(struct hns3_pmu *hns3_pmu, u32 idx) +{ + u32 val; + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val |= HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); + + val = hns3_pmu_readl(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx); + val &= ~HNS3_PMU_EVENT_STATUS_RESET; + hns3_pmu_writel(hns3_pmu, HNS3_PMU_REG_EVENT_CTRL_LOW, idx, val); +} + +static u64 hns3_pmu_read_counter(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + + return hns3_pmu_readq(hns3_pmu, event->hw.event_base, event->hw.idx); +} + +static void hns3_pmu_write_counter(struct perf_event *event, u64 value) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + u32 idx = event->hw.idx; + + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_COUNTER, idx, value); + hns3_pmu_writeq(hns3_pmu, HNS3_PMU_REG_EVENT_EXT_COUNTER, idx, value); +} + +static void hns3_pmu_init_counter(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + local64_set(&hwc->prev_count, 0); + hns3_pmu_write_counter(event, 0); +} + +static int hns3_pmu_event_init(struct perf_event *event) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Sampling is not supported */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + + event->cpu = hns3_pmu->on_cpu; + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) { + pci_err(hns3_pmu->pdev, "Up to %u events are supported!\n", + HNS3_PMU_MAX_HW_EVENTS); + return -EBUSY; + } + + hwc->idx = idx; + + ret = hns3_pmu_select_filter_mode(event, hns3_pmu); + if (ret) { + pci_err(hns3_pmu->pdev, "Invalid filter, ret = %d.\n", ret); + return ret; + } + + if (!hns3_pmu_validate_event_group(event)) { + pci_err(hns3_pmu->pdev, "Invalid event group.\n"); + return -EINVAL; + } + + if (hns3_pmu_get_ext_counter_used(event)) + hwc->event_base = HNS3_PMU_REG_EVENT_EXT_COUNTER; 
+ else + hwc->event_base = HNS3_PMU_REG_EVENT_COUNTER; + + return 0; +} + +static void hns3_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 new_cnt, prev_cnt, delta; + + do { + prev_cnt = local64_read(&hwc->prev_count); + new_cnt = hns3_pmu_read_counter(event); + } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != + prev_cnt); + + delta = new_cnt - prev_cnt; + local64_add(delta, &event->count); +} + +static void hns3_pmu_start(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + hwc->state = 0; + + hns3_pmu_config_filter(event); + hns3_pmu_init_counter(event); + hns3_pmu_enable_intr(hns3_pmu, hwc); + hns3_pmu_enable_counter(hns3_pmu, hwc); + + perf_event_update_userpage(event); +} + +static void hns3_pmu_stop(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_disable_counter(hns3_pmu, hwc); + hns3_pmu_disable_intr(hns3_pmu, hwc); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + + if (hwc->state & PERF_HES_UPTODATE) + return; + + /* Read hardware counter and update the perf counter statistics */ + hns3_pmu_read(event); + hwc->state |= PERF_HES_UPTODATE; +} + +static int hns3_pmu_add(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int idx; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + /* Check all working events to find a related event. */ + idx = hns3_pmu_find_related_event_idx(hns3_pmu, event); + if (idx < 0 && idx != -ENOENT) + return idx; + + /* Current event shares an enabled hardware event with related event */ + if (idx >= 0 && idx < HNS3_PMU_MAX_HW_EVENTS) { + hwc->idx = idx; + goto start_count; + } + + idx = hns3_pmu_get_event_idx(hns3_pmu); + if (idx < 0) + return idx; + + hwc->idx = idx; + hns3_pmu->hw_events[idx] = event; + +start_count: + if (flags & PERF_EF_START) + hns3_pmu_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void hns3_pmu_del(struct perf_event *event, int flags) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + hns3_pmu_stop(event, PERF_EF_UPDATE); + hns3_pmu->hw_events[hwc->idx] = NULL; + perf_event_update_userpage(event); +} + +static void hns3_pmu_enable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val |= HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static void hns3_pmu_disable(struct pmu *pmu) +{ + struct hns3_pmu *hns3_pmu = to_hns3_pmu(pmu); + u32 val; + + val = readl(hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); + val &= ~HNS3_PMU_GLOBAL_START; + writel(val, hns3_pmu->base + HNS3_PMU_REG_GLOBAL_CTRL); +} + +static int hns3_pmu_alloc_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + u16 device_id; + char *name; + u32 val; + + hns3_pmu->base = pcim_iomap_table(pdev)[BAR_2]; + if (!hns3_pmu->base) { + pci_err(pdev, "ioremap failed\n"); + return -ENOMEM; + } + + hns3_pmu->hw_clk_freq = readl(hns3_pmu->base + HNS3_PMU_REG_CLOCK_FREQ); + + val = readl(hns3_pmu->base + HNS3_PMU_REG_BDF); + hns3_pmu->bdf_min = val & 0xffff; + hns3_pmu->bdf_max = val >> 16; + + val = 
readl(hns3_pmu->base + HNS3_PMU_REG_DEVICE_ID); + device_id = val & 0xffff; + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hns3_pmu_sicl_%u", device_id); + if (!name) + return -ENOMEM; + + hns3_pmu->pdev = pdev; + hns3_pmu->on_cpu = -1; + hns3_pmu->identifier = readl(hns3_pmu->base + HNS3_PMU_REG_VERSION); + hns3_pmu->pmu = (struct pmu) { + .name = name, + .module = THIS_MODULE, + .event_init = hns3_pmu_event_init, + .pmu_enable = hns3_pmu_enable, + .pmu_disable = hns3_pmu_disable, + .add = hns3_pmu_add, + .del = hns3_pmu_del, + .start = hns3_pmu_start, + .stop = hns3_pmu_stop, + .read = hns3_pmu_read, + .task_ctx_nr = perf_invalid_context, + .attr_groups = hns3_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + + return 0; +} + +static irqreturn_t hns3_pmu_irq(int irq, void *data) +{ + struct hns3_pmu *hns3_pmu = data; + u32 intr_status, idx; + + for (idx = 0; idx < HNS3_PMU_MAX_HW_EVENTS; idx++) { + intr_status = hns3_pmu_readl(hns3_pmu, + HNS3_PMU_REG_EVENT_INTR_STATUS, + idx); + + /* + * As each counter will restart from 0 when it is overflowed, + * extra processing is no need, just clear interrupt status. + */ + if (intr_status) + hns3_pmu_clear_intr_status(hns3_pmu, idx); + } + + return IRQ_HANDLED; +} + +static int hns3_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + if (hns3_pmu->on_cpu == -1) { + hns3_pmu->on_cpu = cpu; + irq_set_affinity(hns3_pmu->irq, cpumask_of(cpu)); + } + + return 0; +} + +static int hns3_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct hns3_pmu *hns3_pmu; + unsigned int target; + + hns3_pmu = hlist_entry_safe(node, struct hns3_pmu, node); + if (!hns3_pmu) + return -ENODEV; + + /* Nothing to do if this CPU doesn't own the PMU */ + if (hns3_pmu->on_cpu != cpu) + return 0; + + /* Choose a new CPU from all online cpus */ + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + perf_pmu_migrate_context(&hns3_pmu->pmu, cpu, target); + hns3_pmu->on_cpu = target; + irq_set_affinity(hns3_pmu->irq, cpumask_of(target)); + + return 0; +} + +static void hns3_pmu_free_irq(void *data) +{ + struct pci_dev *pdev = data; + + pci_free_irq_vectors(pdev); +} + +static int hns3_pmu_irq_register(struct pci_dev *pdev, + struct hns3_pmu *hns3_pmu) +{ + int irq, ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret < 0) { + pci_err(pdev, "failed to enable MSI vectors, ret = %d.\n", ret); + return ret; + } + + ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev); + if (ret) { + pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret); + return ret; + } + + irq = pci_irq_vector(pdev, 0); + ret = devm_request_irq(&pdev->dev, irq, hns3_pmu_irq, 0, + hns3_pmu->pmu.name, hns3_pmu); + if (ret) { + pci_err(pdev, "failed to register irq, ret = %d.\n", ret); + return ret; + } + + hns3_pmu->irq = irq; + + return 0; +} + +static int hns3_pmu_init_pmu(struct pci_dev *pdev, struct hns3_pmu *hns3_pmu) +{ + int ret; + + ret = hns3_pmu_alloc_pmu(pdev, hns3_pmu); + if (ret) + return ret; + + ret = hns3_pmu_irq_register(pdev, hns3_pmu); + if (ret) + return ret; + + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + if (ret) { + pci_err(pdev, "failed to register hotplug, ret = %d.\n", ret); + return ret; + } + + ret = perf_pmu_register(&hns3_pmu->pmu, hns3_pmu->pmu.name, -1); + if (ret) { + pci_err(pdev, "failed to 
register perf PMU, ret = %d.\n", ret); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); + } + + return ret; +} + +static void hns3_pmu_uninit_pmu(struct pci_dev *pdev) +{ + struct hns3_pmu *hns3_pmu = pci_get_drvdata(pdev); + + perf_pmu_unregister(&hns3_pmu->pmu); + cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + &hns3_pmu->node); +} + +static int hns3_pmu_init_dev(struct pci_dev *pdev) +{ + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + pci_err(pdev, "failed to enable pci device, ret = %d.\n", ret); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(BAR_2), "hns3_pmu"); + if (ret < 0) { + pci_err(pdev, "failed to request pci region, ret = %d.\n", ret); + return ret; + } + + pci_set_master(pdev); + + return 0; +} + +static int hns3_pmu_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hns3_pmu *hns3_pmu; + int ret; + + hns3_pmu = devm_kzalloc(&pdev->dev, sizeof(*hns3_pmu), GFP_KERNEL); + if (!hns3_pmu) + return -ENOMEM; + + ret = hns3_pmu_init_dev(pdev); + if (ret) + return ret; + + ret = hns3_pmu_init_pmu(pdev, hns3_pmu); + if (ret) { + pci_clear_master(pdev); + return ret; + } + + pci_set_drvdata(pdev, hns3_pmu); + + return ret; +} + +static void hns3_pmu_remove(struct pci_dev *pdev) +{ + hns3_pmu_uninit_pmu(pdev); + pci_clear_master(pdev); + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id hns3_pmu_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa22b) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pmu_ids); + +static struct pci_driver hns3_pmu_driver = { + .name = "hns3_pmu", + .id_table = hns3_pmu_ids, + .probe = hns3_pmu_probe, + .remove = hns3_pmu_remove, +}; + +static int __init hns3_pmu_module_init(void) +{ + int ret; + + ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE, + "AP_PERF_ARM_HNS3_PMU_ONLINE", + hns3_pmu_online_cpu, + hns3_pmu_offline_cpu); + if (ret) { + pr_err("failed to setup HNS3 PMU hotplug, ret = %d.\n", ret); + return ret; + } + + ret = pci_register_driver(&hns3_pmu_driver); + if (ret) { + pr_err("failed to register pci driver, ret = %d.\n", ret); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); + } + + return ret; +} +module_init(hns3_pmu_module_init); + +static void __exit hns3_pmu_module_exit(void) +{ + pci_unregister_driver(&hns3_pmu_driver); + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HNS3_PMU_ONLINE); +} +module_exit(hns3_pmu_module_exit); + +MODULE_DESCRIPTION("HNS3 PMU driver"); +MODULE_LICENSE("GPL v2"); |
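
Aside (not part of the diff above): the driver registers an uncore PMU named hns3_pmu_sicl_<id> and exposes it through the perf events framework, so a quick way to exercise it from user space is a plain perf_event_open(2) counter. The sketch below is a minimal illustration under assumptions — the instance name hns3_pmu_sicl_0 and the config value 0x10001 are placeholders; real event encodings come from the PMU's sysfs "events" attributes, not from this example.

/*
 * Hypothetical user-space sketch: count one HNS3 PMU event for one
 * second via perf_event_open(2). The sysfs path and attr.config value
 * are assumptions for illustration only.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read the dynamic PMU type id published by the driver in sysfs. */
static int read_pmu_type(const char *path)
{
	FILE *f = fopen(path, "r");
	int type = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &type) != 1)
		type = -1;
	fclose(f);
	return type;
}

int main(void)
{
	/* Assumed instance name; the driver names PMUs hns3_pmu_sicl_<id>. */
	const char *type_path =
		"/sys/bus/event_source/devices/hns3_pmu_sicl_0/type";
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;

	type = read_pmu_type(type_path);
	if (type < 0) {
		perror("read PMU type");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x10001;	/* placeholder event encoding */

	/*
	 * Uncore PMU: pid must be -1 and a CPU must be named; the driver
	 * migrates the event to its own housekeeping CPU in event_init.
	 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);
	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("count: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}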