path: root/arch/powerpc/perf/8xx-pmu.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance event support - PPC 8xx
 *
 * Copyright 2016 Christophe Leroy, CS Systemes d'Information
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

#define PERF_8xx_ID_CPU_CYCLES		1
#define PERF_8xx_ID_HW_INSTRUCTIONS	2
#define PERF_8xx_ID_ITLB_LOAD_MISS	3
#define PERF_8xx_ID_DTLB_LOAD_MISS	4

#define C(x)	PERF_COUNT_HW_CACHE_##x
#define DTLB_LOAD_MISS	(C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
#define ITLB_LOAD_MISS	(C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))

extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;

static atomic_t insn_ctr_ref;
static atomic_t itlb_miss_ref;
static atomic_t dtlb_miss_ref;

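/*
 * Combine the atomic instruction_counter (updated outside this file)
 * with the 16-bit hardware count held in the upper half of COUNTA,
 * retrying until both reads are consistent.
 */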
static s64 get_insn_ctr(void)
{
	int ctr;
	unsigned long counta;

	do {
		ctr = atomic_read(&instruction_counter);
		counta = mfspr(SPRN_COUNTA);
	} while (ctr != atomic_read(&instruction_counter));

	return ((s64)ctr << 16) | (counta >> 16);
}

static int event_type(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
			return PERF_8xx_ID_CPU_CYCLES;
		if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
			return PERF_8xx_ID_HW_INSTRUCTIONS;
		break;
	case PERF_TYPE_HW_CACHE:
		if (event->attr.config == ITLB_LOAD_MISS)
			return PERF_8xx_ID_ITLB_LOAD_MISS;
		if (event->attr.config == DTLB_LOAD_MISS)
			return PERF_8xx_ID_DTLB_LOAD_MISS;
		break;
	case PERF_TYPE_RAW:
		break;
	default:
		return -ENOENT;
	}
	return -EOPNOTSUPP;
}

static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	if (type < 0)
		return type;
	return 0;
}

static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
	int type = event_type(event);
	s64 val = 0;

	if (type < 0)
		return type;

	switch (type) {
	case PERF_8xx_ID_CPU_CYCLES:
		val = get_tb();
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		if (atomic_inc_return(&insn_ctr_ref) == 1)
			mtspr(SPRN_ICTRL, 0xc0080007);
		val = get_insn_ctr();
		break;
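	/*
	 * For the TLB miss events, the first user patches a branch into
	 * the TLB miss handler exit path so that each miss is counted.
	 */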
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		if (atomic_inc_return(&itlb_miss_ref) == 1) {
			unsigned long target = patch_site_addr(&patch__itlbmiss_perf);

			patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
		}
		val = itlb_miss_counter;
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		if (atomic_inc_return(&dtlb_miss_ref) == 1) {
			unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);

			patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
		}
		val = dtlb_miss_counter;
		break;
	}
	local64_set(&event->hw.prev_count, val);
	return 0;
}

static void mpc8xx_pmu_read(struct perf_event *event)
{
	int type = event_type(event);
	s64 prev, val = 0, delta = 0;

	if (type < 0)
		return;

	do {
		prev = local64_read(&event->hw.prev_count);
		switch (type) {
		case PERF_8xx_ID_CPU_CYCLES:
			val = get_tb();
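			/*
			 * The time base is assumed to tick once every
			 * 16 CPU clocks on the 8xx, hence the scaling.
			 */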
			delta = 16 * (val - prev);
			break;
		case PERF_8xx_ID_HW_INSTRUCTIONS:
			val = get_insn_ctr();
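			/*
			 * The instruction counter counts down, so the
			 * delta is prev - val, corrected for wrap of
			 * the 48-bit value.
			 */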
			delta = prev - val;
			if (delta < 0)
				delta += 0x1000000000000LL;
			break;
		case PERF_8xx_ID_ITLB_LOAD_MISS:
			val = itlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		case PERF_8xx_ID_DTLB_LOAD_MISS:
			val = dtlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
}

static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
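	/* Instruction written back to the TLB miss patch sites below */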
	ppc_inst_t insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));

	mpc8xx_pmu_read(event);

	/* If it was the last user, stop counting to avoid useless overhead */
	switch (event_type(event)) {
	case PERF_8xx_ID_CPU_CYCLES:
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		if (atomic_dec_return(&insn_ctr_ref) == 0)
			mtspr(SPRN_ICTRL, 7);
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		if (atomic_dec_return(&itlb_miss_ref) == 0)
			patch_instruction_site(&patch__itlbmiss_exit_1, insn);
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		if (atomic_dec_return(&dtlb_miss_ref) == 0)
			patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
		break;
	}
}

static struct pmu mpc8xx_pmu = {
	.event_init	= mpc8xx_pmu_event_init,
	.add		= mpc8xx_pmu_add,
	.del		= mpc8xx_pmu_del,
	.read		= mpc8xx_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT |
			  PERF_PMU_CAP_NO_NMI,
};

static int init_mpc8xx_pmu(void)
{
	mtspr(SPRN_ICTRL, 7);
	mtspr(SPRN_CMPA, 0);
	mtspr(SPRN_COUNTA, 0xffff);

	return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);
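
Example usage (not part of the kernel file above): a minimal userspace sketch, assuming a Linux system where perf_event_open(2) is available, that requests the dTLB load-miss event this PMU maps to PERF_8xx_ID_DTLB_LOAD_MISS. The config encoding mirrors the DTLB_LOAD_MISS macro used in event_type() above; the workload section is a placeholder and error handling is minimal.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HW_CACHE;
	/* Same encoding as the DTLB_LOAD_MISS macro above */
	attr.config = PERF_COUNT_HW_CACHE_DTLB |
		      (PERF_COUNT_HW_CACHE_OP_READ << 8) |
		      (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
	attr.disabled = 1;

	/* Count for the current task on any CPU */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* ... workload to be measured goes here ... */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("dTLB load misses: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}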