path: root/arch/csky/kernel/perf_callchain.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/perf_event.h>
#include <linux/uaccess.h>

/* Kernel callchain */
struct stackframe {
	unsigned long fp;
	unsigned long lr;
};

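/*
 * Both unwinders below assume fp points at a frame record holding the
 * caller's fp and the return lr (the -mbacktrace frame layout; see the
 * comment above perf_callchain_user()).
 */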
static int unwind_frame_kernel(struct stackframe *frame)
{
	unsigned long low = (unsigned long)task_stack_page(current);
	unsigned long high = low + THREAD_SIZE;

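	/* The frame pointer must lie within the current task's kernel stack. */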
	if (unlikely(frame->fp < low || frame->fp > high))
		return -EPERM;

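	/* Stop at the end of the stack or on a misaligned frame pointer. */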
	if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
		return -EPERM;

	*frame = *(struct stackframe *)frame->fp;

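	/*
	 * If the return address points into kernel text and was patched by
	 * the function graph tracer, recover the original caller address.
	 */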
	if (__kernel_text_address(frame->lr)) {
		int graph = 0;

		frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr,
				NULL);
	}
	return 0;
}

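/* Record each return address until the kernel unwinder fails. */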
static void notrace walk_stackframe(struct stackframe *fr,
			struct perf_callchain_entry_ctx *entry)
{
	do {
		perf_callchain_store(entry, fr->lr);
	} while (unwind_frame_kernel(fr) >= 0);
}

/*
 * Get the return address for a single stackframe and return the address of
 * the next frame tail.
 */
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
			unsigned long fp, unsigned long reg_lr)
{
	struct stackframe buftail;
	unsigned long lr = 0;
	unsigned long __user *user_frame_tail = (unsigned long __user *)fp;

	/* Check accessibility of one struct frame_tail beyond */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

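	/*
	 * For the leaf frame the caller passes the sampled lr in reg_lr;
	 * for deeper frames lr comes from the frame record just read.
	 */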
	if (reg_lr != 0)
		lr = reg_lr;
	else
		lr = buftail.lr;

	fp = buftail.fp;
	perf_callchain_store(entry, lr);

	return fp;
}

/*
 * This is called when the target is in user mode. It is only reached
 * when "PERF_SAMPLE_CALLCHAIN" is requested, from
 * kernel/events/core.c:perf_prepare_sample().
 *
 * How to trigger perf_callchain_[user/kernel]:
 * $ perf record -e cpu-clock --call-graph fp ./program
 * $ perf report --call-graph
 *
 * On C-SKY, the program being sampled and the C library must be
 * compiled with -mbacktrace, otherwise the user stack will not
 * contain frame records.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	unsigned long fp = regs->regs[4];

	perf_callchain_store(entry, regs->pc);

	/*
	 * When backtracing from a leaf function, lr is normally not
	 * saved in the frame on C-SKY, so take lr from pt_regs at the
	 * sample point. However, the lr value can be incorrect if lr
	 * is being used as a temporary register.
	 */
	fp = user_backtrace(entry, fp, regs->lr);

	while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
		fp = user_backtrace(entry, fp, 0);
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct stackframe fr;

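	/* Seed the unwinder with the fp and lr captured at the sample point. */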
	fr.fp = regs->regs[4];
	fr.lr = regs->lr;
	walk_stackframe(&fr, entry);
}