Diffstat (limited to 'arch/arm64/kernel/stacktrace.c')
-rw-r--r-- | arch/arm64/kernel/stacktrace.c | 146
1 files changed, 102 insertions, 44 deletions
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 17f66a74c7..b2a60e0bcf 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -8,6 +8,7 @@
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -19,6 +20,31 @@
 #include <asm/stacktrace.h>
 
 /*
+ * Kernel unwind state
+ *
+ * @common:      Common unwind state.
+ * @task:        The task being unwound.
+ * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
+ *               associated with the most recently encountered replacement lr
+ *               value.
+ */
+struct kunwind_state {
+        struct unwind_state common;
+        struct task_struct *task;
+#ifdef CONFIG_KRETPROBES
+        struct llist_node *kr_cur;
+#endif
+};
+
+static __always_inline void
+kunwind_init(struct kunwind_state *state,
+             struct task_struct *task)
+{
+        unwind_init_common(&state->common);
+        state->task = task;
+}
+
+/*
  * Start an unwind from a pt_regs.
  *
  * The unwind will begin at the PC within the regs.
@@ -26,13 +52,13 @@
  * The regs must be on a stack currently owned by the calling task.
  */
 static __always_inline void
-unwind_init_from_regs(struct unwind_state *state,
-                      struct pt_regs *regs)
+kunwind_init_from_regs(struct kunwind_state *state,
+                       struct pt_regs *regs)
 {
-        unwind_init_common(state, current);
+        kunwind_init(state, current);
 
-        state->fp = regs->regs[29];
-        state->pc = regs->pc;
+        state->common.fp = regs->regs[29];
+        state->common.pc = regs->pc;
 }
 
 /*
@@ -44,12 +70,12 @@ unwind_init_from_regs(struct unwind_state *state,
  * The function which invokes this must be noinline.
  */
 static __always_inline void
-unwind_init_from_caller(struct unwind_state *state)
+kunwind_init_from_caller(struct kunwind_state *state)
 {
-        unwind_init_common(state, current);
+        kunwind_init(state, current);
 
-        state->fp = (unsigned long)__builtin_frame_address(1);
-        state->pc = (unsigned long)__builtin_return_address(0);
+        state->common.fp = (unsigned long)__builtin_frame_address(1);
+        state->common.pc = (unsigned long)__builtin_return_address(0);
 }
 
 /*
@@ -63,35 +89,38 @@ unwind_init_from_caller(struct unwind_state *state)
  * call this for the current task.
  */
 static __always_inline void
-unwind_init_from_task(struct unwind_state *state,
-                      struct task_struct *task)
+kunwind_init_from_task(struct kunwind_state *state,
+                       struct task_struct *task)
 {
-        unwind_init_common(state, task);
+        kunwind_init(state, task);
 
-        state->fp = thread_saved_fp(task);
-        state->pc = thread_saved_pc(task);
+        state->common.fp = thread_saved_fp(task);
+        state->common.pc = thread_saved_pc(task);
 }
 
 static __always_inline int
-unwind_recover_return_address(struct unwind_state *state)
+kunwind_recover_return_address(struct kunwind_state *state)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         if (state->task->ret_stack &&
-            (state->pc == (unsigned long)return_to_handler)) {
+            (state->common.pc == (unsigned long)return_to_handler)) {
                 unsigned long orig_pc;
-                orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
-                                                (void *)state->fp);
-                if (WARN_ON_ONCE(state->pc == orig_pc))
+                orig_pc = ftrace_graph_ret_addr(state->task, NULL,
+                                                state->common.pc,
+                                                (void *)state->common.fp);
+                if (WARN_ON_ONCE(state->common.pc == orig_pc))
                         return -EINVAL;
-                state->pc = orig_pc;
+                state->common.pc = orig_pc;
         }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 #ifdef CONFIG_KRETPROBES
-        if (is_kretprobe_trampoline(state->pc)) {
-                state->pc = kretprobe_find_ret_addr(state->task,
-                                                    (void *)state->fp,
-                                                    &state->kr_cur);
+        if (is_kretprobe_trampoline(state->common.pc)) {
+                unsigned long orig_pc;
+                orig_pc = kretprobe_find_ret_addr(state->task,
+                                                  (void *)state->common.fp,
+                                                  &state->kr_cur);
+                state->common.pc = orig_pc;
         }
 #endif /* CONFIG_KRETPROBES */
 
@@ -106,38 +135,40 @@ unwind_recover_return_address(struct unwind_state *state)
  * and the location (but not the fp value) of B.
  */
 static __always_inline int
-unwind_next(struct unwind_state *state)
+kunwind_next(struct kunwind_state *state)
 {
         struct task_struct *tsk = state->task;
-        unsigned long fp = state->fp;
+        unsigned long fp = state->common.fp;
         int err;
 
         /* Final frame; nothing to unwind */
         if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
                 return -ENOENT;
 
-        err = unwind_next_frame_record(state);
+        err = unwind_next_frame_record(&state->common);
         if (err)
                 return err;
 
-        state->pc = ptrauth_strip_kernel_insn_pac(state->pc);
+        state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);
 
-        return unwind_recover_return_address(state);
+        return kunwind_recover_return_address(state);
 }
 
+typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
+
 static __always_inline void
-unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
-       void *cookie)
+do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
+           void *cookie)
 {
-        if (unwind_recover_return_address(state))
+        if (kunwind_recover_return_address(state))
                 return;
 
         while (1) {
                 int ret;
 
-                if (!consume_entry(cookie, state->pc))
+                if (!consume_state(state, cookie))
                         break;
-                ret = unwind_next(state);
+                ret = kunwind_next(state);
                 if (ret < 0)
                         break;
         }
@@ -172,9 +203,10 @@ unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
                         : stackinfo_get_unknown();              \
         })
 
-noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
-                              void *cookie, struct task_struct *task,
-                              struct pt_regs *regs)
+static __always_inline void
+kunwind_stack_walk(kunwind_consume_fn consume_state,
+                   void *cookie, struct task_struct *task,
+                   struct pt_regs *regs)
 {
         struct stack_info stacks[] = {
                 stackinfo_get_task(task),
@@ -190,22 +222,48 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                 STACKINFO_EFI,
 #endif
         };
-        struct unwind_state state = {
-                .stacks = stacks,
-                .nr_stacks = ARRAY_SIZE(stacks),
+        struct kunwind_state state = {
+                .common = {
+                        .stacks = stacks,
+                        .nr_stacks = ARRAY_SIZE(stacks),
+                },
         };
 
         if (regs) {
                 if (task != current)
                         return;
-                unwind_init_from_regs(&state, regs);
+                kunwind_init_from_regs(&state, regs);
         } else if (task == current) {
-                unwind_init_from_caller(&state);
+                kunwind_init_from_caller(&state);
         } else {
-                unwind_init_from_task(&state, task);
+                kunwind_init_from_task(&state, task);
         }
 
-        unwind(&state, consume_entry, cookie);
+        do_kunwind(&state, consume_state, cookie);
+}
+
+struct kunwind_consume_entry_data {
+        stack_trace_consume_fn consume_entry;
+        void *cookie;
+};
+
+static __always_inline bool
+arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
+{
+        struct kunwind_consume_entry_data *data = cookie;
+        return data->consume_entry(data->cookie, state->common.pc);
+}
+
+noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
+                              void *cookie, struct task_struct *task,
+                              struct pt_regs *regs)
+{
+        struct kunwind_consume_entry_data data = {
+                .consume_entry = consume_entry,
+                .cookie = cookie,
+        };
+
+        kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
 }
 
 static bool dump_backtrace_entry(void *arg, unsigned long where)
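For readers following the refactoring, the structural change in this patch is an adapter between two callback shapes: the walker now hands a full kunwind_state to a kunwind_consume_fn, and the existing pc-only stack_trace_consume_fn interface of arch_stack_walk() keeps working via arch_kunwind_consume_entry(), which unpacks a small cookie struct and forwards only state->common.pc. The sketch below shows the same adapter pattern as a self-contained userspace C program; the demo_* names and the fake four-frame walker are invented for illustration and are not the kernel's definitions.

/*
 * Standalone illustration of the callback-adapter pattern above.
 * All demo_* types and the walker are simplified stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct kunwind_state: pc plus extra context. */
struct demo_state {
        unsigned long pc;
        int frame_nr;
};

/* New-style callback: sees the whole unwind state. */
typedef bool (*demo_consume_fn)(const struct demo_state *state, void *cookie);

/* Old-style callback: sees only the pc, like stack_trace_consume_fn. */
typedef bool (*demo_entry_fn)(void *cookie, unsigned long pc);

/* Core walker: fabricates a few frames and feeds them to the callback. */
static void demo_stack_walk(demo_consume_fn consume_state, void *cookie)
{
        struct demo_state state = { .pc = 0x1000, .frame_nr = 0 };

        while (state.frame_nr < 4) {
                if (!consume_state(&state, cookie))
                        break;
                state.pc += 0x40;       /* pretend to unwind one frame */
                state.frame_nr++;
        }
}

/* Adapter data + adapter, mirroring kunwind_consume_entry_data. */
struct demo_entry_data {
        demo_entry_fn consume_entry;
        void *cookie;
};

static bool demo_consume_entry(const struct demo_state *state, void *cookie)
{
        struct demo_entry_data *data = cookie;
        return data->consume_entry(data->cookie, state->pc);
}

/* Old-style interface, kept working on top of the new walker. */
static void demo_walk_entries(demo_entry_fn consume_entry, void *cookie)
{
        struct demo_entry_data data = {
                .consume_entry = consume_entry,
                .cookie = cookie,
        };

        demo_stack_walk(demo_consume_entry, &data);
}

static bool print_entry(void *cookie, unsigned long pc)
{
        (void)cookie;
        printf("pc = 0x%lx\n", pc);
        return true;            /* keep walking */
}

int main(void)
{
        demo_walk_entries(print_entry, NULL);
        return 0;
}

The point of the indirection is that future in-kernel consumers can take the richer state-aware callback directly, while existing users of the pc-only arch_stack_walk() interface need no changes.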