/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>
#include <linux/objtool.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
* Macro to inline switching to an interrupt stack and invoking function
* calls from there. The following rules apply:
*
* - Ordering:
*
* 1. Write the current stack pointer into the topmost slot of the irq
* stack. This ensures that the various unwinders can link back to the
* original stack.
*
* 2. Switch the stack pointer to the top of the irq stack.
*
* 3. Invoke whatever needs to be done (@asm_call argument)
*
* 4. Pop the original stack pointer from the top of the irq stack,
* which switches back to the original stack where execution left off.
*
* - Function invocation:
*
* To allow flexible usage of the macro, the actual function code including
* the store of the arguments in the call ABI registers is handed in via
* the @asm_call argument.
*
* - Local variables:
*
* @tos:
* The @tos variable holds a pointer to the top of the irq stack and
* _must_ be allocated in a non-callee-saved register, as this is a
* restriction coming from objtool.
*
* Note that @tos appears in both the input and output constraints to
* ensure that the compiler does not assume that R11 is left untouched
* in case this macro is used in some place where the per-CPU interrupt
* stack pointer is used again afterwards.
*
* - Function arguments:
* The function argument(s), if any, have to be defined in register
* variables at the place where this is invoked. Storing the
* argument(s) in the proper register(s) is part of @asm_call.
*
* - Constraints:
*
* The constraints have to be specified very carefully because the
* compiler does not know about the assembly call.
*
* output:
* As documented above, the @tos variable is required to be in the
* output constraints to make the compiler aware that R11 cannot be
* reused after the asm() statement.
*
* For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
* required as well, since this prevents certain creative GCC variants
* from misplacing the ASM code.
*
* input:
* - func:
* Immediate, which tells the compiler that the function is referenced.
*
* - tos:
* Register. The actual register is defined by the variable declaration.
*
* - function arguments:
* The constraints are handed in via the @argconstr argument list. They
* describe the register arguments which are used in @asm_call.
*
* clobbers:
* Function calls can clobber anything except the callee-saved
* registers. Tell the compiler.
*/
#define call_on_stack(stack, func, asm_call, argconstr...) \
{ \
register void *tos asm("r11"); \
\
tos = ((void *)(stack)); \
\
asm_inline volatile( \
"movq %%rsp, (%[tos]) \n" \
"movq %[tos], %%rsp \n" \
\
asm_call \
\
"popq %%rsp \n" \
\
: "+r" (tos), ASM_CALL_CONSTRAINT \
: [__func] "i" (func), [tos] "r" (tos) argconstr \
: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10", \
"memory" \
); \
}
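
/*
 * Illustrative sketch only; my_func and MY_CONSTRAINTS are made-up names,
 * not part of this header. Invoking a hypothetical
 * "void my_func(void *arg)" on another stack, with arg a local variable,
 * combines the pieces roughly like the real users further down:
 *
 *    #define MY_CONSTRAINTS , [arg1] "r" (arg)
 *
 *    call_on_stack(stack, my_func, ASM_CALL_ARG1, MY_CONSTRAINTS);
 *
 * ASM_CALL_ARG1 (defined below) loads %[arg1] into RDI and emits the
 * call; MY_CONSTRAINTS supplies the matching input constraint, including
 * the leading comma that the variadic @argconstr parameter requires.
 */
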
#define ASM_CALL_ARG0 \
"call %P[__func] \n" \
ASM_REACHABLE

#define ASM_CALL_ARG1 \
"movq %[arg1], %%rdi \n" \
ASM_CALL_ARG0

#define ASM_CALL_ARG2 \
"movq %[arg2], %%rsi \n" \
ASM_CALL_ARG1

#define ASM_CALL_ARG3 \
"movq %[arg3], %%rdx \n" \
ASM_CALL_ARG2
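
/*
 * For reference, ASM_CALL_ARG2 therefore expands to the sequence:
 *
 *    movq %[arg2], %%rsi
 *    movq %[arg1], %%rdi
 *    call %P[__func]
 *
 * i.e. the arguments are loaded into the x86-64 SysV argument registers
 * before the call instruction is issued.
 */
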
#define call_on_irqstack(func, asm_call, argconstr...) \
call_on_stack(__this_cpu_read(hardirq_stack_ptr), \
func, asm_call, argconstr)
/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto) \
static_assert(__builtin_types_compatible_p(typeof(&func), proto))
#define assert_arg_type(arg, proto) \
static_assert(__builtin_types_compatible_p(typeof(arg), proto))
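
/*
 * For example, for a handler "void sysvec_foo(struct pt_regs *regs)"
 * (sysvec_foo is a made-up name for illustration), the run_* macros
 * below emit
 *
 *    assert_function_type(sysvec_foo, void (*)(struct pt_regs *));
 *    assert_arg_type(regs, struct pt_regs *);
 *
 * turning any signature mismatch into a build error instead of a subtle
 * run-time bug.
 */
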
/*
* Macro to invoke system vector and device interrupt C handlers.
*/
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...) \
{ \
/* \
* User mode entry and interrupts on the irq stack do not \
* switch stacks. If coming from user mode, the task stack is empty. \
*/ \
if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) { \
irq_enter_rcu(); \
func(c_args); \
irq_exit_rcu(); \
} else { \
/* \
* Mark the irq stack inuse _before_ and unmark _after_ \
* switching stacks. Interrupts are disabled in both \
* places. Invoke the stack switch macro with the call \
* sequence which matches the above direct invocation. \
*/ \
__this_cpu_write(hardirq_stack_inuse, true); \
call_on_irqstack(func, asm_call, constr); \
__this_cpu_write(hardirq_stack_inuse, false); \
} \
}
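
/*
 * For the system vector case below this reduces to either
 *
 *    irq_enter_rcu(); func(regs); irq_exit_rcu();
 *
 * executed directly on the current stack, or to the equivalent sequence
 * inside @asm_call on the hardirq stack, bracketed by the
 * hardirq_stack_inuse updates.
 */
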
/*
* Function call sequence for call_on_irqstack() for system vectors.
*
* Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
* mechanism because these functions are global and cannot be optimized out
* when compiling a particular source file which uses one of these macros.
*
* The argument (regs) does not need to be pushed or stashed in a
* callee-saved register to be safe vs. the irq_enter_rcu() call because
* the clobbers already prevent the compiler from storing it in a
* callee-clobbered register. As the compiler has to preserve @regs for
* the final call to idtentry_exit() anyway, it's likely that it does
* not cause extra effort for this asm magic.
*/
#define ASM_CALL_SYSVEC \
"call irq_enter_rcu \n" \
ASM_CALL_ARG1 \
"call irq_exit_rcu \n"
#define SYSVEC_CONSTRAINTS , [arg1] "r" (regs)
#define run_sysvec_on_irqstack_cond(func, regs) \
{ \
assert_function_type(func, void (*)(struct pt_regs *)); \
assert_arg_type(regs, struct pt_regs *); \
\
call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC, \
SYSVEC_CONSTRAINTS, regs); \
}
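
/*
 * Usage sketch, simplified from the DEFINE_IDTENTRY_SYSVEC glue in
 * asm/idtentry.h (details vary between kernel versions; sysvec_foo is a
 * placeholder name):
 *
 *    __visible noinstr void sysvec_foo(struct pt_regs *regs)
 *    {
 *        irqentry_state_t state = irqentry_enter(regs);
 *
 *        instrumentation_begin();
 *        run_sysvec_on_irqstack_cond(__sysvec_foo, regs);
 *        instrumentation_end();
 *        irqentry_exit(regs, state);
 *    }
 */
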
/*
* As in ASM_CALL_SYSVEC above, the clobbers force the compiler to store
* @regs and @vector in callee-saved registers.
*/
#define ASM_CALL_IRQ \
"call irq_enter_rcu \n" \
ASM_CALL_ARG2 \
"call irq_exit_rcu \n"
#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)
#define run_irq_on_irqstack_cond(func, regs, vector) \
{ \
assert_function_type(func, void (*)(struct pt_regs *, u32)); \
assert_arg_type(regs, struct pt_regs *); \
assert_arg_type(vector, u32); \
\
call_on_irqstack_cond(func, regs, ASM_CALL_IRQ, \
IRQ_CONSTRAINTS, regs, vector); \
}
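
/*
 * With IRQ_CONSTRAINTS applied, ASM_CALL_IRQ expands to:
 *
 *    call irq_enter_rcu
 *    movq %[arg2], %%rsi    # vector
 *    movq %[arg1], %%rdi    # regs
 *    call %P[__func]
 *    call irq_exit_rcu
 *
 * which mirrors the direct invocation in the user mode / nested path of
 * call_on_irqstack_cond() above.
 */
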
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
* Macro to invoke __do_softirq on the irq stack. This is only called from
* task context when bottom halves are about to be reenabled and soft
* interrupts are pending to be processed. The interrupt stack cannot be in
* use here.
*/
#define do_softirq_own_stack() \
{ \
__this_cpu_write(hardirq_stack_inuse, true); \
call_on_irqstack(__do_softirq, ASM_CALL_ARG0); \
__this_cpu_write(hardirq_stack_inuse, false); \
}
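
/*
 * __do_softirq() takes no arguments, so ASM_CALL_ARG0 suffices and no
 * constraint list is passed. The callers in the generic softirq code
 * (e.g. do_softirq()) invoke this with interrupts disabled, which is
 * what makes the unconditional hardirq_stack_inuse toggling above safe.
 */
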
#endif
#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs) \
{ \
irq_enter_rcu(); \
func(regs); \
irq_exit_rcu(); \
}
/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector) \
{ \
irq_enter_rcu(); \
func(regs, vector); \
irq_exit_rcu(); \
}
#endif /* !CONFIG_X86_64 */
#endif /* _ASM_X86_IRQ_STACK_H */