/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_EXCEPTION_H
#define _ASM_POWERPC_EXCEPTION_H
/*
 * Extracted from head_64.S
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 */
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers. They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 *
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */
#include <asm/feature-fixups.h>
/* PACA save area size in u64 units (exgen, exmc, etc) */
#define EX_SIZE 10
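
/*
 * Illustrative sketch only, not part of this header: each PACA save area
 * named above is declared in struct paca_struct (asm/paca.h) as an array
 * of EX_SIZE doublewords, roughly along the lines of:
 *
 *	u64 exgen[EX_SIZE] __aligned(0x80);	- general exceptions
 *	u64 exmc[EX_SIZE];			- machine checks
 *
 * The exception prologues stash scratch GPRs and SPRs in these areas
 * before a proper stack frame exists.
 */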
/*
 * maximum recursive depth of MCE exceptions
 */
#define MAX_MCE_DEPTH 4
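
/*
 * Roughly: the machine check entry code in exceptions-64s.S tracks its
 * nesting level in the PACA and uses MAX_MCE_DEPTH to bound how deeply
 * machine checks may nest before recovery is abandoned.
 */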
#ifdef __ASSEMBLY__
#define STF_ENTRY_BARRIER_SLOT \
	STF_ENTRY_BARRIER_FIXUP_SECTION; \
	nop; \
	nop; \
	nop

#define STF_EXIT_BARRIER_SLOT \
	STF_EXIT_BARRIER_FIXUP_SECTION; \
	nop; \
	nop; \
	nop; \
	nop; \
	nop; \
	nop

#define ENTRY_FLUSH_SLOT \
	ENTRY_FLUSH_FIXUP_SECTION; \
	nop; \
	nop; \
	nop;

#define SCV_ENTRY_FLUSH_SLOT \
	SCV_ENTRY_FLUSH_FIXUP_SECTION; \
	nop; \
	nop; \
	nop;

/*
 * r10 must be free to use, r13 must be paca
 */
#define INTERRUPT_TO_KERNEL \
	STF_ENTRY_BARRIER_SLOT; \
	ENTRY_FLUSH_SLOT

/*
 * r10, ctr must be free to use, r13 must be paca
 */
#define SCV_INTERRUPT_TO_KERNEL \
	STF_ENTRY_BARRIER_SLOT; \
	SCV_ENTRY_FLUSH_SLOT
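
/*
 * Minimal usage sketch, illustrative only (the save-slot name below is
 * hypothetical, not a real define): an exception prologue first frees up
 * r10 into its PACA save area, then drops the entry barriers in before
 * touching any other kernel state:
 *
 *	std	r10,EX_SCRATCH(r13)	- hypothetical PACA save slot
 *	INTERRUPT_TO_KERNEL		- STF entry barrier + L1-D entry flush
 *
 * Both slots assemble to nops recorded in fixup sections; code patching
 * turns them into the real barrier/flush sequences only when the
 * corresponding mitigations are enabled.
 */
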
/*
 * Macros for annotating the expected destination of (h)rfid
 *
 * The nop instructions allow us to insert one or more instructions to flush the
 * L1-D cache when returning to userspace or a guest.
 *
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which hrfid, rfid, and rfscv are) to support ARCH_HAS_MEMBARRIER_SYNC_CORE
 * without additional synchronisation instructions.
 *
 * Soft-masked interrupt replay does not include a context-synchronising rfid,
 * but those replays always return to the kernel; the sync is only required
 * when returning to user.
 */
#define RFI_FLUSH_SLOT \
	RFI_FLUSH_FIXUP_SECTION; \
	nop; \
	nop; \
	nop

#define RFI_TO_KERNEL \
	rfid

#define RFI_TO_USER \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	rfid; \
	b	rfi_flush_fallback

#define RFI_TO_USER_OR_KERNEL \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	rfid; \
	b	rfi_flush_fallback

#define RFI_TO_GUEST \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	rfid; \
	b	rfi_flush_fallback

#define HRFI_TO_KERNEL \
	hrfid

#define HRFI_TO_USER \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b	hrfi_flush_fallback

#define HRFI_TO_USER_OR_KERNEL \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b	hrfi_flush_fallback

#define HRFI_TO_GUEST \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b	hrfi_flush_fallback

#define HRFI_TO_UNKNOWN \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	hrfid; \
	b	hrfi_flush_fallback

#define RFSCV_TO_USER \
	STF_EXIT_BARRIER_SLOT; \
	RFI_FLUSH_SLOT; \
	RFSCV; \
	b	rfscv_flush_fallback
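
/*
 * Minimal usage sketch, illustrative only (the label, register choice and
 * MSR_PR test below are hypothetical, not lifted from the real exit code):
 * a return path picks the macro matching where SRR1 says it is going, so
 * the exit barrier and flush only run when leaving kernel context:
 *
 *	andi.	r0,r12,MSR_PR		- assume r12 holds the saved SRR1
 *	beq	1f			- MSR_PR clear: returning to kernel
 *	RFI_TO_USER			- exit barrier + flush slot, then rfid
 * 1:	RFI_TO_KERNEL			- plain rfid, no flush needed
 *
 * Roughly, when the fallback L1-D flush is in use the first nop of
 * RFI_FLUSH_SLOT is patched to branch over the rfid to the trailing
 * "b ..._flush_fallback", which performs the flush and then returns;
 * with an inline flush the patched nops fall straight through into rfid.
 */
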
#else /* __ASSEMBLY__ */
/* Prototype for function defined in exceptions-64s.S */
void do_uaccess_flush(void);
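
/*
 * Roughly: do_uaccess_flush() performs the L1-D flush used at the end of
 * a kernel user-access window; it is called from C (the user-access
 * teardown path) when the uaccess-flush mitigation is enabled.
 */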
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_EXCEPTION_H */