/*
 * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2023, Advanced Micro Devices, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <plat_startup.h>

/*
 * HandoffParams
 * Parameter	bitfield	encoding
 * -----------------------------------------------------------------------------
 * Exec State	0		0 -> AArch64, 1 -> AArch32
 * Endianness	1		0 -> LE, 1 -> BE
 * Secure (TZ)	2		0 -> Non-secure, 1 -> Secure
 * EL		3:4		00 -> EL0, 01 -> EL1, 10 -> EL2, 11 -> EL3
 * CPU#		5:6		00 -> A53_0, 01 -> A53_1, 10 -> A53_2, 11 -> A53_3
 * Reserved	7:10		Reserved
 * Cluster#	11:12		00 -> Cluster 0, 01 -> Cluster 1, 10 -> Cluster 2,
 *				11 -> Cluster 3 (applicable to Versal NET only)
 * Reserved	13:16		Reserved
 */
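
/*
 * Worked example (hypothetical flags value, for illustration only):
 * flags = 0x0C (binary 01100)
 *	Exec State  (bit 0)	= 0  -> AArch64
 *	Endianness  (bit 1)	= 0  -> little-endian
 *	Secure (TZ) (bit 2)	= 1  -> Secure
 *	EL	    (bits 3:4)	= 01 -> EL1
 *	CPU#	    (bits 5:6)	= 00 -> A53_0
 * i.e. a secure, little-endian, AArch64 EL1 partition on A53_0, which
 * xbl_handover() below would route to the BL32 entry point.
 */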

#define XBL_FLAGS_ESTATE_SHIFT		0U
#define XBL_FLAGS_ESTATE_MASK		(1U << XBL_FLAGS_ESTATE_SHIFT)
#define XBL_FLAGS_ESTATE_A64		0U
#define XBL_FLAGS_ESTATE_A32		1U

#define XBL_FLAGS_ENDIAN_SHIFT		1U
#define XBL_FLAGS_ENDIAN_MASK		(1U << XBL_FLAGS_ENDIAN_SHIFT)
#define XBL_FLAGS_ENDIAN_LE		0U
#define XBL_FLAGS_ENDIAN_BE		1U

#define XBL_FLAGS_TZ_SHIFT		2U
#define XBL_FLAGS_TZ_MASK		(1U << XBL_FLAGS_TZ_SHIFT)
#define XBL_FLAGS_NON_SECURE		0U
#define XBL_FLAGS_SECURE		1U

#define XBL_FLAGS_EL_SHIFT		3U
#define XBL_FLAGS_EL_MASK		(3U << XBL_FLAGS_EL_SHIFT)
#define XBL_FLAGS_EL0			0U
#define XBL_FLAGS_EL1			1U
#define XBL_FLAGS_EL2			2U
#define XBL_FLAGS_EL3			3U

#define XBL_FLAGS_CPU_SHIFT		5U
#define XBL_FLAGS_CPU_MASK		(3U << XBL_FLAGS_CPU_SHIFT)
#define XBL_FLAGS_A53_0			0U
#define XBL_FLAGS_A53_1			1U
#define XBL_FLAGS_A53_2			2U
#define XBL_FLAGS_A53_3			3U

#if defined(PLAT_versal_net)
#define XBL_FLAGS_CLUSTER_SHIFT		11U
#define XBL_FLAGS_CLUSTER_MASK		GENMASK(12, 11)
#define XBL_FLAGS_CLUSTER_0		0U
#endif /* PLAT_versal_net */

/**
 * get_xbl_cpu() - Get the target CPU for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_A53_0, XBL_FLAGS_A53_1, XBL_FLAGS_A53_2 or XBL_FLAGS_A53_3.
 *
 */
static int32_t get_xbl_cpu(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CPU_MASK;

	return (int32_t)(flags >> XBL_FLAGS_CPU_SHIFT);
}

/**
 * get_xbl_el() - Get the target exception level for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_EL0, XBL_FLAGS_EL1, XBL_FLAGS_EL2 or XBL_FLAGS_EL3.
 *
 */
static int32_t get_xbl_el(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_EL_MASK;

	return (int32_t)(flags >> XBL_FLAGS_EL_SHIFT);
}

/**
 * get_xbl_ss() - Get the target security state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_NON_SECURE or XBL_FLAGS_SECURE.
 *
 */
static int32_t get_xbl_ss(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_TZ_MASK;

	return (int32_t)(flags >> XBL_FLAGS_TZ_SHIFT);
}

/**
 * get_xbl_endian() - Get the target endianness for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: SPSR_E_LITTLE or SPSR_E_BIG.
 *
 */
static int32_t get_xbl_endian(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;

	flags >>= XBL_FLAGS_ENDIAN_SHIFT;

	if (flags == XBL_FLAGS_ENDIAN_BE) {
		return SPSR_E_BIG;
	} else {
		return SPSR_E_LITTLE;
	}
}

/**
 * get_xbl_estate() - Get the target execution state for partition.
 * @partition: Pointer to partition struct.
 *
 * Return: XBL_FLAGS_ESTATE_A32 or XBL_FLAGS_ESTATE_A64.
 *
 */
static int32_t get_xbl_estate(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_ESTATE_MASK;

	return (int32_t)(flags >> XBL_FLAGS_ESTATE_SHIFT);
}

#if defined(PLAT_versal_net)
/**
 * get_xbl_cluster() - Get the cluster number.
 * @partition: Pointer to the partition structure.
 *
 * Return: Cluster number for the partition.
 */
static int32_t get_xbl_cluster(const struct xbl_partition *partition)
{
	uint64_t flags = partition->flags & XBL_FLAGS_CLUSTER_MASK;

	return (int32_t)(flags >> XBL_FLAGS_CLUSTER_SHIFT);
}
#endif /* PLAT_versal_net */

/**
 * xbl_handover() - Populate the bl32 and bl33 image info structures.
 * @bl32: BL32 image info structure.
 * @bl33: BL33 image info structure.
 * @handoff_addr: TF-A handoff address.
 *
 * Process the handoff parameters from the XBL and populate the BL32 and BL33
 * image info structures accordingly.
 *
 * Return: The status of the handoff. The value will be from the xbl_handoff
 *         enum.
 *
 */
enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
			      entry_point_info_t *bl33,
			      uint64_t handoff_addr)
{
	const struct xbl_handoff_params *HandoffParams;

	if (!handoff_addr) {
		WARN("BL31: No handoff structure passed\n");
		return XBL_HANDOFF_NO_STRUCT;
	}

	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
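
	/* The handoff structure is identified by the ASCII magic "XLNX". */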
	if ((HandoffParams->magic[0] != 'X') ||
	    (HandoffParams->magic[1] != 'L') ||
	    (HandoffParams->magic[2] != 'N') ||
	    (HandoffParams->magic[3] != 'X')) {
		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n",
		      handoff_addr);
		return XBL_HANDOFF_INVAL_STRUCT;
	}

	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
		handoff_addr, HandoffParams->num_entries);

	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
		return XBL_HANDOFF_TOO_MANY_PARTS;
	}

	/*
	 * We loop over all passed entries but only populate two image structs
	 * (bl32, bl33), i.e. the last applicable images in the handoff
	 * structure will be used for the handoff.
	 */
	for (size_t i = 0; i < HandoffParams->num_entries; i++) {
		entry_point_info_t *image;
		int32_t target_estate, target_secure, target_cpu;
		uint32_t target_endianness, target_el;

		VERBOSE("BL31: %zu: entry:0x%" PRIx64 ", flags:0x%" PRIx64 "\n", i,
			HandoffParams->partition[i].entry_point,
			HandoffParams->partition[i].flags);

#if defined(PLAT_versal_net)
		uint32_t target_cluster;

		target_cluster = get_xbl_cluster(&HandoffParams->partition[i]);
		if (target_cluster != XBL_FLAGS_CLUSTER_0) {
			WARN("BL31: invalid target Cluster (%u)\n",
			     target_cluster);
			continue;
		}
#endif /* PLAT_versal_net */
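
		/* Hand off only to partitions that target the primary core (A53_0). */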
		target_cpu = get_xbl_cpu(&HandoffParams->partition[i]);
		if (target_cpu != XBL_FLAGS_A53_0) {
			WARN("BL31: invalid target CPU (%i)\n", target_cpu);
			continue;
		}
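
		/*
		 * BL31 itself runs at EL3 and EL0 is not a valid payload
		 * target, so only EL1 and EL2 entries are accepted.
		 */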
		target_el = get_xbl_el(&HandoffParams->partition[i]);
		if ((target_el == XBL_FLAGS_EL3) ||
		    (target_el == XBL_FLAGS_EL0)) {
			WARN("BL31: invalid target exception level (%u)\n",
			     target_el);
			continue;
		}
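
		/*
		 * A secure payload at EL2 is not a supported combination:
		 * secure images are entered at S-EL1 (AArch64) or in Secure
		 * SVC mode (AArch32), as set up below.
		 */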
		target_secure = get_xbl_ss(&HandoffParams->partition[i]);
		if (target_secure == XBL_FLAGS_SECURE &&
		    target_el == XBL_FLAGS_EL2) {
			WARN("BL31: invalid security state (%i) for exception level (%u)\n",
			     target_secure, target_el);
			continue;
		}

		target_estate = get_xbl_estate(&HandoffParams->partition[i]);
		target_endianness = get_xbl_endian(&HandoffParams->partition[i]);
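
		/*
		 * Secure partitions are handed to BL32 and non-secure ones to
		 * BL33; build the SPSR for the requested execution state and
		 * endianness.
		 */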
		if (target_secure == XBL_FLAGS_SECURE) {
			image = bl32;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				bl32->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				bl32->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		} else {
			image = bl33;

			if (target_estate == XBL_FLAGS_ESTATE_A32) {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE32_hyp;
				} else {
					target_el = MODE32_sys;
				}

				bl33->spsr = SPSR_MODE32(target_el, SPSR_T_ARM,
							 target_endianness,
							 DISABLE_ALL_EXCEPTIONS);
			} else {
				if (target_el == XBL_FLAGS_EL2) {
					target_el = MODE_EL2;
				} else {
					target_el = MODE_EL1;
				}

				bl33->spsr = SPSR_64(target_el, MODE_SP_ELX,
						     DISABLE_ALL_EXCEPTIONS);
			}
		}

		VERBOSE("Setting up %s entry point to:%" PRIx64 ", el:%x\n",
			target_secure == XBL_FLAGS_SECURE ? "BL32" : "BL33",
			HandoffParams->partition[i].entry_point, target_el);

		image->pc = HandoffParams->partition[i].entry_point;

		if (target_endianness == SPSR_E_BIG) {
			EP_SET_EE(image->h.attr, EP_EE_BIG);
		} else {
			EP_SET_EE(image->h.attr, EP_EE_LITTLE);
		}
	}

	return XBL_HANDOFF_SUCCESS;
}
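
/*
 * Example caller (illustrative sketch only): a platform's early setup code
 * might consume the handoff as below. The function name and the static
 * entry-point variables are hypothetical and not part of this file; the
 * SET_PARAM_HEAD()/SET_SECURITY_STATE() helpers are standard TF-A macros.
 *
 *	static entry_point_info_t bl32_image_ep_info;
 *	static entry_point_info_t bl33_image_ep_info;
 *
 *	void example_early_setup(uint64_t handoff_addr)
 *	{
 *		SET_PARAM_HEAD(&bl32_image_ep_info, PARAM_EP, VERSION_1, 0);
 *		SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
 *		SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
 *		SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);
 *
 *		if (xbl_handover(&bl32_image_ep_info, &bl33_image_ep_info,
 *				 handoff_addr) != XBL_HANDOFF_SUCCESS) {
 *			WARN("BL31: using default entry points\n");
 *		}
 *	}
 */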