1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
|
/*
* Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common/debug.h>
#include <lib/mmio.h>
#include <scu.h>
#include <mcucfg.h>
#include <drivers/delay_timer.h>
#include <mcsi/mcsi.h>
#define MAX_CLUSTERS 5
static unsigned long cci_base_addr;
static unsigned int cci_cluster_ix_to_iface[MAX_CLUSTERS];
/*
 * Initialize the MCSI driver state.
 *
 * cci_base:        base address of the MCSI/CCI register block
 *                  (must be non-zero).
 * num_cci_masters: number of cluster slave interfaces to map;
 *                  must be smaller than MAX_CLUSTERS.
 *
 * Records the register base and precomputes the per-cluster slave
 * interface offsets later consumed by get_slave_iface_base().
 *
 * Fix: loop index is now unsigned to match num_cci_masters and avoid
 * a signed/unsigned comparison in the loop condition.
 */
void mcsi_init(unsigned long cci_base,
	       unsigned int num_cci_masters)
{
	unsigned int i;

	assert(cci_base);
	assert(num_cci_masters < MAX_CLUSTERS);

	cci_base_addr = cci_base;

	/* Cluster i drives slave interface i, allocated consecutively. */
	for (i = 0; i < num_cci_masters; i++)
		cci_cluster_ix_to_iface[i] = SLAVE_IFACE_OFFSET(i);
}
/*
 * Flush the snoop filter (SF) and wait for completion.
 *
 * BIU dynamic clock management (DCM) is disabled around the flush (the
 * hardware requires it for the SF flush to be safe) and restored on the
 * success path.  Polls the FLUSH_SF self-clearing bit for up to 10 ms.
 *
 * Fix: log message typo "SF lush" corrected to "SF flush".
 */
void mcsi_cache_flush(void)
{
	/* Poll for at most 10 ms: 10000 iterations x 1 us delay. */
	int timeout = 10000;

	/* to make flush by SF safe, need to disable BIU DCM */
	mmio_clrbits_32(CCI_CLK_CTRL, 1 << 8);
	mmio_write_32(cci_base_addr + FLUSH_SF, 0x1);

	/* FLUSH_SF bit 0 self-clears when the flush completes. */
	for (; timeout; timeout--, udelay(1)) {
		if ((mmio_read_32(cci_base_addr + FLUSH_SF) & 0x1) == 0x0)
			break;
	}

	if (!timeout) {
		INFO("SF flush timeout\n");
		/*
		 * NOTE(review): BIU DCM is left disabled on this path while
		 * the flush may still be in flight -- confirm that leaving
		 * DCM off on timeout is the intended policy.
		 */
		return;
	}

	/* enable BIU DCM as it was */
	mmio_setbits_32(CCI_CLK_CTRL, 1 << 8);
}
/*
 * Map an MPIDR to the absolute address of its cluster's MCSI slave
 * interface.
 *
 * The TF topology code is assumed to allocate affinity instances
 * consecutively from zero; calling this for a cluster that mcsi_init()
 * has not mapped is a programming error (caught by the asserts).
 */
static inline unsigned long get_slave_iface_base(unsigned long mpidr)
{
	unsigned int cluster =
		(mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;

	assert(cluster < MAX_CLUSTERS);
	assert(cci_cluster_ix_to_iface[cluster] != 0);

	return cci_base_addr + cci_cluster_ix_to_iface[cluster];
}
/*
 * Bring the calling CPU's cluster into the coherency domain.
 *
 * mpidr: MPIDR of a CPU in the cluster to attach; selects the MCSI
 *        slave interface configured by mcsi_init().
 *
 * Reads the interface's capability bits, waits for outstanding snoop
 * transactions to drain, enables snooping and/or DVM messages as
 * supported, then waits again for the change to take effect.
 */
void cci_enable_cluster_coherency(unsigned long mpidr)
{
	unsigned long iface;
	unsigned int caps;
	unsigned int enable = 0;

	assert(cci_base_addr);
	iface = get_slave_iface_base(mpidr);
	caps = mmio_read_32(iface);

	/* Drain any snoop transactions still pending on the interconnect. */
	while ((mmio_read_32(cci_base_addr + SNP_PENDING_REG) >>
			SNP_PENDING) != 0)
		;

	if (caps & SNP_SUPPORT)
		enable |= SNOOP_EN_BIT;
	if (caps & DVM_SUPPORT)
		enable |= DVM_EN_BIT;
	mmio_write_32(iface, caps | enable);

	/* Wait for the dust to settle down */
	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;
}
#if ERRATA_MCSIB_SW
#pragma weak mcsib_sw_workaround_main
#endif
/*
 * Take the calling CPU's cluster out of the coherency domain.
 *
 * mpidr: MPIDR of a CPU in the cluster to detach; selects the MCSI
 *        slave interface configured by mcsi_init().
 *
 * Waits for outstanding snoop transactions to drain, clears the
 * snoop/DVM enable bits on the cluster's slave interface, optionally
 * runs the MCSI-B software errata workaround, then waits again for the
 * change to take effect.  The statement order is hardware-mandated.
 */
void cci_disable_cluster_coherency(unsigned long mpidr)
{
unsigned long slave_base;
unsigned int config = 0;
assert(cci_base_addr);
slave_base = get_slave_iface_base(mpidr);
/* Drain any snoop transactions still pending on the interconnect. */
while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
;
config = mmio_read_32(slave_base);
config &= ~(DVM_EN_BIT | SNOOP_EN_BIT);
/* Disable Snoops and DVM messages */
mmio_write_32(slave_base, config);
#if ERRATA_MCSIB_SW
/* Declared weak above; the platform supplies the real workaround. */
mcsib_sw_workaround_main();
#endif
/* Wait for the dust to settle down */
while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
;
}
/*
 * Toggle the SECURE_ACC_EN bit in the central control register.
 *
 * status: NS_ACC sets the bit; any other value clears it.
 */
void cci_secure_switch(unsigned int status)
{
	unsigned int ctrl;

	ctrl = mmio_read_32(cci_base_addr + CENTRAL_CTRL_REG);
	ctrl = (status == NS_ACC) ? (ctrl | SECURE_ACC_EN)
				  : (ctrl & ~SECURE_ACC_EN);
	mmio_write_32(cci_base_addr + CENTRAL_CTRL_REG, ctrl);
}
/*
 * Toggle the PMU_SECURE_ACC_EN bit in the central control register.
 *
 * status: NS_ACC sets the bit; any other value clears it.
 */
void cci_pmu_secure_switch(unsigned int status)
{
	unsigned int ctrl;

	ctrl = mmio_read_32(cci_base_addr + CENTRAL_CTRL_REG);
	ctrl = (status == NS_ACC) ? (ctrl | PMU_SECURE_ACC_EN)
				  : (ctrl & ~PMU_SECURE_ACC_EN);
	mmio_write_32(cci_base_addr + CENTRAL_CTRL_REG, ctrl);
}
/*
 * Trigger one snoop-filter bank initialization and poll until done.
 *
 * trig: self-clearing trigger bit to write to SF_INIT_REG.
 * done: completion bit to wait for in SF_INIT_REG.
 */
static void trigger_sf_init(unsigned int trig, unsigned int done)
{
	mmio_write_32(cci_base_addr + SF_INIT_REG, trig);
	/* Trigger bit self-clears once the hardware accepts the request. */
	while ((mmio_read_32(cci_base_addr + SF_INIT_REG) & trig) != 0)
		;
	/* Then wait for the bank to report completion. */
	while ((mmio_read_32(cci_base_addr + SF_INIT_REG) & done) == 0)
		;
}

/*
 * Initialize both snoop-filter banks (SF1 then SF2), after draining
 * any pending snoop transactions.
 */
void cci_init_sf(void)
{
	while (mmio_read_32(cci_base_addr + SNP_PENDING_REG) >> SNP_PENDING)
		;
	/* init sf1 */
	trigger_sf_init(TRIG_SF1_INIT, SF1_INIT_DONE);
	/* init sf2 */
	trigger_sf_init(TRIG_SF2_INIT, SF2_INIT_DONE);
}
/* Set the INT_EN bit in the central control register. */
void cci_interrupt_en(void)
{
	unsigned long reg = cci_base_addr + CENTRAL_CTRL_REG;

	mmio_setbits_32(reg, INT_EN);
}
/*
 * Generic accessor for MCSI registers.
 *
 * op:     one of MCSI_REG_ACCESS_{READ,WRITE,SET_BITMASK,CLEAR_BITMASK};
 *         any other value is silently ignored.
 * offset: byte offset from the MCSI base; panics if it exceeds
 *         MSCI_MEMORY_SZ or if the driver is uninitialized.
 * val:    value (or bit mask) for the write/set/clear operations.
 *
 * Returns the value read for MCSI_REG_ACCESS_READ, 0 otherwise.
 * Writes are followed by a dsb() so they complete before returning.
 */
unsigned long cci_reg_access(unsigned int op, unsigned long offset,
			     unsigned long val)
{
	unsigned long result = 0;
	unsigned long addr;

	if ((cci_base_addr == 0) || (offset > MSCI_MEMORY_SZ))
		panic();

	addr = cci_base_addr + offset;

	if (op == MCSI_REG_ACCESS_READ) {
		result = mmio_read_32(addr);
	} else if (op == MCSI_REG_ACCESS_WRITE) {
		mmio_write_32(addr, val);
		dsb();
	} else if (op == MCSI_REG_ACCESS_SET_BITMASK) {
		mmio_setbits_32(addr, val);
		dsb();
	} else if (op == MCSI_REG_ACCESS_CLEAR_BITMASK) {
		mmio_clrbits_32(addr, val);
		dsb();
	}
	/* Unknown op codes fall through and return 0. */

	return result;
}
|