// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 Google, Inc
 * Author: Alexandru M Stan <amstan@chromium.org>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>

#include "clk.h"
struct rockchip_mmc_clock {
	struct clk_hw	hw;
	void __iomem	*reg;
	int		id;
	int		shift;
	int		cached_phase;
	struct notifier_block clk_rate_change_nb;
};

#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)

#define RK3288_MMC_CLKGEN_DIV	2

static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	return parent_rate / RK3288_MMC_CLKGEN_DIV;
}
#define ROCKCHIP_MMC_DELAY_SEL		BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK	0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET	2
#define ROCKCHIP_MMC_DELAYNUM_MASK	(0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)

#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
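
/*
 * Illustrative arithmetic, assuming the nominal 60ps element above and a
 * 200MHz card clock (5000ps period): one delay element is worth
 * 60 / 5000 * 360 ~= 4.3 degrees of phase, so the full 255-element range
 * spans roughly 1100 degrees at that rate.
 */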

static int rockchip_mmc_get_phase(struct clk_hw *hw)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* Constant signal, no measurable phase shift */
	if (!rate)
		return 0;

	raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;

	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 1000000 */
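		/*
		 * Derivation sketch: each delay element adds
		 * ROCKCHIP_MMC_DELAY_ELEMENT_PSEC of delay and one full
		 * period (PSECS_PER_SEC / rate) is 360 degrees, so
		 * degrees-per-element = 360 * rate * 60 / 10^12.
		 * factor is that value scaled by 10^6 so the integer
		 * math below stays precise.
		 */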
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
					36 * (rate / 10000);

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
	}

	return degrees % 360;
}

static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
{
	struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
	unsigned long rate = clk_hw_get_rate(hw);
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the MMC
	 * host to the card, which requires that this phase clock inherit
	 * its rate from its parent, namely the MMC host's output clock
	 * provider. Things go wrong if
	 * (1) it is orphaned, or
	 * (2) it is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the most
	 * common problem in practice and which makes unstable mmc tuning
	 * results difficult to debug.
	 */
	if (!rate) {
		pr_err("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ non-monotonic
	 * though, so we should be OK. Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 *   1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 *   .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if delay is actually 77ps):
	 *   1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
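	/*
	 * Derivation sketch: the number of delay elements needed is the
	 * remainder / 360 fraction of one period, divided by the element size:
	 *   delay = remainder * (PSECS_PER_SEC / rate) / 360
	 *			/ ROCKCHIP_MMC_DELAY_ELEMENT_PSEC
	 * The constants below are that expression with the large factors
	 * pre-divided so every intermediate value fits in 32 bits.
	 */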
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
			(rate / 1000) * 36 *
				(ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));

	delay_num = (u8) min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;
	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
	       mmc_clock->reg);

	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
		 clk_hw_get_name(hw), degrees, delay_num,
		 mmc_clock->reg, raw_value >> (mmc_clock->shift),
		 rockchip_mmc_get_phase(hw));

	return 0;
}

static const struct clk_ops rockchip_mmc_clk_ops = {
	.recalc_rate	= rockchip_mmc_recalc,
	.get_phase	= rockchip_mmc_get_phase,
	.set_phase	= rockchip_mmc_set_phase,
};

#define to_rockchip_mmc_clock(x) \
	container_of(x, struct rockchip_mmc_clock, clk_rate_change_nb)

static int rockchip_mmc_clk_rate_notify(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct rockchip_mmc_clock *mmc_clock = to_rockchip_mmc_clock(nb);
	struct clk_notifier_data *ndata = data;

	/*
	 * rockchip_mmc_clk is mostly used by mmc controllers to sample
	 * the input data, which expects a fixed phase after the tuning
	 * process. However, if the clock rate is changed, the phase becomes
	 * stale and may break the data sampling. So here we try to restore
	 * the phase for that case, except when
	 * (1) cached_phase is invalid, since we inevitably cache it when the
	 * clock provider is reparented from orphan to its real parent in the
	 * first place. Otherwise we may mess up the initialization of MMC
	 * cards, since we only set the default sample phase and drive phase
	 * later on.
	 * (2) the new rate is higher than the old one. The mmc driver sets
	 * max-frequency to match the board's ability and we must not go
	 * beyond that, otherwise tests smoke out the issue.
	 */
	if (ndata->old_rate <= ndata->new_rate)
		return NOTIFY_DONE;

	if (event == PRE_RATE_CHANGE)
		mmc_clock->cached_phase =
			rockchip_mmc_get_phase(&mmc_clock->hw);
	else if (mmc_clock->cached_phase != -EINVAL &&
		 event == POST_RATE_CHANGE)
		rockchip_mmc_set_phase(&mmc_clock->hw, mmc_clock->cached_phase);

	return NOTIFY_DONE;
}

struct clk *rockchip_clk_register_mmc(const char *name,
				const char *const *parent_names, u8 num_parents,
				void __iomem *reg, int shift)
{
	struct clk_init_data init;
	struct rockchip_mmc_clock *mmc_clock;
	struct clk *clk;
	int ret;

	mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
	if (!mmc_clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = 0;
	init.num_parents = num_parents;
	init.parent_names = parent_names;
	init.ops = &rockchip_mmc_clk_ops;
	mmc_clock->hw.init = &init;
	mmc_clock->reg = reg;
	mmc_clock->shift = shift;

	clk = clk_register(NULL, &mmc_clock->hw);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_register;
	}

	mmc_clock->clk_rate_change_nb.notifier_call =
		&rockchip_mmc_clk_rate_notify;
	ret = clk_notifier_register(clk, &mmc_clock->clk_rate_change_nb);
	if (ret)
		goto err_notifier;

	return clk;
err_notifier:
	clk_unregister(clk);
err_register:
	kfree(mmc_clock);
	return ERR_PTR(ret);
}
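
/*
 * Illustrative caller sketch (hypothetical clock names and register offset;
 * the real call sites live in the per-SoC rockchip clock drivers):
 *
 *	static const char *const sdmmc_parents[] = { "sclk_sdmmc" };
 *
 *	clk = rockchip_clk_register_mmc("sdmmc_sample", sdmmc_parents,
 *					ARRAY_SIZE(sdmmc_parents),
 *					base + 0x200, 0);
 *	if (IS_ERR(clk))
 *		pr_err("failed to register sdmmc_sample: %ld\n", PTR_ERR(clk));
 */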