// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Nuvoton Technology Corp.
 * Author: Chi-Fang Li <cfli0@nuvoton.com>
 */

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>

#include "clk-ma35d1.h"
struct ma35d1_adc_clk_div {
	struct clk_hw hw;
	void __iomem *reg;		/* divider control register */
	u8 shift;			/* offset of the divider field */
	u8 width;			/* width of the divider field, in bits */
	u32 mask;			/* extra bit(s) ORed in on each rate change */
	const struct clk_div_table *table;
	/* protects concurrent access to clock divider registers */
	spinlock_t *lock;
};

static inline struct ma35d1_adc_clk_div *to_ma35d1_adc_clk_div(struct clk_hw *_hw)
{
	return container_of(_hw, struct ma35d1_adc_clk_div, hw);
}
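
/*
 * Read the divider field back from hardware and translate it to a rate.
 * The field stores (val - 1), so add one before the table lookup; the
 * table then maps val to the actual division factor (2 * val).
 */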
static unsigned long ma35d1_clkdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	unsigned int val;
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	val = readl_relaxed(dclk->reg) >> dclk->shift;
	val &= clk_div_mask(dclk->width);
	val += 1;

	return divider_recalc_rate(hw, parent_rate, val, dclk->table,
				   CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}

static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
{
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	return divider_round_rate(hw, rate, prate, dclk->table,
				  dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
}
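
/*
 * Program the divider. The value written to the field is (val - 1),
 * mirroring the +1 in recalc_rate. dclk->mask is ORed into the register
 * on every write; judging by how it is built in ma35d1_reg_adc_clkdiv()
 * (a single caller-specified bit, or zero), it is presumably a
 * divider-select/enable bit that must stay set while the divided clock
 * is in use.
 */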
static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
{
	int value;
	unsigned long flags;
	u32 data;
	struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);

	value = divider_get_val(rate, parent_rate, dclk->table,
				dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
	if (value < 0)
		return value;

	spin_lock_irqsave(dclk->lock, flags);

	data = readl_relaxed(dclk->reg);
	data &= ~(clk_div_mask(dclk->width) << dclk->shift);
	data |= (value - 1) << dclk->shift;
	data |= dclk->mask;
	writel_relaxed(data, dclk->reg);

	spin_unlock_irqrestore(dclk->lock, flags);

	return 0;
}

static const struct clk_ops ma35d1_adc_clkdiv_ops = {
	.recalc_rate = ma35d1_clkdiv_recalc_rate,
	.round_rate = ma35d1_clkdiv_round_rate,
	.set_rate = ma35d1_clkdiv_set_rate,
};
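
/*
 * Register an ADC clock divider. A divider table covering every possible
 * field value is built here: entry i maps field value (i + 1) to a
 * division factor of 2 * (i + 1), and the list is terminated by a zeroed
 * sentinel entry as the common clk divider code expects. @mask_bit, when
 * non-zero, names a single extra register bit to set on each rate change.
 */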
struct clk_hw *ma35d1_reg_adc_clkdiv(struct device *dev, const char *name,
				     struct clk_hw *parent_hw, spinlock_t *lock,
				     unsigned long flags, void __iomem *reg,
				     u8 shift, u8 width, u32 mask_bit)
{
	struct ma35d1_adc_clk_div *div;
	struct clk_init_data init;
	struct clk_div_table *table;
	struct clk_parent_data pdata = { .index = 0 };
	u32 max_div, min_div;
	struct clk_hw *hw;
	int ret;
	int i;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	max_div = clk_div_mask(width) + 1;
	min_div = 1;

	table = devm_kcalloc(dev, max_div + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < max_div; i++) {
		table[i].val = min_div + i;
		table[i].div = 2 * table[i].val;
	}
	table[max_div].val = 0;
	table[max_div].div = 0;

	memset(&init, 0, sizeof(init));
	init.name = name;
	init.ops = &ma35d1_adc_clkdiv_ops;
	init.flags |= flags;
	pdata.hw = parent_hw;
	init.parent_data = &pdata;
	init.num_parents = 1;

	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->mask = mask_bit ? BIT(mask_bit) : 0;
	div->lock = lock;
	div->hw.init = &init;
	div->table = table;

	hw = &div->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}
EXPORT_SYMBOL_GPL(ma35d1_reg_adc_clkdiv);
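
/*
 * Example (hypothetical, for illustration only): registering a divider
 * whose field occupies bits [7:4] of a divider register, with bit 8 as
 * the extra bit to set on rate changes. The clock name, register offset
 * and bit positions below are made up, not taken from the MA35D1 clock
 * driver:
 *
 *	hw = ma35d1_reg_adc_clkdiv(dev, "eadc_div", parent_hw, &lock, 0,
 *				   base + 0x30, 4, 4, 8);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */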