// SPDX-License-Identifier: GPL-2.0-only
/*
 * mmp gate clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/delay.h>

#include "clk.h"

/*
 * Some clocks have multiple bits to enable the clock, and the bits
 * used to disable the clock are not the same as the enable bits.
 */

#define to_clk_mmp_gate(hw) container_of(hw, struct mmp_clk_gate, hw)
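
/*
 * Illustrative example (hypothetical values, not taken from a real MMP
 * register map): a gate controlled through bits [4:0] of its register
 * might be described as
 *
 *	mask        = 0x1f	bits owned by this gate
 *	val_enable  = 0x1b	value written under the mask to enable
 *	val_disable = 0x00	value written under the mask to disable
 *
 * so enabling and disabling write different values into the same field,
 * which a plain single-bit gate cannot express.
 */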

static int mmp_clk_gate_enable(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	unsigned long rate;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);
	tmp &= ~gate->mask;
	tmp |= gate->val_enable;
	writel(tmp, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);

	if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
		rate = clk_hw_get_rate(hw);
		/*
		 * Delay for 2 clock cycles:
		 * 2 / rate seconds == 2000000 / rate microseconds.
		 */
		udelay(2000000/rate);
	}

	return 0;
}

static void mmp_clk_gate_disable(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);
	tmp &= ~gate->mask;
	tmp |= gate->val_disable;
	writel(tmp, gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
}

static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
{
	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
	unsigned long flags = 0;
	u32 tmp;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	tmp = readl(gate->reg);

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);

	return (tmp & gate->mask) == gate->val_enable;
}

const struct clk_ops mmp_clk_gate_ops = {
	.enable = mmp_clk_gate_enable,
	.disable = mmp_clk_gate_disable,
	.is_enabled = mmp_clk_gate_is_enabled,
};

struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
		unsigned int gate_flags, spinlock_t *lock)
{
	struct mmp_clk_gate *gate;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate the gate */
	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &mmp_clk_gate_ops;
	init.flags = flags;
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct clk_gate assignments */
	gate->reg = reg;
	gate->mask = mask;
	gate->val_enable = val_enable;
	gate->val_disable = val_disable;
	gate->flags = gate_flags;
	gate->lock = lock;
	gate->hw.init = &init;

	clk = clk_register(dev, &gate->hw);

	if (IS_ERR(clk))
		kfree(gate);

	return clk;
}
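
/*
 * Usage sketch (illustrative only): the clock name, parent, register
 * offset, bit values and spinlock below are hypothetical stand-ins for
 * what a platform clock driver would pass in, not values taken from a
 * real MMP clock unit.
 *
 *	static DEFINE_SPINLOCK(sdh0_lock);
 *
 *	clk = mmp_clk_register_gate(NULL, "sdh0_clk", "sdh0_mix_clk",
 *				CLK_SET_RATE_PARENT,
 *				apmu_base + 0x54, 0x1b, 0x1b, 0x0,
 *				0, &sdh0_lock);
 *
 * On enable, the bits selected by the mask (0x1b) are rewritten to 0x1b;
 * on disable they are rewritten to 0x0, matching mmp_clk_gate_enable()
 * and mmp_clk_gate_disable() above.
 */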