// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 MediaTek Inc.
 * Author: Owen Chen <owen.chen@mediatek.com>
 */

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>

#include "clk-mtk.h"
#include "clk-mux.h"

static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
{
	return container_of(hw, struct mtk_clk_mux, hw);
}

/* Ungate the mux by writing its gate bit to the write-1-to-clear register. */
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	regmap_write(mux->regmap, mux->data->clr_ofs,
		     BIT(mux->data->gate_shift));

	/*
	 * If the parent has been changed while the clock was disabled, the
	 * change is not effective yet. Set the update bit to ensure the mux
	 * gets updated.
	 */
	if (mux->reparent && mux->data->upd_shift >= 0) {
		regmap_write(mux->regmap, mux->data->upd_ofs,
			     BIT(mux->data->upd_shift));
		mux->reparent = false;
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

/* Gate the mux by writing its gate bit to the write-1-to-set register. */
static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);

	regmap_write(mux->regmap, mux->data->set_ofs,
		     BIT(mux->data->gate_shift));
}

/* The gate bit is active low: the clock is enabled when the bit is clear. */
static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 val;

	regmap_read(mux->regmap, mux->data->mux_ofs, &val);

	return (val & BIT(mux->data->gate_shift)) == 0;
}

static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val;

	regmap_read(mux->regmap, mux->data->mux_ofs, &val);
	val = (val >> mux->data->mux_shift) & mask;

	return val;
}

/*
 * Reprogram the parent selector through the set/clr register pair: clear the
 * whole selector field, set the new index, then kick the update bit (if the
 * mux has one) so the hardware latches the new selection.
 */
static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
{
	struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
	u32 mask = GENMASK(mux->data->mux_width - 1, 0);
	u32 val, orig;
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);
	else
		__acquire(mux->lock);

	regmap_read(mux->regmap, mux->data->mux_ofs, &orig);
	val = (orig & ~(mask << mux->data->mux_shift))
		| (index << mux->data->mux_shift);

	if (val != orig) {
		regmap_write(mux->regmap, mux->data->clr_ofs,
			     mask << mux->data->mux_shift);
		regmap_write(mux->regmap, mux->data->set_ofs,
			     index << mux->data->mux_shift);

		if (mux->data->upd_shift >= 0) {
			regmap_write(mux->regmap, mux->data->upd_ofs,
				     BIT(mux->data->upd_shift));
			mux->reparent = true;
		}
	}

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);
	else
		__release(mux->lock);

	return 0;
}

/* Parent selection only, for muxes without a gate bit. */
const struct clk_ops mtk_mux_clr_set_upd_ops = {
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);

/* Parent selection plus gating, for muxes with a gate bit. */
const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
	.enable = mtk_clk_mux_enable_setclr,
	.disable = mtk_clk_mux_disable_setclr,
	.is_enabled = mtk_clk_mux_is_enabled,
	.get_parent = mtk_clk_mux_get_parent,
	.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
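
/*
 * Illustrative sketch (not part of this driver): a clock provider typically
 * describes its muxes in a table of struct mtk_mux entries whose .ops points
 * at one of the clk_ops exported above. The identifiers, offsets and shifts
 * below are made-up values for illustration only; the field names follow the
 * accesses performed in this file, and SoC drivers normally fill them in via
 * the helper macros in clk-mux.h rather than by hand:
 *
 *	static const char * const example_parents[] = {
 *		"clk26m", "example_pll_d2", "example_pll_d4",
 *	};
 *
 *	static const struct mtk_mux example_muxes[] = {
 *		{
 *			.id = 0,
 *			.name = "top_example_sel",
 *			.parent_names = example_parents,
 *			.num_parents = ARRAY_SIZE(example_parents),
 *			.mux_ofs = 0x40,	// current selection / gate status
 *			.set_ofs = 0x44,	// write-1-to-set register
 *			.clr_ofs = 0x48,	// write-1-to-clear register
 *			.upd_ofs = 0x04,	// update-trigger register
 *			.mux_shift = 0,
 *			.mux_width = 2,
 *			.gate_shift = 7,
 *			.upd_shift = 0,		// use -1 when there is no update bit
 *			.ops = &mtk_mux_gate_clr_set_upd_ops,
 *		},
 *	};
 */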

/*
 * Allocate and register a single MediaTek mux clock; on registration failure
 * the allocation is freed and the error pointer from clk_register() is
 * returned.
 */
static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
					struct regmap *regmap,
					spinlock_t *lock)
{
	struct mtk_clk_mux *clk_mux;
	struct clk_init_data init = {};
	struct clk *clk;

	clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL);
	if (!clk_mux)
		return ERR_PTR(-ENOMEM);

	init.name = mux->name;
	init.flags = mux->flags | CLK_SET_RATE_PARENT;
	init.parent_names = mux->parent_names;
	init.num_parents = mux->num_parents;
	init.ops = mux->ops;

	clk_mux->regmap = regmap;
	clk_mux->data = mux;
	clk_mux->lock = lock;
	clk_mux->hw.init = &init;

	clk = clk_register(NULL, &clk_mux->hw);
	if (IS_ERR(clk)) {
		kfree(clk_mux);
		return clk;
	}

	return clk;
}

/*
 * Register a table of muxes against the syscon regmap of @node. Entries that
 * are already present in @clk_data are skipped; individual registration
 * failures are logged and the remaining entries are still registered.
 */
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
			   int num, struct device_node *node,
			   spinlock_t *lock,
			   struct clk_onecell_data *clk_data)
{
	struct regmap *regmap;
	struct clk *clk;
	int i;

	regmap = device_node_to_regmap(node);
	if (IS_ERR(regmap)) {
		pr_err("Cannot find regmap for %pOF: %ld\n", node,
		       PTR_ERR(regmap));
		return PTR_ERR(regmap);
	}

	for (i = 0; i < num; i++) {
		const struct mtk_mux *mux = &muxes[i];

		if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) {
			clk = mtk_clk_register_mux(mux, regmap, lock);

			if (IS_ERR(clk)) {
				pr_err("Failed to register clk %s: %ld\n",
				       mux->name, PTR_ERR(clk));
				continue;
			}

			clk_data->clks[mux->id] = clk;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
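
/*
 * Usage sketch (illustrative only, assuming a clk_onecell based provider):
 * a topckgen-style driver would allocate its clock array, register the mux
 * table against the regmap of its device node and then expose the result to
 * consumers. "example_topck_init", "example_muxes" and "EXAMPLE_NR_CLK" are
 * hypothetical names, not part of this file:
 *
 *	static DEFINE_SPINLOCK(example_clk_lock);
 *
 *	static void __init example_topck_init(struct device_node *node)
 *	{
 *		struct clk_onecell_data *clk_data;
 *
 *		clk_data = mtk_alloc_clk_data(EXAMPLE_NR_CLK);
 *		if (!clk_data)
 *			return;
 *
 *		mtk_clk_register_muxes(example_muxes,
 *				       ARRAY_SIZE(example_muxes), node,
 *				       &example_clk_lock, clk_data);
 *
 *		of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 *	}
 */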

MODULE_LICENSE("GPL");