// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2013 Broadcom Corporation.
 */

/*
 *
 * bcm235xx architecture clock framework
 *
 */

#include <common.h>
#include <log.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <bitfield.h>
#include <asm/arch/sysmap.h>
#include <asm/kona-common/clk.h>
#include "clk-core.h"

#define CLK_WR_ACCESS_PASSWORD	0x00a5a501
#define WR_ACCESS_OFFSET	0	/* common to all clock blocks */
#define POLICY_CTL_GO		1	/* Load and refresh policy masks */
#define POLICY_CTL_GO_ATL	4	/* Active Load */

/* Helper function */
int clk_get_and_enable(char *clkstr)
{
	int ret = 0;
	struct clk *c;

	debug("%s: %s\n", __func__, clkstr);

	c = clk_get(clkstr);
	if (c) {
		ret = clk_enable(c);
		if (ret)
			return ret;
	} else {
		printf("%s: Couldn't find %s\n", __func__, clkstr);
		return -EINVAL;
	}
	return ret;
}
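
/*
 * Example usage (the clock name here is hypothetical; valid names come
 * from the board's arch_clk_tbl):
 *
 *	if (clk_get_and_enable("sdio1_clk"))
 *		printf("could not enable sdio1_clk\n");
 */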

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value is set (or clear). Delay
 * a microsecond after each read of the register. Returns 0 if
 * successful, or -ETIMEDOUT if we gave up trying.
 *
 * Caller must ensure the CCU lock is held.
 */
#define CLK_GATE_DELAY_USEC 2000
static inline int wait_bit(void *base, u32 offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_USEC; tries++) {
		u32 val;
		bool bit_val;

		val = readl(base + offset);
		bit_val = (val & bit_mask) ? 1 : 0;
		if (bit_val == want)
			return 0;	/* success */
		udelay(1);
	}

	debug("%s: timeout on addr 0x%p, waiting for bit %d to go to %d\n",
	      __func__, base + offset, bit, want);

	return -ETIMEDOUT;
}

/* Enable a peripheral clock */
static int peri_clk_enable(struct clk *c, int enable)
{
	int ret = 0;
	u32 reg;
	struct peri_clock *peri_clk = to_peri_clk(c);
	struct peri_clk_data *cd = peri_clk->data;
	struct bcm_clk_gate *gate = &cd->gate;
	void *base = (void *)c->ccu_clk_mgr_base;

	debug("%s: %s\n", __func__, c->name);

	clk_get_rate(c); /* Make sure rate and sel are filled in */

	/* enable access */
	writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET);

	if (enable) {
		debug("%s %s set rate %lu div %lu sel %d parent %lu\n",
		      __func__, c->name, c->rate, c->div, c->sel,
		      c->parent->rate);

		/*
		 * clkgate - only software-controllable gates are
		 * supported by U-Boot, which covers all the clocks
		 * that matter. This avoids bringing in a lot of extra
		 * complexity as done in the kernel framework.
		 */
		if (gate_exists(gate)) {
			reg = readl(base + cd->gate.offset);
			reg |= (1 << cd->gate.en_bit);
			writel(reg, base + cd->gate.offset);
		}

		/* div and pll select */
		if (divider_exists(&cd->div)) {
			reg = readl(base + cd->div.offset);
			reg = bitfield_replace(reg, cd->div.shift,
					       cd->div.width, c->div - 1);
			writel(reg, base + cd->div.offset);
		}

		/* frequency selector */
		if (selector_exists(&cd->sel)) {
			reg = readl(base + cd->sel.offset);
			reg = bitfield_replace(reg, cd->sel.shift,
					       cd->sel.width, c->sel);
			writel(reg, base + cd->sel.offset);
		}

		/* trigger */
		if (trigger_exists(&cd->trig)) {
			writel((1 << cd->trig.bit), base + cd->trig.offset);

			/* wait for trigger status bit to go to 0 */
			ret = wait_bit(base, cd->trig.offset, cd->trig.bit, 0);
			if (ret)
				return ret;
		}

		/* wait for running (status_bit = 1) */
		ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit, 1);
		if (ret)
			return ret;
	} else {
		debug("%s disable clock %s\n", __func__, c->name);

		/* clkgate */
		reg = readl(base + cd->gate.offset);
		reg &= ~(1 << cd->gate.en_bit);
		writel(reg, base + cd->gate.offset);

		/* wait for stop (status_bit = 0) */
		ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit, 0);
	}

	/* disable access */
	writel(0, base + WR_ACCESS_OFFSET);

	return ret;
}

/* Set the rate of a peripheral clock */
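/*
 * Illustrative numbers only: with candidate reference clocks of 96 MHz and
 * 26 MHz, a request for 48000000 computes div = 2 against the 96 MHz
 * parent (exactly 48 MHz, diff 0) versus div = 1 against the 26 MHz parent
 * (diff 22 MHz), so the 96 MHz reference and a divider of 2 are selected.
 */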
static int peri_clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret = 0;
	int i;
	unsigned long diff;
	unsigned long new_rate = 0, div = 1;
	struct peri_clock *peri_clk = to_peri_clk(c);
	struct peri_clk_data *cd = peri_clk->data;
	const char **clock;

	debug("%s: %s\n", __func__, c->name);
	diff = rate;

	i = 0;
	for (clock = cd->clocks; *clock; clock++, i++) {
		struct refclk *ref = refclk_str_to_clk(*clock);

		if (!ref) {
			printf("%s: Lookup of %s failed\n", __func__, *clock);
			return -EINVAL;
		}

		/* round to the new rate */
		div = ref->clk.rate / rate;
		if (div == 0)
			div = 1;

		new_rate = ref->clk.rate / div;

		/* get the min diff */
		if (abs(new_rate - rate) < diff) {
			diff = abs(new_rate - rate);
			c->sel = i;
			c->parent = &ref->clk;
			c->rate = new_rate;
			c->div = div;
		}
	}

	debug("%s %s set rate %lu div %lu sel %d parent %lu\n", __func__,
	      c->name, c->rate, c->div, c->sel, c->parent->rate);
	return ret;
}

/* Get the rate of a peripheral clock */
static unsigned long peri_clk_get_rate(struct clk *c)
{
	struct peri_clock *peri_clk = to_peri_clk(c);
	struct peri_clk_data *cd = peri_clk->data;
	void *base = (void *)c->ccu_clk_mgr_base;
	int div = 1;
	const char **clock;
	struct refclk *ref;
	u32 reg;

	debug("%s: %s\n", __func__, c->name);
	if (selector_exists(&cd->sel)) {
		reg = readl(base + cd->sel.offset);
		c->sel = bitfield_extract(reg, cd->sel.shift, cd->sel.width);
	} else {
		/*
		 * For peri clocks that don't have a selector, the single
		 * reference clock will always exist at index 0.
		 */
		c->sel = 0;
	}

	if (divider_exists(&cd->div)) {
		reg = readl(base + cd->div.offset);
		div = bitfield_extract(reg, cd->div.shift, cd->div.width);
		div += 1;
	}

	clock = cd->clocks;
	ref = refclk_str_to_clk(clock[c->sel]);
	if (!ref) {
		printf("%s: Can't lookup %s\n", __func__, clock[c->sel]);
		return 0;
	}

	c->parent = &ref->clk;
	c->div = div;
	c->rate = c->parent->rate / c->div;
	debug("%s parent rate %lu div %d sel %d rate %lu\n", __func__,
	      c->parent->rate, div, c->sel, c->rate);

	return c->rate;
}

/* Peripheral clock operations */
struct clk_ops peri_clk_ops = {
	.enable = peri_clk_enable,
	.set_rate = peri_clk_set_rate,
	.get_rate = peri_clk_get_rate,
};
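
/*
 * These ops are attached to the peripheral clock definitions (declared
 * outside this file, e.g. in the SoC clock tables) and are dispatched
 * through c->ops by the generic clk_enable()/clk_set_rate()/clk_get_rate()
 * wrappers further below.
 */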

/* Enable a CCU clock */
static int ccu_clk_enable(struct clk *c, int enable)
{
	struct ccu_clock *ccu_clk = to_ccu_clk(c);
	void *base = (void *)c->ccu_clk_mgr_base;
	int ret = 0;
	u32 reg;

	debug("%s: %s\n", __func__, c->name);
	if (!enable)
		return -EINVAL;	/* a CCU clock cannot be shut down */

	/* enable access */
	writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET);

	/* config enable for policy engine */
	writel(1, base + ccu_clk->lvm_en_offset);

	/* wait for bit to go to 0 */
	ret = wait_bit(base, ccu_clk->lvm_en_offset, 0, 0);
	if (ret)
		return ret;

	/*
	 * freq ID - the frequency ID fields of the four policies sit
	 * freq_bit_shift bits apart; default to the common spacing of
	 * 8 bits if the CCU description leaves it unset.
	 */
	if (!ccu_clk->freq_bit_shift)
		ccu_clk->freq_bit_shift = 8;

	/* Set frequency id for each of the 4 policies */
	reg = ccu_clk->freq_id |
		(ccu_clk->freq_id << (ccu_clk->freq_bit_shift)) |
		(ccu_clk->freq_id << (ccu_clk->freq_bit_shift * 2)) |
		(ccu_clk->freq_id << (ccu_clk->freq_bit_shift * 3));
	writel(reg, base + ccu_clk->policy_freq_offset);

	/* enable all clock mask */
	writel(0x7fffffff, base + ccu_clk->policy0_mask_offset);
	writel(0x7fffffff, base + ccu_clk->policy1_mask_offset);
	writel(0x7fffffff, base + ccu_clk->policy2_mask_offset);
	writel(0x7fffffff, base + ccu_clk->policy3_mask_offset);

	if (ccu_clk->num_policy_masks == 2) {
		writel(0x7fffffff, base + ccu_clk->policy0_mask2_offset);
		writel(0x7fffffff, base + ccu_clk->policy1_mask2_offset);
		writel(0x7fffffff, base + ccu_clk->policy2_mask2_offset);
		writel(0x7fffffff, base + ccu_clk->policy3_mask2_offset);
	}

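	/*
	 * POLICY_CTL_GO (bit 0) kicks the policy engine and is polled
	 * below until it clears again; POLICY_CTL_GO_ATL requests an
	 * active load, per the definitions at the top of this file.
	 */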
	/* start policy engine */
	reg = readl(base + ccu_clk->policy_ctl_offset);
	reg |= (POLICY_CTL_GO + POLICY_CTL_GO_ATL);
	writel(reg, base + ccu_clk->policy_ctl_offset);

	/* wait till started */
	ret = wait_bit(base, ccu_clk->policy_ctl_offset, 0, 0);
	if (ret)
		return ret;

	/* disable access */
	writel(0, base + WR_ACCESS_OFFSET);

	return ret;
}

/* Get the CCU clock rate */
static unsigned long ccu_clk_get_rate(struct clk *c)
{
	struct ccu_clock *ccu_clk = to_ccu_clk(c);

	debug("%s: %s\n", __func__, c->name);
	c->rate = ccu_clk->freq_tbl[ccu_clk->freq_id];
	return c->rate;
}

/* CCU clock operations */
struct clk_ops ccu_clk_ops = {
	.enable = ccu_clk_enable,
	.get_rate = ccu_clk_get_rate,
};

/* Enable a bus clock */
static int bus_clk_enable(struct clk *c, int enable)
{
	struct bus_clock *bus_clk = to_bus_clk(c);
	struct bus_clk_data *cd = bus_clk->data;
	void *base = (void *)c->ccu_clk_mgr_base;
	int ret = 0;
	u32 reg;

	debug("%s: %s\n", __func__, c->name);
	/* enable access */
	writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET);

	/* enable gating */
	reg = readl(base + cd->gate.offset);
	if (!!(reg & (1 << cd->gate.status_bit)) == !!enable) {
		debug("%s already %s\n", c->name,
		      enable ? "enabled" : "disabled");
	} else {
		int want = (enable) ? 1 : 0;

		reg |= (1 << cd->gate.hw_sw_sel_bit);

		if (enable)
			reg |= (1 << cd->gate.en_bit);
		else
			reg &= ~(1 << cd->gate.en_bit);

		writel(reg, base + cd->gate.offset);
		ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit,
			       want);
		if (ret)
			return ret;
	}

	/* disable access */
	writel(0, base + WR_ACCESS_OFFSET);

	return ret;
}

/* Get the rate of a bus clock */
static unsigned long bus_clk_get_rate(struct clk *c)
{
	struct bus_clock *bus_clk = to_bus_clk(c);
	struct ccu_clock *ccu_clk;

	debug("%s: %s\n", __func__, c->name);
	ccu_clk = to_ccu_clk(c->parent);

	c->rate = bus_clk->freq_tbl[ccu_clk->freq_id];
	c->div = ccu_clk->freq_tbl[ccu_clk->freq_id] / c->rate;
	return c->rate;
}

/* Bus clock operations */
struct clk_ops bus_clk_ops = {
	.enable = bus_clk_enable,
	.get_rate = bus_clk_get_rate,
};

/* Enable a reference clock */
static int ref_clk_enable(struct clk *c, int enable)
{
	debug("%s: %s\n", __func__, c->name);
	return 0;
}

/* Reference clock operations */
struct clk_ops ref_clk_ops = {
	.enable = ref_clk_enable,
};

/*
 * clk.h implementation follows
 */

/* Initialize the clock framework */
int clk_init(void)
{
	debug("%s:\n", __func__);
	return 0;
}

/* Get a clock handle, given a name string */
struct clk *clk_get(const char *con_id)
{
	int i;
	struct clk_lookup *clk_tblp;

	debug("%s: %s\n", __func__, con_id);

	clk_tblp = arch_clk_tbl;
	for (i = 0; i < arch_clk_tbl_array_size; i++, clk_tblp++) {
		if (clk_tblp->con_id) {
			if (!con_id || strcmp(clk_tblp->con_id, con_id))
				continue;
			return clk_tblp->clk;
		}
	}
	return NULL;
}

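/*
 * Enable/disable are reference counted below: the hardware is only touched
 * when use_cnt transitions between zero and non-zero, so nested
 * clk_enable()/clk_disable() pairs from different callers are safe.
 */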
/* Enable a clock */
int clk_enable(struct clk *c)
{
	int ret = 0;

	debug("%s: %s\n", __func__, c->name);
	if (!c->ops || !c->ops->enable)
		return -1;

	/* enable parent clock first */
	if (c->parent)
		ret = clk_enable(c->parent);

	if (ret)
		return ret;

	if (!c->use_cnt)
		ret = c->ops->enable(c, 1);
	c->use_cnt++;

	return ret;
}

/* Disable a clock */
void clk_disable(struct clk *c)
{
	debug("%s: %s\n", __func__, c->name);
	if (!c->ops || !c->ops->enable)
		return;

	if (c->use_cnt > 0) {
		c->use_cnt--;
		if (c->use_cnt == 0)
			c->ops->enable(c, 0);
	}

	/* disable parent */
	if (c->parent)
		clk_disable(c->parent);
}

/* Get the clock rate */
unsigned long clk_get_rate(struct clk *c)
{
	unsigned long rate;

	if (!c || !c->ops || !c->ops->get_rate)
		return 0;
	debug("%s: %s\n", __func__, c->name);

	rate = c->ops->get_rate(c);
	debug("%s: rate = %lu\n", __func__, rate);
	return rate;
}

/* Set the clock rate */
int clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret;

	if (!c || !c->ops || !c->ops->set_rate)
		return -EINVAL;
	debug("%s: %s rate=%lu\n", __func__, c->name, rate);

	if (c->use_cnt)
		return -EINVAL;

	ret = c->ops->set_rate(c, rate);

	return ret;
}
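
/*
 * Typical driver usage (the clock name and rate are hypothetical and board
 * specific). Note that clk_set_rate() must be called before the clock is
 * enabled, since it refuses to change a clock whose use_cnt is non-zero:
 *
 *	struct clk *c = clk_get("sdio1_clk");
 *
 *	if (c && !clk_set_rate(c, 52000000))
 *		clk_enable(c);
 */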

/* Not required for this arch */
/*
long clk_round_rate(struct clk *clk, unsigned long rate);
int clk_set_parent(struct clk *clk, struct clk *parent);
struct clk *clk_get_parent(struct clk *clk);
*/