// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include "gdsc.h"

#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	GENMASK_ULL(23, 20)
#define EN_FEW_WAIT_MASK	GENMASK_ULL(19, 16)
#define CLK_DIS_WAIT_MASK	GENMASK_ULL(15, 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
#define GDSC_RETAIN_FF_ENABLE		BIT(11)
#define CFG_GDSCR_OFFSET		0x4

/*
 * Wait 2^n CXO cycles between all states. Here, n = 2 (4 cycles) for the
 * EN_REST and CLK_DIS states and n = 8 (256 cycles) for the EN_FEW state.
 */
#define EN_REST_WAIT_VAL	(0x2 << 20)
#define EN_FEW_WAIT_VAL		(0x8 << 16)
#define CLK_DIS_WAIT_VAL	(0x2 << 12)

#define RETAIN_MEM	BIT(14)
#define RETAIN_PERIPH	BIT(13)

#define TIMEOUT_US	500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)

enum gdsc_status {
	GDSC_OFF,
	GDSC_ON
};

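/*
 * Some GDSCs can only be safely accessed while their provider device is
 * powered; sc->dev is set (in gdsc_register()) only when runtime PM is
 * enabled for that device, so these helpers are no-ops otherwise.
 */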
static int gdsc_pm_runtime_get(struct gdsc *sc)
{
	if (!sc->dev)
		return 0;

	return pm_runtime_resume_and_get(sc->dev);
}

static int gdsc_pm_runtime_put(struct gdsc *sc)
{
	if (!sc->dev)
		return 0;

	return pm_runtime_put_sync(sc->dev);
}

/* Returns 1 if the GDSC is in the requested status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
	unsigned int reg;
	u32 val;
	int ret;

	if (sc->flags & POLL_CFG_GDSCR)
		reg = sc->gdscr + CFG_GDSCR_OFFSET;
	else if (sc->gds_hw_ctrl)
		reg = sc->gds_hw_ctrl;
	else
		reg = sc->gdscr;

	ret = regmap_read(sc->regmap, reg, &val);
	if (ret)
		return ret;

	if (sc->flags & POLL_CFG_GDSCR) {
		switch (status) {
		case GDSC_ON:
			return !!(val & GDSC_POWER_UP_COMPLETE);
		case GDSC_OFF:
			return !!(val & GDSC_POWER_DOWN_COMPLETE);
		}
	}

	switch (status) {
	case GDSC_ON:
		return !!(val & PWR_ON_MASK);
	case GDSC_OFF:
		return !(val & PWR_ON_MASK);
	}

	return -EINVAL;
}

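/*
 * Hand the GDSC over to hardware control (e.g. to a remote processor such as
 * the GPU's GMU) or reclaim it for software control.
 */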
static int gdsc_hwctrl(struct gdsc *sc, bool en)
{
	u32 val = en ? HW_CONTROL_MASK : 0;

	return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
}

static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
{
	ktime_t start;

	start = ktime_get();
	do {
		if (gdsc_check_status(sc, status))
			return 0;
	} while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);

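	/*
	 * Check one final time after the deadline has passed, so that a long
	 * preemption between the last in-loop check and the timeout test
	 * doesn't turn into a spurious -ETIMEDOUT.
	 */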
	if (gdsc_check_status(sc, status))
		return 0;

	return -ETIMEDOUT;
}

static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status)
{
	int ret;
	u32 val = (status == GDSC_ON) ? 0 : SW_COLLAPSE_MASK;

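	/* On power-up, enable the external supply before releasing SW collapse */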
	if (status == GDSC_ON && sc->rsupply) {
		ret = regulator_enable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
	if (ret)
		return ret;

	/* If disabling votable gdscs, don't poll on status */
	if ((sc->flags & VOTABLE) && status == GDSC_OFF) {
		/*
		 * Add a short delay here to ensure that an enable
		 * right after it was disabled does not put it in an
		 * unknown state.
		 */
		udelay(TIMEOUT_US);
		return 0;
	}

	if (sc->gds_hw_ctrl) {
		/*
		 * The gds hw controller asserts/de-asserts the status bit soon
		 * after it receives a power on/off request from a master.
		 * The controller then takes around 8 xo cycles to start its
		 * internal state machine and update the status bit. During
		 * this time, the status bit does not reflect the true status
		 * of the core.
		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
		 * and polling the status bit.
		 */
		udelay(1);
	}

	ret = gdsc_poll_status(sc, status);
	WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

	if (!ret && status == GDSC_OFF && sc->rsupply) {
		ret = regulator_disable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static inline int gdsc_deassert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
	return 0;
}

static inline int gdsc_assert_reset(struct gdsc *sc)
{
	int i;

	for (i = 0; i < sc->reset_count; i++)
		sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
	return 0;
}

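/*
 * Set RETAIN_MEM (and RETAIN_PERIPH, unless NO_RET_PERIPH is set) in every
 * associated CXC register so memory and peripheral state is retained while
 * the domain is collapsed; gdsc_clear_mem_on() drops those bits again.
 */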
static inline void gdsc_force_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
}

static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
	int i;
	u32 mask = RETAIN_MEM;

	if (!(sc->flags & NO_RET_PERIPH))
		mask |= RETAIN_PERIPH;

	for (i = 0; i < sc->cxc_count; i++)
		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
}

static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 0);
}

static inline void gdsc_assert_clamp_io(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_CLAMP_IO_MASK, 1);
}

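/* Pulse the always-on GMEM reset: assert, hold for 1 us, then de-assert */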
static inline void gdsc_assert_reset_aon(struct gdsc *sc)
{
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, GMEM_RESET_MASK);
	udelay(1);
	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
			   GMEM_RESET_MASK, 0);
}

static void gdsc_retain_ff_on(struct gdsc *sc)
{
	u32 mask = GDSC_RETAIN_FF_ENABLE;

	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}

static int _gdsc_enable(struct gdsc *sc)
{
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_deassert_reset(sc);

	if (sc->flags & SW_RESET) {
		gdsc_assert_reset(sc);
		udelay(1);
		gdsc_deassert_reset(sc);
	}

	if (sc->flags & CLAMP_IO) {
		if (sc->flags & AON_RESET)
			gdsc_assert_reset_aon(sc);
		gdsc_deassert_clamp_io(sc);
	}

	ret = gdsc_toggle_logic(sc, GDSC_ON);
	if (ret)
		return ret;

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_force_mem_on(sc);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the power domain is
	 * enabled. Delay to account for this. A delay is also needed to ensure
	 * clocks are not enabled within 400ns of enabling power to the
	 * memories.
	 */
	udelay(1);

	/* Turn on HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, true);
		if (ret)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. If firmware ends up polling the status
		 * bits for the gdsc, it might read an 'on' status
		 * before the GDSC has finished the power cycle.
		 * Wait 1 us before returning to ensure firmware can't
		 * immediately poll the status bits.
		 */
		udelay(1);
	}

	if (sc->flags & RETAIN_FF_ENABLE)
		gdsc_retain_ff_on(sc);

	return 0;
}

static int gdsc_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = gdsc_pm_runtime_get(sc);
	if (ret)
		return ret;

	return _gdsc_enable(sc);
}

static int _gdsc_disable(struct gdsc *sc)
{
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_assert_reset(sc);

	/* Turn off HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, false);
		if (ret < 0)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle. If we poll the status bits before the
		 * power cycle has completed, we might wrongly read
		 * an 'on' status.
		 */
		udelay(1);

		ret = gdsc_poll_status(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_clear_mem_on(sc);

	ret = gdsc_toggle_logic(sc, GDSC_OFF);
	if (ret)
		return ret;

	if (sc->flags & CLAMP_IO)
		gdsc_assert_clamp_io(sc);

	return 0;
}

static int gdsc_disable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = _gdsc_disable(sc);

	gdsc_pm_runtime_put(sc);

	return ret;
}

static int gdsc_init(struct gdsc *sc)
{
	u32 mask, val;
	int on, ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on register writes.
	 * Disable SW override: use the hardware state machine for sequencing.
	 * Configure the wait time between states.
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
	val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;

	/* Force gdsc ON if only ON state is supported */
	if (sc->pwrsts == PWRSTS_ON) {
		ret = gdsc_toggle_logic(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	on = gdsc_check_status(sc, GDSC_ON);
	if (on < 0)
		return on;

	if (on) {
		/* The regulator must be on, sync the kernel state */
		if (sc->rsupply) {
			ret = regulator_enable(sc->rsupply);
			if (ret < 0)
				return ret;
		}

		/*
		 * Votable GDSCs can be ON due to a vote from other masters.
		 * If a votable GDSC is ON, make sure we have a vote.
		 */
		if (sc->flags & VOTABLE) {
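			/*
			 * val still holds the wait-state programming value,
			 * in which SW_COLLAPSE_MASK is clear, so this write
			 * removes SW collapse and registers our vote.
			 */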
			ret = regmap_update_bits(sc->regmap, sc->gdscr,
						 SW_COLLAPSE_MASK, val);
			if (ret)
				return ret;
		}

		/* Turn on HW trigger mode if supported */
		if (sc->flags & HW_CTRL) {
			ret = gdsc_hwctrl(sc, true);
			if (ret < 0)
				return ret;
		}

		/*
		 * Make sure the retain bit is set if the GDSC is already on,
		 * otherwise we end up turning off the GDSC and destroying all
		 * the register contents that we thought we were saving.
		 */
		if (sc->flags & RETAIN_FF_ENABLE)
			gdsc_retain_ff_on(sc);
	} else if (sc->flags & ALWAYS_ON) {
		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
		gdsc_enable(&sc->pd);
		on = true;
	}

	if (on || (sc->pwrsts & PWRSTS_RET))
		gdsc_force_mem_on(sc);
	else
		gdsc_clear_mem_on(sc);

	if (sc->flags & ALWAYS_ON)
		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)
		sc->pd.power_on = gdsc_enable;
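	/* Tell genpd the initial state: powered off unless the GDSC is on */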
	pm_genpd_init(&sc->pd, NULL, !on);

	return 0;
}

int gdsc_register(struct gdsc_desc *desc,
		  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
	int i, ret;
	struct genpd_onecell_data *data;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

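	/* Resolve all supplies first so we fail before any domain is set up */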
	for (i = 0; i < num; i++) {
		if (!scs[i] || !scs[i]->supply)
			continue;

		scs[i]->rsupply = devm_regulator_get(dev, scs[i]->supply);
		if (IS_ERR(scs[i]->rsupply))
			return PTR_ERR(scs[i]->rsupply);
	}

	data->num_domains = num;
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (pm_runtime_enabled(dev))
			scs[i]->dev = dev;
		scs[i]->regmap = regmap;
		scs[i]->rcdev = rcdev;
		ret = gdsc_init(scs[i]);
		if (ret)
			return ret;
		data->domains[i] = &scs[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}

	return of_genpd_add_provider_onecell(dev->of_node, data);
}

void gdsc_unregister(struct gdsc_desc *desc)
{
	int i;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	/* Remove subdomains */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}
	of_genpd_del_provider(dev->of_node);
}

/*
 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
 * running in the CX domain, so the CPU doesn't need to know anything about the
 * GX domain EXCEPT....
 *
 * Hardware constraints dictate that the GX be powered down before the CX. If
 * the GMU crashes, it could leave the GX on. In order to successfully bring
 * back the device, the CPU needs to disable the GX headswitch. There being no
 * sane way to reach in and touch that register from deep inside the GPU
 * driver, we need to set up the infrastructure to guarantee that the GX is
 * off during this super special case. We do this by defining a GX gdsc with
 * a dummy enable function and a "default" disable function.
 *
 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
 * driver. During power up, nothing will happen from the CPU (and the GMU will
 * power up normally), but during power down this ensures that the GX domain
 * is *really* off - giving us a semi-standard way of doing what we need.
 */
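/*
 * A minimal sketch of such a GX descriptor, loosely modelled on the SDM845
 * GPU clock controller (the offsets and flags below are illustrative
 * assumptions, not values for any particular SoC; see struct gdsc in gdsc.h
 * for the fields):
 *
 *	static struct gdsc gpu_gx_gdsc = {
 *		.gdscr = 0x1094,
 *		.clamp_io_ctrl = 0x130,
 *		.pd = {
 *			.name = "gpu_gx_gdsc",
 *			.power_on = gdsc_gx_do_nothing_enable,
 *		},
 *		.pwrsts = PWRSTS_OFF_ON,
 *		.flags = CLAMP_IO | AON_RESET,
 *	};
 *
 * Since .power_off is left unset, gdsc_init() fills it in with gdsc_disable,
 * so a genpd power-off really collapses the headswitch while power-on is a
 * no-op from the CPU side.
 */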
int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
{
	/* Do nothing but give genpd the impression that we were successful */
	return 0;
}
EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);