/*
 * Arm SCP/MCP Software
 * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Description:
 *     Power State Management PPU v1 driver.
 */

#include "ppu_v1.h"

#include <mod_power_domain.h>
#include <mod_ppu_v1.h>

#ifdef BUILD_HAS_MOD_SYSTEM_POWER
#    include <mod_system_power.h>
#endif

#include <fwk_assert.h>
#include <fwk_event.h>
#include <fwk_id.h>
#include <fwk_interrupt.h>
#include <fwk_log.h>
#include <fwk_macros.h>
#include <fwk_mm.h>
#include <fwk_module.h>
#include <fwk_module_idx.h>
#include <fwk_notification.h>
#include <fwk_status.h>

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define CORE_PER_CLUSTER_COUNT_MAX 8

/* Power domain context */
struct ppu_v1_pd_ctx {
    /* Power domain configuration data */
    const struct mod_ppu_v1_pd_config *config;

    /* PPU registers */
    struct ppu_v1_reg *ppu;

    /* Identifier of the entity bound to the power domain driver API */
    fwk_id_t bound_id;

    /* Power module driver input API */
    struct mod_pd_driver_input_api *pd_driver_input_api;

    /* Context of the parent power domain (used only for core power domains) */
    struct ppu_v1_pd_ctx *parent_pd_ctx;

    /* Pointer to the power state observer API */
    const struct mod_ppu_v1_power_state_observer_api *observer_api;

    /* Timer context */
    struct ppu_v1_timer_ctx *timer_ctx;

    /* Context data specific to the type of power domain */
    void *data;
};

/* Cluster power domain specific context */
struct ppu_v1_cluster_pd_ctx {
    /*
     * Table of pointers to the contexts of the cores being part of the
     * cluster.
     */
    struct ppu_v1_pd_ctx *core_pd_ctx_table[CORE_PER_CLUSTER_COUNT_MAX];

    /* Number of cores */
    unsigned int core_count;
};

/* Module context */
struct ppu_v1_ctx {
    /* Table of the power domain contexts */
    struct ppu_v1_pd_ctx *pd_ctx_table;

    /* Number of power domains */
    size_t pd_ctx_table_size;
};

/*
 * Internal variables
 */

static struct ppu_v1_ctx ppu_v1_ctx;

#define MODE_UNSUPPORTED ~0U
static const uint8_t ppu_mode_to_power_state[] = {
    [PPU_V1_MODE_OFF] = (uint8_t)MOD_PD_STATE_OFF,
    [PPU_V1_MODE_OFF_EMU] = (uint8_t)MOD_PD_STATE_OFF,
    [PPU_V1_MODE_MEM_RET] = (uint8_t)MOD_PD_STATE_OFF,
    [PPU_V1_MODE_MEM_RET_EMU] = (uint8_t)MOD_PD_STATE_OFF,
    [PPU_V1_MODE_LOGIC_RET] = (uint8_t)MOD_PD_STATE_ON,
    [PPU_V1_MODE_FULL_RET] = (uint8_t)MOD_PD_STATE_ON,
    [PPU_V1_MODE_MEM_OFF] = (uint8_t)MOD_PD_STATE_ON,
    [PPU_V1_MODE_FUNC_RET] = (uint8_t)MOD_PD_STATE_ON,
    [PPU_V1_MODE_ON] = (uint8_t)MOD_PD_STATE_ON,
    [PPU_V1_MODE_WARM_RST] = (uint8_t)MODE_UNSUPPORTED,
    [PPU_V1_MODE_DBG_RECOV] = (uint8_t)MODE_UNSUPPORTED
};

/*
 * Functions not specific to any type of power domain
 */

static int get_state(struct ppu_v1_reg *ppu, unsigned int *state)
{
    enum ppu_v1_mode mode;

    /* Ensure ppu_mode_to_power_state has an entry for each PPU mode */
    static_assert((FWK_ARRAY_SIZE(ppu_mode_to_power_state) ==
        PPU_V1_MODE_COUNT), "[PPU_V1] ppu_mode_to_power_state size error");

    mode = ppu_v1_get_power_mode(ppu);
    fwk_assert(mode < PPU_V1_MODE_COUNT);

    *state = ppu_mode_to_power_state[mode];

    if ((*state == MOD_PD_STATE_OFF) && (ppu_v1_is_dynamic_enabled(ppu)))
        *state = MOD_PD_STATE_SLEEP;

    /* The table entries are uint8_t, so compare against the truncated value */
    if (*state == (uint8_t)MODE_UNSUPPORTED) {
        FWK_LOG_ERR("[PPU_V1] Unexpected PPU mode (%i).", mode);
        return FWK_E_DEVICE;
    }

    return FWK_SUCCESS;
}

static int ppu_v1_pd_set_state(fwk_id_t pd_id, unsigned int state)
{
    int status;
    struct ppu_v1_pd_ctx *pd_ctx;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(pd_id);

    switch (state) {
    case MOD_PD_STATE_ON:
        ppu_v1_set_power_mode(pd_ctx->ppu, PPU_V1_MODE_ON, pd_ctx->timer_ctx);
        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_ON);
        fwk_assert(status == FWK_SUCCESS);
        break;

    case MOD_PD_STATE_OFF:
        ppu_v1_set_power_mode(pd_ctx->ppu, PPU_V1_MODE_OFF, pd_ctx->timer_ctx);
        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_OFF);
        fwk_assert(status == FWK_SUCCESS);
        break;

    default:
        FWK_LOG_ERR(
            "[PPU_V1] Requested power state (%i) is not supported.", state);
        return FWK_E_PARAM;
    }

    return status;
}

static int ppu_v1_pd_get_state(fwk_id_t pd_id, unsigned int *state)
{
    struct ppu_v1_pd_ctx *pd_ctx;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(pd_id);

    return get_state(pd_ctx->ppu, state);
}

static int ppu_v1_pd_reset(fwk_id_t pd_id)
{
    int status;
    struct ppu_v1_pd_ctx *pd_ctx;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(pd_id);

    /* Model does not support warm reset at the moment. Using OFF instead. */
    status =
        ppu_v1_set_power_mode(pd_ctx->ppu, PPU_V1_MODE_OFF, pd_ctx->timer_ctx);
    if (status == FWK_SUCCESS)
        status = ppu_v1_set_power_mode(
            pd_ctx->ppu, PPU_V1_MODE_ON, pd_ctx->timer_ctx);

    return status;
}

static int ppu_v1_pd_shutdown(fwk_id_t core_pd_id,
    enum mod_pd_system_shutdown system_shutdown)
{
    return FWK_SUCCESS;
}

static const struct mod_pd_driver_api pd_driver = {
    .set_state = ppu_v1_pd_set_state,
    .get_state = ppu_v1_pd_get_state,
    .reset = ppu_v1_pd_reset,
    .shutdown = ppu_v1_pd_shutdown,
};
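
/*
 * Illustrative sketch (not part of the driver): a client that has bound to
 * MOD_PPU_V1_API_IDX_POWER_DOMAIN_DRIVER can drive a power domain through
 * the generic driver API. The element index 0 and the variable names are
 * placeholders.
 *
 *     const struct mod_pd_driver_api *api;
 *     unsigned int state;
 *     fwk_id_t pd = FWK_ID_ELEMENT(FWK_MODULE_IDX_PPU_V1, 0);
 *
 *     fwk_module_bind(
 *         pd,
 *         FWK_ID_API(FWK_MODULE_IDX_PPU_V1,
 *             MOD_PPU_V1_API_IDX_POWER_DOMAIN_DRIVER),
 *         &api);
 *     api->set_state(pd, MOD_PD_STATE_ON);
 *     api->get_state(pd, &state);
 */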

/*
 * Functions specific to core power domains
 */

static int ppu_v1_core_pd_init(struct ppu_v1_pd_ctx *pd_ctx)
{
    int status;
    struct ppu_v1_reg *ppu = pd_ctx->ppu;
    unsigned int state;

    ppu_v1_init(ppu);

    status = get_state(ppu, &state);
    if (status != FWK_SUCCESS)
        return status;

    if (state == MOD_PD_STATE_ON) {
        ppu_v1_interrupt_unmask(ppu, PPU_V1_IMR_DYN_POLICY_MIN_IRQ_MASK);
        ppu_v1_dynamic_enable(ppu, PPU_V1_MODE_OFF);
    }

    return FWK_SUCCESS;
}

#ifdef BUILD_HAS_MOD_POWER_DOMAIN
static int ppu_v1_core_pd_set_state(fwk_id_t core_pd_id, unsigned int state)
{
    int status;
    struct ppu_v1_pd_ctx *pd_ctx;
    struct ppu_v1_reg *ppu;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(core_pd_id);
    ppu = pd_ctx->ppu;

    switch (state) {
    case MOD_PD_STATE_OFF:
        ppu_v1_set_input_edge_sensitivity(ppu,
            PPU_V1_MODE_ON,
            PPU_V1_EDGE_SENSITIVITY_MASKED);
        ppu_v1_interrupt_mask(ppu, PPU_V1_IMR_DYN_POLICY_MIN_IRQ_MASK);
        ppu_v1_set_power_mode(ppu, PPU_V1_MODE_OFF, pd_ctx->timer_ctx);
        ppu_v1_lock_off_disable(ppu);
        ppu_v1_off_unlock(ppu);
        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_OFF);
        fwk_assert(status == FWK_SUCCESS);
        break;

    case MOD_PD_STATE_ON:
        ppu_v1_interrupt_unmask(ppu, PPU_V1_IMR_DYN_POLICY_MIN_IRQ_MASK);
        ppu_v1_set_input_edge_sensitivity(ppu,
            PPU_V1_MODE_ON,
            PPU_V1_EDGE_SENSITIVITY_MASKED);
        ppu_v1_set_power_mode(ppu, PPU_V1_MODE_ON, pd_ctx->timer_ctx);
        ppu_v1_dynamic_enable(ppu, PPU_V1_MODE_OFF);
        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_ON);
        fwk_assert(status == FWK_SUCCESS);
        break;

    case MOD_PD_STATE_SLEEP:
        /*
         * If dynamic transitions are enabled, the core either is already in
         * the SLEEP power state or will transition to it once the
         * appropriate processing is done on the AP side, so there is nothing
         * to do in that case. If dynamic transitions are not enabled, this
         * is an OFF to SLEEP transition.
         */
        if (!ppu_v1_is_dynamic_enabled(ppu)) {
            ppu_v1_dynamic_enable(ppu, PPU_V1_MODE_OFF);
            ppu_v1_set_input_edge_sensitivity(ppu,
                PPU_V1_MODE_ON,
                PPU_V1_EDGE_SENSITIVITY_MASKED);
        }
        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_SLEEP);
        fwk_assert(status == FWK_SUCCESS);
        break;

    default:
        FWK_LOG_ERR(
            "[PPU_V1] Requested CPU power state (%i) is not supported!", state);
        return FWK_E_PARAM;
    }

    return status;
}

static int ppu_v1_core_pd_reset(fwk_id_t core_pd_id)
{
    int status;

    status = ppu_v1_core_pd_set_state(core_pd_id, MOD_PD_STATE_OFF);
    if (status == FWK_SUCCESS)
        status = ppu_v1_core_pd_set_state(core_pd_id, MOD_PD_STATE_ON);

    return status;
}

static int ppu_v1_core_pd_prepare_for_system_suspend(fwk_id_t core_pd_id)
{
    struct ppu_v1_pd_ctx *pd_ctx;
    struct ppu_v1_reg *ppu;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(core_pd_id);
    ppu = pd_ctx->ppu;

    ppu_v1_set_input_edge_sensitivity(ppu,
        PPU_V1_MODE_ON,
        PPU_V1_EDGE_SENSITIVITY_MASKED);
    ppu_v1_request_power_mode(ppu, PPU_V1_MODE_OFF);

    return FWK_SUCCESS;
}
#endif

static void core_pd_ppu_interrupt_handler(struct ppu_v1_pd_ctx *pd_ctx)
{
    int status;
    struct ppu_v1_reg *ppu;

    ppu = pd_ctx->ppu;

    /* ON request interrupt */
    if (ppu_v1_is_power_active_edge_interrupt(ppu, PPU_V1_MODE_ON)) {
        ppu_v1_ack_power_active_edge_interrupt(ppu, PPU_V1_MODE_ON);
        ppu_v1_set_input_edge_sensitivity(ppu,
            PPU_V1_MODE_ON,
            PPU_V1_EDGE_SENSITIVITY_MASKED);
        ppu_v1_interrupt_unmask(ppu, PPU_V1_IMR_DYN_POLICY_MIN_IRQ_MASK);

        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_ON);
        fwk_assert(status == FWK_SUCCESS);
        (void)status;
    /* Minimum policy reached interrupt */
    } else if (ppu_v1_is_dyn_policy_min_interrupt(ppu)) {
        ppu_v1_ack_interrupt(ppu, PPU_V1_ISR_DYN_POLICY_MIN_IRQ);
        ppu_v1_interrupt_mask(ppu, PPU_V1_IMR_DYN_POLICY_MIN_IRQ_MASK);

        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_SLEEP);
        fwk_assert(status == FWK_SUCCESS);
        (void)status;

        /*
         * Enable the core PACTIVE ON signal rising edge interrupt then check
         * if the PACTIVE ON signal is high. If it is high, we may have missed
         * the transition from low to high. In that case, just disable the
         * interrupt and acknowledge it in case it is pending. There is no
         * need to send an update request as one has already been queued.
         */
        ppu_v1_set_input_edge_sensitivity(ppu,
            PPU_V1_MODE_ON,
            PPU_V1_EDGE_SENSITIVITY_RISING_EDGE);
        if (ppu_v1_is_power_devactive_high(ppu, PPU_V1_MODE_ON)) {
            ppu_v1_set_input_edge_sensitivity(ppu,
                PPU_V1_MODE_ON,
                PPU_V1_EDGE_SENSITIVITY_MASKED);
            ppu_v1_ack_power_active_edge_interrupt(ppu, PPU_V1_MODE_ON);
            ppu_v1_interrupt_unmask(ppu, PPU_V1_IMR_DYN_POLICY_MIN_IRQ_MASK);
        }
    }
}

#ifdef BUILD_HAS_MOD_POWER_DOMAIN
static const struct mod_pd_driver_api core_pd_driver = {
    .set_state = ppu_v1_core_pd_set_state,
    .get_state = ppu_v1_pd_get_state,
    .reset = ppu_v1_core_pd_reset,
    .prepare_core_for_system_suspend =
        ppu_v1_core_pd_prepare_for_system_suspend,
    .shutdown = ppu_v1_pd_shutdown,
};
#endif

/*
 * Functions specific to cluster power domains
 */

static void unlock_all_cores(struct ppu_v1_pd_ctx *pd_ctx)
{
    struct ppu_v1_cluster_pd_ctx *cluster_pd_ctx;
    struct ppu_v1_reg *cpu_ppu;
    unsigned int core_idx;

    fwk_assert(pd_ctx != NULL);

    cluster_pd_ctx = pd_ctx->data;

    for (core_idx = 0; core_idx < cluster_pd_ctx->core_count; ++core_idx) {
        cpu_ppu = cluster_pd_ctx->core_pd_ctx_table[core_idx]->ppu;
        ppu_v1_lock_off_disable(cpu_ppu);
        ppu_v1_off_unlock(cpu_ppu);
    }
}

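/*
 * Attempt to latch every dynamically-enabled core of the cluster in the OFF
 * state. Returns false, without undoing any lock already taken, if a core's
 * PACTIVE ON signal is (or goes) high while locking, meaning the cluster
 * cannot be turned off; the caller is expected to unlock the cores again.
 */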
static bool lock_all_dynamic_cores(struct ppu_v1_pd_ctx *pd_ctx)
{
    struct ppu_v1_cluster_pd_ctx *cluster_pd_ctx;
    struct ppu_v1_reg *cpu_ppu;
    unsigned int core_idx;

    fwk_assert(pd_ctx != NULL);

    cluster_pd_ctx = pd_ctx->data;

    for (core_idx = 0; core_idx < cluster_pd_ctx->core_count; ++core_idx) {
        cpu_ppu = cluster_pd_ctx->core_pd_ctx_table[core_idx]->ppu;

        if (!ppu_v1_is_dynamic_enabled(cpu_ppu))
            continue;

        ppu_v1_lock_off_enable(cpu_ppu);
        while ((!ppu_v1_is_locked(cpu_ppu)) &&
               (!ppu_v1_is_power_devactive_high(cpu_ppu, PPU_V1_MODE_ON)))
            continue;

        if (ppu_v1_is_power_devactive_high(cpu_ppu, PPU_V1_MODE_ON))
            return false;
    }

    return true;
}

static bool cluster_off(struct ppu_v1_pd_ctx *pd_ctx)
{
    struct ppu_v1_reg *ppu;
    bool lock_successful;

    fwk_assert(pd_ctx != NULL);

    ppu = pd_ctx->ppu;

    ppu_v1_set_input_edge_sensitivity(ppu,
        PPU_V1_MODE_ON,
        PPU_V1_EDGE_SENSITIVITY_MASKED);

    lock_successful = lock_all_dynamic_cores(pd_ctx);
    if (!lock_successful) {
        unlock_all_cores(pd_ctx);
        return false;
    }

    ppu_v1_set_power_mode(ppu, PPU_V1_MODE_OFF, pd_ctx->timer_ctx);
    return true;
}

static void cluster_on(struct ppu_v1_pd_ctx *pd_ctx)
{
    int status;
    struct ppu_v1_reg *ppu;

    fwk_assert(pd_ctx != NULL);

    ppu = pd_ctx->ppu;

    ppu_v1_set_input_edge_sensitivity(ppu,
        PPU_V1_MODE_ON,
        PPU_V1_EDGE_SENSITIVITY_MASKED);

    ppu_v1_request_operating_mode(ppu, pd_ctx->config->opmode);

    ppu_v1_set_power_mode(ppu, PPU_V1_MODE_ON, pd_ctx->timer_ctx);
    status = pd_ctx->pd_driver_input_api->report_power_state_transition(
        pd_ctx->bound_id, MOD_PD_STATE_ON);
    fwk_assert(status == FWK_SUCCESS);
    (void)status;

    if (pd_ctx->observer_api != NULL)
        pd_ctx->observer_api->post_ppu_on(pd_ctx->config->post_ppu_on_param);

    unlock_all_cores(pd_ctx);
}

static int ppu_v1_cluster_pd_init(struct ppu_v1_pd_ctx *pd_ctx)
{
    int status;
    struct ppu_v1_reg *ppu = pd_ctx->ppu;
    unsigned int state;

    ppu_v1_init(ppu);

    status = get_state(ppu, &state);
    if (status != FWK_SUCCESS)
        return status;

    /*
     * For clusters with more than one operating mode, enable dynamic
     * operating mode transitions.
     */
    if (ppu_v1_get_num_opmode(ppu) > 1)
        ppu_v1_opmode_dynamic_enable(ppu, PPU_V1_OPMODE_00);

    if (state == MOD_PD_STATE_ON) {
        ppu_v1_set_input_edge_sensitivity(ppu,
            PPU_V1_MODE_ON,
            PPU_V1_EDGE_SENSITIVITY_FALLING_EDGE);
    }

    return FWK_SUCCESS;
}

#ifdef BUILD_HAS_MOD_POWER_DOMAIN
static int ppu_v1_cluster_pd_set_state(fwk_id_t cluster_pd_id,
    unsigned int state)
{
    int status;
    struct ppu_v1_pd_ctx *pd_ctx;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(cluster_pd_id);

    switch (state) {
    case MOD_PD_STATE_ON:
        cluster_on(pd_ctx);

        return FWK_SUCCESS;

    case MOD_PD_STATE_OFF:
        if (!cluster_off(pd_ctx)) {
            /* Cluster failed to transition to off */

            return FWK_E_STATE;
        }
        status = pd_ctx->pd_driver_input_api->report_power_state_transition(
            pd_ctx->bound_id, MOD_PD_STATE_OFF);
        fwk_assert(status == FWK_SUCCESS);
        return status;

    default:
        FWK_LOG_ERR(
            "[PPU_V1] Requested cluster power state (%i) is not supported!",
            state);
        return FWK_E_PARAM;
    }
}
#endif

static void cluster_pd_ppu_interrupt_handler(struct ppu_v1_pd_ctx *pd_ctx)
{
    int status;
    struct ppu_v1_reg *ppu;
    enum ppu_v1_mode current_mode;

    fwk_assert(pd_ctx != NULL);

    ppu = pd_ctx->ppu;

    if (!ppu_v1_is_power_active_edge_interrupt(ppu, PPU_V1_MODE_ON))
        return; /* Spurious interrupt */

    ppu_v1_ack_power_active_edge_interrupt(ppu, PPU_V1_MODE_ON);
    current_mode = ppu_v1_get_power_mode(ppu);

    switch (current_mode) {
    case PPU_V1_MODE_OFF:
        /* Cluster has to be powered on */
        cluster_on(pd_ctx);
        ppu_v1_set_input_edge_sensitivity(ppu,
            PPU_V1_MODE_ON,
            PPU_V1_EDGE_SENSITIVITY_FALLING_EDGE);
        return;

    case PPU_V1_MODE_ON:
        /*
         * It may be possible to turn off the cluster; check all PACTIVE lines
         * to make sure it is not just requesting a low power mode.
         */
        while (current_mode > 0) {
            if (ppu_v1_is_power_devactive_high(ppu, current_mode--))
                return;
        }

        /* All PACTIVE lines are low, so the cluster can be turned off */
        if (cluster_off(pd_ctx)) {
            /* Cluster successfully transitioned to off */
            ppu_v1_set_input_edge_sensitivity(ppu,
                PPU_V1_MODE_ON, PPU_V1_EDGE_SENSITIVITY_RISING_EDGE);
            status = pd_ctx->pd_driver_input_api->report_power_state_transition(
                pd_ctx->bound_id, MOD_PD_STATE_SLEEP);
            fwk_assert(status == FWK_SUCCESS);
            (void)status;
        } else {
            /* Cluster did not transition to off */
            ppu_v1_set_input_edge_sensitivity(ppu,
                PPU_V1_MODE_ON, PPU_V1_EDGE_SENSITIVITY_FALLING_EDGE);
        }
        return;

    default:
        /* Cluster is in an invalid power mode */
        fwk_unexpected();
        return;
    }
}

#ifdef BUILD_HAS_MOD_POWER_DOMAIN
static const struct mod_pd_driver_api cluster_pd_driver = {
    .set_state = ppu_v1_cluster_pd_set_state,
    .get_state = ppu_v1_pd_get_state,
    .reset = ppu_v1_pd_reset,
    .shutdown = ppu_v1_pd_shutdown,
};
#endif

static void ppu_interrupt_handler(uintptr_t pd_ctx_param)
{
    struct ppu_v1_pd_ctx *pd_ctx = (struct ppu_v1_pd_ctx *)pd_ctx_param;

    fwk_assert(pd_ctx != NULL);

    if (pd_ctx->config->pd_type == MOD_PD_TYPE_CORE)
        core_pd_ppu_interrupt_handler(pd_ctx);
    else
        cluster_pd_ppu_interrupt_handler(pd_ctx);
}

static void ppu_isr_api_interrupt_handler(fwk_id_t pd_id)
{
    struct ppu_v1_pd_ctx *pd_ctx;

    if (!fwk_id_is_type(pd_id, FWK_ID_TYPE_ELEMENT))
        return;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(pd_id);
    ppu_interrupt_handler((uintptr_t)pd_ctx);
}

static const struct ppu_v1_isr_api isr_api = {
    .ppu_interrupt_handler = ppu_isr_api_interrupt_handler,
};
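
/*
 * Illustrative sketch (assumption about platform wiring, not part of the
 * driver): a platform that multiplexes several PPU interrupts behind a
 * single line can bind to MOD_PPU_V1_API_IDX_ISR, demultiplex in its own
 * handler and forward each source to this module:
 *
 *     const struct ppu_v1_isr_api *isr_api;
 *     ...
 *     isr_api->ppu_interrupt_handler(
 *         FWK_ID_ELEMENT(FWK_MODULE_IDX_PPU_V1, pd_idx));
 */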

static int ppu_power_mode_on(fwk_id_t pd_id)
{
    struct ppu_v1_pd_ctx *pd_ctx;

    if (!fwk_id_is_type(pd_id, FWK_ID_TYPE_ELEMENT))
        return FWK_E_PARAM;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(pd_id);

    return ppu_v1_set_power_mode(
        pd_ctx->ppu, PPU_V1_MODE_ON, pd_ctx->timer_ctx);
}

static const struct ppu_v1_boot_api boot_api = {
    .power_mode_on = ppu_power_mode_on,
};
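
/*
 * Illustrative sketch (not part of the driver): a boot component bound to
 * MOD_PPU_V1_API_IDX_BOOT can power a domain on before the power domain
 * module has been started. The element index 0 is a placeholder:
 *
 *     const struct ppu_v1_boot_api *boot_api;
 *     ...
 *     boot_api->power_mode_on(FWK_ID_ELEMENT(FWK_MODULE_IDX_PPU_V1, 0));
 */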

/*
 * Framework handlers
 */

static int ppu_v1_mod_init(fwk_id_t module_id, unsigned int pd_count,
    const void *unused)
{
    ppu_v1_ctx.pd_ctx_table = fwk_mm_calloc(pd_count,
        sizeof(struct ppu_v1_pd_ctx));

    ppu_v1_ctx.pd_ctx_table_size = pd_count;

    return FWK_SUCCESS;
}

static int ppu_v1_pd_init(fwk_id_t pd_id, unsigned int unused, const void *data)
{
    const struct mod_ppu_v1_pd_config *config = data;
    struct ppu_v1_pd_ctx *pd_ctx;

    if (config->pd_type >= MOD_PD_TYPE_COUNT)
        return FWK_E_DATA;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(pd_id);
    pd_ctx->config = config;
    pd_ctx->ppu = (struct ppu_v1_reg *)(config->ppu.reg_base);
    pd_ctx->bound_id = FWK_ID_NONE;

    if (config->ppu.irq != FWK_INTERRUPT_NONE) {
        fwk_interrupt_set_isr_param(config->ppu.irq,
            ppu_interrupt_handler,
            (uintptr_t)pd_ctx);
    }

    if (config->pd_type == MOD_PD_TYPE_CLUSTER) {
        pd_ctx->data = fwk_mm_calloc(1, sizeof(struct ppu_v1_cluster_pd_ctx));
    }
#ifdef BUILD_HAS_MOD_TIMER
    if (config->timer_config == NULL) {
        pd_ctx->timer_ctx = NULL;
    } else {
        pd_ctx->timer_ctx = fwk_mm_calloc(1, sizeof(struct ppu_v1_timer_ctx));
        if (pd_ctx->timer_ctx == NULL)
            return FWK_E_NOMEM;
        /* Check for valid timeout value if timer ID is specified */
        if (config->timer_config->set_state_timeout_us == 0)
            return FWK_E_PARAM;
        /* Save the timer ID to pd context */
        pd_ctx->timer_ctx->timer_id = config->timer_config->timer_id;
        pd_ctx->timer_ctx->delay_us =
            config->timer_config->set_state_timeout_us;
    }
#else
    pd_ctx->timer_ctx = NULL;
#endif
    if (config->default_power_on) {
        switch (config->pd_type) {
        case MOD_PD_TYPE_DEVICE:
            /* Fall through */
        case MOD_PD_TYPE_DEVICE_DEBUG:
            /* Fall through */
        case MOD_PD_TYPE_SYSTEM:
            ppu_v1_init(pd_ctx->ppu);
            return ppu_v1_set_power_mode(pd_ctx->ppu, PPU_V1_MODE_ON, NULL);

        default:
            fwk_unexpected();
            return FWK_E_SUPPORT;
        }
    }
    return FWK_SUCCESS;
}
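
/*
 * Illustrative sketch (assumption, not taken from a real platform): the
 * element configuration data consumed by ppu_v1_pd_init() might look as
 * follows for a core power domain. The register base, IRQ number and
 * element indices are placeholders.
 *
 *     static const struct mod_ppu_v1_pd_config core0_pd_config = {
 *         .pd_type = MOD_PD_TYPE_CORE,
 *         .ppu = { .reg_base = 0x50021000, .irq = 34 },
 *         .cluster_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PPU_V1, 4),
 *         .observer_id = FWK_ID_NONE_INIT,
 *         .default_power_on = false,
 *     };
 */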

static int ppu_v1_post_init(fwk_id_t module_id)
{
    unsigned int pd_idx;
    struct ppu_v1_pd_ctx *pd_ctx, *cluster_pd_ctx;
    const struct mod_ppu_v1_pd_config *config;
    fwk_id_t cluster_id;
    struct ppu_v1_cluster_pd_ctx *cluster_pd_specific_ctx;

    for (pd_idx = 0; pd_idx < ppu_v1_ctx.pd_ctx_table_size; pd_idx++) {
        pd_ctx = &ppu_v1_ctx.pd_ctx_table[pd_idx];
        config = pd_ctx->config;
        if (config->pd_type != MOD_PD_TYPE_CORE)
            continue;

        cluster_id = config->cluster_id;

        if ((!fwk_module_is_valid_element_id(cluster_id)) ||
            (fwk_id_get_module_idx(cluster_id) != FWK_MODULE_IDX_PPU_V1))
            return FWK_E_PARAM;

        cluster_pd_ctx = &ppu_v1_ctx.pd_ctx_table[
            fwk_id_get_element_idx(cluster_id)];
        cluster_pd_specific_ctx = cluster_pd_ctx->data;

        if (cluster_pd_specific_ctx->core_count >= CORE_PER_CLUSTER_COUNT_MAX)
            return FWK_E_NOMEM;

        cluster_pd_specific_ctx->core_pd_ctx_table[
            cluster_pd_specific_ctx->core_count++] = pd_ctx;
        pd_ctx->parent_pd_ctx = cluster_pd_ctx;
    }

    return FWK_SUCCESS;
}

static int ppu_v1_bind(fwk_id_t id, unsigned int round)
{
    int status = FWK_SUCCESS;
    struct ppu_v1_pd_ctx *pd_ctx;

    /*
     * Nothing to do during the first round of calls where the power module
     * will bind to the power domains of this module.
     */
    if (round == 0)
        return FWK_SUCCESS;

    if (fwk_id_is_type(id, FWK_ID_TYPE_MODULE))
        return FWK_SUCCESS;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(id);

#ifdef BUILD_HAS_MOD_TIMER
    if (pd_ctx->timer_ctx != NULL &&
        !fwk_id_is_equal(pd_ctx->timer_ctx->timer_id, FWK_ID_NONE)) {
        /* Bind to the timer */
        status = fwk_module_bind(
            pd_ctx->timer_ctx->timer_id,
            MOD_TIMER_API_ID_TIMER,
            &pd_ctx->timer_ctx->timer_api);
        if (status != FWK_SUCCESS)
            return status;
    }
#endif

    if (!fwk_id_is_equal(pd_ctx->config->observer_id, FWK_ID_NONE)) {
        if (pd_ctx->config->pd_type != MOD_PD_TYPE_CLUSTER) {
            /* State observation is only supported for clusters */
            fwk_unexpected();
            return FWK_E_SUPPORT;
        }

        status = fwk_module_bind(pd_ctx->config->observer_id,
            pd_ctx->config->observer_api,
            &pd_ctx->observer_api);
        if (status != FWK_SUCCESS)
            return status;
    }

    if (fwk_id_is_equal(pd_ctx->bound_id, FWK_ID_NONE))
        return FWK_SUCCESS;

    switch (fwk_id_get_module_idx(pd_ctx->bound_id)) {
#ifdef BUILD_HAS_MOD_POWER_DOMAIN
    case FWK_MODULE_IDX_POWER_DOMAIN:
        return fwk_module_bind(pd_ctx->bound_id,
            mod_pd_api_id_driver_input,
            &pd_ctx->pd_driver_input_api);
#endif

#ifdef BUILD_HAS_MOD_SYSTEM_POWER
    case FWK_MODULE_IDX_SYSTEM_POWER:
        return fwk_module_bind(pd_ctx->bound_id,
            mod_system_power_api_id_pd_driver_input,
            &pd_ctx->pd_driver_input_api);
#endif

    default:
        fwk_unexpected();
        return FWK_E_SUPPORT;
    }
}

static int ppu_v1_process_bind_request(fwk_id_t source_id,
    fwk_id_t target_id, fwk_id_t api_id,
    const void **api)
{
    struct ppu_v1_pd_ctx *pd_ctx;
    unsigned int api_idx;
#ifdef BUILD_HAS_MOD_POWER_DOMAIN
    bool is_power_domain_module;
#endif
#ifdef BUILD_HAS_MOD_SYSTEM_POWER
    bool is_system_power_module;
#endif

    api_idx = fwk_id_get_api_idx(api_id);

    if (api_idx == MOD_PPU_V1_API_IDX_ISR) {
        if (!fwk_id_is_type(target_id, FWK_ID_TYPE_MODULE))
            return FWK_E_SUPPORT;

        *api = &isr_api;
        return FWK_SUCCESS;
    }

    if (api_idx == MOD_PPU_V1_API_IDX_BOOT) {
        *api = &boot_api;
        return FWK_SUCCESS;
    }

    if (api_idx != MOD_PPU_V1_API_IDX_POWER_DOMAIN_DRIVER)
        return FWK_E_SUPPORT;

    if (!fwk_module_is_valid_element_id(target_id))
        return FWK_E_PARAM;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(target_id);

    /* Allow multiple binding only for device power domain for now */
    if ((pd_ctx->config->pd_type != MOD_PD_TYPE_DEVICE) &&
        (!fwk_id_is_equal(pd_ctx->bound_id, FWK_ID_NONE))) {
        fwk_unexpected();
        return FWK_E_ACCESS;
    }

#ifdef BUILD_HAS_MOD_POWER_DOMAIN
    is_power_domain_module = (fwk_id_get_module_idx(source_id) ==
        FWK_MODULE_IDX_POWER_DOMAIN);
#endif
#ifdef BUILD_HAS_MOD_SYSTEM_POWER
    is_system_power_module = (fwk_id_get_module_idx(source_id) ==
        FWK_MODULE_IDX_SYSTEM_POWER);
#endif

    switch (pd_ctx->config->pd_type) {
    case MOD_PD_TYPE_CORE:
#ifdef BUILD_HAS_MOD_POWER_DOMAIN
        if (is_power_domain_module) {
            *api = &core_pd_driver;
            pd_ctx->bound_id = source_id;
            return FWK_SUCCESS;
        }
#endif
        break;

    case MOD_PD_TYPE_CLUSTER:
#ifdef BUILD_HAS_MOD_POWER_DOMAIN
        if (is_power_domain_module) {
            *api = &cluster_pd_driver;
            pd_ctx->bound_id = source_id;
            return FWK_SUCCESS;
        }
#endif
        break;

    case MOD_PD_TYPE_SYSTEM:
#ifdef BUILD_HAS_MOD_POWER_DOMAIN
        if (is_power_domain_module) {
            *api = &pd_driver;
            pd_ctx->bound_id = source_id;
            return FWK_SUCCESS;
        }
#endif
#ifdef BUILD_HAS_MOD_SYSTEM_POWER
        if (is_system_power_module) {
            *api = &pd_driver;
            pd_ctx->bound_id = source_id;
            return FWK_SUCCESS;
        }
#endif
        break;

    default:
#ifdef BUILD_HAS_MOD_POWER_DOMAIN
        if (is_power_domain_module) {
            pd_ctx->bound_id = source_id;
        }
#endif
        *api = &pd_driver;
        return FWK_SUCCESS;
    }

    pd_ctx->bound_id = FWK_ID_NONE;
    return FWK_E_ACCESS;
}

static int ppu_v1_start(fwk_id_t id)
{
    int status;
    struct ppu_v1_pd_ctx *pd_ctx;
    const struct mod_ppu_v1_config *module_config;

    if (!fwk_id_is_type(id, FWK_ID_TYPE_ELEMENT))
        return FWK_SUCCESS;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(id);
    module_config = fwk_module_get_data(fwk_id_build_module_id(id));
    fwk_assert(module_config != NULL);

    /* Register for power domain transition notifications */
    status = fwk_notification_subscribe(
        module_config->pd_notification_id,
        module_config->pd_source_id,
        id);
    if (status != FWK_SUCCESS)
        return status;

    switch (pd_ctx->config->pd_type) {
    case MOD_PD_TYPE_CORE:
    case MOD_PD_TYPE_CLUSTER:
        fwk_interrupt_clear_pending(pd_ctx->config->ppu.irq);
        fwk_interrupt_enable(pd_ctx->config->ppu.irq);
        break;
    default:
        /* Nothing to be done for other types */
        break;
    }

    return FWK_SUCCESS;
}

static int ppu_v1_process_notification(
    const struct fwk_event *event,
    struct fwk_event *resp_event)
{
    const struct mod_ppu_v1_config *module_config;
    struct ppu_v1_pd_ctx *pd_ctx;
    struct mod_pd_power_state_transition_notification_params *params;

    fwk_assert(fwk_id_is_type(event->target_id, FWK_ID_TYPE_ELEMENT));
    module_config =
        fwk_module_get_data(fwk_id_build_module_id(event->target_id));
    fwk_assert(
        fwk_id_is_equal(
            event->id,
            module_config->pd_notification_id));
    (void)module_config;

    params = (struct mod_pd_power_state_transition_notification_params *)
        event->params;

    if (params->state != MOD_PD_STATE_ON)
        return FWK_SUCCESS;

    pd_ctx = ppu_v1_ctx.pd_ctx_table + fwk_id_get_element_idx(event->target_id);

    switch (pd_ctx->config->pd_type) {
    case MOD_PD_TYPE_CORE:
        return ppu_v1_core_pd_init(pd_ctx);

    case MOD_PD_TYPE_CLUSTER:
        return ppu_v1_cluster_pd_init(pd_ctx);

    default:
        ppu_v1_init(pd_ctx->ppu);
        return FWK_SUCCESS;
    }
}

const struct fwk_module module_ppu_v1 = {
    .type = FWK_MODULE_TYPE_DRIVER,
    .api_count = MOD_PPU_V1_API_IDX_COUNT,
    .init = ppu_v1_mod_init,
    .element_init = ppu_v1_pd_init,
    .post_init = ppu_v1_post_init,
    .bind = ppu_v1_bind,
    .start = ppu_v1_start,
    .process_bind_request = ppu_v1_process_bind_request,
    .process_notification = ppu_v1_process_notification,
};
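
/*
 * Illustrative sketch (assumption): the module-level configuration consumed
 * by ppu_v1_start() ties the driver to power domain transition
 * notifications. The notifying entity below is a placeholder.
 *
 *     static const struct mod_ppu_v1_config ppu_v1_config = {
 *         .pd_notification_id = FWK_ID_NOTIFICATION_INIT(
 *             FWK_MODULE_IDX_POWER_DOMAIN,
 *             MOD_PD_NOTIFICATION_IDX_POWER_STATE_TRANSITION),
 *         .pd_source_id =
 *             FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_POWER_DOMAIN, 0),
 *     };
 */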