/tf-a-ffa_el3_spmc/plat/qti/qtiseclib/src/ |
A D | qtiseclib_interface_stub.c |
      91  void qtiseclib_psci_node_on_finish(const uint8_t *states)         in qtiseclib_psci_node_on_finish() argument
      99  void qtiseclib_psci_node_power_off(const uint8_t *states)         in qtiseclib_psci_node_power_off() argument
     103  void qtiseclib_psci_node_suspend(const uint8_t *states)           in qtiseclib_psci_node_suspend() argument
     107  void qtiseclib_psci_node_suspend_finish(const uint8_t *states)    in qtiseclib_psci_node_suspend_finish() argument
|
/tf-a-ffa_el3_spmc/plat/qti/qtiseclib/inc/ |
A D | qtiseclib_interface.h |
      76  void qtiseclib_psci_node_on_finish(const uint8_t *states);
      78  void qtiseclib_psci_node_power_off(const uint8_t *states);
      79  void qtiseclib_psci_node_suspend(const uint8_t *states);
      80  void qtiseclib_psci_node_suspend_finish(const uint8_t *states);
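These qtiseclib entry points take the same per-level local power state array that the PSCI framework hands to platform handlers. As a hedged illustration (not the actual QTI platform code), a power-off hook could forward that array as sketched below; the handler name qti_node_power_off and the include paths are assumptions.

    /*
     * Illustrative sketch only, not the real QTI platform handler. It assumes
     * the usual TF-A psci_power_state_t layout, where pwr_domain_state[] is an
     * array of plat_local_state_t (uint8_t) values, one per power level, which
     * matches the const uint8_t * expected by the stubs listed above.
     */
    #include <lib/psci/psci.h>          /* psci_power_state_t (assumed path) */
    #include "qtiseclib_interface.h"

    static void qti_node_power_off(const psci_power_state_t *target_state)
    {
        /* Forward the per-level states requested for this power-off. */
        qtiseclib_psci_node_power_off(target_state->pwr_domain_state);
    }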
|
/tf-a-ffa_el3_spmc/plat/nvidia/tegra/soc/t194/ |
A D | plat_psci_handlers.c |
     184  static bool tegra_last_on_cpu_in_cluster(const plat_local_state_t *states,    in tegra_last_on_cpu_in_cluster() argument
     192  target = states[pos];    in tegra_last_on_cpu_in_cluster()
     206  static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,    in tegra_get_afflvl1_pwr_state() argument
     210  plat_local_state_t target = states[core_pos];    in tegra_get_afflvl1_pwr_state()
     217  if (tegra_last_on_cpu_in_cluster(states, ncpu)) {    in tegra_get_afflvl1_pwr_state()
     243  const plat_local_state_t *states,    in tegra_soc_get_target_pwr_state() argument
     250  if ((lvl == (uint32_t)MPIDR_AFFLVL2) && (states[cpu] == PSTATE_ID_SOC_POWERDN)) {    in tegra_soc_get_target_pwr_state()
     256  target = tegra_get_afflvl1_pwr_state(states, ncpu);    in tegra_soc_get_target_pwr_state()
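The t194 hits (and the similar t186 ones below) show the cluster-level decision being driven by a scan of the states array: a cluster state is only honoured once every CPU slot already requests the OFF state. A minimal sketch of that scan, assuming the generic PLAT_MAX_OFF_STATE encoding rather than the exact Tegra constants:

    /*
     * Minimal sketch of the "last CPU on in the cluster" check suggested by
     * the hits above; the loop structure and the PLAT_MAX_OFF_STATE comparison
     * are assumptions, not the verbatim Tegra implementation.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <platform_def.h>       /* plat_local_state_t, PLAT_MAX_OFF_STATE (assumed) */

    static bool last_on_cpu_in_cluster(const plat_local_state_t *states,
                                       uint32_t ncpu)
    {
        for (uint32_t pos = 0U; pos < ncpu; pos++) {
            if (states[pos] != PLAT_MAX_OFF_STATE) {
                /* Some other CPU is still on (or only in retention). */
                return false;
            }
        }
        return true;
    }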
|
/tf-a-ffa_el3_spmc/plat/nvidia/tegra/soc/t186/ |
A D | plat_psci_handlers.c |
     175  static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,    in tegra_last_cpu_in_cluster() argument
     183  target = states[pos];    in tegra_last_cpu_in_cluster()
     197  static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,    in tegra_get_afflvl1_pwr_state() argument
     203  plat_local_state_t target = states[core_pos];    in tegra_get_afflvl1_pwr_state()
     226  if (tegra_last_cpu_in_cluster(states, ncpu)) {    in tegra_get_afflvl1_pwr_state()
     258  const plat_local_state_t *states,    in tegra_soc_get_target_pwr_state() argument
     266  (states[cpu] == PSTATE_ID_SOC_POWERDN)) {    in tegra_soc_get_target_pwr_state()
     272  target = tegra_get_afflvl1_pwr_state(states, ncpu);    in tegra_soc_get_target_pwr_state()
|
/tf-a-ffa_el3_spmc/fdts/ |
A D | tc.dts |
      61  idle-states {
      88  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
      98  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
     108  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
     118  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
     128  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
     138  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
     148  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
     158  cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>;
|
A D | fvp-foundation-gicv2-psci.dts | 55 idle-states {
|
A D | fvp-foundation-gicv3-psci.dts | 55 idle-states {
|
A D | fvp-base-gicv2-psci.dts | 54 idle-states {
|
A D | fvp-base-gicv2-psci-aarch32.dts | 55 idle-states {
|
A D | fvp-base-gicv3-psci-aarch32-common.dtsi | 47 idle-states {
|
A D | fvp-defs-dynamiq.dtsi | 36 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; \
|
A D | fvp-base-gicv3-psci-common.dtsi | 107 idle-states {
|
A D | fvp-defs.dtsi | 58 cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; \
|
/tf-a-ffa_el3_spmc/plat/common/ |
A D | plat_psci_common.c |
     149  const plat_local_state_t *states,    in plat_get_target_pwr_state() argument
     153  const plat_local_state_t *st = states;    in plat_get_target_pwr_state()
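The generic plat_get_target_pwr_state referenced here coordinates the per-CPU requests by returning the shallowest one, which works because plat_local_state_t encodes deeper states with larger values. A simplified sketch of that logic, consistent with the hits at lines 149 and 153; the real implementation may differ in loop structure and assertions:

    /*
     * Simplified sketch of the default coordination policy: the target state
     * of a power domain is the minimum (shallowest) of the ncpu requested
     * states. Platform headers are assumed for the types and constants used.
     */
    #include <platform_def.h>       /* plat_local_state_t, PLAT_MAX_OFF_STATE (assumed) */

    plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
                                                 const plat_local_state_t *states,
                                                 unsigned int ncpu)
    {
        plat_local_state_t target = PLAT_MAX_OFF_STATE;
        const plat_local_state_t *st = states;

        (void)lvl;      /* the default policy ignores the power level */

        for (unsigned int n = 0U; n < ncpu; n++, st++) {
            if (*st < target) {
                target = *st;
            }
        }
        return target;
    }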
|
/tf-a-ffa_el3_spmc/plat/nvidia/tegra/common/ |
A D | tegra_pm.c |
     325  const plat_local_state_t *states,    in plat_get_target_pwr_state() argument
     328  return tegra_soc_get_target_pwr_state(lvl, states, ncpu);    in plat_get_target_pwr_state()
|
/tf-a-ffa_el3_spmc/plat/nvidia/tegra/soc/t210/ |
A D | plat_psci_handlers.c |
     102  const plat_local_state_t *states,    in tegra_soc_get_target_pwr_state() argument
     113  target = *(states + core_pos);    in tegra_soc_get_target_pwr_state()
     115  target = *(states + cpu);    in tegra_soc_get_target_pwr_state()
|
/tf-a-ffa_el3_spmc/plat/nvidia/tegra/include/ |
A D | tegra_private.h | 119 const plat_local_state_t *states,
|
/tf-a-ffa_el3_spmc/plat/mediatek/mt8173/ |
A D | plat_pm.c |
     589  const plat_local_state_t *states,    in plat_get_target_pwr_state() argument
     597  temp = *states++;    in plat_get_target_pwr_state()
|
/tf-a-ffa_el3_spmc/include/plat/common/ |
A D | platform.h | 271 const plat_local_state_t *states,
|
/tf-a-ffa_el3_spmc/ |
A D | readme.rst | 8 or AArch64 execution states.
|
/tf-a-ffa_el3_spmc/docs/ |
A D | index.rst | 40 states.
|
/tf-a-ffa_el3_spmc/docs/perf/ |
A D | psci-performance-juno.rst |
      25  Juno supports CPU, cluster and system power down states, corresponding to power
      26  levels 0, 1 and 2 respectively. It does not support any retention states.
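Those three levels map directly onto the indices of the pwr_domain_state array that platform suspend handlers receive. A hedged sketch using the generic Arm platform constants; the handler name is hypothetical and the constant names are assumptions for this illustration:

    /*
     * Hypothetical handler sketch: inspect the per-level states for the three
     * Juno power levels described above. ARM_PWR_LVL* and ARM_LOCAL_STATE_OFF
     * follow the generic Arm platform code (assumed via platform_def.h).
     */
    #include <platform_def.h>
    #include <lib/psci/psci.h>      /* psci_power_state_t (assumed path) */

    static void sketch_pwr_domain_suspend(const psci_power_state_t *target_state)
    {
        if (target_state->pwr_domain_state[ARM_PWR_LVL2] == ARM_LOCAL_STATE_OFF) {
            /* System-level power down (level 2) was requested. */
        } else if (target_state->pwr_domain_state[ARM_PWR_LVL1] == ARM_LOCAL_STATE_OFF) {
            /* Cluster-level power down (level 1) was requested. */
        } else {
            /* Only the CPU itself (level 0) is being powered down. */
        }
    }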
|
/tf-a-ffa_el3_spmc/docs/getting_started/ |
A D | porting-guide.rst |
     155  states for each level may be sparsely allocated between 0 and this value
     157  value to initialize the local power states of the power domain nodes and
     166  power states within PSCI_CPU_SUSPEND call.
     170  Defines the maximum number of local power states per power domain level
     172  most platforms just support a maximum of two local power states at each
     174  account for more local power states, then it must redefine this macro.
    2215  power states.
    2219  of the power state i.e. for two power states X & Y, if X < Y
    2333  target local power states for the CPU power domain and its parent
    2393  low power states. The generic code expects the handler to succeed.
          [all …]
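The porting-guide hits around lines 155-174 concern the platform macros that bound the local power state encoding. An illustrative platform_def.h fragment follows; the values are chosen purely as an example, not taken from any real platform:

    /*
     * Illustrative values only; each platform picks its own encoding within
     * the rules described in the porting guide. U() comes from utils_def.h.
     */
    #define PLAT_MAX_PWR_LVL         U(2)   /* levels: CPU, cluster, system   */
    #define PLAT_MAX_RET_STATE       U(1)   /* deepest retention state ID     */
    #define PLAT_MAX_OFF_STATE       U(2)   /* deepest power-down state ID    */
    /* Only needed when a level supports more than the default two states. */
    #define PLAT_MAX_PWR_LVL_STATES  U(2)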
|
/tf-a-ffa_el3_spmc/docs/components/ |
A D | firmware-update.rst |
     100  BL1 to update its FWU image state. The BL1 image states and valid state
     106  The following is a brief description of the supported states:
|
/tf-a-ffa_el3_spmc/docs/threat_model/ |
A D | threat_model.rst |
     607  | | states, register contents of Secure world or |
     732  | | both Secure and Non-secure states. This allows |
|