/*
 * Copyright (c) 2015-2018, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2020, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <cortex_a57.h>
#include <denver.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <plat/common/platform.h>

#include <bpmp_ipc.h>
#include <mce.h>
#include <memctrl_v2.h>
#include <security_engine.h>
#include <smmu.h>
#include <t18x_ari.h>
#include <tegra186_private.h>
#include <tegra_private.h>

extern void memcpy16(void *dest, const void *src, unsigned int length);

/* state id mask */
#define TEGRA186_STATE_ID_MASK		0xFU
/* constants to get power state's wake time */
#define TEGRA186_WAKE_TIME_MASK		0x0FFFFFF0U
#define TEGRA186_WAKE_TIME_SHIFT	4U
/* default core wake mask for CPU_SUSPEND */
#define TEGRA186_CORE_WAKE_MASK		0x180cU
/* context size to save during system suspend */
#define TEGRA186_SE_CONTEXT_SIZE	3U

static uint32_t se_regs[TEGRA186_SE_CONTEXT_SIZE];
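/*
 * Per-CPU PSCI data. Each entry is aligned to the cache writeback granule so
 * that it can be cleaned to DRAM and read back independently with the caches
 * disabled (see tegra_soc_validate_power_state()).
 */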
static struct tegra_psci_percpu_data {
	uint32_t wake_time;
} __aligned(CACHE_WRITEBACK_GRANULE) tegra_percpu_data[PLATFORM_CORE_COUNT];

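/*******************************************************************************
 * Validate the requested power state and stash the core's wake time
 ******************************************************************************/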
int32_t tegra_soc_validate_power_state(uint32_t power_state,
					psci_power_state_t *req_state)
{
	uint8_t state_id = (uint8_t)psci_get_pstate_id(power_state) & TEGRA186_STATE_ID_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret = PSCI_E_SUCCESS;

	/* save the core wake time (in TSC ticks) */
	tegra_percpu_data[cpu].wake_time = (power_state & TEGRA186_WAKE_TIME_MASK)
			<< TEGRA186_WAKE_TIME_SHIFT;

	/*
	 * Clean percpu_data[cpu] to DRAM. This needs to be done to ensure that
	 * the correct value is read in tegra_soc_pwr_domain_suspend(), which
	 * is called with caches disabled. It is possible to read a stale value
	 * from DRAM in that function, because the L2 cache is not flushed
	 * unless the cluster is entering CC6/CC7.
	 */
	clean_dcache_range((uint64_t)&tegra_percpu_data[cpu],
			sizeof(tegra_percpu_data[cpu]));

	/* Sanity check the requested state id */
	switch (state_id) {
	case PSTATE_ID_CORE_IDLE:
	case PSTATE_ID_CORE_POWERDN:

		if (psci_get_pstate_type(power_state) != PSTATE_TYPE_POWERDOWN) {
			ret = PSCI_E_INVALID_PARAMS;
			break;
		}

		/* Core powerdown request */
		req_state->pwr_domain_state[MPIDR_AFFLVL0] = state_id;
		req_state->pwr_domain_state[MPIDR_AFFLVL1] = state_id;

		break;

	default:
		ERROR("%s: unsupported state id (%d)\n", __func__, state_id);
		ret = PSCI_E_INVALID_PARAMS;
		break;
	}

	return ret;
}

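/*******************************************************************************
 * Platform handler for CPU_STANDBY; no SoC-specific action is required
 ******************************************************************************/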
int32_t tegra_soc_cpu_standby(plat_local_state_t cpu_state)
{
	(void)cpu_state;
	return PSCI_E_SUCCESS;
}

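/*******************************************************************************
 * Handler for CPU_SUSPEND: program the MCE for core idle/powerdown or, for
 * system suspend (SC7), save SE/MC context and request SC7 entry
 ******************************************************************************/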
int32_t tegra_soc_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state;
	uint8_t stateid_afflvl0, stateid_afflvl2;
	uint32_t cpu = plat_my_core_pos();
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t mc_ctx_base;
	uint32_t val;

	/* get the state ID */
	pwr_domain_state = target_state->pwr_domain_state;
	stateid_afflvl0 = pwr_domain_state[MPIDR_AFFLVL0] &
		TEGRA186_STATE_ID_MASK;
	stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;

	if ((stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ||
	    (stateid_afflvl0 == PSTATE_ID_CORE_POWERDN)) {

		/* Enter CPU idle/powerdown */
		val = (stateid_afflvl0 == PSTATE_ID_CORE_IDLE) ?
			(uint32_t)TEGRA_ARI_CORE_C6 : (uint32_t)TEGRA_ARI_CORE_C7;
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE, (uint64_t)val,
				tegra_percpu_data[cpu].wake_time, 0U);

	} else if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		/* save SE registers */
		se_regs[0] = mmio_read_32(TEGRA_SE0_BASE +
				SE_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[1] = mmio_read_32(TEGRA_RNG1_BASE +
				RNG_MUTEX_WATCHDOG_NS_LIMIT);
		se_regs[2] = mmio_read_32(TEGRA_PKA1_BASE +
				PKA_MUTEX_WATCHDOG_NS_LIMIT);

		/* save 'Secure Boot' Processor Feature Config Register */
		val = mmio_read_32(TEGRA_MISC_BASE + MISCREG_PFCFG);
		mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_BOOTP_FCFG, val);

		/* save MC context to TZDRAM */
		mc_ctx_base = params_from_bl2->tzdram_base;
		tegra_mc_save_context((uintptr_t)mc_ctx_base);

		/* Prepare for system suspend */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC7;
		cstate_info.system_state_force = 1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Loop until system suspend is allowed */
		do {
			val = (uint32_t)mce_command_handler(
					(uint64_t)MCE_CMD_IS_SC7_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
		} while (val == 0U);

		/* Instruct the MCE to enter system suspend state */
		(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	} else {
		; /* do nothing */
	}

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * Helper function to check if this is the last ON CPU in the cluster
 ******************************************************************************/
static bool tegra_last_cpu_in_cluster(const plat_local_state_t *states,
			uint32_t ncpu)
{
	plat_local_state_t target;
	bool last_on_cpu = true;
	uint32_t num_cpus = ncpu, pos = 0;

	do {
		target = states[pos];
		if (target != PLAT_MAX_OFF_STATE) {
			last_on_cpu = false;
		}
		--num_cpus;
		pos++;
	} while (num_cpus != 0U);

	return last_on_cpu;
}

/*******************************************************************************
 * Helper function to get target power state for the cluster
 ******************************************************************************/
static plat_local_state_t tegra_get_afflvl1_pwr_state(const plat_local_state_t *states,
			uint32_t ncpu)
{
	uint32_t core_pos = (uint32_t)read_mpidr() & (uint32_t)MPIDR_CPU_MASK;
	uint32_t cpu = plat_my_core_pos();
	int32_t ret;
	plat_local_state_t target = states[core_pos];
	mce_cstate_info_t cstate_info = { 0 };

	/* CPU suspend */
	if (target == PSTATE_ID_CORE_POWERDN) {
		/* Program default wake mask */
		cstate_info.wake_mask = TEGRA186_CORE_WAKE_MASK;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);

		/* Check if CCx state is allowed. */
		ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
				(uint64_t)TEGRA_ARI_CORE_C7,
				tegra_percpu_data[cpu].wake_time,
				0U);
		if (ret == 0) {
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	/* CPU off */
	if (target == PLAT_MAX_OFF_STATE) {
		/* Enable cluster powerdn from last CPU in the cluster */
		if (tegra_last_cpu_in_cluster(states, ncpu)) {
			/* Enable CC7 state and turn off wake mask */
			cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);

			/* Check if CCx state is allowed. */
			ret = mce_command_handler((uint64_t)MCE_CMD_IS_CCX_ALLOWED,
					(uint64_t)TEGRA_ARI_CORE_C7,
					MCE_CORE_SLEEP_TIME_INFINITE,
					0U);
			if (ret == 0) {
				target = PSCI_LOCAL_STATE_RUN;
			}

		} else {

			/* Turn off wake_mask */
			cstate_info.update_wake_mask = 1;
			mce_update_cstate_info(&cstate_info);
			target = PSCI_LOCAL_STATE_RUN;
		}
	}

	return target;
}

/*******************************************************************************
 * Platform handler to calculate the proper target power level at the
 * specified affinity level
 ******************************************************************************/
plat_local_state_t tegra_soc_get_target_pwr_state(uint32_t lvl,
					const plat_local_state_t *states,
					uint32_t ncpu)
{
	plat_local_state_t target = PSCI_LOCAL_STATE_RUN;
	uint32_t cpu = plat_my_core_pos();

	/* System Suspend */
	if ((lvl == (uint32_t)MPIDR_AFFLVL2) &&
	    (states[cpu] == PSTATE_ID_SOC_POWERDN)) {
		target = PSTATE_ID_SOC_POWERDN;
	}

	/* CPU off, CPU suspend */
	if (lvl == (uint32_t)MPIDR_AFFLVL1) {
		target = tegra_get_afflvl1_pwr_state(states, ncpu);
	}

	/* target cluster/system state */
	return target;
}

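/*******************************************************************************
 * Handler called before the final wfi on powerdown. For SC7 entry, hash and
 * copy BL31 to TZDRAM so that it can be restored on system resume
 ******************************************************************************/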
int32_t tegra_soc_pwr_domain_power_down_wfi(const psci_power_state_t *target_state)
{
	const plat_local_state_t *pwr_domain_state =
		target_state->pwr_domain_state;
	const plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint8_t stateid_afflvl2 = pwr_domain_state[PLAT_MAX_PWR_LVL] &
		TEGRA186_STATE_ID_MASK;
	uint64_t val;
	uint64_t src_len_in_bytes = (uint64_t)(((uintptr_t)(&__BL31_END__) -
					(uintptr_t)BL31_BASE));
	int32_t ret;

	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {
		val = params_from_bl2->tzdram_base +
			tegra186_get_mc_ctx_size();

		/* Initialise communication channel with BPMP */
		assert(tegra_bpmp_ipc_init() == 0);

		/* Enable SE clock */
		ret = tegra_bpmp_ipc_enable_clock(TEGRA186_CLK_SE);
		if (ret != 0) {
			ERROR("Failed to enable clock\n");
			return ret;
		}

		/*
		 * Generate/save SHA256 of ATF during SC7 entry
		 */
		if (tegra_se_save_sha256_hash(BL31_BASE,
					(uint32_t)src_len_in_bytes) != 0) {
			ERROR("Hash calculation failed. Reboot\n");
			(void)tegra_soc_prepare_system_reset();
		}

		/*
		 * The TZRAM loses power when we enter system suspend. To
		 * allow graceful exit from system suspend, we need to copy
		 * BL3-1 over to TZDRAM.
		 */
		val = params_from_bl2->tzdram_base +
			tegra186_get_mc_ctx_size();
		memcpy16((void *)(uintptr_t)val, (void *)(uintptr_t)BL31_BASE,
			 (uintptr_t)BL31_END - (uintptr_t)BL31_BASE);

		/*
		 * Save the code base and size; these are used by the SC7
		 * resume firmware (SC7-RF) to verify the BL31 binary.
		 */
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV68_LO,
				(uint32_t)val);
		mmio_write_32(TEGRA_SCRATCH_BASE + SECURE_SCRATCH_RSV0_HI,
				(uint32_t)src_len_in_bytes);

		ret = tegra_bpmp_ipc_disable_clock(TEGRA186_CLK_SE);
		if (ret != 0) {
			ERROR("Failed to disable clock\n");
			return ret;
		}
	}

	return PSCI_E_SUCCESS;
}

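/*******************************************************************************
 * Early powerdown hook during CPU_SUSPEND; not supported on this platform
 ******************************************************************************/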
int32_t tegra_soc_pwr_domain_suspend_pwrdown_early(const psci_power_state_t *target_state)
{
	return PSCI_E_NOT_SUPPORTED;
}

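/*******************************************************************************
 * Handler for CPU_ON: ask the MCE to bring the target core online
 ******************************************************************************/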
int32_t tegra_soc_pwr_domain_on(u_register_t mpidr)
{
	int32_t ret = PSCI_E_SUCCESS;
	uint64_t target_cpu = mpidr & MPIDR_CPU_MASK;
	uint64_t target_cluster = (mpidr & MPIDR_CLUSTER_MASK) >>
			MPIDR_AFFINITY_BITS;

	if (target_cluster > ((uint32_t)PLATFORM_CLUSTER_COUNT - 1U)) {

		ERROR("%s: unsupported CPU (0x%lx)\n", __func__, mpidr);
		ret = PSCI_E_NOT_PRESENT;

	} else {
		/* construct the target CPU # */
		target_cpu |= (target_cluster << 2);

		(void)mce_command_handler((uint64_t)MCE_CMD_ONLINE_CORE, target_cpu, 0U, 0U);
	}

	return ret;
}

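/*******************************************************************************
 * Handler called when a CPU finishes powering on: enable L2 ECC/parity on
 * Cortex-A57 cores, reset the cstate info and, after SC7 exit, restore the
 * SE register context and re-initialise the SMMU
 ******************************************************************************/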
int32_t tegra_soc_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint8_t stateid_afflvl2 = target_state->pwr_domain_state[PLAT_MAX_PWR_LVL];
	uint8_t stateid_afflvl0 = target_state->pwr_domain_state[MPIDR_AFFLVL0];
	mce_cstate_info_t cstate_info = { 0 };
	uint64_t impl, val;
	const plat_params_from_bl2_t *plat_params = bl31_get_plat_params();

	impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/*
	 * Enable ECC and Parity Protection for Cortex-A57 CPUs (Tegra186
	 * A02p and beyond).
	 */
	if ((plat_params->l2_ecc_parity_prot_dis != 1) && (impl != DENVER_IMPL)) {

		val = read_l2ctlr_el1();
		val |= CORTEX_A57_L2_ECC_PARITY_PROTECTION_BIT;
		write_l2ctlr_el1(val);
	}

	/*
	 * Reset the power state info for CPUs when onlining. We set the
	 * deepest power state when offlining a core, but that may not be
	 * what the non-secure software controlling idle states requests.
	 * That software re-initialises this info once the core is back
	 * online.
	 */
	if (stateid_afflvl0 == PLAT_MAX_OFF_STATE) {

		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	/*
	 * Check if we are exiting from deep sleep and restore SE
	 * context if we are.
	 */
	if (stateid_afflvl2 == PSTATE_ID_SOC_POWERDN) {

		mmio_write_32(TEGRA_SE0_BASE + SE_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[0]);
		mmio_write_32(TEGRA_RNG1_BASE + RNG_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[1]);
		mmio_write_32(TEGRA_PKA1_BASE + PKA_MUTEX_WATCHDOG_NS_LIMIT,
			se_regs[2]);

		/* Init SMMU */
		tegra_smmu_init();

		/*
		 * Reset the power state info for the last core doing SC7
		 * entry and exit. The deepest power states were set for
		 * SC7 entry, which may not be what the non-secure SW
		 * controlling idle states requests.
		 */
		cstate_info.cluster = (uint32_t)TEGRA_ARI_CLUSTER_CC7;
		cstate_info.system = (uint32_t)TEGRA_ARI_SYSTEM_SC1;
		cstate_info.update_wake_mask = 1;
		mce_update_cstate_info(&cstate_info);
	}

	return PSCI_E_SUCCESS;
}

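/*******************************************************************************
 * Handler for CPU_OFF: disable Denver's DCO operations and power down the core
 ******************************************************************************/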
int32_t tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	uint64_t impl = (read_midr() >> MIDR_IMPL_SHIFT) & (uint64_t)MIDR_IMPL_MASK;

	(void)target_state;

	/* Disable Denver's DCO operations */
	if (impl == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler((uint64_t)MCE_CMD_ENTER_CSTATE,
			(uint64_t)TEGRA_ARI_CORE_C7, MCE_CORE_SLEEP_TIME_INFINITE, 0U);

	return PSCI_E_SUCCESS;
}

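/*******************************************************************************
 * Handler for SYSTEM_OFF: ask the MCE to power off the CCPLEX and spin
 ******************************************************************************/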
__dead2 void tegra_soc_prepare_system_off(void)
{
	/* power off the entire system */
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_POWER_OFF);

	wfi();

	/* wait for the system to power down */
	for (;;) {
		;
	}
}

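/*******************************************************************************
 * Handler for SYSTEM_RESET: ask the MCE to reboot the CCPLEX
 ******************************************************************************/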
int32_t tegra_soc_prepare_system_reset(void)
{
	mce_enter_ccplex_state((uint32_t)TEGRA_ARI_MISC_CCPLEX_SHUTDOWN_REBOOT);

	return PSCI_E_SUCCESS;
}