/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <lib/spinlock.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <platform_tsp.h>
#if SPMC_AT_EL3
#include <services/ffa_svc.h>
#include <lib/psci/psci.h>

#include "ffa_helpers.h"
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#endif

#include "tsp_private.h"


/*******************************************************************************
 * Lock to control access to the console
 ******************************************************************************/
spinlock_t console_lock;

/*******************************************************************************
 * Per cpu data structure to populate parameters for an SMC in C code and use
 * a pointer to this structure in assembler code to populate x0-x7
 ******************************************************************************/
static tsp_args_t tsp_smc_args[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Per cpu data structure to keep track of TSP activity
 ******************************************************************************/
work_statistics_t tsp_stats[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * The TSP memory footprint starts at address BL32_BASE and ends with the
 * linker symbol __BL32_END__. Use these addresses to compute the TSP image
 * size.
 ******************************************************************************/
#define BL32_TOTAL_LIMIT BL32_END
#define BL32_TOTAL_SIZE (BL32_TOTAL_LIMIT - (unsigned long) BL32_BASE)

#if SPMC_AT_EL3
static unsigned int spmc_id;
static unsigned int partition_id;

/* Partition Mailbox */
static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);

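/*
 * Descriptor for the partition's FF-A mailbox. The 'send' and 'recv' pointers
 * are set in tsp_main() to the TX and RX pages registered with the SPMC via
 * FFA_RXTX_MAP.
 */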
struct mailbox {
	void *send;
	const void *recv;
};
struct mailbox mailbox;

#endif

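/*
 * Helper to issue an SMC from C code. Each argument is pinned to a specific
 * general purpose register (x0-x7) with register-asm variables so the call
 * follows the SMC calling convention, and the values returned by the secure
 * monitor in x0-x7 are copied back into a tsp_args_t for the caller.
 */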
tsp_args_t tsp_smc(uint32_t func, uint64_t arg0,
		   uint64_t arg1, uint64_t arg2,
		   uint64_t arg3, uint64_t arg4,
		   uint64_t arg5, uint64_t arg6)
{
	tsp_args_t ret_args = {0};
	register uint64_t r0 __asm__("x0") = func;
	register uint64_t r1 __asm__("x1") = arg0;
	register uint64_t r2 __asm__("x2") = arg1;
	register uint64_t r3 __asm__("x3") = arg2;
	register uint64_t r4 __asm__("x4") = arg3;
	register uint64_t r5 __asm__("x5") = arg4;
	register uint64_t r6 __asm__("x6") = arg5;
	register uint64_t r7 __asm__("x7") = arg6;

	__asm__ volatile(
			"smc #0"
			: /* Output registers, also used as inputs ('+' constraint). */
			  "+r"(r0), "+r"(r1), "+r"(r2), "+r"(r3), "+r"(r4), "+r"(r5),
			  "+r"(r6), "+r"(r7));

	ret_args._regs[0] = r0;
	ret_args._regs[1] = r1;
	ret_args._regs[2] = r2;
	ret_args._regs[3] = r3;
	ret_args._regs[4] = r4;
	ret_args._regs[5] = r5;
	ret_args._regs[6] = r6;
	ret_args._regs[7] = r7;

	return ret_args;
}

static tsp_args_t *set_smc_args(uint64_t arg0,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id;
	tsp_args_t *pcpu_smc_args;

	/*
	 * Return to the Secure Monitor by raising an SMC. The results of the
	 * service are passed as arguments to the SMC.
	 */
	linear_id = plat_my_core_pos();
	pcpu_smc_args = &tsp_smc_args[linear_id];
	write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
	write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
	write_sp_arg(pcpu_smc_args, TSP_ARG2, arg2);
	write_sp_arg(pcpu_smc_args, TSP_ARG3, arg3);
	write_sp_arg(pcpu_smc_args, TSP_ARG4, arg4);
	write_sp_arg(pcpu_smc_args, TSP_ARG5, arg5);
	write_sp_arg(pcpu_smc_args, TSP_ARG6, arg6);
	write_sp_arg(pcpu_smc_args, TSP_ARG7, arg7);

	return pcpu_smc_args;
}

/*******************************************************************************
 * Setup function for TSP.
 ******************************************************************************/
void tsp_setup(void)
{
	/* Perform early platform-specific setup */
	tsp_early_platform_setup();

	/* Perform late platform-specific setup */
	tsp_plat_arch_setup();

#if ENABLE_PAUTH
	/*
	 * Assert that the ARMv8.3-PAuth registers are present or an access
	 * fault will be triggered when they are being saved or restored.
	 */
	assert(is_armv8_3_pauth_present());
#endif /* ENABLE_PAUTH */
}

/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_vector_table' jump table.
 ******************************************************************************/
#if SPMC_AT_EL3
tsp_args_t *tsp_main(uintptr_t secondary_ep)
#else
uint64_t tsp_main(void)
#endif
{
	NOTICE("TSP: %s\n", version_string);
	NOTICE("TSP: %s\n", build_message);
	INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
	INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);

	uint32_t linear_id = plat_my_core_pos();

	/* Initialize the platform */
	tsp_platform_setup();

	/* Initialize secure/applications state here */
	tsp_generic_timer_start();

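	/*
	 * With an EL3 SPMC the TSP behaves as an FF-A secure partition: it
	 * registers its secondary entrypoint, discovers its own endpoint ID
	 * and the SPMC ID, and maps its RX/TX mailbox before parking itself
	 * in FFA_MSG_WAIT.
	 */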
#if SPMC_AT_EL3
	{
		tsp_args_t smc_args = {0};

		/* Register secondary entrypoint with the SPMC. */
		smc_args = tsp_smc(FFA_SECONDARY_EP_REGISTER_SMC64,
				   (uint64_t) secondary_ep,
				   0, 0, 0, 0, 0, 0);
		if (smc_args._regs[TSP_ARG0] != FFA_SUCCESS_SMC32)
			ERROR("TSP could not register secondary ep (0x%llx)\n",
			      smc_args._regs[2]);

		/* Get TSP's endpoint id */
		smc_args = tsp_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
		if (smc_args._regs[TSP_ARG0] != FFA_SUCCESS_SMC32) {
			ERROR("TSP could not get own ID (0x%llx) on core%d\n",
			      smc_args._regs[2], linear_id);
			panic();
		}

		INFO("TSP FF-A endpoint id = 0x%llx\n", smc_args._regs[2]);
		partition_id = smc_args._regs[2];

		/* Get the SPMC ID */
		smc_args = tsp_smc(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
		if (smc_args._regs[TSP_ARG0] != FFA_SUCCESS_SMC32) {
			ERROR("TSP could not get SPMC ID (0x%llx) on core%d\n",
			      smc_args._regs[2], linear_id);
			panic();
		}

		spmc_id = smc_args._regs[2];

		/* Call RXTX_MAP to map a 4k RX and TX buffer. */
		if (ffa_rxtx_map((uintptr_t) send_page, (uintptr_t) recv_page, 1)) {
			ERROR("TSP could not map its RX/TX buffers\n");
			panic();
		}

		mailbox.send = send_page;
		mailbox.recv = recv_page;
	}
#endif
	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
	     read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count,
	     tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);
#endif
#if SPMC_AT_EL3
	return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
#else
	return (uint64_t) &tsp_vector_table;
#endif
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been setup in response to an earlier
 * psci cpu_on request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_on_main(void)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize secure/applications state here */
	tsp_generic_timer_start();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_on_count);
	spin_unlock(&console_lock);
#endif

#if SPMC_AT_EL3
	return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
#else
	/* Indicate to the SPD that we have completed turning ourselves on */
	return set_smc_args(TSP_ON_DONE, 0, 0, 0, 0, 0, 0, 0);
#endif
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before this cpu is turned off in response to a psci cpu_off request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_off_main(uint64_t arg0,
			   uint64_t arg1,
			   uint64_t arg2,
			   uint64_t arg3,
			   uint64_t arg4,
			   uint64_t arg5,
			   uint64_t arg6,
			   uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * This cpu is being turned off, so disable the timer to prevent the
	 * secure timer interrupt from interfering with power down. A pending
	 * interrupt will be lost but we do not care as we are turning off.
	 */
	tsp_generic_timer_stop();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_off_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_off_count);
	spin_unlock(&console_lock);
#endif

#if SPMC_AT_EL3
	{
		unsigned int tsp_id;
		tsp_args_t smc_args = {0};

		/* Get the TSP ID */
		smc_args = tsp_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
		if (smc_args._regs[TSP_ARG0] != FFA_SUCCESS_SMC32) {
			ERROR("TSP could not get own ID (0x%llx) on core%d\n",
			      smc_args._regs[2], linear_id);
			panic();
		}

		tsp_id = smc_args._regs[2];

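		/*
		 * Acknowledge the CPU_OFF request with a direct response that
		 * carries a framework message holding the PM response code.
		 */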
		return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
				    tsp_id << FFA_DIRECT_MSG_SOURCE_SHIFT |
				    spmc_id,
				    FFA_DIRECT_FRAMEWORK_MSG_MASK |
				    (FFA_PM_MSG_PM_RESP & FFA_PM_MSG_MASK),
				    0, 0, 0, 0, 0);
	}
#else
	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
#endif
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload before
 * this cpu's architectural state is saved in response to an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_suspend_main(uint64_t arg0,
			       uint64_t arg1,
			       uint64_t arg2,
			       uint64_t arg3,
			       uint64_t arg4,
			       uint64_t arg5,
			       uint64_t arg6,
			       uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * Save the timer context and disable it to prevent the secure timer
	 * interrupt from interfering with wakeup from the suspend state.
	 */
	tsp_generic_timer_save();
	tsp_generic_timer_stop();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_suspend_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_suspend_count);
	spin_unlock(&console_lock);
#endif

	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_SUSPEND_DONE, 0, 0, 0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
tsp_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
			      uint64_t arg1,
			      uint64_t arg2,
			      uint64_t arg3,
			      uint64_t arg4,
			      uint64_t arg5,
			      uint64_t arg6,
			      uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Restore the generic timer context */
	tsp_generic_timer_restore();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx resumed. maximum off power level %lld\n",
	     read_mpidr(), max_off_pwrlvl);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_resume_count);
	spin_unlock(&console_lock);
#endif
	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_RESUME_DONE, 0, 0, 0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before the system is switched off (in response to a psci SYSTEM_OFF request)
 ******************************************************************************/
tsp_args_t *tsp_system_off_main(uint64_t arg0,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx SYSTEM_OFF request\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count);
	spin_unlock(&console_lock);
#endif

	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_SYSTEM_OFF_DONE, 0, 0, 0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before the system is reset (in response to a psci SYSTEM_RESET request)
 ******************************************************************************/
tsp_args_t *tsp_system_reset_main(uint64_t arg0,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
	     tsp_stats[linear_id].smc_count,
	     tsp_stats[linear_id].eret_count);
	spin_unlock(&console_lock);
#endif

	/* Indicate to the SPD that we have completed this request */
	return set_smc_args(TSP_SYSTEM_RESET_DONE, 0, 0, 0, 0, 0, 0, 0);
}

/*******************************************************************************
 * TSP fast smc handler. The secure monitor jumps to this function by
 * doing the ERET after populating X0-X7 registers. The arguments are received
 * in the function arguments in order. Once the service is rendered, this
 * function returns to the Secure Monitor by raising an SMC.
 ******************************************************************************/
tsp_args_t *tsp_smc_handler(uint64_t func,
			       uint64_t arg1,
			       uint64_t arg2,
			       uint64_t arg3,
			       uint64_t arg4,
			       uint64_t arg5,
			       uint64_t arg6,
			       uint64_t arg7)
{
	uint128_t service_args;
	uint64_t service_arg0;
	uint64_t service_arg1;
	uint64_t results[2];
	uint32_t linear_id = plat_my_core_pos();

	/* Update this cpu's statistics */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;

#if LOG_LEVEL >= LOG_LEVEL_INFO
	spin_lock(&console_lock);
	INFO("TSP: cpu 0x%lx received %s smc 0x%llx\n", read_mpidr(),
		((func >> 31) & 1) == 1 ? "fast" : "yielding",
		func);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count);
	spin_unlock(&console_lock);
#endif

	/* Render secure services and obtain results here */
	results[0] = arg1;
	results[1] = arg2;

	/*
	 * Request a service back from dispatcher/secure monitor.
	 * This call returns and thereafter resumes execution.
	 */
	service_args = tsp_get_magic();
	service_arg0 = (uint64_t)service_args;
	service_arg1 = (uint64_t)(service_args >> 64U);

#if CTX_INCLUDE_MTE_REGS
	/*
	 * Write a dummy value to an MTE register, to simulate usage in the
	 * secure world
	 */
	write_gcr_el1(0x99);
#endif

	/* Determine the function to perform based on the function ID */
	switch (TSP_BARE_FID(func)) {
	case TSP_ADD:
		results[0] += service_arg0;
		results[1] += service_arg1;
		break;
	case TSP_SUB:
		results[0] -= service_arg0;
		results[1] -= service_arg1;
		break;
	case TSP_MUL:
		results[0] *= service_arg0;
		results[1] *= service_arg1;
		break;
	case TSP_DIV:
		results[0] /= service_arg0 ? service_arg0 : 1;
		results[1] /= service_arg1 ? service_arg1 : 1;
		break;
	default:
		break;
	}

	return set_smc_args(func, 0,
			    results[0],
			    results[1],
			    0, 0, 0, 0);
}

/*******************************************************************************
 * TSP smc abort handler. This function is called when aborting a preempted
 * yielding SMC request. It should clean up all resources owned by the SMC
 * handler, such as locks or dynamically allocated memory, so that following
 * SMC requests are executed in a clean environment.
 ******************************************************************************/
tsp_args_t *tsp_abort_smc_handler(uint64_t func,
				  uint64_t arg1,
				  uint64_t arg2,
				  uint64_t arg3,
				  uint64_t arg4,
				  uint64_t arg5,
				  uint64_t arg6,
				  uint64_t arg7)
{
	return set_smc_args(TSP_ABORT_DONE, 0, 0, 0, 0, 0, 0, 0);
}

#if SPMC_AT_EL3

/*******************************************************************************
 * This enum is used to handle test cases driven from the FFA Test Driver
 ******************************************************************************/
/* Keep in Sync with FF-A Test Driver */
enum message_t
{
	/* Partition Only Messages. */
	FF_A_RELAY_MESSAGE = 0,

	/* Basic Functionality. */
	FF_A_ECHO_MESSAGE,
	FF_A_RELAY_MESSAGE_EL3,

	/* Memory Sharing. */
	FF_A_MEMORY_SHARE,
	FF_A_MEMORY_SHARE_FRAGMENTED,
	FF_A_MEMORY_LEND,
	FF_A_MEMORY_LEND_FRAGMENTED,

	LAST,
	FF_A_RUN_ALL = 255,
	FF_A_OP_MAX = 256
};


/*******************************************************************************
 * This function handles framework messages. Currently only PM.
 ******************************************************************************/
tsp_args_t *handle_framework_message(uint64_t arg0,
				     uint64_t arg1,
				     uint64_t arg2,
				     uint64_t arg3,
				     uint64_t arg4,
				     uint64_t arg5,
				     uint64_t arg6,
				     uint64_t arg7)
{
	/*
	 * Check if it is a power management message from the SPMC to
	 * turn off this cpu, else bail out for now.
	 */
	if (FFA_SENDER(arg1) != spmc_id)
		goto err;

	/* Check it is a PM request message */
	if ((arg2 & FFA_PM_MSG_MASK) != FFA_PM_MSG_PSCI_REQ)
		goto err;

	/* Check it is a PSCI CPU_OFF request */
	if (arg3 != PSCI_CPU_OFF)
		goto err;

	/* Everything checks out. Do the needful */
	return tsp_cpu_off_main(arg0, arg1, arg2, arg3,
				arg4, arg5, arg6, arg7);
err:
	/* TODO Add support in SPMC for FFA_ERROR. */
	return set_smc_args(FFA_ERROR, 0, 0, 0, 0, 0, 0, 0);
}

/*******************************************************************************
 * Helper function to swap source and destination partition IDs
 ******************************************************************************/
void swap_src_dst(uint16_t *src, uint16_t *dst)
{
	uint32_t tmp;

	tmp = *src;
	*src = *dst;
	*dst = tmp;
}

/*******************************************************************************
 * Wrapper function to send a direct response
 ******************************************************************************/
tsp_args_t *ffa_msg_send_direct_resp(uint16_t sender,
			      uint16_t receiver,
			      uint32_t arg3,
			      uint32_t arg4,
			      uint32_t arg5,
			      uint32_t arg6,
			      uint32_t arg7)
{
	uint32_t flags = 0;
	uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
			       (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);

	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC64, src_dst_ids,
			    flags, arg3, arg4, arg5, arg6, arg7);
}
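
/*
 * Typical usage (illustrative): a handler that has finished servicing a
 * direct request swaps the endpoint IDs and replies with a direct response,
 * e.g.
 *
 *	uint16_t sender = FFA_SENDER(arg1);
 *	uint16_t receiver = FFA_RECEIVER(arg1);
 *
 *	swap_src_dst(&sender, &receiver);
 *	return ffa_msg_send_direct_resp(sender, receiver, status, 0, 0, 0, 0);
 *
 * which is what handle_partition_message() below does.
 */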

/*******************************************************************************
 * Wrapper function to send a direct request
 ******************************************************************************/
tsp_args_t ffa_msg_send_direct_req(uint16_t sender,
			      uint16_t receiver,
			      uint32_t arg3,
			      uint32_t arg4,
			      uint32_t arg5,
			      uint32_t arg6,
			      uint32_t arg7)
{
	uint32_t flags = 0;
	uint32_t src_dst_ids = (sender << FFA_DIRECT_MSG_SOURCE_SHIFT) |
			       (receiver << FFA_DIRECT_MSG_DESTINATION_SHIFT);

	/* Send Direct Request. */
	return tsp_smc(FFA_MSG_SEND_DIRECT_REQ_SMC64, src_dst_ids,
			flags, arg3, arg4, arg5, arg6, arg7);
}

/*******************************************************************************
 * Wrapper function to call FFA_RUN
 ******************************************************************************/
tsp_args_t ffa_run(uint16_t target, uint16_t vcpu)
{
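	/*
	 * FFA_RUN_TARGET() and FFA_RUN_VCPU() pack the target endpoint ID and
	 * the vCPU index into the single target-information argument expected
	 * by FFA_RUN.
	 */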
	uint32_t target_info = FFA_RUN_TARGET(target) | FFA_RUN_VCPU(vcpu);

	/* Invoke FFA_RUN on the target vCPU. */
	return tsp_smc(FFA_MSG_RUN, target_info,
			FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * Wrapper to handle BUSY and INTERRUPT error codes when sending a direct
 * request.
 ******************************************************************************/
tsp_args_t ffa_direct_req_wrapper(
	uint16_t sender, uint16_t receiver, uint32_t arg3,
	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
{
	tsp_args_t ret;

	/* Retry while the receiver reports the BUSY error code. */
	ret = ffa_msg_send_direct_req(sender, receiver, arg3, arg4, arg5, arg6, arg7);
	while (ret._regs[0] == FFA_ERROR && ret._regs[2] == FFA_ERROR_BUSY) {
		ret = ffa_msg_send_direct_req(sender, receiver, arg3, arg4, arg5, arg6, arg7);
	}

	/*
	 * The direct request was sent but we were interrupted; keep running
	 * the target until it completes.
	 */
	while (ret._regs[0] == FFA_INTERRUPT) {
		ret = ffa_run((ret._regs[1] >> FFA_DIRECT_MSG_SOURCE_SHIFT) & FFA_DIRECT_MSG_ENDPOINT_ID_MASK,
			       ret._regs[1] & FFA_DIRECT_MSG_ENDPOINT_ID_MASK);
	}

	return ret;
}
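
/*
 * Example (illustrative, with hypothetical identifiers 'own_id', 'dest_id'
 * and 'value'): relaying an echo request to another partition and reading
 * the response payload from w3, as ffa_test_relay() below does:
 *
 *	tsp_args_t ret = ffa_direct_req_wrapper(own_id, dest_id,
 *						FF_A_ECHO_MESSAGE, value,
 *						0, 0, 0);
 *	status = ret._regs[3];
 */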

/*******************************************************************************
 * Test Functions
 ******************************************************************************/
int ffa_test_relay(uint64_t arg0,
		   uint64_t arg1,
		   uint64_t arg2,
		   uint64_t arg3,
		   uint64_t arg4,
		   uint64_t arg5,
		   uint64_t arg6,
		   uint64_t arg7)
{
	tsp_args_t ffa_forward_result;
	uint32_t receiver = arg5;

	ffa_forward_result = ffa_direct_req_wrapper(FFA_SENDER(arg1), receiver,
						    FF_A_ECHO_MESSAGE, arg4,
						    0, 0, 0);
	return ffa_forward_result._regs[3];
}


/*******************************************************************************
 * Memory Management Helpers
 ******************************************************************************/
static char mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);
#define REGION_BUF_SIZE sizeof(mem_region_buffer)

bool memory_retrieve(struct mailbox *mb,
		     struct ffa_memory_region **retrieved, uint64_t handle,
		     ffa_id_t sender, ffa_id_t receiver,
		     uint32_t flags, uint32_t *frag_length, uint32_t *total_length)
{
	tsp_args_t ret;
	uint32_t descriptor_size;

	if (retrieved == NULL || mb == NULL) {
		ERROR("Invalid parameters!\n");
		return false;
	}

	/* Clear TX buffer. */
	memset(mb->send, 0, PAGE_SIZE);

	/* Clear local buffer. */
	memset(mem_region_buffer, 0, REGION_BUF_SIZE);

	/*
	 * TODO: Revise shareability attribute in function call
	 * below.
	 * https://lists.trustedfirmware.org/pipermail/hafnium/2020-June/000023.html
	 */
	descriptor_size = ffa_memory_retrieve_request_init(
	    mb->send, handle, sender, receiver, 0, flags,
	    FFA_DATA_ACCESS_RW,
	    FFA_INSTRUCTION_ACCESS_NX,
	    FFA_MEMORY_NORMAL_MEM,
	    FFA_MEMORY_CACHE_WRITE_BACK,
	    FFA_MEMORY_OUTER_SHAREABLE);

	ret = ffa_mem_retrieve_req(descriptor_size, descriptor_size);

	if (ffa_func_id(ret) == FFA_ERROR) {
		ERROR("Couldn't retrieve the memory page. Error: %x\n",
		      ffa_error_code(ret));
		return false;
	}

	/*
	 * The total_length and frag_length values are used to keep track of
	 * the state of the transaction: once the sum of frag_length over all
	 * fragments equals total_length, the memory transaction is complete.
	 */
	*total_length = ret._regs[1];
	*frag_length = ret._regs[2];

	/* Copy response to local buffer. */
	memcpy(mem_region_buffer, mb->recv, *frag_length);

	if (ffa_rx_release()) {
		ERROR("Failed to release buffer!\n");
		return false;
	}

	*retrieved = (struct ffa_memory_region *) mem_region_buffer;

	if ((*retrieved)->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
		VERBOSE("SPMC memory sharing operations support max of %u "
			"receivers!\n", MAX_MEM_SHARE_RECIPIENTS);
		return false;
	}

	VERBOSE("Memory Descriptor Retrieved!\n");

	return true;
}

/*******************************************************************************
 * This function handles memory management tests, currently share and lend.
 ******************************************************************************/
int test_memory_send(uint16_t sender, uint64_t handle, bool share)
{
	struct ffa_memory_region *m;
	struct ffa_composite_memory_region *composite;
	int ret, status = 0;
	unsigned int mem_attrs;
	char *ptr;
	ffa_id_t source = sender;
	uint32_t flags = share ? FFA_FLAG_SHARE_MEMORY : FFA_FLAG_LEND_MEMORY;
	uint32_t total_length, recv_length = 0;

	memory_retrieve(&mailbox, &m, handle, source, partition_id, flags,
			&recv_length, &total_length);

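	/*
	 * If the memory region description did not fit in a single fragment,
	 * keep calling FFA_MEM_FRAG_RX and appending each received fragment
	 * to the local copy until the full descriptor (total_length bytes)
	 * has been assembled.
	 */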
	while (total_length != recv_length) {
		tsp_args_t ffa_return;
		uint32_t frag_length;

		ffa_return = ffa_mem_frag_rx((uint32_t) handle, recv_length);
		if (ffa_return._regs[0] == FFA_ERROR) {
			WARN("TSP: failed to resume mem with handle %llx\n", handle);
			return -4;
		}
		frag_length = ffa_return._regs[3];

		memcpy(&mem_region_buffer[recv_length], mailbox.recv, frag_length);

		if (ffa_rx_release()) {
			ERROR("Failed to release buffer!\n");
			return false;
		}

		recv_length += frag_length;

		assert(recv_length <= total_length);
	}

	composite = ffa_memory_region_get_composite(m, 0);
	if (composite == NULL) {
		WARN("Failed to get composite descriptor!\n");
	}

	VERBOSE("Address: %p; page_count: %x %lx\n",
		composite->constituents[0].address,
		composite->constituents[0].page_count, PAGE_SIZE);

	/* This test is only concerned with RW permissions. */
	if (ffa_get_data_access_attr(
			m->receivers[0].receiver_permissions.permissions) !=
		FFA_DATA_ACCESS_RW) {
		ERROR(" %x != %x!\n", ffa_get_data_access_attr(
			m->receivers[0].receiver_permissions.permissions),
			FFA_DATA_ACCESS_RW);
		return -1;
	}

	mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;

	/* Only expecting to be sent memory from the Normal world, so map accordingly. */
	mem_attrs |= MT_NS;

	for (int i = 0; i < composite->constituent_count; i++) {
		ret = mmap_add_dynamic_region(
				(uint64_t)composite->constituents[i].address,
				(uint64_t)composite->constituents[i].address,
				composite->constituents[i].page_count * PAGE_SIZE,
				mem_attrs);
		if (ret != 0) {
			ERROR("Failed [%d] mmap_add_dynamic_region %d (%llx) (%lx) (%x)!\n", i, ret,
				(uint64_t)composite->constituents[i].address,
				composite->constituents[i].page_count * PAGE_SIZE,
				mem_attrs);
			return -2;
		}

		ptr = (char *) composite->constituents[i].address;

		/* Read initial magic number from memory region for validation purposes. */
		if (!i) {
			status = *ptr + 1;
		}

		/* Increment memory region for validation purposes. */
		++(*ptr);
	}

	for (int i = 0; i < composite->constituent_count; i++) {
		ret = mmap_remove_dynamic_region(
			(uint64_t)composite->constituents[i].address,
			composite->constituents[i].page_count * PAGE_SIZE);
		if (ret != 0) {
			ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
			return -3;
		}
	}

	if (!memory_relinquish((struct ffa_mem_relinquish *)mailbox.send,
				m->handle, partition_id)) {
		ERROR("Failed to relinquish memory region!\n");
		return -4;
	}

	return status;
}

/*******************************************************************************
 * This function handles partition messages. Exercised from the FFA Test Driver
 ******************************************************************************/
tsp_args_t *handle_partition_message(uint64_t arg0,
				     uint64_t arg1,
				     uint64_t arg2,
				     uint64_t arg3,
				     uint64_t arg4,
				     uint64_t arg5,
				     uint64_t arg6,
				     uint64_t arg7)
{
	uint16_t sender = FFA_SENDER(arg1);
	uint16_t receiver = FFA_RECEIVER(arg1);
	uint32_t status = -1;

	switch (arg3) {
	case FF_A_MEMORY_SHARE:
		INFO("TSP Tests: Memory Share Request--\n");
		status = test_memory_send(sender, arg4, true);
		break;
	case FF_A_MEMORY_LEND:
		INFO("TSP Tests: Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, false);
		break;
	case FF_A_RELAY_MESSAGE:
		INFO("TSP Tests: Relaying message--\n");
		status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
		break;
	case FF_A_ECHO_MESSAGE:
		INFO("TSP Tests: echo message--\n");
		status = arg4;
		break;
	default:
		INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
	}

	swap_src_dst(&sender, &receiver);
	return ffa_msg_send_direct_resp(sender, receiver, status, 0, 0, 0, 0);
}


/*******************************************************************************
 * This function implements the event loop for handling FF-A ABI invocations.
 ******************************************************************************/
tsp_args_t *tsp_event_loop(uint64_t arg0,
			   uint64_t arg1,
			   uint64_t arg2,
			   uint64_t arg3,
			   uint64_t arg4,
			   uint64_t arg5,
			   uint64_t arg6,
			   uint64_t arg7)
{
	uint64_t smc_fid = arg0;

	/* Panic if the SPMC did not forward an FF-A call. */
	if (!is_ffa_fid(smc_fid))
		panic();

	switch (smc_fid) {
	case FFA_INTERRUPT:
		/*
		 * IRQs were enabled upon re-entry into the TSP. The interrupt
		 * must have been handled by now. Return to the SPMC indicating
		 * the same.
		 */
		return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);

	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		/* Check if it is a framework message and handle accordingly. */
		if ((arg2 & FFA_DIRECT_FRAMEWORK_MSG_MASK)) {
			return handle_framework_message(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
		} else {
			return handle_partition_message(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
		}
	default:
		return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32, 1, 2, 3, 4, 0, 0, 0);
	}

	INFO("%s: Unsupported FF-A FID (0x%llx)\n", __func__, smc_fid);
	panic();
}
#endif /* SPMC_AT_EL3 */