/* SPDX-License-Identifier: BSD-2-Clause */
/*
 * Copyright (c) 2016-2020, Linaro Limited
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <arm32_macros.S>
#include <arm.h>
#include <asm.S>
#include <generated/asm-defines.h>
#include <keep.h>
#include <kernel/abort.h>
#include <kernel/cache_helpers.h>
#include <kernel/thread_defs.h>
#include <mm/core_mmu.h>

#include "thread_private.h"

	.syntax unified
	.arch_extension sec

	.macro cmp_spsr_user_mode reg:req
		/*
		 * We're only testing the lower four bits since 0x10 is
		 * always set in the mode field.
		 */
		tst	\reg, #0x0f
	.endm
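
/*
 * For reference: in the AArch32 mode field M[4:0], User mode is 0x10
 * with a zero low nibble, while the privileged modes have a non-zero
 * low nibble (for instance Supervisor is 0x13 and Abort is 0x17), so
 * "tst \reg, #0x0f" sets the Z flag exactly when \reg describes user
 * mode.
 */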

FUNC thread_set_abt_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_ABT
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_abt_sp

FUNC thread_set_und_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_UND
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_und_sp

FUNC thread_set_irq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_IRQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_irq_sp

FUNC thread_set_fiq_sp , :
UNWIND(	.cantunwind)
	mrs	r1, cpsr
	cps	#CPSR_MODE_FIQ
	mov	sp, r0
	msr	cpsr, r1
	bx	lr
END_FUNC thread_set_fiq_sp

FUNC thread_get_usr_sp , :
	mrs	r1, cpsr
	cpsid	aif
	cps	#CPSR_MODE_SYS
	mov	r0, sp
	msr	cpsr, r1
	bx	lr
END_FUNC thread_get_usr_sp
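
/*
 * The stack accessors above correspond to C-level declarations of the
 * form below (a sketch of the assumed prototypes, see thread_private.h
 * for the authoritative ones):
 *
 *	void thread_set_abt_sp(vaddr_t sp);
 *	void thread_set_und_sp(vaddr_t sp);
 *	void thread_set_irq_sp(vaddr_t sp);
 *	void thread_set_fiq_sp(vaddr_t sp);
 *	vaddr_t thread_get_usr_sp(void);
 *
 * In all of them the stack pointer is passed or returned in r0 per the
 * AAPCS.
 */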

/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
UNWIND(	.cantunwind)
	add	r12, r0, #(13 * 4)	/* Restore registers r0-r12 later */

	cps	#CPSR_MODE_SYS
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4

	cps	#CPSR_MODE_SVC
	ldr	r1, [r12], #4
	ldr	sp, [r12], #4
	ldr	lr, [r12], #4
	msr	spsr_fsxc, r1

	ldm	r12, {r1, r2}

	/*
	 * Switch to a mode other than SVC since we need to set the SPSR
	 * in order to return to the old state properly, and the state
	 * we're returning to may itself be SVC mode.
	 */
	cps	#CPSR_MODE_ABT
	cmp_spsr_user_mode r2
	mov	lr, r1
	msr	spsr_fsxc, r2
	ldm	r0, {r0-r12}
	movsne	pc, lr
	b	eret_to_user_mode
END_FUNC thread_resume
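
/*
 * For reference, the loads in thread_resume() above assume a struct
 * thread_ctx_regs layout of the form below (a sketch inferred from the
 * offsets used here, see the struct definition for the authoritative
 * layout):
 *
 *	struct thread_ctx_regs {
 *		uint32_t r[13];		// r0-r12
 *		uint32_t usr_sp;
 *		uint32_t usr_lr;
 *		uint32_t svc_spsr;
 *		uint32_t svc_sp;
 *		uint32_t svc_lr;
 *		uint32_t pc;
 *		uint32_t cpsr;
 *	};
 */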

/*
 * Disables asynchronous aborts, IRQ and FIQ and saves the state of the
 * thread from FIQ mode, which has banked r8-r12 registers. Returns the
 * original CPSR.
 */
LOCAL_FUNC thread_save_state_fiq , :
UNWIND(	.cantunwind)
	mov	r9, lr

	/*
	 * Uses the stack for temporary storage while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r8, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}

	cps     #CPSR_MODE_SYS
	stm	r0!, {r8-r12}
	str	sp, [r0], #4
	str	lr, [r0], #4

	cps     #CPSR_MODE_SVC
	mrs     r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	/* Back to FIQ mode */
	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r8			/* Return original CPSR */
	bx	r9
END_FUNC thread_save_state_fiq

/*
 * Disables asynchronous aborts, IRQ and FIQ, saves the state of the
 * thread and returns the original CPSR.
 */
FUNC thread_save_state , :
UNWIND(	.cantunwind)
	push	{r12, lr}
	/*
	 * Uses the stack for temporary storage while storing the needed
	 * context in the thread context struct.
	 */

	mrs	r12, cpsr

	cpsid	aif			/* Disable Async abort, IRQ and FIQ */

	push	{r4-r7}
	push	{r0-r3}

	mov	r5, r12			/* Save CPSR in a preserved register */
	mrs	r6, cpsr		/* Save current CPSR */

	bl	thread_get_ctx_regs

	pop	{r1-r4}			/* r0-r3 pushed above */
	stm	r0!, {r1-r4}
	pop	{r1-r4}			/* r4-r7 pushed above */
	stm	r0!, {r1-r4}
	stm	r0!, {r8-r11}

	pop	{r12, lr}
	stm	r0!, {r12}

        cps     #CPSR_MODE_SYS
	str	sp, [r0], #4
	str	lr, [r0], #4

        cps     #CPSR_MODE_SVC
        mrs     r1, spsr
	str	r1, [r0], #4
	str	sp, [r0], #4
	str	lr, [r0], #4

	orr	r6, r6, #ARM32_CPSR_FIA	/* Disable Async abort, IRQ and FIQ */
	msr	cpsr, r6		/* Restore mode */

	mov	r0, r5			/* Return original CPSR */
	bx	lr
END_FUNC thread_save_state

/*
 * unsigned long thread_smc(unsigned long func_id, unsigned long a1,
 *			    unsigned long a2, unsigned long a3)
 */
FUNC thread_smc , :
	push	{r4-r7}
UNWIND(	.save	{r4-r7})
	smc	#0
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smc
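
/*
 * Illustrative only: a minimal C-level sketch of how thread_smc() might
 * be used to issue a fast SMC following the SMC Calling Convention.
 * The function ID below is a made-up placeholder, not an actual OP-TEE
 * or SMCCC constant.
 *
 *	#define EXAMPLE_FAST_SMC_FID	0x84000000	// hypothetical
 *
 *	static unsigned long query_monitor(void)
 *	{
 *		// func_id in r0, a1-a3 in r1-r3, result returned in r0
 *		return thread_smc(EXAMPLE_FAST_SMC_FID, 0, 0, 0);
 *	}
 */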

/* void thread_smccc(struct thread_smc_args *arg_res) */
FUNC thread_smccc , :
	push	{r4-r7}
	push	{r0, lr}
	ldm	r0, {r0-r7}
#ifdef CFG_CORE_SEL2_SPMC
	hvc	#0
#else
	smc	#0
#endif
	pop	{r12, lr}
	stm	r12, {r0-r7}
	pop	{r4-r7}
	bx	lr
END_FUNC thread_smccc
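
/*
 * Illustrative only: thread_smccc() loads all eight SMCCC arguments from
 * *arg_res and writes the results back into the same struct. A usage
 * sketch, assuming struct thread_smc_args carries members a0..a7 (see
 * its definition for the authoritative layout):
 *
 *	struct thread_smc_args args = {
 *		.a0 = EXAMPLE_FID,	// hypothetical function ID
 *	};
 *
 *	thread_smccc(&args);
 *	// args.a0..a7 now hold the values returned in r0-r7
 */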

FUNC thread_init_vbar , :
	/* Set vector (VBAR) */
	write_vbar r0
	bx	lr
END_FUNC thread_init_vbar
DECLARE_KEEP_PAGER thread_init_vbar

/*
 * Below are low level routines handling entry and return from user mode.
 *
 * thread_enter_user_mode() saves all registers that user mode can change
 * so that kernel mode can restore the needed registers when resuming
 * execution after the call to thread_enter_user_mode() has returned.
 * thread_enter_user_mode() doesn't return directly since it enters user
 * mode instead; it's thread_unwind_user_mode() that does the returning
 * by restoring the registers saved by thread_enter_user_mode().
 *
 * There are three ways for thread_enter_user_mode() to return to its
 * caller: the user TA calls _utee_return, the user TA calls _utee_panic,
 * or an abort occurs.
 *
 * Calls to _utee_return or _utee_panic are handled as:
 * __thread_svc_handler() -> thread_svc_handler() -> tee_svc_do_call(),
 * which calls syscall_return() or syscall_panic().
 *
 * These function calls return normally, except thread_svc_handler()
 * which is an exception handling routine and therefore reads the return
 * address and SPSR to restore from the stack. syscall_return() and
 * syscall_panic() change the return address and SPSR used by
 * thread_svc_handler() so that, instead of returning into user mode as
 * with other syscalls, it returns into thread_unwind_user_mode() in
 * kernel mode. When thread_svc_handler() returns, the stack pointer is
 * at the point where thread_enter_user_mode() left it, so this is where
 * thread_unwind_user_mode() can operate.
 *
 * Aborts are handled in a similar way, but by thread_abort_handler()
 * instead: when the pager sees an abort from user mode that can't be
 * handled, it updates the SPSR and return address used by
 * thread_abort_handler() so that it returns into
 * thread_unwind_user_mode() instead.
 */

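/*
 * A rough C-level sketch of the normal syscall round trip described
 * above. This is illustrative only and glosses over the real call
 * chain; the body below is an assumption, not the exact code.
 *
 *	uint32_t thread_enter_user_mode(...)
 *	{
 *		uint32_t exit_status0 = 0, exit_status1 = 0;
 *		uint32_t rc;
 *
 *		// Enters user mode and does not return by itself ...
 *		rc = __thread_enter_user_mode(regs, &exit_status0,
 *					      &exit_status1);
 *		// ... execution resumes here only once syscall_return(),
 *		// syscall_panic() or the abort path has redirected the
 *		// exception return into thread_unwind_user_mode().
 *		return rc;
 *	}
 */
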
/*
 * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
 *				     uint32_t *exit_status0,
 *				     uint32_t *exit_status1);
 *
 * This function depends on being called with exceptions masked.
 */
FUNC __thread_enter_user_mode , :
UNWIND(	.cantunwind)
	/*
	 * Save all registers to allow syscall_return() to resume execution
	 * as if this function had returned. This is also used in
	 * syscall_panic().
	 *
	 * If the stack usage of this function is changed
	 * thread_unwind_user_mode() has to be updated.
	 */
	push    {r4-r12,lr}

	/*
	 * Save old user sp and set new user sp.
	 */
	cps	#CPSR_MODE_SYS
	mov	r4, sp
	ldr	sp, [r0, #THREAD_CTX_REGS_USR_SP]
	cps	#CPSR_MODE_SVC

	push	{r1, r2, r4, r5}

	/* Prepare user mode entry via eret_to_user_mode */
	ldr	lr, [r0, #THREAD_CTX_REGS_PC]
	ldr	r4, [r0, #THREAD_CTX_REGS_CPSR]
	msr     spsr_fsxc, r4

	ldm	r0, {r0-r12}

	b	eret_to_user_mode
END_FUNC __thread_enter_user_mode

/*
 * void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
 *              uint32_t exit_status1);
 * See description in thread.h
 */
FUNC thread_unwind_user_mode , :
UNWIND(	.cantunwind)
	/* Match push {r1, r2, r4, r5} in __thread_enter_user_mode() */
	pop	{r4-r7}
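	/*
	 * After the pop above (values pushed by __thread_enter_user_mode()):
	 * r4 = the saved r1, that is, the exit_status0 pointer
	 * r5 = the saved r2, that is, the exit_status1 pointer
	 * r6 = the saved r4, that is, the old user stack pointer
	 * r7 = the saved r5, not used below
	 */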
	str	r1, [r4]
	str	r2, [r5]

	/* Restore old user sp */
	cps	#CPSR_MODE_SYS
	mov	sp, r6
	cps	#CPSR_MODE_SVC

	/* Match push {r4-r12,lr} in __thread_enter_user_mode() */
	pop     {r4-r12,pc}
END_FUNC thread_unwind_user_mode

	.macro maybe_restore_mapping
		/*
		 * This macro is a bit hard to read due to all the ifdefs:
		 * we're testing for two different configs, which makes
		 * four different combinations.
		 *
		 * - With LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 * - Without LPAE, and then some extra code if with
		 *   CFG_CORE_UNMAP_CORE_AT_EL0
		 */

		/*
		 * At this point we can't rely on any memory being writable
		 * yet, so we're using TPIDRPRW to store r0 and, if with
		 * LPAE or CFG_CORE_UNMAP_CORE_AT_EL0, TPIDRURO to store
		 * r1 too.
		 */
		write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		write_tpidruro r1
#endif

#ifdef CFG_WITH_LPAE
		read_ttbr0_64bit r0, r1
		tst	r1, #BIT(TTBR_ASID_SHIFT - 32)
		beq	11f

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		/*
		 * Update the mapping to use the full kernel mode mapping.
		 * Since the translation table could reside above 4GB we'll
		 * have to use 64-bit arithmetic.
		 */
		subs	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
		sbc	r1, r1, #0
#endif
		bic	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
		write_ttbr0_64bit r0, r1
		isb

#else /*!CFG_WITH_LPAE*/
		read_contextidr r0
		tst	r0, #1
		beq	11f

		/* Update the mapping to use the full kernel mode mapping. */
		bic	r0, r0, #1
		write_contextidr r0
		isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		read_ttbcr r0
		bic	r0, r0, #TTBCR_PD1
		write_ttbcr r0
		isb
#endif

#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
		ldr	r0, =thread_user_kcode_offset
		ldr	r0, [r0]
		read_vbar r1
		add	r1, r1, r0
		write_vbar r1
		isb

	11:	/*
		 * The PC is adjusted unconditionally to guard against the
		 * case where an FIQ was taken just before the "cpsid aif"
		 * above.
		 */
		ldr	r0, =22f
		bx	r0
	22:
#else
	11:
#endif
		read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
		read_tpidruro r1
#endif
	.endm
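
/*
 * For reference (a note on the LPAE path above): the 64-bit TTBR0 is
 * handled as the register pair {r0, r1} where r1 holds bits 63:32, so
 * the ASID bit tested and cleared is BIT(TTBR_ASID_SHIFT - 32) of the
 * high word. Since the LPAE ASID field starts at bit 48, that is bit 16
 * of r1. The non-LPAE path instead tests bit 0 of CONTEXTIDR (the
 * lowest ASID bit) in the same role.
 */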

/* The handler of native interrupts. */
.macro	native_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	/*
	 * FIQ and IRQ have a +4 offset for lr compared to the preferred
	 * return address.
	 */
	sub     lr, lr, #4

	/*
	 * We're always saving {r0-r3}. In IRQ mode we're saving r12 also.
	 * In FIQ mode we're also saving the banked FIQ registers {r8-r12},
	 * because the secure monitor doesn't save those. The treatment of
	 * the banked FIQ registers is somewhat analogous to the lazy save
	 * of VFP registers.
	 */
	.ifc	\mode\(),fiq
	push	{r0-r3, r8-r12, lr}
	.else
	push	{r0-r3, r12, lr}
	.endif

	bl	thread_check_canaries
	bl	itr_core_handler

	mrs	r0, spsr
	cmp_spsr_user_mode r0

	.ifc	\mode\(),fiq
	pop	{r0-r3, r8-r12, lr}
	.else
	pop	{r0-r3, r12, lr}
	.endif

	movsne	pc, lr
	b	eret_to_user_mode
.endm

/* The handler of foreign interrupts. */
.macro foreign_intr_handler mode:req
	cpsid	aif
	maybe_restore_mapping

	sub	lr, lr, #4
	push	{r12}

	.ifc	\mode\(),fiq
	/*
	 * If a foreign (non-secure) interrupt is received as an FIQ we
	 * need to check whether we're in a state that can be saved or
	 * whether we need to mask the interrupt and handle it later.
	 *
	 * The window when this is needed is quite narrow, it's between
	 * entering the exception vector and until the "cpsid" instruction
	 * of the handler has been executed.
	 *
	 * Currently we can save the state properly if the FIQ is received
	 * while in user or svc (kernel) mode.
	 *
	 * If we're returning to abort, undef or irq mode we're returning
	 * with the mapping restored. This is OK since the handler we're
	 * returning to will restore the reduced mapping before it
	 * eventually returns to user mode.
	 */
	mrs	r12, spsr
	and	r12, r12, #ARM32_CPSR_MODE_MASK
	cmp	r12, #ARM32_CPSR_MODE_USR
	cmpne	r12, #ARM32_CPSR_MODE_SVC
	beq	1f
	mrs	r12, spsr
	orr	r12, r12, #ARM32_CPSR_F
	msr	spsr_fsxc, r12
	pop	{r12}
	movs	pc, lr
1:
	.endif

	push	{lr}

	.ifc	\mode\(),fiq
	bl	thread_save_state_fiq
	.else
	bl	thread_save_state
	.endif

#ifdef CFG_CORE_WORKAROUND_NSITR_CACHE_PRIME
	/*
	 * Prevent leaking information about which cache entries have
	 * been used. We're relying on the secure monitor/dispatcher to
	 * take care of the BTB.
	 */
	mov	r0, #DCACHE_OP_CLEAN_INV
	bl	dcache_op_louis
	write_iciallu
#endif

	/*
	 * Use SP_abt to update the core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_TMP;
	 */
	cps     #CPSR_MODE_ABT
	ldr     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl     r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr     r1, r1, #THREAD_CLF_TMP
	str     r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	.ifc    \mode\(),fiq
	cps     #CPSR_MODE_FIQ
	.else
	cps     #CPSR_MODE_IRQ
	.endif

	mov	r0, #THREAD_FLAGS_EXIT_ON_FOREIGN_INTR
	mrs	r1, spsr
	pop	{r2}
	pop	{r12}
	blx	thread_state_suspend

	/*
	 * Switch to SVC mode and copy the current stack pointer, since it
	 * already is the tmp stack.
	 */
	mov	r1, sp
	cps	#CPSR_MODE_SVC
	mov	sp, r1

	/* Passing thread index in r0 */
	b	thread_foreign_intr_exit
.endm
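
/*
 * A note on the core local flags idiom used above and in the abort
 * handler below: shifting the current flags left by
 * THREAD_CLF_SAVED_SHIFT stacks the previous state in the upper bits
 * while the low bits describe the new state (THREAD_CLF_TMP here).
 * The previous state is recovered later by shifting right by the same
 * amount, as done before returning in __thread_abort_common.
 */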

FUNC thread_excp_vect , :, align=32
UNWIND(	.cantunwind)
	b	.			/* Reset			*/
	b	__thread_und_handler	/* Undefined instruction	*/
	b	__thread_svc_handler	/* System call			*/
	b	__thread_pabort_handler	/* Prefetch abort		*/
	b	__thread_dabort_handler	/* Data abort			*/
	b	.			/* Reserved			*/
	b	__thread_irq_handler	/* IRQ				*/
	b	__thread_fiq_handler	/* FIQ				*/
#ifdef CFG_CORE_WORKAROUND_SPECTRE_BP_SEC
	.macro vector_prologue_spectre
		/*
		 * This depends on SP being 8 byte aligned, that is, the
		 * lowest three bits in SP are zero.
		 *
		 * To avoid unexpected speculation we need to invalidate
		 * the branch predictor before we do the first branch. It
		 * doesn't matter whether it's a conditional or an
		 * unconditional branch, speculation can still occur.
		 *
		 * The idea is to form a specific bit pattern in the lowest
		 * three bits of SP depending on which entry in the vector
		 * we enter via.  This is done by adding 1 to SP in each
		 * entry but the last.
		 */
		add	sp, sp, #1	/* 7:111 Reset			*/
		add	sp, sp, #1	/* 6:110 Undefined instruction	*/
		add	sp, sp, #1	/* 5:101 Secure monitor call	*/
		add	sp, sp, #1	/* 4:100 Prefetch abort		*/
		add	sp, sp, #1	/* 3:011 Data abort		*/
		add	sp, sp, #1	/* 2:010 Reserved		*/
		add	sp, sp, #1	/* 1:001 IRQ			*/
		cpsid   aif		/* 0:000 FIQ			*/
	.endm
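
	/*
	 * For reference (a worked example of the dispatch at label 1
	 * below): an exception entering via the IRQ slot falls through
	 * one "add" before the "cpsid aif", so SP ends up with 0b001 in
	 * its low bits. At label 1 those bits are moved into r0 and
	 * cleared from SP, and "add pc, pc, r0, LSL #3" (with PC reading
	 * as the address of that instruction plus 8) skips r0 entries of
	 * the 8-byte dispatch table that follows the nop, landing on the
	 * "read_tpidrprw r0; b __thread_irq_handler" pair in this
	 * example.
	 */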

        .balign	32
	.global thread_excp_vect_workaround_a15
thread_excp_vect_workaround_a15:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/*
	 * Invalidate the branch predictor for the current processor.
	 * For Cortex-A8 ACTLR[6] has to be set to 1 for BPIALL to be
	 * effective.
	 * Note that the BPIALL instruction is not effective in
	 * invalidating the branch predictor on Cortex-A15. For that CPU,
	 * set ACTLR[0] to 1 during early processor initialisation, and
	 * invalidate the branch predictor by performing an ICIALLU
	 * instruction. See also:
	 * https://github.com/ARM-software/arm-trusted-firmware/wiki/Arm-Trusted-Firmware-Security-Advisory-TFV-6#variant-2-cve-2017-5715
	 */
	write_iciallu
	isb
	b	1f

        .balign	32
	.global thread_excp_vect_workaround
thread_excp_vect_workaround:
	vector_prologue_spectre
	write_tpidrprw r0
	mrs	r0, spsr
	cmp_spsr_user_mode r0
	bne	1f
	/* Invalidate the branch predictor for the current processor. */
	write_bpiall
	isb

1:	and	r0, sp, #(BIT(0) | BIT(1) | BIT(2))
	bic	sp, sp, #(BIT(0) | BIT(1) | BIT(2))
	add	pc, pc, r0, LSL #3
	nop

	read_tpidrprw r0
	b	__thread_fiq_handler	/* FIQ				*/
	read_tpidrprw r0
	b	__thread_irq_handler	/* IRQ				*/
	read_tpidrprw r0
	b	.			/* Reserved			*/
	read_tpidrprw r0
	b	__thread_dabort_handler	/* Data abort			*/
	read_tpidrprw r0
	b	__thread_pabort_handler	/* Prefetch abort		*/
	read_tpidrprw r0
	b	__thread_svc_handler	/* System call			*/
	read_tpidrprw r0
	b	__thread_und_handler	/* Undefined instruction	*/
	read_tpidrprw r0
	b	.			/* Reset			*/
#endif /*CFG_CORE_WORKAROUND_SPECTRE_BP_SEC*/

__thread_und_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	mrs	r1, spsr
	tst	r1, #CPSR_T
	subne	lr, lr, #2
	subeq	lr, lr, #4
	mov	r0, #ABORT_TYPE_UNDEF
	b	__thread_abort_common

__thread_dabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #8
	mov	r0, #ABORT_TYPE_DATA
	b	__thread_abort_common

__thread_pabort_handler:
	cpsid	aif
	maybe_restore_mapping
	strd	r0, r1, [sp, #THREAD_CORE_LOCAL_R0]
	sub	lr, lr, #4
	mov	r0, #ABORT_TYPE_PREFETCH

__thread_abort_common:
	/*
	 * At this label:
	 * cpsr is in undef or abort mode
	 * sp is still pointing to the struct thread_core_local belonging
	 * to this core.
	 * {r0, r1} are saved in the struct thread_core_local pointed to
	 * by sp
	 * {r2-r11, ip} are untouched.
	 * r0 holds the first argument for abort_handler()
	 */

	/*
	 * Update core local flags.
	 * flags = (flags << THREAD_CLF_SAVED_SHIFT) | THREAD_CLF_ABORT;
	 */
	ldr	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsl	r1, r1, #THREAD_CLF_SAVED_SHIFT
	orr	r1, r1, #THREAD_CLF_ABORT

	/*
	 * Select stack and update flags accordingly
	 *
	 * Normal case:
	 * If the abort stack is unused select that.
	 *
	 * Fatal error handling:
	 * If we're already using the abort stack, as noted by bit
	 * (THREAD_CLF_SAVED_SHIFT + THREAD_CLF_ABORT_SHIFT) in the flags
	 * field, we select the temporary stack instead to be able to
	 * make a stack trace of the abort in abort mode.
	 *
	 * r1 is used as a temporary stack pointer until we've switched
	 * to system mode.
	 */
	tst	r1, #(THREAD_CLF_ABORT << THREAD_CLF_SAVED_SHIFT)
	orrne	r1, r1, #THREAD_CLF_TMP /* flags |= THREAD_CLF_TMP; */
	str	r1, [sp, #THREAD_CORE_LOCAL_FLAGS]
	ldrne	r1, [sp, #THREAD_CORE_LOCAL_TMP_STACK_VA_END]
	ldreq	r1, [sp, #THREAD_CORE_LOCAL_ABT_STACK_VA_END]

	/*
	 * Store registers on the stack matching struct thread_abort_regs,
	 * starting from the end of the struct:
	 * first {r2-r11, ip},
	 * then the previously saved {r0, r1},
	 * then {pad, spsr, elr}.
	 * After this only {usr_sp, usr_lr} are missing in the struct.
	 */
	stmdb	r1!, {r2-r11, ip}	/* Push on the selected stack */
	ldrd	r2, r3, [sp, #THREAD_CORE_LOCAL_R0]
	/* Push the original {r0-r1} on the selected stack */
	stmdb	r1!, {r2-r3}
	mrs	r3, spsr
	/* Push {pad, spsr, elr} on the selected stack */
	stmdb	r1!, {r2, r3, lr}

	cps	#CPSR_MODE_SYS
	str	lr, [r1, #-4]!
	str	sp, [r1, #-4]!
	mov	sp, r1

	bl	abort_handler

	mov	ip, sp
	ldr	sp, [ip], #4
	ldr	lr, [ip], #4

	/*
	 * Even if we entered via CPSR_MODE_UND, we are returning via
	 * CPSR_MODE_ABT. It doesn't matter as lr and spsr are assigned
	 * here.
	 */
	cps	#CPSR_MODE_ABT
	ldm	ip!, {r0, r1, lr}	/* r0 is pad */
	msr	spsr_fsxc, r1

	/* Update core local flags */
	ldr	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]
	lsr	r0, r0, #THREAD_CLF_SAVED_SHIFT
	str	r0, [sp, #THREAD_CORE_LOCAL_FLAGS]

	cmp_spsr_user_mode r1
	ldm	ip, {r0-r11, ip}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_abort_common */

__thread_svc_handler:
	cpsid	aif

	maybe_restore_mapping

	push	{r0-r7, lr}
	mrs	r0, spsr
	push	{r0}
	mov	r0, sp
	bl	thread_svc_handler
	cpsid	aif	/* In case something was unmasked */
	pop	{r0}
	msr	spsr_fsxc, r0
	cmp_spsr_user_mode r0
	pop	{r0-r7, lr}
	movsne	pc, lr
	b	eret_to_user_mode
	/* end thread_svc_handler */

__thread_fiq_handler:
#if defined(CFG_ARM_GICV3)
	foreign_intr_handler	fiq
#else
	native_intr_handler	fiq
#endif
	/* end thread_fiq_handler */

__thread_irq_handler:
#if defined(CFG_ARM_GICV3)
	native_intr_handler	irq
#else
	foreign_intr_handler	irq
#endif
	/* end thread_irq_handler */

	/*
	 * Returns to user mode.
	 * Expects to be jumped to with lr holding the user space address
	 * to return to and spsr holding the desired CPSR. Asynchronous
	 * aborts, IRQ and FIQ should be masked.
	 */
eret_to_user_mode:
	write_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	write_tpidruro r1
#endif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r0, =thread_user_kcode_offset
	ldr	r0, [r0]
	read_vbar r1
	sub	r1, r1, r0
	write_vbar r1
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r1, =1f
	sub	r1, r1, r0
	bx	r1
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r0, r1
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r0, r0, #CORE_MMU_BASE_TABLE_OFFSET
#endif
	/* switch to user ASID */
	orr	r1, r1, #BIT(TTBR_ASID_SHIFT - 32)
	write_ttbr0_64bit r0, r1
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r0
	orr	r0, r0, #TTBCR_PD1
	write_ttbcr r0
	isb
#endif
	read_contextidr r0
	orr	r0, r0, #BIT(0)
	write_contextidr r0
	isb
#endif /*!CFG_WITH_LPAE*/

	read_tpidrprw r0
#if defined(CFG_CORE_UNMAP_CORE_AT_EL0) || defined(CFG_WITH_LPAE)
	read_tpidruro r1
#endif

	movs	pc, lr

	/*
	 * void icache_inv_user_range(void *addr, size_t size);
	 *
	 * This function has to execute with the user space ASID active,
	 * which means executing with the reduced mapping, so the code
	 * needs to be located here together with the vector.
	 */
	.global icache_inv_user_range
	.type icache_inv_user_range , %function
icache_inv_user_range:
	push	{r4-r7}

	/* Mask all exceptions */
	mrs	r4, cpsr	/* This register must be preserved */
	cpsid	aif

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	ldr	r2, =thread_user_kcode_offset
	ldr	r2, [r2]
	read_vbar r5		/* This register must be preserved */
	sub	r3, r5, r2
	write_vbar r3
	isb

	/* Jump into the reduced mapping before the full mapping is removed */
	ldr	r3, =1f
	sub	r3, r3, r2
	bx	r3
1:
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/

#ifdef CFG_WITH_LPAE
	read_ttbr0_64bit r6, r7	/* These registers must be preserved */
	/* switch to user ASID */
	orr	r3, r7, #BIT(TTBR_ASID_SHIFT - 32)
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	add	r2, r6, #CORE_MMU_BASE_TABLE_OFFSET
	write_ttbr0_64bit r2, r3
#else
	write_ttbr0_64bit r6, r3
#endif
	isb
#else /*!CFG_WITH_LPAE*/
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	read_ttbcr r6	/* This register must be preserved */
	orr	r2, r6, #TTBCR_PD1
	write_ttbcr r2
	isb
#endif /*CFG_CORE_UNMAP_CORE_AT_EL0*/
	read_contextidr r7	/* This register must be preserved */
	orr	r2, r7, #BIT(0)
	write_contextidr r2
	isb
#endif /*!CFG_WITH_LPAE*/

	/*
	 * Do the actual icache invalidation
	 */

	/* Calculate minimum icache line size, result in r2 */
	read_ctr r3
	and     r3, r3, #CTR_IMINLINE_MASK
	mov     r2, #CTR_WORD_SIZE
	lsl     r2, r2, r3
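
	/*
	 * For reference (a worked example): CTR.IminLine holds log2 of
	 * the number of words in the smallest instruction cache line, so
	 * the line size in bytes is CTR_WORD_SIZE << IminLine with
	 * CTR_WORD_SIZE being the 4-byte word size. With IminLine = 4
	 * that gives 4 << 4 = 64-byte lines, which is the stride used by
	 * the invalidation loop below.
	 */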

	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	write_icimvau r0
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b

	/* Invalidate entire branch predictor array inner shareable */
	write_bpiallis

	dsb	ishst
	isb

#ifdef CFG_WITH_LPAE
	write_ttbr0_64bit r6, r7
	isb
#else /*!CFG_WITH_LPAE*/
	write_contextidr r7
	isb
#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_ttbcr r6
	isb
#endif
#endif /*!CFG_WITH_LPAE*/

#ifdef CFG_CORE_UNMAP_CORE_AT_EL0
	write_vbar r5
	isb
	/*
	 * The PC is adjusted unconditionally to guard against the
	 * case where an FIQ was taken just before the "cpsid aif"
	 * above.
	 */
	ldr	r0, =1f
	bx	r0
1:
#endif

	msr	cpsr_fsxc, r4	/* Restore exceptions */
	pop	{r4-r7}
	bx	lr		/* End of icache_inv_user_range() */

	/*
	 * Make sure that literals are placed before the
	 * thread_excp_vect_end label.
	 */
	.pool
	.global thread_excp_vect_end
thread_excp_vect_end:
END_FUNC thread_excp_vect
