/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <lib/xlat_tables/xlat_tables_defs.h>

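/*
 * Mask that clears the low-order bits of an address so that it points to the
 * start of its page; used by the PIE fixup code below to derive the
 * page-aligned run-time base address.
 */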
#define PAGE_START_MASK		~(PAGE_SIZE_MASK)

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 *  or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 *  Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable the Asynchronous data abort now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow access to Advanced SIMD and floating
	 *  point features from both Security states.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 *  to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow full access from PL0 and PL1 to
	 *  floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 *  from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by soft-float toolchains.
	 *  ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
	 *  hard-float toolchain variants; avoid compiling the code below with
	 *  a soft-float toolchain, as the "vmsr" instruction will not be
	 *  recognised.
	 * ---------------------------------------------------------------------
	 */
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 *  Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 *  in Secure state. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | SDCR_SCCD_BIT)
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that event counter overflow, which is
	 *  recorded in PMOVSCLR[0-30], occurs on the increment that changes
	 *  PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU is implemented.
	 *  This bit is RES0 in versions of the architecture earlier than
	 *  ARMv8.5; setting it to 1 has no effect on them.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that cycle counter overflow, which is
	 *  recorded in PMOVSCLR[31], occurs on the increment that changes
	 *  PMCCNTR[63] from 1 to 0.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3
	 */
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some actions to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 *
 *  _pie_fixup_size:
 *	Size of the memory region in which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
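/* -----------------------------------------------------------------------------
 * For illustration only, a BL1-style use of this macro could look roughly as
 * below. The parameter values and the "bl1_exceptions" vector table name are
 * placeholders chosen for the example, not a definitive invocation:
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=bl1_exceptions		\
 *		_pie_fixup_size=0
 * -----------------------------------------------------------------------------
 */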
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 *  Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr	r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
				SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

#if DISABLE_MTPMU
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address: if it is not zero,
		 * this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table, but
		 * only once, during the primary core's cold boot path.
		 *
		 * The compile-time base address required for the fixup is
		 * calculated using the "pie_fixup" label, which lies within
		 * the first page of the image.
		 * ------------------------------------------------------------
		 */
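		/*
		 * r0 below ends up holding the run-time address of the
		 * "pie_fixup" label rounded down to its page start, and r1 the
		 * end of the fixup region (r0 + \_pie_fixup_size); these are
		 * the (start, end) arguments passed to fixup_gdt_reloc().
		 */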
	pie_fixup:
		ldr	r0, =pie_fixup
		ldr	r1, =PAGE_START_MASK
		and	r0, r0, r1
		mov_imm	r1, \_pie_fixup_size
		add	r1, r1, r0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -----------------------------------------------------------------
		 */
		ldr	r0, =__RW_START__
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif

		/*
		 * zeromem clobbers r12, which holds the previous BL stage's
		 * arg3, so preserve it in r7 across the calls below.
		 */
		mov	r7, r12
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_END__
		sub	r1, r1, r0
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_END_UNALIGNED__
		sub	r1, r1, r0
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
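		/*
		 * memcpy4() is called below with r0 = destination
		 * (__DATA_RAM_START__), r1 = source (__DATA_ROM_START__) and
		 * r2 = size of the data section in bytes.
		 */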
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_RAM_END__
		sub	r2, r2, r0
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

#endif /* EL3_COMMON_MACROS_S */