/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <common/bl_common.h>
#include <lib/el3_runtime/cpu_data.h>

#if defined(IMAGE_BL1) || defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
	/*
	 * The reset handler common to all platforms. After a matching
	 * cpu_ops structure entry is found, the corresponding reset_handler
	 * in the cpu_ops is invoked. The reset handler is invoked very early
	 * in the boot sequence and it is assumed that we can clobber r0 - r10
	 * without the need to follow AAPCS.
	 * Clobbers: r0 - r10
	 */
	.globl	reset_handler
func reset_handler
	mov	r8, lr

	/* The plat_reset_handler can clobber r0 - r7 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer (clobbers: r0 - r5) */
	bl	get_cpu_ops_ptr

#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	r1, [r0, #CPU_RESET_FUNC]
	cmp	r1, #0
	mov	lr, r8
	bxne	r1
	bx	lr
endfunc reset_handler
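/*
 * For reference, the reset dispatch above behaves roughly like the C sketch
 * below. The type and helper names are hypothetical; the real cpu_ops layout
 * is generated by cpu_macros.S and accessed through the CPU_* offsets.
 *
 *	#include <stddef.h>
 *
 *	struct cpu_ops_sketch {
 *		unsigned long midr;		// slot at CPU_MIDR
 *		void (*reset_func)(void);	// slot at CPU_RESET_FUNC
 *	};
 *
 *	extern struct cpu_ops_sketch *get_cpu_ops_ptr_sketch(void);
 *	extern void plat_reset_handler_sketch(void);
 *
 *	void reset_handler_sketch(void)
 *	{
 *		plat_reset_handler_sketch();
 *		struct cpu_ops_sketch *ops = get_cpu_ops_ptr_sketch();
 *		if (ops->reset_func != NULL)
 *			ops->reset_func();	// tail-call via "bxne r1"
 *	}
 */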

#endif

#ifdef IMAGE_BL32 /* The core and cluster power down functions are needed only in BL32 */
	/*
	 * void prepare_cpu_pwr_dwn(unsigned int power_level)
	 *
	 * Prepare the CPU for power down; common to all platforms. The
	 * function takes the power domain level to be powered down as its
	 * parameter. After the cpu_ops pointer is retrieved from cpu_data,
	 * the handler for the requested power level is called.
	 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS - 1, we call
	 * the power down handler for the highest implemented power level.
	 */
	mov	r2, #(CPU_MAX_PWR_DWN_OPS - 1)
	cmp	r0, r2
	movhi	r0, r2

	push	{r0, lr}
	bl	_cpu_data
	pop	{r2, lr}

	ldr	r0, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	r1, #CPU_PWR_DWN_OPS
	add	r1, r1, r2, lsl #2
	ldr	r1, [r0, r1]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	bx	r1
endfunc prepare_cpu_pwr_dwn
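/*
 * The handler selection above is equivalent to indexing an array of function
 * pointers that starts at offset CPU_PWR_DWN_OPS inside cpu_ops, with the
 * requested level clamped to the last implemented entry. A minimal C sketch,
 * using hypothetical names (the real table is laid out by cpu_macros.S):
 *
 *	#define MAX_PWR_DWN_OPS_SKETCH	2U	// assumed CPU_MAX_PWR_DWN_OPS
 *
 *	struct pwr_dwn_ops_sketch {
 *		void (*pwr_dwn[MAX_PWR_DWN_OPS_SKETCH])(void);
 *	};
 *
 *	void prepare_cpu_pwr_dwn_sketch(struct pwr_dwn_ops_sketch *ops,
 *					unsigned int level)
 *	{
 *		if (level > (MAX_PWR_DWN_OPS_SKETCH - 1U))
 *			level = MAX_PWR_DWN_OPS_SKETCH - 1U;
 *		ops->pwr_dwn[level]();	// "add r1, r1, r2, lsl #2; ldr; bx r1"
 *	}
 */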
	/*
	 * Initializes the cpu_ops_ptr if not already initialized
	 * in cpu_data. This must only be called after the data cache
	 * is enabled. AAPCS is followed.
	 */
	.globl	init_cpu_ops
func init_cpu_ops
	push	{r4 - r6, lr}
	bl	_cpu_data
	mov	r6, r0
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
	cmp	r1, #0
	bne	1f
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	str	r0, [r6, #CPU_DATA_CPU_OPS_PTR]!
1:
	pop	{r4 - r6, pc}
endfunc init_cpu_ops
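/*
 * init_cpu_ops performs a lazy, per-CPU initialization: the cached pointer in
 * cpu_data is looked up only once. A minimal C sketch with hypothetical names
 * (the real per-CPU record is cpu_data_t in cpu_data.h):
 *
 *	#include <stddef.h>
 *
 *	struct cpu_data_sketch {
 *		void *cpu_ops_ptr;	// slot at CPU_DATA_CPU_OPS_PTR
 *	};
 *
 *	extern struct cpu_data_sketch *cpu_data_sketch(void);
 *	extern void *get_cpu_ops_ptr_sketch(void);
 *
 *	void init_cpu_ops_sketch(void)
 *	{
 *		struct cpu_data_sketch *data = cpu_data_sketch();
 *
 *		if (data->cpu_ops_ptr == NULL)
 *			data->cpu_ops_ptr = get_cpu_ops_ptr_sketch();
 *	}
 */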

#endif /* IMAGE_BL32 */

	/*
	 * The below function returns the cpu_ops structure matching the
	 * MIDR of the core. It reads the MIDR and finds the matching
	 * entry in the cpu_ops entries. Only the implementer and part number
	 * are used to match the entries.
	 * Return:
	 *     r0 - The matching cpu_ops pointer on success
	 *     r0 - 0 on failure.
	 * Clobbers: r0 - r5
	 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	ldr	r4, =(__CPU_OPS_START__ + CPU_MIDR)
	ldr	r5, =(__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	r0, #0

	/* Read the MIDR */
	ldcopr	r2, MIDR
	ldr	r3, =CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	r2, r2, r3
1:
	/* Check if we have reached the end of the list */
	cmp	r4, r5
	bhs	error_exit

	/* Load the MIDR from the cpu_ops entry */
	ldr	r1, [r4], #CPU_OPS_SIZE
	and	r1, r1, r3

	/* Check if the MIDR matches the MIDR of this core */
	cmp	r1, r2
	bne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	r0, r4, #(CPU_OPS_SIZE + CPU_MIDR)
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
error_exit:
	bx	lr
endfunc get_cpu_ops_ptr
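/*
 * The lookup above is a linear scan of the cpu_ops records placed between the
 * __CPU_OPS_START__ and __CPU_OPS_END__ linker symbols, comparing the masked
 * MIDR of each record against the masked MIDR of the running core. A minimal
 * C sketch with hypothetical names (the mask value mirrors CPU_IMPL_PN_MASK,
 * i.e. implementer plus primary part number):
 *
 *	#include <stddef.h>
 *
 *	#define IMPL_PN_MASK_SKETCH	0xff00fff0UL
 *
 *	struct cpu_ops_sketch {
 *		unsigned long midr;	// slot at CPU_MIDR
 *		// ... handlers follow; each record is CPU_OPS_SIZE bytes
 *	};
 *
 *	struct cpu_ops_sketch *find_cpu_ops_sketch(struct cpu_ops_sketch *start,
 *						   struct cpu_ops_sketch *end,
 *						   unsigned long midr)
 *	{
 *		for (struct cpu_ops_sketch *ops = start; ops < end; ops++) {
 *			if ((ops->midr & IMPL_PN_MASK_SKETCH) ==
 *			    (midr & IMPL_PN_MASK_SKETCH))
 *				return ops;
 *		}
 *		return NULL;
 *	}
 */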
/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	ldcopr	r1, MIDR

	/*
	 * Extract the variant[23:20] and revision[3:0] from r1 and pack them
	 * into r0[7:0] as variant[7:4] and revision[3:0]:
	 *
	 * First extract r1[23:16] to r0[7:0] and zero fill the rest. Then
	 * extract r1[3:0] into r0[3:0] retaining other bits.
	 */
	ubfx	r0, r1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfi	r0, r1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	bx	lr
endfunc cpu_get_rev_var
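/*
 * The packing above computes, in C terms:
 *
 *	rev_var = (((midr >> 20) & 0xfU) << 4) | (midr & 0xfU);
 *
 * i.e. the variant field MIDR[23:20] lands in bits [7:4] and the revision
 * field MIDR[3:0] in bits [3:0]. For example, an r1p2 part (variant 1,
 * revision 2) yields 0x12, so revision/variant pairs can be compared as plain
 * integers by cpu_rev_var_ls and cpu_rev_var_hs below.
 */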
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is lower than or the same as
 * the given value, the errata applies; otherwise it does not.
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	cmp	r0, r1
	movls	r0, #ERRATA_APPLIES
	movhi	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_ls
/*
 * Compare the CPU's revision-variant (r0) with a given value (r1), for errata
 * application purposes. If the revision-variant is higher than or the same as
 * the given value, the errata applies; otherwise it does not.
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	cmp	r0, r1
	movge	r0, #ERRATA_APPLIES
	movlt	r0, #ERRATA_NOT_APPLIES
	bx	lr
endfunc cpu_rev_var_hs
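/*
 * A typical errata check built on the two helpers above looks like the C
 * sketch below (names hypothetical): the caller packs the revision-variant
 * with cpu_get_rev_var and compares it against the last affected revision.
 *
 *	#define ERRATA_APPLIES_SKETCH		1
 *	#define ERRATA_NOT_APPLIES_SKETCH	0
 *
 *	extern unsigned int cpu_get_rev_var_sketch(void);
 *
 *	// Example: errata applies to revisions <= r1p1 (packed as 0x11).
 *	int check_errata_xxxx_sketch(void)
 *	{
 *		return (cpu_get_rev_var_sketch() <= 0x11U) ?
 *			ERRATA_APPLIES_SKETCH : ERRATA_NOT_APPLIES_SKETCH;
 *	}
 */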
#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print the errata status for CPUs of its class. Must be called
 * only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl print_errata_status
func print_errata_status
	/* r12 is pushed only for the sake of 8-byte stack alignment */
	push	{r4, r5, r12, lr}
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data, so retrieve the CPU operations
	 * directly.
	 */
	bl	get_cpu_ops_ptr
	ldr	r0, [r0, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	blxne	r0
#else
	/*
	 * Retrieve the pointer to cpu_ops and, from it, the errata printing
	 * function. If it's non-NULL, call it.
	 */
	bl	_cpu_data
#if ENABLE_ASSERTIONS
	cmp	r0, #0
	ASM_ASSERT(ne)
#endif
	ldr	r1, [r0, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	r1, #0
	ASM_ASSERT(ne)
#endif
	ldr	r0, [r1, #CPU_ERRATA_FUNC]
	cmp	r0, #0
	beq	1f

	mov	r4, r0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	r0, [r1, #CPU_ERRATA_LOCK]
	ldr	r1, [r1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	cmp	r0, #0
	blxne	r4
1:
#endif
	pop	{r4, r5, r12, pc}
endfunc print_errata_status
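/*
 * In the non-BL1 path above, the reporting decision is gated by a per-class
 * errata lock and a "printed" flag so that only one CPU of a class prints the
 * status. A minimal C sketch of that flow, with hypothetical names:
 *
 *	#include <stddef.h>
 *
 *	extern int errata_needs_reporting_sketch(void *lock, void *printed);
 *
 *	void print_errata_status_sketch(void *lock, void *printed,
 *					void (*errata_func)(void))
 *	{
 *		if ((errata_func != NULL) &&
 *		    (errata_needs_reporting_sketch(lock, printed) != 0))
 *			errata_func();
 *	}
 */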
#endif