1/*
2 * Copyright 2018 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
/**
 * From Linux commit 679db70801da9fda91d26caf13bf5b5ccc74e8e8:
 * "Some CPUs can speculate past an ERET instruction and potentially perform
 * speculative accesses to memory before processing the exception return.
 * Since the register state is often controlled by a lower privilege level
 * at the point of an ERET, this could potentially be used as part of a
 * side-channel attack."
 *
 * This macro emits a speculation barrier after the ERET to prevent the CPU
 * from speculating past the exception return.
 *
 * ARMv8.5 introduces a dedicated SB speculative barrier instruction.
 * Use a DSB/ISB pair on older platforms.
 */
.macro eret_with_sb
	eret
	/*
	 * Never architecturally executed (it sits after the ERET); present
	 * only to stop speculative execution from running past the ERET.
	 */
	dsb	nsh
	isb
.endm
28
/**
 * Saves the volatile registers onto the stack. This currently takes 14
 * instructions, so it can be used in exception handlers with 18 instructions
 * left, 2 of which in the same cache line (assuming a 16-byte cache line).
 *
 * On return, x0 and x1 are initialised to elr_\elx and spsr_\elx
 * respectively, which can be used as the first and second arguments of a
 * subsequent call.
 */
.macro save_volatile_to_stack elx:req
	/*
	 * Reserve stack space and save registers x0-x18, x29 & x30.
	 * 24 slots of 8 bytes = 192 bytes, which keeps sp 16-byte aligned.
	 * The layout must be mirrored exactly by restore_volatile_from_stack.
	 */
	stp x0, x1, [sp, #-(8 * 24)]!
	stp x2, x3, [sp, #8 * 2]
	stp x4, x5, [sp, #8 * 4]
	stp x6, x7, [sp, #8 * 6]
	stp x8, x9, [sp, #8 * 8]
	stp x10, x11, [sp, #8 * 10]
	stp x12, x13, [sp, #8 * 12]
	stp x14, x15, [sp, #8 * 14]
	stp x16, x17, [sp, #8 * 16]
	str x18, [sp, #8 * 18]
	stp x29, x30, [sp, #8 * 20]

	/*
	 * Save elr_elx & spsr_elx. This is done so that we can take a nested
	 * exception and still be able to unwind.
	 */
	mrs x0, elr_\elx
	mrs x1, spsr_\elx
	stp x0, x1, [sp, #8 * 22]
.endm
59
/**
 * Helper macros for SIMD vectors save/restore operations.
 *
 * Applies \op (e.g. stp/ldp) to all 32 Q registers as 16 pairs, using
 * post-indexed addressing: \reg is advanced by 32 bytes per pair, i.e.
 * 512 bytes in total, so \reg must point to (or receive) a 512-byte area.
 * \reg is clobbered (left pointing past the end of the area).
 */
.macro simd_op_vectors op:req reg:req
	\op q0, q1, [\reg], #32
	\op q2, q3, [\reg], #32
	\op q4, q5, [\reg], #32
	\op q6, q7, [\reg], #32
	\op q8, q9, [\reg], #32
	\op q10, q11, [\reg], #32
	\op q12, q13, [\reg], #32
	\op q14, q15, [\reg], #32
	\op q16, q17, [\reg], #32
	\op q18, q19, [\reg], #32
	\op q20, q21, [\reg], #32
	\op q22, q23, [\reg], #32
	\op q24, q25, [\reg], #32
	\op q26, q27, [\reg], #32
	\op q28, q29, [\reg], #32
	\op q30, q31, [\reg], #32
.endm
81
/**
 * Helper macros for SVE vectors save/restore operations.
 *
 * Applies \op (e.g. str/ldr) to all 32 Z registers at consecutive
 * vector-length-scaled offsets (MUL VL) from \reg. Unlike simd_op_vectors,
 * \reg is not modified; the backing buffer must be at least 32 vector
 * lengths in size.
 */
.macro sve_op_vectors op:req reg:req
	\op z0, [\reg, #0, MUL VL]
	\op z1, [\reg, #1, MUL VL]
	\op z2, [\reg, #2, MUL VL]
	\op z3, [\reg, #3, MUL VL]
	\op z4, [\reg, #4, MUL VL]
	\op z5, [\reg, #5, MUL VL]
	\op z6, [\reg, #6, MUL VL]
	\op z7, [\reg, #7, MUL VL]
	\op z8, [\reg, #8, MUL VL]
	\op z9, [\reg, #9, MUL VL]
	\op z10, [\reg, #10, MUL VL]
	\op z11, [\reg, #11, MUL VL]
	\op z12, [\reg, #12, MUL VL]
	\op z13, [\reg, #13, MUL VL]
	\op z14, [\reg, #14, MUL VL]
	\op z15, [\reg, #15, MUL VL]
	\op z16, [\reg, #16, MUL VL]
	\op z17, [\reg, #17, MUL VL]
	\op z18, [\reg, #18, MUL VL]
	\op z19, [\reg, #19, MUL VL]
	\op z20, [\reg, #20, MUL VL]
	\op z21, [\reg, #21, MUL VL]
	\op z22, [\reg, #22, MUL VL]
	\op z23, [\reg, #23, MUL VL]
	\op z24, [\reg, #24, MUL VL]
	\op z25, [\reg, #25, MUL VL]
	\op z26, [\reg, #26, MUL VL]
	\op z27, [\reg, #27, MUL VL]
	\op z28, [\reg, #28, MUL VL]
	\op z29, [\reg, #29, MUL VL]
	\op z30, [\reg, #30, MUL VL]
	\op z31, [\reg, #31, MUL VL]

.endm
120
/**
 * Helper macros for SVE predicates save/restore operations.
 *
 * Applies \op (e.g. str/ldr) to all 16 P registers at consecutive
 * predicate-length-scaled offsets (MUL VL) from \reg. \reg is not
 * modified; the backing buffer must hold at least 16 predicates.
 */
.macro sve_predicate_op op:req reg:req
	\op p0, [\reg, #0, MUL VL]
	\op p1, [\reg, #1, MUL VL]
	\op p2, [\reg, #2, MUL VL]
	\op p3, [\reg, #3, MUL VL]
	\op p4, [\reg, #4, MUL VL]
	\op p5, [\reg, #5, MUL VL]
	\op p6, [\reg, #6, MUL VL]
	\op p7, [\reg, #7, MUL VL]
	\op p8, [\reg, #8, MUL VL]
	\op p9, [\reg, #9, MUL VL]
	\op p10, [\reg, #10, MUL VL]
	\op p11, [\reg, #11, MUL VL]
	\op p12, [\reg, #12, MUL VL]
	\op p13, [\reg, #13, MUL VL]
	\op p14, [\reg, #14, MUL VL]
	\op p15, [\reg, #15, MUL VL]
.endm
142
/**
 * Restores the volatile registers from the stack. This currently takes 14
 * instructions, so it can be used in exception handlers while still leaving 18
 * instructions left; if paired with save_volatile_to_stack, there are 4
 * instructions to spare.
 *
 * The stack layout (offsets and the 8 * 24 byte frame size) must match
 * save_volatile_to_stack exactly.
 */
.macro restore_volatile_from_stack elx:req
	/* Restore registers x2-x18, x29 & x30. */
	ldp x2, x3, [sp, #8 * 2]
	ldp x4, x5, [sp, #8 * 4]
	ldp x6, x7, [sp, #8 * 6]
	ldp x8, x9, [sp, #8 * 8]
	ldp x10, x11, [sp, #8 * 10]
	ldp x12, x13, [sp, #8 * 12]
	ldp x14, x15, [sp, #8 * 14]
	ldp x16, x17, [sp, #8 * 16]
	ldr x18, [sp, #8 * 18]
	ldp x29, x30, [sp, #8 * 20]

	/* Restore registers elr_elx & spsr_elx, using x0 & x1 as scratch. */
	ldp x0, x1, [sp, #8 * 22]
	msr elr_\elx, x0
	msr spsr_\elx, x1

	/* Restore x0 & x1, and release stack space. */
	ldp x0, x1, [sp], #8 * 24
.endm
170
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SP0. It behaves similarly to the SPx case by first switching to SPx, doing
 * the work, then switching back to SP0 before returning.
 *
 * Switching to SPx and calling the C handler takes 16 instructions, so it's not
 * possible to add a branch to a common exit path without going into the next
 * cache line (assuming 16-byte cache lines). Additionally, to restore and
 * return we need an additional 16 instructions, so we could implement the whole
 * handler within the allotted 32 instructions. However, since we want to emit
 * a speculation barrier after each ERET, we are forced to move the ERET to
 * a shared exit path.
 *
 * \eret_label is the shared exit path, expected to perform the ERET (with
 * its speculation barrier).
 */
.macro current_exception_sp0 elx:req handler:req eret_label:req
	/* Switch to SPx so saving/handling happens on the exception stack. */
	msr spsel, #1
	save_volatile_to_stack \elx
	bl \handler
	restore_volatile_from_stack \elx
	/* Back to SP0 before returning to the interrupted context. */
	msr spsel, #0
	b \eret_label
.endm
192
/**
 * Variant of current_exception_sp0 which assumes the handler never returns.
 *
 * Note the plain branch (b, not bl): no return address is set up, so
 * \handler must not attempt to return here.
 */
.macro noreturn_current_exception_sp0 elx:req handler:req
	msr spsel, #1
	save_volatile_to_stack \elx
	b \handler
.endm
201
/**
 * This is a generic handler for exceptions taken at the current EL while using
 * SPx. It saves volatile registers, calls the C handler, restores volatile
 * registers, then returns.
 *
 * Saving state and jumping to C handler takes 15 instructions. We add an extra
 * branch to a common exit path. So each handler takes up one unique cache line
 * and one shared cache line (assuming 16-byte cache lines).
 *
 * restore_from_stack_and_return is the shared exit path, defined elsewhere
 * in this file's translation unit.
 */
.macro current_exception_spx elx:req handler:req
	save_volatile_to_stack \elx
	bl \handler
	b restore_from_stack_and_return
.endm
216
/**
 * Variant of current_exception_spx which assumes the handler never returns.
 *
 * Note the plain branch (b, not bl): no return address is set up, so
 * \handler must not attempt to return here.
 */
.macro noreturn_current_exception_spx elx:req handler:req
	save_volatile_to_stack \elx
	b \handler
.endm
224
/**
 * Restore Hypervisor pointer authentication APIA key.
 * Emit isb to ensure the pointer authentication key change takes
 * effect before any pauth instruction is executed.
 *
 * \reg1 and \reg2 are scratch registers; both are clobbered and they must
 * be distinct (the 128-bit key is loaded into the pair via ldp).
 */
.macro pauth_restore_hypervisor_key reg1 reg2
	/* \reg1 = &pauth_apia_key via adrp + :lo12: page-offset pair. */
	adrp	\reg1, pauth_apia_key
	add	\reg1, \reg1, :lo12:pauth_apia_key
	/* Load the key's low/high 64-bit halves. */
	ldp	\reg1, \reg2, [\reg1]
	msr	APIAKEYLO_EL1, \reg1
	msr	APIAKEYHI_EL1, \reg2
	isb
.endm
238
/**
 * Restore the Hypervisor's MTE (Memory Tagging Extension) state: seed
 * RGSR_EL1 (random allocation tag seed) from the mte_seed symbol, clear
 * GCR_EL1 (tag generation control) and reset TCO, with an isb so the new
 * state takes effect before subsequent instructions.
 *
 * \reg1 is a scratch register and is clobbered.
 */
.macro mte_restore_hypervisor_state reg1
	/* \reg1 = *(&mte_seed) via adrp + :lo12: page-offset pair. */
	adrp	\reg1, mte_seed
	add	\reg1, \reg1, :lo12:mte_seed
	ldr	\reg1, [\reg1]
	msr	rgsr_el1, \reg1
	msr	gcr_el1, xzr

	/* Reset TCO on taking an exception. */
	msr	TCO, #0
	isb
.endm
253