/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <assert.h>
#include <granule.h>
#include <psci.h>
#include <realm.h>
#include <rec.h>
#include <smc-rmi.h>
#include <smc.h>
#include <stdint.h>

static struct psci_result psci_version(struct rec *rec)
{
	struct psci_result result = { 0 };
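	/* PSCI version 1.1: major version in bits [30:16], minor version in bits [15:0] */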
	unsigned int version_1_1 = (1U << 16) | 1U;

	result.smc_res.x[0] = (unsigned long)version_1_1;
	return result;
}

static struct psci_result psci_cpu_suspend(struct rec *rec,
					   unsigned long entry_point_address,
					   unsigned long context_id)
{
	struct psci_result result = { 0 };

	/*
	 * We treat all target power states as suspend requests, so all we
	 * need to do is inform the NS hypervisor and we can ignore all the
	 * parameters.
	 */
	result.hvc_forward.forward_psci_call = true;

	result.smc_res.x[0] = PSCI_RETURN_SUCCESS;
	return result;
}

static struct psci_result psci_cpu_off(struct rec *rec)
{
	struct psci_result result = { 0 };

	result.hvc_forward.forward_psci_call = true;

	/*
	 * It is safe to clear this flag without holding a lock on the REC
	 * and without explicit memory barriers or ordering operations: a REC
	 * can only be in an executing state on one PE at any given time and
	 * we are in that execution context already. We also hold a reference
	 * count on the REC at this point, which is dropped and re-evaluated
	 * with proper barriers before any other PE can evaluate the runnable
	 * field after this change.
	 */
	rec->runnable = false;

	result.smc_res.x[0] = PSCI_RETURN_SUCCESS;
	return result;
}

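/*
 * Reset the target REC for entry via PSCI_CPU_ON: AArch64 EL1h with all
 * exceptions masked, stage 1 MMU and caches disabled, and the endianness
 * inherited from the calling REC.
 */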
static void psci_reset_rec(struct rec *rec, unsigned long caller_sctlr_el1)
{
	/* Set execution level to EL1 (AArch64) and mask exceptions */
	rec->pstate = SPSR_EL2_MODE_EL1h |
		      SPSR_EL2_nRW_AARCH64 |
		      SPSR_EL2_F_BIT |
		      SPSR_EL2_I_BIT |
		      SPSR_EL2_A_BIT |
		      SPSR_EL2_D_BIT;

	/* Disable stage 1 MMU and caches */
	rec->sysregs.sctlr_el1 = SCTLR_EL1_FLAGS;

	/* Set the endianness of the target to that of the caller */
	rec->sysregs.sctlr_el1 |= caller_sctlr_el1 & SCTLR_EL1_EE;
}

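/* Map the Realm Descriptor granule, read the Realm's REC count and unmap it */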
static unsigned long rd_map_read_rec_count(struct granule *g_rd)
{
	unsigned long rec_count;
	struct rd *rd = granule_map(g_rd, SLOT_RD);

	rec_count = get_rd_rec_count_unlocked(rd);
	buffer_unmap(rd);
	return rec_count;
}

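/*
 * Handle PSCI_CPU_ON from a Realm. The entry point and target MPIDR are
 * validated here; the request is then marked as pending and forwarded to the
 * NS hypervisor, and is completed later through psci_complete_request().
 */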
static struct psci_result psci_cpu_on(struct rec *rec,
				      unsigned long target_cpu,
				      unsigned long entry_point_address,
				      unsigned long context_id)
{
	struct psci_result result = { 0 };
	unsigned long target_rec_idx;

	/* Check that entry_point_address is a Protected Realm Address */
	if (!addr_in_rec_par(rec, entry_point_address)) {
		result.smc_res.x[0] = PSCI_RETURN_INVALID_ADDRESS;
		return result;
	}

	/* Get REC index from MPIDR */
	target_rec_idx = mpidr_to_rec_idx(target_cpu);

	/*
	 * Check that target_cpu is a valid value.
	 * Note that the RMM enforces that RECs are created with
	 * consecutively increasing indexes starting from zero.
	 */
	if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) {
		result.smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
		return result;
	}

	/* Check if we're trying to turn ourselves on */
	if (target_rec_idx == rec->rec_idx) {
		result.smc_res.x[0] = PSCI_RETURN_ALREADY_ON;
		return result;
	}

	rec->psci_info.pending = true;

	result.hvc_forward.forward_psci_call = true;
	result.hvc_forward.x1 = target_cpu;
	return result;
}

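/*
 * Handle PSCI_AFFINITY_INFO from a Realm. A query that targets the calling
 * REC itself is answered directly as ON; any other valid target is marked as
 * pending and forwarded to the NS hypervisor for completion.
 */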
static struct psci_result psci_affinity_info(struct rec *rec,
					     unsigned long target_affinity,
					     unsigned long lowest_affinity_level)
{
	struct psci_result result = { 0 };
	unsigned long target_rec_idx;

	if (lowest_affinity_level != 0UL) {
		result.smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
		return result;
	}

	/* Get REC index from MPIDR */
	target_rec_idx = mpidr_to_rec_idx(target_affinity);

	/*
	 * Check that target_affinity is a valid value.
	 * Note that the RMM enforces that RECs are created with
	 * consecutively increasing indexes starting from zero.
	 */
	if (target_rec_idx >= rd_map_read_rec_count(rec->realm_info.g_rd)) {
		result.smc_res.x[0] = PSCI_RETURN_INVALID_PARAMS;
		return result;
	}

	/* Check if the vCPU targets itself */
	if (target_rec_idx == rec->rec_idx) {
		result.smc_res.x[0] = PSCI_AFFINITY_INFO_ON;
		return result;
	}

	rec->psci_info.pending = true;

	result.hvc_forward.forward_psci_call = true;
	result.hvc_forward.x1 = target_affinity;
	return result;
}

/*
 * The RMM implements SYSTEM_OFF and SYSTEM_RESET for a Realm by preventing
 * execution of any of its RECs after this function has run. Reboot
 * functionality must be provided by the host hypervisor by creating a new
 * Realm with associated attestation, measurement etc.
 */
static void system_off_reboot(struct rec *rec)
{
	struct rd *rd;
	struct granule *g_rd = rec->realm_info.g_rd;

	/*
	 * The RECs (and, consequently, the PSCI calls) run without any
	 * RMM lock held. Therefore, we cannot cause a deadlock when we
	 * acquire the rd lock here before we set the Realm's new state.
	 */
	granule_lock(g_rd, GRANULE_STATE_RD);
	rd = granule_map(rec->realm_info.g_rd, SLOT_RD);

	set_rd_state(rd, REALM_STATE_SYSTEM_OFF);

	buffer_unmap(rd);
	granule_unlock(g_rd);

	/* TODO: Invalidate all stage 2 entries to ensure REC exits */
}

static struct psci_result psci_system_off(struct rec *rec)
{
	struct psci_result result = { 0 };

	system_off_reboot(rec);

	result.hvc_forward.forward_psci_call = true;
	return result;
}

static struct psci_result psci_system_reset(struct rec *rec)
{
	struct psci_result result = { 0 };

	system_off_reboot(rec);

	result.hvc_forward.forward_psci_call = true;
	return result;
}

static struct psci_result psci_features(struct rec *rec,
					unsigned int psci_func_id)
{
	struct psci_result result = { 0 };
	unsigned long ret;

	switch (psci_func_id) {
	case SMC32_PSCI_CPU_SUSPEND:
	case SMC64_PSCI_CPU_SUSPEND:
	case SMC32_PSCI_CPU_OFF:
	case SMC32_PSCI_CPU_ON:
	case SMC64_PSCI_CPU_ON:
	case SMC32_PSCI_AFFINITY_INFO:
	case SMC64_PSCI_AFFINITY_INFO:
	case SMC32_PSCI_SYSTEM_OFF:
	case SMC32_PSCI_SYSTEM_RESET:
	case SMC32_PSCI_FEATURES:
	case SMCCC_VERSION:
		ret = 0UL;
		break;
	default:
		ret = PSCI_RETURN_NOT_SUPPORTED;
	}

	result.smc_res.x[0] = ret;
	return result;
}

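/*
 * Top-level dispatcher for PSCI calls made by a Realm through RSI. Queries
 * such as PSCI_VERSION and PSCI_FEATURES are answered directly; the remaining
 * calls are forwarded to the NS hypervisor, with CPU_ON and AFFINITY_INFO
 * left pending until the host completes them via psci_complete_request().
 */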
struct psci_result psci_rsi(struct rec *rec,
			    unsigned int function_id,
			    unsigned long arg0,
			    unsigned long arg1,
			    unsigned long arg2)
{
	struct psci_result result = { 0 };

	switch (function_id) {
	case SMC32_PSCI_VERSION:
		result = psci_version(rec);
		break;
	case SMC32_PSCI_CPU_SUSPEND:
	case SMC64_PSCI_CPU_SUSPEND:
		result = psci_cpu_suspend(rec, arg0, arg1);
		break;
	case SMC32_PSCI_CPU_OFF:
		result = psci_cpu_off(rec);
		break;
	case SMC32_PSCI_CPU_ON:
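		/*
		 * For SMC32 calls, only the lower 32 bits of each argument
		 * are significant.
		 */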
		arg0 = (unsigned int)arg0;
		arg1 = (unsigned int)arg1;
		arg2 = (unsigned int)arg2;
		FALLTHROUGH;
	case SMC64_PSCI_CPU_ON:
		result = psci_cpu_on(rec, arg0, arg1, arg2);
		break;
	case SMC32_PSCI_AFFINITY_INFO:
		arg0 = (unsigned int)arg0;
		arg1 = (unsigned int)arg1;
		FALLTHROUGH;
	case SMC64_PSCI_AFFINITY_INFO:
		result = psci_affinity_info(rec, arg0, arg1);
		break;
	case SMC32_PSCI_SYSTEM_OFF:
		result = psci_system_off(rec);
		break;
	case SMC32_PSCI_SYSTEM_RESET:
		result = psci_system_reset(rec);
		break;
	case SMC32_PSCI_FEATURES:
		result = psci_features(rec, (unsigned int)arg0);
		break;
	default:
		result.smc_res.x[0] = PSCI_RETURN_NOT_SUPPORTED;
		result.hvc_forward.forward_psci_call = false;
		break;
	}

	return result;
}

/*
 * In the following two functions, it is only safe to access the runnable
 * field of the target_rec once the target_rec is no longer running on another
 * PE and all writes performed by the other PE as part of smc_rec_enter are
 * guaranteed to be observed here. This is the case when we read a zero
 * refcount on the target rec using acquire semantics, paired with the release
 * semantics on the reference count in smc_rec_enter. If we observe a non-zero
 * refcount, it simply means that the target_rec is running and we can return
 * the corresponding value.
 */
static unsigned long complete_psci_cpu_on(struct rec *target_rec,
					  unsigned long entry_point_address,
					  unsigned long caller_sctlr_el1)
{
	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0UL) ||
	    target_rec->runnable) {
		return PSCI_RETURN_ALREADY_ON;
	}

	psci_reset_rec(target_rec, caller_sctlr_el1);
	target_rec->pc = entry_point_address;
	target_rec->runnable = true;
	return PSCI_RETURN_SUCCESS;
}

static unsigned long complete_psci_affinity_info(struct rec *target_rec)
{
	if ((granule_refcount_read_acquire(target_rec->g_rec) != 0UL) ||
	    target_rec->runnable) {
		return PSCI_AFFINITY_INFO_ON;
	}

	return PSCI_AFFINITY_INFO_OFF;
}

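/*
 * Complete a pending PSCI_CPU_ON or PSCI_AFFINITY_INFO request on behalf of
 * the host (RMI_PSCI_COMPLETE). The request is validated against the calling
 * REC's pending state and target, the operation is applied to target_rec, and
 * the PSCI return value is written back to the calling REC's registers.
 */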
unsigned long psci_complete_request(struct rec *calling_rec,
				    struct rec *target_rec)
{
	unsigned long ret = PSCI_RETURN_NOT_SUPPORTED;
	unsigned long mpidr = calling_rec->regs[1];

	if (!calling_rec->psci_info.pending) {
		return RMI_ERROR_INPUT;
	}

	if (calling_rec->realm_info.g_rd != target_rec->realm_info.g_rd) {
		return RMI_ERROR_INPUT;
	}

	if (mpidr_to_rec_idx(mpidr) != target_rec->rec_idx) {
		return RMI_ERROR_INPUT;
	}

	switch (calling_rec->regs[0]) {
	case SMC32_PSCI_CPU_ON:
	case SMC64_PSCI_CPU_ON:
		ret = complete_psci_cpu_on(target_rec,
					   calling_rec->regs[2],
					   calling_rec->sysregs.sctlr_el1);
		break;
	case SMC32_PSCI_AFFINITY_INFO:
	case SMC64_PSCI_AFFINITY_INFO:
		ret = complete_psci_affinity_info(target_rec);
		break;
	default:
		assert(false);
	}

	calling_rec->regs[0] = ret;
	calling_rec->regs[1] = 0;
	calling_rec->regs[2] = 0;
	calling_rec->regs[3] = 0;
	calling_rec->psci_info.pending = false;

	return RMI_SUCCESS;
}