/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
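
/*
 * Stub implementation of the FF-A platform interface: every hook below
 * either does nothing or reports the operation as unsupported, so this
 * file can back builds in which FF-A support is absent.
 */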

#include "hf/ffa.h"

#include "hf/arch/plat/ffa.h"

#include "hf/ffa_internal.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
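
/** No FF-A features are advertised; every feature query is rejected. */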
struct ffa_value arch_ffa_features(uint32_t function_id)
{
	(void)function_id;
	return ffa_error(FFA_NOT_SUPPORTED);
}
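
/** Report the statically configured SPMC ID. */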
ffa_vm_id_t arch_ffa_spmc_id_get(void)
{
	return HF_SPMC_VM_ID;
}

void plat_ffa_log_init(void)
{
}
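
/** All memory send operations are treated as valid. */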
bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
				   uint32_t share_func)
{
	(void)receiver_vm_id;
	(void)share_func;

	return true;
}
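
/*
 * Direct message requests and responses always pass validation, though
 * the direct messaging feature itself is reported as unsupported.
 */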
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
				      ffa_vm_id_t sender_vm_id,
				      ffa_vm_id_t receiver_vm_id)
{
	(void)current;
	(void)sender_vm_id;
	(void)receiver_vm_id;

	return true;
}

bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
					  struct vm *receiver_vm)
{
	(void)sender_vm;
	(void)receiver_vm;

	return false;
}

bool plat_ffa_is_direct_response_valid(struct vcpu *current,
				       ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id)
{
	(void)current;
	(void)sender_vm_id;
	(void)receiver_vm_id;

	return true;
}
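
/** FFA_RUN is never forwarded to another world; the caller proceeds locally. */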
bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
			  struct ffa_value *ret)
{
	(void)vm_id;
	(void)vcpu_idx;
	(void)ret;

	return false;
}

void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
	(void)to_destroy_locked;
}

void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
{
	(void)id;
}
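
/*
 * No messaging traffic is forwarded to another world: direct requests,
 * RX buffer release/acquisition and indirect messages are all refused
 * or handled locally.
 */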
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
				     struct ffa_value args,
				     struct ffa_value *ret)
{
	(void)receiver_vm_id;
	(void)args;
	(void)ret;

	return false;
}

bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
				 struct ffa_value *ret)
{
	(void)vm_locked;
	(void)ret;

	return false;
}

bool plat_ffa_rx_release_forwarded(struct vm_locked vm_locked)
{
	(void)vm_locked;

	return false;
}

bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
				  struct ffa_value *ret)
{
	(void)to_locked;
	(void)ret;

	return false;
}

bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
					struct vm_locked receiver_locked)
{
	(void)sender_locked;
	(void)receiver_locked;

	return false;
}

bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
				ffa_vm_id_t sender_vm_id, struct ffa_value *ret)
{
	(void)receiver_vm_id;
	(void)sender_vm_id;
	(void)ret;

	return false;
}
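
/** The memory handle is the raw allocator index; no extra bits are encoded. */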
ffa_memory_handle_t plat_ffa_memory_handle_make(uint64_t index)
{
	return index;
}

bool plat_ffa_memory_handle_allocated_by_current_world(
	ffa_memory_handle_t handle)
{
	(void)handle;

	return false;
}
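
/* No architecture-specific memory mode bits are contributed for any world. */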
uint32_t plat_ffa_other_world_mode(void)
{
	return 0U;
}

uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
{
	(void)owner_id;

	return 0U;
}
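
/*
 * Notification bind/unbind requests never pass validation and are not
 * forwarded anywhere.
 */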
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
					  ffa_vm_id_t sender_id,
					  ffa_vm_id_t receiver_id)
{
	(void)current;
	(void)sender_id;
	(void)receiver_id;

	return false;
}

bool plat_ffa_notifications_update_bindings_forward(
	ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
{
	(void)receiver_id;
	(void)sender_id;
	(void)flags;
	(void)bitmap;
	(void)is_bind;
	(void)ret;

	return false;
}
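
/** RXTX map requests are not forwarded anywhere. */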
void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
{
	(void)vm_locked;
}

ffa_partition_properties_t plat_ffa_partition_properties(
	ffa_vm_id_t vm_id, const struct vm *target)
{
	(void)vm_id;
	(void)target;

	return 0;
}

bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
{
	(void)vm;

	return false;
}

bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
					    ffa_vm_id_t vm_id)
{
	(void)current;
	(void)vm_id;

	return false;
}

bool plat_ffa_is_notification_set_valid(struct vcpu *current,
					ffa_vm_id_t sender_id,
					ffa_vm_id_t receiver_id)
{
	(void)current;
	(void)sender_id;
	(void)receiver_id;

	return false;
}

bool plat_ffa_is_notification_get_valid(struct vcpu *current,
					ffa_vm_id_t receiver_id, uint32_t flags)
{
	(void)current;
	(void)receiver_id;
	(void)flags;

	return false;
}

bool plat_ffa_notifications_get_from_sp(
	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
	ffa_notifications_bitmap_t *from_sp,  // NOLINT
	struct ffa_value *ret)		      // NOLINT
{
	(void)receiver_locked;
	(void)vcpu_id;
	(void)from_sp;
	(void)ret;

	return false;
}

bool plat_ffa_notifications_get_framework_notifications(
	struct vm_locked receiver_locked,
	ffa_notifications_bitmap_t *from_fwk,  // NOLINT
	uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
{
	(void)receiver_locked;
	(void)from_fwk;
	(void)flags;
	(void)vcpu_id;
	(void)ret;

	return false;
}

bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
				       ffa_vm_id_t receiver_vm_id,
				       uint32_t flags,
				       ffa_notifications_bitmap_t bitmap,
				       struct ffa_value *ret)
{
	(void)sender_vm_id;
	(void)receiver_vm_id;
	(void)flags;
	(void)bitmap;
	(void)ret;

	return false;
}
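
/*
 * Notification bitmaps cannot be created or destroyed; both operations
 * report FFA_NOT_SUPPORTED to the caller.
 */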
struct ffa_value plat_ffa_notifications_bitmap_create(
	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
	(void)vm_id;
	(void)vcpu_count;

	return ffa_error(FFA_NOT_SUPPORTED);
}

struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
{
	(void)vm_id;

	return ffa_error(FFA_NOT_SUPPORTED);
}
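
/*
 * No VM structures are tracked here: lookups yield a NULL locked VM and
 * no ID is recognised as a VM ID.
 */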
struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id)
{
	(void)vm_id;

	return (struct vm_locked){.vm = NULL};
}

struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
{
	(void)vm_id;

	return (struct vm_locked){.vm = NULL};
}

bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
{
	(void)vm_id;

	return false;
}

bool plat_ffa_vm_notifications_info_get(     // NOLINTNEXTLINE
	uint16_t *ids, uint32_t *ids_count,  // NOLINTNEXTLINE
	uint32_t *lists_sizes,		     // NOLINTNEXTLINE
	uint32_t *lists_count, const uint32_t ids_count_max)
{
	(void)ids;
	(void)ids_count;
	(void)lists_sizes;
	(void)lists_count;
	(void)ids_count_max;

	return false;
}

bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
{
	(void)current;

	return false;
}

bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
{
	(void)current;

	return false;
}

/**
 * Check whether the current VM is allowed to resume the target VM/SP via
 * the FFA_RUN ABI. No restrictions apply here, so the check always passes.
 */
bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
			 ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
			 struct vcpu **next)
{
	(void)current;
	(void)target_vm_id;
	(void)vcpu_idx;
	(void)run_ret;
	(void)next;

	return true;
}

void plat_ffa_notification_info_get_forward(  // NOLINTNEXTLINE
	uint16_t *ids, uint32_t *ids_count,   // NOLINTNEXTLINE
	uint32_t *lists_sizes, uint32_t *lists_count,
	const uint32_t ids_count_max)
{
	(void)ids;
	(void)ids_count;
	(void)lists_sizes;
	(void)lists_count;
	(void)ids_count_max;
}
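
/*
 * Schedule Receiver Interrupt (SRI) handling is absent: state changes,
 * triggers and initialisation are all no-ops.
 */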
void plat_ffa_sri_state_set(enum plat_ffa_sri_state state)
{
	(void)state;
}

void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
{
	(void)cpu;
}

void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
{
	(void)cpu;
}

void plat_ffa_sri_init(struct cpu *cpu)
{
	(void)cpu;
}

bool plat_ffa_inject_notification_pending_interrupt(
	struct vcpu_locked target_locked, struct vcpu *current,
	struct vm_locked receiver_locked)
{
	(void)target_locked;
	(void)current;
	(void)receiver_locked;

	return false;
}

void plat_ffa_partition_info_get_forward(  // NOLINTNEXTLINE
	const struct ffa_uuid *uuid,	   // NOLINTNEXTLINE
	const uint32_t flags,		   // NOLINTNEXTLINE
	struct ffa_partition_info *partitions, ffa_vm_count_t *ret_count)
{
	(void)uuid;
	(void)flags;
	(void)partitions;
	(void)ret_count;
}
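
/** The FFA_SECONDARY_EP_REGISTER interface is not available. */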
bool plat_ffa_is_secondary_ep_register_supported(void)
{
	return false;
}
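
/* FFA_MSG_WAIT needs no special preparation; report FFA_INTERRUPT back. */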
struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
					   struct vcpu **next)
{
	(void)current;
	(void)next;

	return (struct ffa_value){.func = FFA_INTERRUPT_32};
}

bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
					     ffa_vm_id_t vm_id,
					     ffa_vm_id_t receiver_vm_id,
					     struct vcpu *receiver_vcpu,
					     uint32_t func,  // NOLINTNEXTLINE
					     enum vcpu_state *next_state)
{
	/*
	 * State transition checks apply only to Secure Partitions, so
	 * there is nothing to check here.
	 */
	(void)current;
	(void)vm_id;
	(void)receiver_vm_id;
	(void)receiver_vcpu;
	(void)func;
	(void)next_state;

	return true;
}

void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
					 struct vcpu_locked target_locked)
{
	/* Scheduling modes are not supported in the Hypervisor/VMs. */
	(void)current;
	(void)target_locked;
}

void plat_ffa_wind_call_chain_ffa_direct_req(
	struct vcpu_locked current_locked,
	struct vcpu_locked receiver_vcpu_locked)
{
	/* Call chains are not supported in the Hypervisor/VMs. */
	(void)current_locked;
	(void)receiver_vcpu_locked;
}

void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
						struct vcpu *next)
{
	/* Call chains are not supported in the Hypervisor/VMs. */
	(void)current;
	(void)next;
}

void plat_ffa_enable_virtual_maintenance_interrupts(
	struct vcpu_locked current_locked)
{
	(void)current_locked;
}