/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
              "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
              "bytes long.");

static void ffa_copy_memory_region_constituents(
        struct ffa_memory_region_constituent *dest,
        const struct ffa_memory_region_constituent *src)
{
        dest->address = src->address;
        dest->page_count = src->page_count;
        dest->reserved = 0;
}

/**
 * Initialises the receiver permissions in a memory transaction descriptor.
 */
void ffa_memory_access_init_permissions(
        struct ffa_memory_access *receiver, ffa_vm_id_t receiver_id,
        enum ffa_data_access data_access,
        enum ffa_instruction_access instruction_access,
        ffa_memory_receiver_flags_t flags)
{
        ffa_memory_access_permissions_t permissions = 0;

        /* Set memory region's permissions. */
        ffa_set_data_access_attr(&permissions, data_access);
        ffa_set_instruction_access_attr(&permissions, instruction_access);

        receiver->receiver_permissions.receiver = receiver_id;
        receiver->receiver_permissions.permissions = permissions;
        receiver->receiver_permissions.flags = flags;

        receiver->reserved_0 = 0ULL;
}
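
/*
 * Illustrative sketch (not part of the original source): a sender sharing a
 * region with two borrowers might use the helper above to fill the receivers
 * array that `ffa_memory_region_init` expects, assuming the access enum
 * values from `hf/ffa.h`. The receiver IDs and access choices here are
 * placeholders.
 *
 *      struct ffa_memory_access receivers[2];
 *
 *      ffa_memory_access_init_permissions(
 *              &receivers[0], receiver_a_id, FFA_DATA_ACCESS_RW,
 *              FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);
 *      ffa_memory_access_init_permissions(
 *              &receivers[1], receiver_b_id, FFA_DATA_ACCESS_RO,
 *              FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, 0);
 */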

/**
 * Initialises the header of the given `ffa_memory_region`, not
 * including the composite memory region offset.
 */
static void ffa_memory_region_init_header(
        struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
        ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
        ffa_memory_handle_t handle, uint32_t tag, uint32_t receiver_count)
{
        memory_region->sender = sender;
        memory_region->attributes = attributes;
        memory_region->reserved_0 = 0;
        memory_region->flags = flags;
        memory_region->handle = handle;
        memory_region->tag = tag;
        memory_region->reserved_1 = 0;
        memory_region->receiver_count = receiver_count;
}

/**
 * Copies as many as possible of the given constituents to the respective
 * memory region and sets the respective offset.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size), and
 * the total size of the memory sharing message including all constituents.
 */
static uint32_t ffa_memory_region_init_constituents(
        struct ffa_memory_region *memory_region, size_t memory_region_max_size,
        const struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, uint32_t *total_length,
        uint32_t *fragment_length)
{
        struct ffa_composite_memory_region *composite_memory_region;
        uint32_t fragment_max_constituents;
        uint32_t constituents_offset;
        uint32_t count_to_copy;
        uint32_t i;

        /*
         * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
         * ffa_memory_access)` must both be multiples of 16 (as verified by the
         * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
         * calculate here is aligned to a 64-bit boundary and so 64-bit values
         * can be copied without alignment faults.
         * If there are multiple receiver endpoints, their respective access
         * structures should point to the same offset value.
         */
        for (i = 0U; i < memory_region->receiver_count; i++) {
                memory_region->receivers[i].composite_memory_region_offset =
                        sizeof(struct ffa_memory_region) +
                        memory_region->receiver_count *
                                sizeof(struct ffa_memory_access);
        }

        composite_memory_region =
                ffa_memory_region_get_composite(memory_region, 0);
        composite_memory_region->page_count = 0;
        composite_memory_region->constituent_count = constituent_count;
        composite_memory_region->reserved_0 = 0;

        constituents_offset =
                memory_region->receivers[0].composite_memory_region_offset +
                sizeof(struct ffa_composite_memory_region);
        fragment_max_constituents =
                (memory_region_max_size - constituents_offset) /
                sizeof(struct ffa_memory_region_constituent);

        count_to_copy = constituent_count;
        if (count_to_copy > fragment_max_constituents) {
                count_to_copy = fragment_max_constituents;
        }

        for (i = 0U; i < constituent_count; i++) {
                if (i < count_to_copy) {
                        ffa_copy_memory_region_constituents(
                                &composite_memory_region->constituents[i],
                                &constituents[i]);
                }
                composite_memory_region->page_count +=
                        constituents[i].page_count;
        }

        if (total_length != NULL) {
                *total_length =
                        constituents_offset +
                        composite_memory_region->constituent_count *
                                sizeof(struct ffa_memory_region_constituent);
        }
        if (fragment_length != NULL) {
                *fragment_length =
                        constituents_offset +
                        count_to_copy *
                                sizeof(struct ffa_memory_region_constituent);
        }

        return composite_memory_region->constituent_count - count_to_copy;
}

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size), and
 * the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init_single_receiver(
        struct ffa_memory_region *memory_region, size_t memory_region_max_size,
        ffa_vm_id_t sender, ffa_vm_id_t receiver,
        const struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, uint32_t tag,
        ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
        enum ffa_instruction_access instruction_access,
        enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
        enum ffa_memory_shareability shareability, uint32_t *total_length,
        uint32_t *fragment_length)
{
        struct ffa_memory_access receiver_access;

        ffa_memory_access_init_permissions(&receiver_access, receiver,
                                           data_access, instruction_access, 0);

        return ffa_memory_region_init(
                memory_region, memory_region_max_size, sender, &receiver_access,
                1, constituents, constituent_count, tag, flags, type,
                cacheability, shareability, total_length, fragment_length);
}
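
/*
 * Illustrative sketch (not part of the original source): a sender with a TX
 * buffer of HF_MAILBOX_SIZE bytes might build a single-receiver share message
 * for one page as follows. The VM IDs, the page address and the attribute
 * choices are placeholders.
 *
 *      struct ffa_memory_region *tx = (struct ffa_memory_region *)tx_buffer;
 *      struct ffa_memory_region_constituent constituents[] = {
 *              {.address = page_address, .page_count = 1},
 *      };
 *      uint32_t total_length;
 *      uint32_t fragment_length;
 *      uint32_t remaining = ffa_memory_region_init_single_receiver(
 *              tx, HF_MAILBOX_SIZE, sender_id, receiver_id, constituents, 1,
 *              0, 0, FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *              FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 *              FFA_MEMORY_INNER_SHAREABLE, &total_length, &fragment_length);
 *
 * A non-zero `remaining` means the rest of the constituents have to be sent
 * in follow-up fragments (see `ffa_memory_fragment_init` below).
 */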

uint32_t ffa_memory_region_init(
        struct ffa_memory_region *memory_region, size_t memory_region_max_size,
        ffa_vm_id_t sender, struct ffa_memory_access receivers[],
        uint32_t receiver_count,
        const struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, uint32_t tag,
        ffa_memory_region_flags_t flags, enum ffa_memory_type type,
        enum ffa_memory_cacheability cacheability,
        enum ffa_memory_shareability shareability, uint32_t *total_length,
        uint32_t *fragment_length)
{
        ffa_memory_attributes_t attributes = 0;

        /* Set memory region's page attributes. */
        ffa_set_memory_type_attr(&attributes, type);
        ffa_set_memory_cacheability_attr(&attributes, cacheability);
        ffa_set_memory_shareability_attr(&attributes, shareability);

        ffa_memory_region_init_header(memory_region, sender, attributes, flags,
                                      0, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
        memcpy(memory_region->receivers, receivers,
               receiver_count * sizeof(struct ffa_memory_access));
#else
        memcpy_s(memory_region->receivers,
                 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
                 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif

        return ffa_memory_region_init_constituents(
                memory_region, memory_region_max_size, constituents,
                constituent_count, total_length, fragment_length);
}

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init_single_receiver(
        struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
        ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
        ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
        enum ffa_instruction_access instruction_access,
        enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
        enum ffa_memory_shareability shareability)
{
        struct ffa_memory_access receiver_permissions;

        ffa_memory_access_init_permissions(&receiver_permissions, receiver,
                                           data_access, instruction_access, 0);

        return ffa_memory_retrieve_request_init(
                memory_region, handle, sender, &receiver_permissions, 1, tag,
                flags, type, cacheability, shareability);
}

uint32_t ffa_memory_retrieve_request_init(
        struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
        ffa_vm_id_t sender, struct ffa_memory_access receivers[],
        uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
        enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
        enum ffa_memory_shareability shareability)
{
        ffa_memory_attributes_t attributes = 0;
        uint32_t i;

        /* Set memory region's page attributes. */
        ffa_set_memory_type_attr(&attributes, type);
        ffa_set_memory_cacheability_attr(&attributes, cacheability);
        ffa_set_memory_shareability_attr(&attributes, shareability);

        ffa_memory_region_init_header(memory_region, sender, attributes, flags,
                                      handle, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
        memcpy(memory_region->receivers, receivers,
               receiver_count * sizeof(struct ffa_memory_access));
#else
        memcpy_s(memory_region->receivers,
                 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
                 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif
        /* Zero the composite offset for all receivers */
        for (i = 0U; i < receiver_count; i++) {
                memory_region->receivers[i].composite_memory_region_offset = 0U;
        }

        return sizeof(struct ffa_memory_region) +
               memory_region->receiver_count * sizeof(struct ffa_memory_access);
}
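
/*
 * Illustrative sketch (not part of the original source): a borrower that was
 * given `handle` out of band might build its retrieve request in the TX
 * buffer like this before invoking `FFA_MEM_RETRIEVE_REQ`. The IDs and
 * attribute choices are placeholders.
 *
 *      uint32_t msg_size = ffa_memory_retrieve_request_init_single_receiver(
 *              (struct ffa_memory_region *)tx_buffer, handle, sender_id,
 *              own_id, 0, 0, FFA_DATA_ACCESS_RW,
 *              FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED, FFA_MEMORY_NORMAL_MEM,
 *              FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE);
 */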

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
        struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
        ffa_vm_id_t sender)
{
        memory_region->sender = sender;
        memory_region->attributes = 0;
        memory_region->reserved_0 = 0;
        memory_region->flags = 0;
        memory_region->reserved_1 = 0;
        memory_region->handle = handle;
        memory_region->tag = 0;
        memory_region->receiver_count = 0;

        return sizeof(struct ffa_memory_region);
}

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit in
 * the first fragment.
 */
bool ffa_retrieved_memory_region_init(
        struct ffa_memory_region *response, size_t response_max_size,
        ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
        ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
        ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
        uint32_t page_count, uint32_t total_constituent_count,
        const struct ffa_memory_region_constituent constituents[],
        uint32_t fragment_constituent_count, uint32_t *total_length,
        uint32_t *fragment_length)
{
        struct ffa_composite_memory_region *composite_memory_region;
        uint32_t i;
        uint32_t constituents_offset;

        ffa_memory_region_init_header(response, sender, attributes, flags,
                                      handle, 0, 1);
        /*
         * Initialised here because memory retrieve responses currently expect
         * a single borrower to be specified.
         */
        ffa_memory_access_init_permissions(&response->receivers[0], receiver, 0,
                                           0, flags);
        response->receivers[0].receiver_permissions.permissions = permissions;

        /*
         * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
         * ffa_memory_access)` must both be multiples of 16 (as verified by the
         * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
         * calculate here is aligned to a 64-bit boundary and so 64-bit values
         * can be copied without alignment faults.
         */
        response->receivers[0].composite_memory_region_offset =
                sizeof(struct ffa_memory_region) +
                response->receiver_count * sizeof(struct ffa_memory_access);

        composite_memory_region = ffa_memory_region_get_composite(response, 0);
        composite_memory_region->page_count = page_count;
        composite_memory_region->constituent_count = total_constituent_count;
        composite_memory_region->reserved_0 = 0;

        constituents_offset =
                response->receivers[0].composite_memory_region_offset +
                sizeof(struct ffa_composite_memory_region);
        if (constituents_offset +
                    fragment_constituent_count *
                            sizeof(struct ffa_memory_region_constituent) >
            response_max_size) {
                return false;
        }

        for (i = 0; i < fragment_constituent_count; ++i) {
                composite_memory_region->constituents[i] = constituents[i];
        }

        if (total_length != NULL) {
                *total_length =
                        constituents_offset +
                        composite_memory_region->constituent_count *
                                sizeof(struct ffa_memory_region_constituent);
        }
        if (fragment_length != NULL) {
                *fragment_length =
                        constituents_offset +
                        fragment_constituent_count *
                                sizeof(struct ffa_memory_region_constituent);
        }

        return true;
}

uint32_t ffa_memory_fragment_init(
        struct ffa_memory_region_constituent *fragment,
        size_t fragment_max_size,
        const struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, uint32_t *fragment_length)
{
        uint32_t fragment_max_constituents =
                fragment_max_size /
                sizeof(struct ffa_memory_region_constituent);
        uint32_t count_to_copy = constituent_count;
        uint32_t i;

        if (count_to_copy > fragment_max_constituents) {
                count_to_copy = fragment_max_constituents;
        }

        for (i = 0; i < count_to_copy; ++i) {
                ffa_copy_memory_region_constituents(&fragment[i],
                                                    &constituents[i]);
        }

        if (fragment_length != NULL) {
                *fragment_length = count_to_copy *
                                   sizeof(struct ffa_memory_region_constituent);
        }

        return constituent_count - count_to_copy;
}
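
/*
 * Illustrative sketch (not part of the original source): when the first call
 * to `ffa_memory_region_init` left `remaining` constituents that did not fit,
 * they can be packed into further fragments and sent one at a time, e.g. via
 * the FFA_MEM_FRAG_TX ABI. The fragment buffer and counters are placeholders.
 *
 *      uint32_t sent = constituent_count - remaining;
 *
 *      while (remaining > 0) {
 *              uint32_t fragment_length;
 *
 *              remaining = ffa_memory_fragment_init(
 *                      fragment_buffer, HF_MAILBOX_SIZE, constituents + sent,
 *                      remaining, &fragment_length);
 *              sent = constituent_count - remaining;
 *              (send this fragment of `fragment_length` bytes with
 *               FFA_MEM_FRAG_TX before reusing the buffer)
 *      }
 */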

static void ffa_composite_memory_region_init(
        struct ffa_composite_memory_region *composite, uint64_t address,
        uint32_t page_count)
{
        composite->page_count = page_count;
        composite->constituent_count = 1;
        composite->reserved_0 = 0;

        composite->constituents[0].page_count = page_count;
        composite->constituents[0].address = address;
        composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used for an
 * `FFA_RXTX_MAP` forwarding.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
        struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
        uint64_t rx_address, uint64_t tx_address)
{
        desc->endpoint_id = endpoint_id;
        desc->reserved = 0;
        desc->pad = 0;

        /*
         * RX's composite descriptor is allocated after the endpoint descriptor.
         * `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is guaranteed to be
         * 16-byte aligned.
         */
        desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

        ffa_composite_memory_region_init(
                (struct ffa_composite_memory_region *)((uintptr_t)desc +
                                                       desc->rx_offset),
                rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

        /*
         * TX's composite descriptor is allocated after the RX descriptor.
         * `sizeof(struct ffa_composite_memory_region)` and
         * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
         * 16-byte aligned in ffa_memory.c.
         */
        desc->tx_offset = desc->rx_offset +
                          sizeof(struct ffa_composite_memory_region) +
                          sizeof(struct ffa_memory_region_constituent);

        ffa_composite_memory_region_init(
                (struct ffa_composite_memory_region *)((uintptr_t)desc +
                                                       desc->tx_offset),
                tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
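
/*
 * Illustrative note (not part of the original source): with the offsets
 * computed above, the descriptor built by this function is laid out
 * contiguously as
 *
 *      [struct ffa_endpoint_rx_tx_descriptor]
 *      [RX composite + 1 constituent]   <- desc->rx_offset
 *      [TX composite + 1 constituent]   <- desc->tx_offset
 *
 * so the caller's buffer must be large enough to hold all three parts.
 */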