/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_CONFIG_H
#define _LINUX_VIRTIO_CONFIG_H

#include <linux/err.h>
#include <linux/bug.h>
#include <linux/virtio.h>
#include <linux/virtio_byteorder.h>
#include <linux/compiler_types.h>
#include <uapi/linux/virtio_config.h>

struct irq_affinity;

struct virtio_shm_region {
        u64 addr;
        u64 len;
};

/**
 * virtio_config_ops - operations for configuring a virtio device
 * Note: Do not assume that a transport implements all of the operations
 *       getting/setting a value as a simple read/write! Generally speaking,
 *       any of @get/@set, @get_status/@set_status, or @get_features/
 *       @finalize_features are NOT safe to be called from an atomic
 *       context.
 * @enable_cbs: enable the callbacks
 *      vdev: the virtio_device
 * @get: read the value of a configuration field
 *      vdev: the virtio_device
 *      offset: the offset of the configuration field
 *      buf: the buffer to write the field value into.
 *      len: the length of the buffer
 * @set: write the value of a configuration field
 *      vdev: the virtio_device
 *      offset: the offset of the configuration field
 *      buf: the buffer to read the field value from.
 *      len: the length of the buffer
 * @generation: config generation counter (optional)
 *      vdev: the virtio_device
 *      Returns the config generation counter
 * @get_status: read the status byte
 *      vdev: the virtio_device
 *      Returns the status byte
 * @set_status: write the status byte
 *      vdev: the virtio_device
 *      status: the new status byte
 * @reset: reset the device
 *      vdev: the virtio device
 *      After this, status and feature negotiation must be done again
 *      Device must not be reset from its vq/config callbacks, or in
 *      parallel with being added/removed.
 * @find_vqs: find virtqueues and instantiate them.
 *      vdev: the virtio_device
 *      nvqs: the number of virtqueues to find
 *      vqs: on success, includes new virtqueues
 *      callbacks: array of callbacks, for each virtqueue
 *              include a NULL entry for vqs that do not need a callback
 *      names: array of virtqueue names (mainly for debugging)
 *              include a NULL entry for vqs unused by driver
 *      Returns 0 on success or error status
 * @del_vqs: free virtqueues found by find_vqs().
 * @get_features: get the array of feature bits for this device.
 *      vdev: the virtio_device
 *      Returns the first 64 feature bits (all we currently need).
 * @finalize_features: confirm what device features we'll be using.
 *      vdev: the virtio_device
 *      This gives the final feature bits for the device: it can change
 *      the dev->features bits if it wants.
 *      Returns 0 on success or error status
 * @bus_name: return the bus name associated with the device (optional)
 *      vdev: the virtio_device
 *      This returns a pointer to the bus name a la pci_name from which
 *      the caller can then copy.
 * @set_vq_affinity: set the affinity for a virtqueue (optional).
 * @get_vq_affinity: get the affinity for a virtqueue (optional).
 * @get_shm_region: get a shared memory region based on the index.
 */
typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops {
        void (*enable_cbs)(struct virtio_device *vdev);
        void (*get)(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len);
        void (*set)(struct virtio_device *vdev, unsigned offset,
                    const void *buf, unsigned len);
        u32 (*generation)(struct virtio_device *vdev);
        u8 (*get_status)(struct virtio_device *vdev);
        void (*set_status)(struct virtio_device *vdev, u8 status);
        void (*reset)(struct virtio_device *vdev);
        int (*find_vqs)(struct virtio_device *, unsigned nvqs,
                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc);
        void (*del_vqs)(struct virtio_device *);
        u64 (*get_features)(struct virtio_device *vdev);
        int (*finalize_features)(struct virtio_device *vdev);
        const char *(*bus_name)(struct virtio_device *vdev);
        int (*set_vq_affinity)(struct virtqueue *vq,
                               const struct cpumask *cpu_mask);
        const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
                                                 int index);
        bool (*get_shm_region)(struct virtio_device *vdev,
                               struct virtio_shm_region *region, u8 id);
};
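
/*
 * Illustrative sketch (not part of this header): a transport typically
 * fills in a static ops table and points vdev->config at it. All of the
 * my_mmio_* helper names below are hypothetical.
 *
 *      static const struct virtio_config_ops my_mmio_config_ops = {
 *              .get                    = my_mmio_get,
 *              .set                    = my_mmio_set,
 *              .generation             = my_mmio_generation,
 *              .get_status             = my_mmio_get_status,
 *              .set_status             = my_mmio_set_status,
 *              .reset                  = my_mmio_reset,
 *              .find_vqs               = my_mmio_find_vqs,
 *              .del_vqs                = my_mmio_del_vqs,
 *              .get_features           = my_mmio_get_features,
 *              .finalize_features      = my_mmio_finalize_features,
 *              .bus_name               = my_mmio_bus_name,
 *      };
 */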

/* If driver didn't advertise the feature, it will never appear. */
void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
                                         unsigned int fbit);

/**
 * __virtio_test_bit - helper to test feature bits. For use by transports.
 *                     Devices should normally use virtio_has_feature,
 *                     which includes more checks.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool __virtio_test_bit(const struct virtio_device *vdev,
                                     unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        return vdev->features & BIT_ULL(fbit);
}

/**
 * __virtio_set_bit - helper to set feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_set_bit(struct virtio_device *vdev,
                                    unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        vdev->features |= BIT_ULL(fbit);
}

/**
 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline void __virtio_clear_bit(struct virtio_device *vdev,
                                      unsigned int fbit)
{
        /* Did you forget to fix assumptions on max features? */
        if (__builtin_constant_p(fbit))
                BUILD_BUG_ON(fbit >= 64);
        else
                BUG_ON(fbit >= 64);

        vdev->features &= ~BIT_ULL(fbit);
}
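
/*
 * Illustrative sketch (not part of this header): a transport's
 * finalize_features implementation might use __virtio_clear_bit() to mask
 * out a negotiated bit it cannot support before the core writes the
 * features back to the device. The my_pci_finalize_features name is
 * hypothetical; VIRTIO_F_RING_PACKED is a real feature bit.
 *
 *      static int my_pci_finalize_features(struct virtio_device *vdev)
 *      {
 *              __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
 *              return 0;
 *      }
 */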

/**
 * virtio_has_feature - helper to determine if this device has this feature.
 * @vdev: the device
 * @fbit: the feature bit
 */
static inline bool virtio_has_feature(const struct virtio_device *vdev,
                                      unsigned int fbit)
{
        if (fbit < VIRTIO_TRANSPORT_F_START)
                virtio_check_driver_offered_feature(vdev, fbit);

        return __virtio_test_bit(vdev, fbit);
}
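
/*
 * Illustrative sketch (not part of this header): a driver gating use of an
 * optional device feature. VIRTIO_NET_F_MTU and struct virtio_net_config
 * are real virtio-net definitions; the surrounding code and the mtu
 * variable are hypothetical.
 *
 *      u16 mtu = 0;
 *
 *      if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU))
 *              mtu = virtio_cread16(vdev,
 *                                   offsetof(struct virtio_net_config, mtu));
 */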

/**
 * virtio_has_dma_quirk - determine whether this device has the DMA quirk
 * @vdev: the device
 */
static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev)
{
        /*
         * Note the reverse polarity of the quirk feature (compared to most
         * other features): this is for compatibility with legacy systems.
         */
        return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM);
}

static inline
struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                                        vq_callback_t *c, const char *n)
{
        vq_callback_t *callbacks[] = { c };
        const char *names[] = { n };
        struct virtqueue *vq;
        int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
                                         NULL);
        if (err < 0)
                return ERR_PTR(err);
        return vq;
}
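
/*
 * Illustrative sketch (not part of this header): a single-queue driver's
 * probe path. The my_driver_probe and my_vq_done names and the "requests"
 * queue name are hypothetical.
 *
 *      static int my_driver_probe(struct virtio_device *vdev)
 *      {
 *              struct virtqueue *vq;
 *
 *              vq = virtio_find_single_vq(vdev, my_vq_done, "requests");
 *              if (IS_ERR(vq))
 *                      return PTR_ERR(vq);
 *              ...
 *              return 0;
 *      }
 */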

static inline
int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                    struct virtqueue *vqs[], vq_callback_t *callbacks[],
                    const char * const names[],
                    struct irq_affinity *desc)
{
        return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}

static inline
int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
                        struct virtqueue *vqs[], vq_callback_t *callbacks[],
                        const char * const names[], const bool *ctx,
                        struct irq_affinity *desc)
{
        return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
                                      desc);
}
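
/*
 * Illustrative sketch (not part of this header): finding a pair of
 * virtqueues. The my_rx_done/my_tx_done callbacks and the queue names are
 * hypothetical; a NULL callback entry is allowed for queues that do not
 * need one.
 *
 *      struct virtqueue *vqs[2];
 *      vq_callback_t *callbacks[] = { my_rx_done, my_tx_done };
 *      const char * const names[] = { "rx", "tx" };
 *      int err;
 *
 *      err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 *      if (err)
 *              return err;
 */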

/**
 * virtio_device_ready - enable vq use in probe function
 * @dev: the device
 *
 * Driver must call this to use vqs in the probe function.
 *
 * Note: vqs are enabled automatically after probe returns.
 */
static inline
void virtio_device_ready(struct virtio_device *dev)
{
        unsigned status = dev->config->get_status(dev);

        if (dev->config->enable_cbs)
                dev->config->enable_cbs(dev);

        BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
}
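
/*
 * Illustrative sketch (not part of this header): a probe function that
 * needs to kick a virtqueue before returning typically marks the device
 * ready first; otherwise the core does so after probe succeeds. The
 * my_driver_* helpers are hypothetical.
 *
 *      static int my_driver_probe(struct virtio_device *vdev)
 *      {
 *              int err = my_driver_setup_vqs(vdev);
 *
 *              if (err)
 *                      return err;
 *              virtio_device_ready(vdev);
 *              my_driver_fill_rx(vdev);
 *              return 0;
 *      }
 */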

static inline
const char *virtio_bus_name(struct virtio_device *vdev)
{
        if (!vdev->config->bus_name)
                return "virtio";
        return vdev->config->bus_name(vdev);
}

/**
 * virtqueue_set_affinity - set the affinity for a virtqueue
 * @vq: the virtqueue
 * @cpu_mask: the cpu mask
 *
 * Note that this function is best-effort: the affinity hint may not be set
 * due to config support, irq type and sharing.
 */
static inline
int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
        struct virtio_device *vdev = vq->vdev;
        if (vdev->config->set_vq_affinity)
                return vdev->config->set_vq_affinity(vq, cpu_mask);
        return 0;
}
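
/*
 * Illustrative sketch (not part of this header): spreading per-queue
 * interrupt affinity hints across CPUs, one queue per online CPU. The
 * vqs[] array and nvqs are hypothetical driver state.
 *
 *      int i = 0, cpu;
 *
 *      for_each_online_cpu(cpu) {
 *              if (i == nvqs)
 *                      break;
 *              virtqueue_set_affinity(vqs[i++], cpumask_of(cpu));
 *      }
 */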

static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
                           struct virtio_shm_region *region, u8 id)
{
        if (!vdev->config->get_shm_region)
                return false;
        return vdev->config->get_shm_region(vdev, region, id);
}
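
/*
 * Illustrative sketch (not part of this header): looking up a shared
 * memory region by id, as virtiofs does for its DAX window. The
 * MY_SHM_REGION_ID constant is hypothetical.
 *
 *      struct virtio_shm_region shm;
 *
 *      if (!virtio_get_shm_region(vdev, &shm, MY_SHM_REGION_ID))
 *              return -ENXIO;
 *      dev_info(&vdev->dev, "shm region: addr 0x%llx len 0x%llx\n",
 *               shm.addr, shm.len);
 */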

static inline bool virtio_is_little_endian(struct virtio_device *vdev)
{
        return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
                virtio_legacy_is_little_endian();
}

/* Memory accessors */
static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
{
        return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
{
        return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
}

static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
{
        return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
{
        return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
}

static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
{
        return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
}

static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
{
        return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
}
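
/*
 * Illustrative sketch (not part of this header): converting a __virtio32
 * field read from a shared structure. For a modern (VIRTIO_F_VERSION_1)
 * device these fields are little-endian; for a legacy device they use
 * guest endianness. The hdr and pad variables are hypothetical.
 *
 *      u32 len = virtio32_to_cpu(vdev, hdr->len);
 *
 *      hdr->len = cpu_to_virtio32(vdev, len + pad);
 */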

#define virtio_to_cpu(vdev, x) \
        _Generic((x), \
                __u8: (x), \
                __virtio16: virtio16_to_cpu((vdev), (x)), \
                __virtio32: virtio32_to_cpu((vdev), (x)), \
                __virtio64: virtio64_to_cpu((vdev), (x)) \
                )

#define cpu_to_virtio(vdev, x, m) \
        _Generic((m), \
                __u8: (x), \
                __virtio16: cpu_to_virtio16((vdev), (x)), \
                __virtio32: cpu_to_virtio32((vdev), (x)), \
                __virtio64: cpu_to_virtio64((vdev), (x)) \
                )

#define __virtio_native_type(structname, member) \
        typeof(virtio_to_cpu(NULL, ((structname*)0)->member))

/* Config space accessors. */
#define virtio_cread(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cread_v; \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \
 \
                switch (sizeof(virtio_cread_v)) { \
                case 1: \
                case 2: \
                case 4: \
                        vdev->config->get((vdev), \
                                          offsetof(structname, member), \
                                          &virtio_cread_v, \
                                          sizeof(virtio_cread_v)); \
                        break; \
                default: \
                        __virtio_cread_many((vdev), \
                                            offsetof(structname, member), \
                                            &virtio_cread_v, \
                                            1, \
                                            sizeof(virtio_cread_v)); \
                        break; \
                } \
                *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \
        } while(0)

/* Config space accessors. */
#define virtio_cwrite(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cwrite_v = \
                        cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \
 \
                vdev->config->set((vdev), offsetof(structname, member), \
                                  &virtio_cwrite_v, \
                                  sizeof(virtio_cwrite_v)); \
        } while(0)
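
/*
 * Illustrative sketch (not part of this header): typed config space access
 * against a real device layout. struct virtio_blk_config and its capacity
 * and wce fields come from <uapi/linux/virtio_blk.h>; the local variables
 * are hypothetical, and writing wce assumes VIRTIO_BLK_F_CONFIG_WCE was
 * negotiated.
 *
 *      u64 capacity;
 *      u8 wb = 1;
 *
 *      virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 *      virtio_cwrite(vdev, struct virtio_blk_config, wce, &wb);
 */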

/*
 * Nothing virtio-specific about these, but let's worry about generalizing
 * these later.
 */
#define virtio_le_to_cpu(x) \
        _Generic((x), \
                __u8: (u8)(x), \
                __le16: (u16)le16_to_cpu(x), \
                __le32: (u32)le32_to_cpu(x), \
                __le64: (u64)le64_to_cpu(x) \
                )

#define virtio_cpu_to_le(x, m) \
        _Generic((m), \
                __u8: (x), \
                __le16: cpu_to_le16(x), \
                __le32: cpu_to_le32(x), \
                __le64: cpu_to_le64(x) \
                )

/* LE (e.g. modern) Config space accessors. */
#define virtio_cread_le(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cread_v; \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \
 \
                switch (sizeof(virtio_cread_v)) { \
                case 1: \
                case 2: \
                case 4: \
                        vdev->config->get((vdev), \
                                          offsetof(structname, member), \
                                          &virtio_cread_v, \
                                          sizeof(virtio_cread_v)); \
                        break; \
                default: \
                        __virtio_cread_many((vdev), \
                                            offsetof(structname, member), \
                                            &virtio_cread_v, \
                                            1, \
                                            sizeof(virtio_cread_v)); \
                        break; \
                } \
                *(ptr) = virtio_le_to_cpu(virtio_cread_v); \
        } while(0)

#define virtio_cwrite_le(vdev, structname, member, ptr) \
        do { \
                typeof(((structname*)0)->member) virtio_cwrite_v = \
                        virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \
 \
                might_sleep(); \
                /* Sanity check: must match the member's type */ \
                typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \
 \
                vdev->config->set((vdev), offsetof(structname, member), \
                                  &virtio_cwrite_v, \
                                  sizeof(virtio_cwrite_v)); \
        } while(0)
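
/*
 * Illustrative sketch (not part of this header): the _le variants are for
 * config layouts declared with plain __le types (modern, virtio 1.0+ only
 * devices such as virtio-fs). struct virtio_fs_config and its
 * num_request_queues field come from <uapi/linux/virtio_fs.h>; the local
 * variable is hypothetical.
 *
 *      u32 num_request_queues;
 *
 *      virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
 *                      &num_request_queues);
 */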

/* Read @count fields, @bytes each. */
static inline void __virtio_cread_many(struct virtio_device *vdev,
                                       unsigned int offset,
                                       void *buf, size_t count, size_t bytes)
{
        u32 old, gen = vdev->config->generation ?
                vdev->config->generation(vdev) : 0;
        int i;

        might_sleep();
        do {
                old = gen;

                for (i = 0; i < count; i++)
                        vdev->config->get(vdev, offset + bytes * i,
                                          buf + i * bytes, bytes);

                gen = vdev->config->generation ?
                        vdev->config->generation(vdev) : 0;
        } while (gen != old);
}

static inline void virtio_cread_bytes(struct virtio_device *vdev,
                                      unsigned int offset,
                                      void *buf, size_t len)
{
        __virtio_cread_many(vdev, offset, buf, len, 1);
}

static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
{
        u8 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return ret;
}

static inline void virtio_cwrite8(struct virtio_device *vdev,
                                  unsigned int offset, u8 val)
{
        might_sleep();
        vdev->config->set(vdev, offset, &val, sizeof(val));
}

static inline u16 virtio_cread16(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio16 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return virtio16_to_cpu(vdev, ret);
}

static inline void virtio_cwrite16(struct virtio_device *vdev,
                                   unsigned int offset, u16 val)
{
        __virtio16 v;

        might_sleep();
        v = cpu_to_virtio16(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u32 virtio_cread32(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio32 ret;

        might_sleep();
        vdev->config->get(vdev, offset, &ret, sizeof(ret));
        return virtio32_to_cpu(vdev, ret);
}

static inline void virtio_cwrite32(struct virtio_device *vdev,
                                   unsigned int offset, u32 val)
{
        __virtio32 v;

        might_sleep();
        v = cpu_to_virtio32(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}

static inline u64 virtio_cread64(struct virtio_device *vdev,
                                 unsigned int offset)
{
        __virtio64 ret;

        __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
        return virtio64_to_cpu(vdev, ret);
}

static inline void virtio_cwrite64(struct virtio_device *vdev,
                                   unsigned int offset, u64 val)
{
        __virtio64 v;

        might_sleep();
        v = cpu_to_virtio64(vdev, val);
        vdev->config->set(vdev, offset, &v, sizeof(v));
}
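
/*
 * Illustrative sketch (not part of this header): the fixed-size and byte
 * accessors above take a raw byte offset instead of a struct/member pair.
 * Both fields below are real virtio-net config members; the local
 * variables are hypothetical.
 *
 *      u8 mac[ETH_ALEN];
 *      u16 status;
 *
 *      virtio_cread_bytes(vdev, offsetof(struct virtio_net_config, mac),
 *                         mac, ETH_ALEN);
 *      status = virtio_cread16(vdev,
 *                              offsetof(struct virtio_net_config, status));
 */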

/* Conditional config space accessors. */
#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
        ({ \
                int _r = 0; \
                if (!virtio_has_feature(vdev, fbit)) \
                        _r = -ENOENT; \
                else \
                        virtio_cread((vdev), structname, member, ptr); \
                _r; \
        })

/* Conditional config space accessors. */
#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \
        ({ \
                int _r = 0; \
                if (!virtio_has_feature(vdev, fbit)) \
                        _r = -ENOENT; \
                else \
                        virtio_cread_le((vdev), structname, member, ptr); \
                _r; \
        })
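
/*
 * Illustrative sketch (not part of this header): reading a config field
 * only when the corresponding feature was negotiated, with a fallback
 * otherwise. VIRTIO_BLK_F_BLK_SIZE and struct virtio_blk_config are real
 * virtio-blk definitions; the local variable is hypothetical.
 *
 *      u32 blk_size;
 *
 *      if (virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
 *                               struct virtio_blk_config, blk_size,
 *                               &blk_size) < 0)
 *              blk_size = 512;
 */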

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
int arch_has_restricted_virtio_memory_access(void);
#else
static inline int arch_has_restricted_virtio_memory_access(void)
{
        return 0;
}
#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */

#endif /* _LINUX_VIRTIO_CONFIG_H */