1 /*
2 * Copyright (c) 2014-2015 Travis Geiselbrecht
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8 #include <dev/virtio/gpu.h>
9
10 #include <stdlib.h>
11 #include <lk/debug.h>
12 #include <assert.h>
13 #include <lk/trace.h>
14 #include <lk/compiler.h>
15 #include <lk/list.h>
16 #include <lk/err.h>
17 #include <string.h>
18 #include <kernel/thread.h>
19 #include <kernel/event.h>
20 #include <kernel/mutex.h>
21 #include <dev/display.h>
22
23 #if WITH_KERNEL_VM
24 #include <kernel/vm.h>
25 #endif
26
27 #include "virtio_gpu.h"
28
29 #define LOCAL_TRACE 0
30
31 static enum handler_return virtio_gpu_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e);
32 static enum handler_return virtio_gpu_config_change_callback(struct virtio_device *dev);
33 static int virtio_gpu_flush_thread(void *arg);
34
/* per-device soft state for one virtio-gpu instance */
struct virtio_gpu_dev {
    struct virtio_device *dev;  /* underlying virtio device */

    mutex_t lock;       /* serializes command/response transactions on ring 0 */
    event_t io_event;   /* signaled from the irq callback when a request completes */

    /* single staging buffer shared by all commands: the command is placed at
     * the start and the response directly after it (see send_command_response) */
    void *gpu_request;
    paddr_t gpu_request_phys;

    /* a saved copy of the display */
    struct virtio_gpu_display_one pmode;
    int pmode_id;   /* scanout index of the saved pmode, -1 until one is found */

    /* resource id that is set as scanout */
    uint32_t display_resource_id;

    /* next resource id */
    uint32_t next_resource_id;

    /* wakes the flush thread to push the framebuffer to the host */
    event_t flush_event;

    /* framebuffer */
    void *fb;
};
59
60 static struct virtio_gpu_dev *the_gdev;
61
send_command_response(struct virtio_gpu_dev * gdev,const void * cmd,size_t cmd_len,void ** _res,size_t res_len)62 static status_t send_command_response(struct virtio_gpu_dev *gdev, const void *cmd, size_t cmd_len, void **_res, size_t res_len) {
63 DEBUG_ASSERT(gdev);
64 DEBUG_ASSERT(cmd);
65 DEBUG_ASSERT(_res);
66 DEBUG_ASSERT(cmd_len + res_len < PAGE_SIZE);
67
68 LTRACEF("gdev %p, cmd %p, cmd_len %zu, res %p, res_len %zu\n", gdev, cmd, cmd_len, _res, res_len);
69
70 uint16_t i;
71 struct vring_desc *desc = virtio_alloc_desc_chain(gdev->dev, 0, 2, &i);
72 DEBUG_ASSERT(desc);
73
74 memcpy(gdev->gpu_request, cmd, cmd_len);
75
76 desc->addr = gdev->gpu_request_phys;
77 desc->len = cmd_len;
78 desc->flags |= VRING_DESC_F_NEXT;
79
80 /* set the second descriptor to the response with the write bit set */
81 desc = virtio_desc_index_to_desc(gdev->dev, 0, desc->next);
82 DEBUG_ASSERT(desc);
83
84 void *res = (void *)((uint8_t *)gdev->gpu_request + cmd_len);
85 *_res = res;
86 paddr_t res_phys = gdev->gpu_request_phys + cmd_len;
87 memset(res, 0, res_len);
88
89 desc->addr = res_phys;
90 desc->len = res_len;
91 desc->flags = VRING_DESC_F_WRITE;
92
93 /* submit the transfer */
94 virtio_submit_chain(gdev->dev, 0, i);
95
96 /* kick it off */
97 virtio_kick(gdev->dev, 0);
98
99 /* wait for result */
100 event_wait(&gdev->io_event);
101
102 return NO_ERROR;
103 }
104
get_display_info(struct virtio_gpu_dev * gdev)105 static status_t get_display_info(struct virtio_gpu_dev *gdev) {
106 status_t err;
107
108 LTRACEF("gdev %p\n", gdev);
109
110 DEBUG_ASSERT(gdev);
111
112 /* grab a lock to keep this single message at a time */
113 mutex_acquire(&gdev->lock);
114
115 /* construct the get display info message */
116 struct virtio_gpu_ctrl_hdr req;
117 memset(&req, 0, sizeof(req));
118 req.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;
119
120 /* send the message and get a response */
121 struct virtio_gpu_resp_display_info *info;
122 err = send_command_response(gdev, &req, sizeof(req), (void **)&info, sizeof(*info));
123 DEBUG_ASSERT(err == NO_ERROR);
124 if (err < NO_ERROR) {
125 mutex_release(&gdev->lock);
126 return ERR_NOT_FOUND;
127 }
128
129 /* we got response */
130 if (info->hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
131 mutex_release(&gdev->lock);
132 return ERR_NOT_FOUND;
133 }
134
135 LTRACEF("response:\n");
136 for (uint i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
137 if (info->pmodes[i].enabled) {
138 LTRACEF("%u: x %u y %u w %u h %u flags 0x%x\n", i,
139 info->pmodes[i].r.x, info->pmodes[i].r.y, info->pmodes[i].r.width, info->pmodes[i].r.height,
140 info->pmodes[i].flags);
141 if (gdev->pmode_id < 0) {
142 /* save the first valid pmode we see */
143 memcpy(&gdev->pmode, &info->pmodes[i], sizeof(gdev->pmode));
144 gdev->pmode_id = i;
145 }
146 }
147 }
148
149 /* release the lock */
150 mutex_release(&gdev->lock);
151
152 return NO_ERROR;
153 }
154
allocate_2d_resource(struct virtio_gpu_dev * gdev,uint32_t * resource_id,uint32_t width,uint32_t height)155 static status_t allocate_2d_resource(struct virtio_gpu_dev *gdev, uint32_t *resource_id, uint32_t width, uint32_t height) {
156 status_t err;
157
158 LTRACEF("gdev %p\n", gdev);
159
160 DEBUG_ASSERT(gdev);
161 DEBUG_ASSERT(resource_id);
162
163 /* grab a lock to keep this single message at a time */
164 mutex_acquire(&gdev->lock);
165
166 /* construct the request */
167 struct virtio_gpu_resource_create_2d req;
168 memset(&req, 0, sizeof(req));
169
170 req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
171 req.resource_id = gdev->next_resource_id++;
172 *resource_id = req.resource_id;
173 req.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
174 req.width = width;
175 req.height = height;
176
177 /* send the command and get a response */
178 struct virtio_gpu_ctrl_hdr *res;
179 err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
180 DEBUG_ASSERT(err == NO_ERROR);
181
182 /* see if we got a valid response */
183 LTRACEF("response type 0x%x\n", res->type);
184 err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
185
186 /* release the lock */
187 mutex_release(&gdev->lock);
188
189 return err;
190 }
191
attach_backing(struct virtio_gpu_dev * gdev,uint32_t resource_id,void * ptr,size_t buf_len)192 static status_t attach_backing(struct virtio_gpu_dev *gdev, uint32_t resource_id, void *ptr, size_t buf_len) {
193 status_t err;
194
195 LTRACEF("gdev %p, resource_id %u, ptr %p, buf_len %zu\n", gdev, resource_id, ptr, buf_len);
196
197 DEBUG_ASSERT(gdev);
198 DEBUG_ASSERT(ptr);
199
200 /* grab a lock to keep this single message at a time */
201 mutex_acquire(&gdev->lock);
202
203 /* construct the request */
204 struct {
205 struct virtio_gpu_resource_attach_backing req;
206 struct virtio_gpu_mem_entry mem;
207 } req;
208 memset(&req, 0, sizeof(req));
209
210 req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
211 req.req.resource_id = resource_id;
212 req.req.nr_entries = 1;
213
214 paddr_t pa;
215 #if WITH_KERNEL_VM
216 pa = vaddr_to_paddr(ptr);
217 #else
218 pa = (paddr_t)ptr;
219 #endif
220 req.mem.addr = pa;
221 req.mem.length = buf_len;
222
223 /* send the command and get a response */
224 struct virtio_gpu_ctrl_hdr *res;
225 err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
226 DEBUG_ASSERT(err == NO_ERROR);
227
228 /* see if we got a valid response */
229 LTRACEF("response type 0x%x\n", res->type);
230 err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
231
232 /* release the lock */
233 mutex_release(&gdev->lock);
234
235 return err;
236 }
237
set_scanout(struct virtio_gpu_dev * gdev,uint32_t scanout_id,uint32_t resource_id,uint32_t width,uint32_t height)238 static status_t set_scanout(struct virtio_gpu_dev *gdev, uint32_t scanout_id, uint32_t resource_id, uint32_t width, uint32_t height) {
239 status_t err;
240
241 LTRACEF("gdev %p, scanout_id %u, resource_id %u, width %u, height %u\n", gdev, scanout_id, resource_id, width, height);
242
243 /* grab a lock to keep this single message at a time */
244 mutex_acquire(&gdev->lock);
245
246 /* construct the request */
247 struct virtio_gpu_set_scanout req;
248 memset(&req, 0, sizeof(req));
249
250 req.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
251 req.r.x = req.r.y = 0;
252 req.r.width = width;
253 req.r.height = height;
254 req.scanout_id = scanout_id;
255 req.resource_id = resource_id;
256
257 /* send the command and get a response */
258 struct virtio_gpu_ctrl_hdr *res;
259 err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
260 DEBUG_ASSERT(err == NO_ERROR);
261
262 /* see if we got a valid response */
263 LTRACEF("response type 0x%x\n", res->type);
264 err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
265
266 /* release the lock */
267 mutex_release(&gdev->lock);
268
269 return err;
270 }
271
flush_resource(struct virtio_gpu_dev * gdev,uint32_t resource_id,uint32_t width,uint32_t height)272 static status_t flush_resource(struct virtio_gpu_dev *gdev, uint32_t resource_id, uint32_t width, uint32_t height) {
273 status_t err;
274
275 LTRACEF("gdev %p, resource_id %u, width %u, height %u\n", gdev, resource_id, width, height);
276
277 /* grab a lock to keep this single message at a time */
278 mutex_acquire(&gdev->lock);
279
280 /* construct the request */
281 struct virtio_gpu_resource_flush req;
282 memset(&req, 0, sizeof(req));
283
284 req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
285 req.r.x = req.r.y = 0;
286 req.r.width = width;
287 req.r.height = height;
288 req.resource_id = resource_id;
289
290 /* send the command and get a response */
291 struct virtio_gpu_ctrl_hdr *res;
292 err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
293 DEBUG_ASSERT(err == NO_ERROR);
294
295 /* see if we got a valid response */
296 LTRACEF("response type 0x%x\n", res->type);
297 err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
298
299 /* release the lock */
300 mutex_release(&gdev->lock);
301
302 return err;
303 }
304
transfer_to_host_2d(struct virtio_gpu_dev * gdev,uint32_t resource_id,uint32_t width,uint32_t height)305 static status_t transfer_to_host_2d(struct virtio_gpu_dev *gdev, uint32_t resource_id, uint32_t width, uint32_t height) {
306 status_t err;
307
308 LTRACEF("gdev %p, resource_id %u, width %u, height %u\n", gdev, resource_id, width, height);
309
310 /* grab a lock to keep this single message at a time */
311 mutex_acquire(&gdev->lock);
312
313 /* construct the request */
314 struct virtio_gpu_transfer_to_host_2d req;
315 memset(&req, 0, sizeof(req));
316
317 req.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
318 req.r.x = req.r.y = 0;
319 req.r.width = width;
320 req.r.height = height;
321 req.offset = 0;
322 req.resource_id = resource_id;
323
324 /* send the command and get a response */
325 struct virtio_gpu_ctrl_hdr *res;
326 err = send_command_response(gdev, &req, sizeof(req), (void **)&res, sizeof(*res));
327 DEBUG_ASSERT(err == NO_ERROR);
328
329 /* see if we got a valid response */
330 LTRACEF("response type 0x%x\n", res->type);
331 err = (res->type == VIRTIO_GPU_RESP_OK_NODATA) ? NO_ERROR : ERR_NO_MEMORY;
332
333 /* release the lock */
334 mutex_release(&gdev->lock);
335
336 return err;
337 }
338
virtio_gpu_start(struct virtio_device * dev)339 status_t virtio_gpu_start(struct virtio_device *dev) {
340 status_t err;
341
342 LTRACEF("dev %p\n", dev);
343
344 struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;
345
346 /* get the display info and see if we find a valid pmode */
347 err = get_display_info(gdev);
348 if (err < 0) {
349 LTRACEF("failed to get display info\n");
350 return err;
351 }
352
353 if (gdev->pmode_id < 0) {
354 LTRACEF("we failed to find a pmode, exiting\n");
355 return ERR_NOT_FOUND;
356 }
357
358 /* allocate a resource */
359 err = allocate_2d_resource(gdev, &gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
360 if (err < 0) {
361 LTRACEF("failed to allocate 2d resource\n");
362 return err;
363 }
364
365 /* attach a backing store to the resource */
366 size_t len = gdev->pmode.r.width * gdev->pmode.r.height * 4;
367 #if WITH_KERNEL_VM
368 gdev->fb = pmm_alloc_kpages(ROUNDUP(len, PAGE_SIZE) / PAGE_SIZE, NULL);
369 #else
370 gdev->fb = memalign(PAGE_SIZE, ROUNDUP(len, PAGE_SIZE));
371 #endif
372 if (!gdev->fb) {
373 TRACEF("failed to allocate framebuffer, wanted 0x%zx bytes\n", len);
374 return ERR_NO_MEMORY;
375 }
376
377 printf("virtio-gpu: framebuffer at %p, 0x%zx bytes\n", gdev->fb, len);
378
379 err = attach_backing(gdev, gdev->display_resource_id, gdev->fb, len);
380 if (err < 0) {
381 LTRACEF("failed to attach backing store\n");
382 return err;
383 }
384
385 /* attach this resource as a scanout */
386 err = set_scanout(gdev, gdev->pmode_id, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
387 if (err < 0) {
388 LTRACEF("failed to set scanout\n");
389 return err;
390 }
391
392 /* create the flush thread */
393 thread_t *t;
394 t = thread_create("virtio gpu flusher", &virtio_gpu_flush_thread, (void *)gdev, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
395 thread_detach_and_resume(t);
396
397 /* kick it once */
398 event_signal(&gdev->flush_event, true);
399
400 LTRACE_EXIT;
401
402 return NO_ERROR;
403 }
404
405
/* trace-dump the device's virtio-gpu config space (no-op unless LOCAL_TRACE) */
static void dump_gpu_config(const volatile struct virtio_gpu_config *config) {
    LTRACEF("events_read 0x%x\n", config->events_read);
    LTRACEF("events_clear 0x%x\n", config->events_clear);
    LTRACEF("num_scanouts 0x%x\n", config->num_scanouts);
    LTRACEF("reserved 0x%x\n", config->reserved);
}
412
virtio_gpu_init(struct virtio_device * dev,uint32_t host_features)413 status_t virtio_gpu_init(struct virtio_device *dev, uint32_t host_features) {
414 LTRACEF("dev %p, host_features 0x%x\n", dev, host_features);
415
416 /* allocate a new gpu device */
417 struct virtio_gpu_dev *gdev = malloc(sizeof(struct virtio_gpu_dev));
418 if (!gdev)
419 return ERR_NO_MEMORY;
420
421 mutex_init(&gdev->lock);
422 event_init(&gdev->io_event, false, EVENT_FLAG_AUTOUNSIGNAL);
423 event_init(&gdev->flush_event, false, EVENT_FLAG_AUTOUNSIGNAL);
424
425 gdev->dev = dev;
426 dev->priv = gdev;
427
428 gdev->pmode_id = -1;
429 gdev->next_resource_id = 1;
430
431 /* allocate memory for a gpu request */
432 #if WITH_KERNEL_VM
433 gdev->gpu_request = pmm_alloc_kpage();
434 gdev->gpu_request_phys = vaddr_to_paddr(gdev->gpu_request);
435 #else
436 gdev->gpu_request = malloc(sizeof(struct virtio_gpu_resp_display_info)); // XXX get size better
437 gdev->gpu_request_phys = (paddr_t)gdev->gpu_request;
438 #endif
439
440 /* make sure the device is reset */
441 virtio_reset_device(dev);
442
443 volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr;
444 dump_gpu_config(config);
445
446 /* ack and set the driver status bit */
447 virtio_status_acknowledge_driver(dev);
448
449 // XXX check features bits and ack/nak them
450
451 /* allocate a virtio ring */
452 virtio_alloc_ring(dev, 0, 16);
453
454 /* set our irq handler */
455 dev->irq_driver_callback = &virtio_gpu_irq_driver_callback;
456 dev->config_change_callback = &virtio_gpu_config_change_callback;
457
458 /* set DRIVER_OK */
459 virtio_status_driver_ok(dev);
460
461 /* save the main device we've found */
462 the_gdev = gdev;
463
464 printf("found virtio gpu device\n");
465
466 return NO_ERROR;
467 }
468
/*
 * Irq-time completion handler: walk the used element's descriptor chain,
 * return every descriptor to the free list, then wake the requester that is
 * blocked in send_command_response.
 */
static enum handler_return virtio_gpu_irq_driver_callback(struct virtio_device *dev, uint ring, const struct vring_used_elem *e) {
    struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;

    LTRACEF("dev %p, ring %u, e %p, id %u, len %u\n", dev, ring, e, e->id, e->len);

    /* walk the chain, freeing each descriptor back to the ring */
    uint16_t idx = e->id;
    bool more = true;
    while (more) {
        struct vring_desc *desc = virtio_desc_index_to_desc(dev, ring, idx);

        //virtio_dump_desc(desc);

        /* capture the link before freeing; free may recycle the desc */
        uint16_t next_idx = desc->next;
        more = (desc->flags & VRING_DESC_F_NEXT) != 0;

        virtio_free_desc(dev, ring, idx);

        idx = next_idx;
    }

    /* signal our event */
    event_signal(&gdev->io_event, false);

    return INT_RESCHEDULE;
}
501
virtio_gpu_config_change_callback(struct virtio_device * dev)502 static enum handler_return virtio_gpu_config_change_callback(struct virtio_device *dev) {
503 struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)dev->priv;
504
505 LTRACEF("gdev %p\n", gdev);
506
507 volatile struct virtio_gpu_config *config = (struct virtio_gpu_config *)dev->config_ptr;
508 dump_gpu_config(config);
509
510 return INT_RESCHEDULE;
511 }
512
virtio_gpu_flush_thread(void * arg)513 static int virtio_gpu_flush_thread(void *arg) {
514 struct virtio_gpu_dev *gdev = (struct virtio_gpu_dev *)arg;
515 status_t err;
516
517 for (;;) {
518 event_wait(&gdev->flush_event);
519
520 /* transfer to host 2d */
521 err = transfer_to_host_2d(gdev, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
522 if (err < 0) {
523 LTRACEF("failed to flush resource\n");
524 continue;
525 }
526
527 /* resource flush */
528 err = flush_resource(gdev, gdev->display_resource_id, gdev->pmode.r.width, gdev->pmode.r.height);
529 if (err < 0) {
530 LTRACEF("failed to flush resource\n");
531 continue;
532 }
533 }
534
535 return 0;
536 }
537
/*
 * Display flush callback installed in struct display_framebuffer. starty/endy
 * are ignored: the flush thread always pushes the entire framebuffer. Only
 * reschedules immediately when not called with interrupts disabled.
 */
static void virtio_gpu_gfx_flush(uint starty, uint endy) {
    event_signal(&the_gdev->flush_event, !arch_ints_disabled());
}
541
display_get_framebuffer(struct display_framebuffer * fb)542 status_t display_get_framebuffer(struct display_framebuffer *fb) {
543 DEBUG_ASSERT(fb);
544 memset(fb, 0, sizeof(*fb));
545
546 if (!the_gdev)
547 return ERR_NOT_FOUND;
548
549 fb->image.pixels = the_gdev->fb;
550 fb->image.format = IMAGE_FORMAT_RGB_x888;
551 fb->image.width = the_gdev->pmode.r.width;
552 fb->image.height = the_gdev->pmode.r.height;
553 fb->image.stride = fb->image.width;
554 fb->image.rowbytes = fb->image.width * 4;
555 fb->flush = virtio_gpu_gfx_flush;
556 fb->format = DISPLAY_FORMAT_RGB_x888;
557
558 return NO_ERROR;
559 }
560
display_get_info(struct display_info * info)561 status_t display_get_info(struct display_info *info) {
562 DEBUG_ASSERT(info);
563 memset(info, 0, sizeof(*info));
564
565 if (!the_gdev)
566 return ERR_NOT_FOUND;
567
568 info->format = DISPLAY_FORMAT_RGB_x888;
569 info->width = the_gdev->pmode.r.width;
570 info->height = the_gdev->pmode.r.height;
571
572 return NO_ERROR;
573 }
574
display_present(struct display_image * image,uint starty,uint endy)575 status_t display_present(struct display_image *image, uint starty, uint endy) {
576 TRACEF("display_present - not implemented");
577 DEBUG_ASSERT(false);
578 return NO_ERROR;
579 }
580