/*
 * Xen para-virtual frame buffer device
 *
 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006-2008 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 *
 * Based on linux/drivers/video/q40fb.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */

/*
 * TODO:
 *
 * Switch to grant tables when they become capable of dealing with the
 * frame buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
#include <xen/platform_pci.h>

struct xenfb_info {
	unsigned char		*fb;
	struct fb_info		*fb_info;
	int			x1, y1, x2, y2;	/* dirty rectangle,
						   protected by dirty_lock */
	spinlock_t		dirty_lock;
	int			nr_pages;
	int			irq;
	struct xenfb_page	*page;
	unsigned long		*gfns;
	int			update_wanted;	/* XENFB_TYPE_UPDATE wanted */
	int			feature_resize;	/* XENFB_TYPE_RESIZE ok */
	struct xenfb_resize	resize;		/* protected by resize_lock */
	int			resize_dpy;	/* ditto */
	spinlock_t		resize_lock;

	struct xenbus_device	*xbdev;
};

#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)

enum { KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT };
static int video[KPARAM_CNT] = { 2, XENFB_WIDTH, XENFB_HEIGHT };
module_param_array(video, int, NULL, 0);
MODULE_PARM_DESC(video,
	"Video memory size in MB, width, height in pixels (default 2,800,600)");

static void xenfb_make_preferred_console(void);
static void xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);

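/*
 * Push one event onto the shared out ring and notify the backend via
 * the bound event channel.  The caller must have checked that the ring
 * is not full (see xenfb_queue_full()).
 */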
static void xenfb_send_event(struct xenfb_info *info,
			     union xenfb_out_event *event)
{
	u32 prod;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = *event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}

static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;

	memset(&event, 0, sizeof(event));
	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	/* caller ensures !xenfb_queue_full() */
	xenfb_send_event(info, &event);
}

static void xenfb_do_resize(struct xenfb_info *info)
{
	union xenfb_out_event event;

	memset(&event, 0, sizeof(event));
	event.resize = info->resize;

	/* caller ensures !xenfb_queue_full() */
	xenfb_send_event(info, &event);
}

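/* True if the out ring has no free slot left for another event. */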
static int xenfb_queue_full(struct xenfb_info *info)
{
	u32 cons, prod;

	prod = info->page->out_prod;
	cons = info->page->out_cons;
	return prod - cons == XENFB_OUT_RING_LEN;
}

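/*
 * Send a pending XENFB_TYPE_RESIZE event, if one was requested by
 * xenfb_set_par() and the out ring currently has room for it.
 */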
static void xenfb_handle_resize_dpy(struct xenfb_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&info->resize_lock, flags);
	if (info->resize_dpy) {
		if (!xenfb_queue_full(info)) {
			info->resize_dpy = 0;
			xenfb_do_resize(info);
		}
	}
	spin_unlock_irqrestore(&info->resize_lock, flags);
}

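/*
 * Merge the rectangle (x1, y1, w, h) with the stashed dirty rectangle
 * and, if the backend asked for updates and the ring has room, send a
 * single XENFB_TYPE_UPDATE event covering the combined area.  If the
 * ring is full, the combined rectangle is stashed for a later refresh.
 */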
static void xenfb_refresh(struct xenfb_info *info,
			  int x1, int y1, int w, int h)
{
	unsigned long flags;
	int x2 = x1 + w - 1;
	int y2 = y1 + h - 1;

	xenfb_handle_resize_dpy(info);

	if (!info->update_wanted)
		return;

	spin_lock_irqsave(&info->dirty_lock, flags);

	/* Combine with dirty rectangle: */
	if (info->y1 < y1)
		y1 = info->y1;
	if (info->y2 > y2)
		y2 = info->y2;
	if (info->x1 < x1)
		x1 = info->x1;
	if (info->x2 > x2)
		x2 = info->x2;

	if (xenfb_queue_full(info)) {
		/* Can't send right now, stash it in the dirty rectangle */
		info->x1 = x1;
		info->x2 = x2;
		info->y1 = y1;
		info->y2 = y2;
		spin_unlock_irqrestore(&info->dirty_lock, flags);
		return;
	}

	/* Clear dirty rectangle: */
	info->x1 = info->y1 = INT_MAX;
	info->x2 = info->y2 = 0;

	spin_unlock_irqrestore(&info->dirty_lock, flags);

	if (x1 <= x2 && y1 <= y2)
		xenfb_do_update(info, x1, y1, x2 - x1 + 1, y2 - y1 + 1);
}

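/*
 * Deferred I/O callback: translate the list of touched framebuffer
 * pages into a span of scanlines and refresh that span.
 */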
static void xenfb_deferred_io(struct fb_info *fb_info, struct list_head *pagereflist)
{
	struct xenfb_info *info = fb_info->par;
	struct fb_deferred_io_pageref *pageref;
	unsigned long beg, end;
	int y1, y2, miny, maxy;

	miny = INT_MAX;
	maxy = 0;
	list_for_each_entry(pageref, pagereflist, list) {
		beg = pageref->offset;
		end = beg + PAGE_SIZE - 1;
		y1 = beg / fb_info->fix.line_length;
		y2 = end / fb_info->fix.line_length;
		if (y2 >= fb_info->var.yres)
			y2 = fb_info->var.yres - 1;
		if (miny > y1)
			miny = y1;
		if (maxy < y2)
			maxy = y2;
	}
	xenfb_refresh(info, 0, miny, fb_info->var.xres, maxy - miny + 1);
}

static struct fb_deferred_io xenfb_defio = {
	.delay		= HZ / 20,
	.deferred_io	= xenfb_deferred_io,
};

static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	u32 v;

	if (regno > info->cmap.len)
		return 1;

#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
	red = CNVT_TOHW(red, info->var.red.length);
	green = CNVT_TOHW(green, info->var.green.length);
	blue = CNVT_TOHW(blue, info->var.blue.length);
#undef CNVT_TOHW

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset);

	switch (info->var.bits_per_pixel) {
	case 16:
	case 24:
	case 32:
		((u32 *)info->pseudo_palette)[regno] = v;
		break;
	}

	return 0;
}

static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	struct xenfb_info *info = p->par;

	sys_fillrect(p, rect);
	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
}

static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
	struct xenfb_info *info = p->par;

	sys_imageblit(p, image);
	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
}

static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
	struct xenfb_info *info = p->par;

	sys_copyarea(p, area);
	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
}

static ssize_t xenfb_write(struct fb_info *p, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct xenfb_info *info = p->par;
	ssize_t res;

	res = fb_sys_write(p, buf, count, ppos);
	xenfb_refresh(info, 0, 0, info->page->width, info->page->height);
	return res;
}

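/*
 * Without backend resize support only the initial mode is accepted.
 * With it, any mode that fits the initial line length and the
 * allocated framebuffer memory is allowed.
 */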
static int
xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct xenfb_info *xenfb_info;
	int required_mem_len;

	xenfb_info = info->par;

	if (!xenfb_info->feature_resize) {
		if (var->xres == video[KPARAM_WIDTH] &&
		    var->yres == video[KPARAM_HEIGHT] &&
		    var->bits_per_pixel == xenfb_info->page->depth) {
			return 0;
		}
		return -EINVAL;
	}

	/* Can't resize past initial width and height */
	if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
		return -EINVAL;

	required_mem_len = var->xres * var->yres * xenfb_info->page->depth / 8;
	if (var->bits_per_pixel == xenfb_info->page->depth &&
	    var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
	    required_mem_len <= info->fix.smem_len) {
		var->xres_virtual = var->xres;
		var->yres_virtual = var->yres;
		return 0;
	}
	return -EINVAL;
}

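/*
 * Record the new mode; the XENFB_TYPE_RESIZE event itself is sent
 * later from xenfb_handle_resize_dpy().
 */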
static int xenfb_set_par(struct fb_info *info)
{
	struct xenfb_info *xenfb_info;
	unsigned long flags;

	xenfb_info = info->par;

	spin_lock_irqsave(&xenfb_info->resize_lock, flags);
	xenfb_info->resize.type = XENFB_TYPE_RESIZE;
	xenfb_info->resize.width = info->var.xres;
	xenfb_info->resize.height = info->var.yres;
	xenfb_info->resize.stride = info->fix.line_length;
	xenfb_info->resize.depth = info->var.bits_per_pixel;
	xenfb_info->resize.offset = 0;
	xenfb_info->resize_dpy = 1;
	spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
	return 0;
}

static const struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_read	= fb_sys_read,
	.fb_write	= xenfb_write,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_check_var	= xenfb_check_var,
	.fb_set_par	= xenfb_set_par,
	.fb_mmap	= fb_deferred_io_mmap,
};

static irqreturn_t xenfb_event_handler(int rq, void *dev_id)
{
	/*
	 * No in events recognized, simply ignore them all.
	 * If you need to recognize some, see xen-kbdfront's
	 * input_handler() for how to do that.
	 */
	struct xenfb_info *info = dev_id;
	struct xenfb_page *page = info->page;

	if (page->in_cons != page->in_prod) {
		info->page->in_cons = info->page->in_prod;
		notify_remote_via_irq(info->irq);
	}

	/* Flush dirty rectangle: */
	xenfb_refresh(info, INT_MAX, INT_MAX, -INT_MAX, -INT_MAX);

	return IRQ_HANDLED;
}

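/*
 * Allocate the framebuffer, the shared page and the fb_info, then
 * connect to the backend and register the framebuffer device.
 */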
static int xenfb_probe(struct xenbus_device *dev,
		       const struct xenbus_device_id *id)
{
	struct xenfb_info *info;
	struct fb_info *fb_info;
	int fb_size;
	int val;
	int ret = 0;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	/* Limit kernel param videoram amount to what is in xenstore */
	if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
		if (val < video[KPARAM_MEM])
			video[KPARAM_MEM] = val;
	}

	video[KPARAM_WIDTH] = xenbus_read_unsigned(dev->otherend, "width",
						   video[KPARAM_WIDTH]);
	video[KPARAM_HEIGHT] = xenbus_read_unsigned(dev->otherend, "height",
						    video[KPARAM_HEIGHT]);

	/* If requested res does not fit in available memory, use default */
	fb_size = video[KPARAM_MEM] * 1024 * 1024;
	if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH / 8
	    > fb_size) {
		pr_warn("display parameters %d,%d,%d invalid, use defaults\n",
			video[KPARAM_MEM], video[KPARAM_WIDTH],
			video[KPARAM_HEIGHT]);
		video[KPARAM_WIDTH] = XENFB_WIDTH;
		video[KPARAM_HEIGHT] = XENFB_HEIGHT;
		fb_size = XENFB_DEFAULT_FB_LEN;
	}

	dev_set_drvdata(&dev->dev, info);
	info->xbdev = dev;
	info->irq = -1;
	info->x1 = info->y1 = INT_MAX;
	spin_lock_init(&info->dirty_lock);
	spin_lock_init(&info->resize_lock);

	info->fb = vzalloc(fb_size);
	if (info->fb == NULL)
		goto error_nomem;

	info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	info->gfns = vmalloc(array_size(sizeof(unsigned long), info->nr_pages));
	if (!info->gfns)
		goto error_nomem;

	/* set up shared page */
	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!info->page)
		goto error_nomem;

	/* abusing framebuffer_alloc() to allocate pseudo_palette */
	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
	if (fb_info == NULL)
		goto error_nomem;

	/* complete the abuse: */
	fb_info->pseudo_palette = fb_info->par;
	fb_info->par = info;

	fb_info->screen_base = info->fb;

	fb_info->fbops = &xenfb_fb_ops;
	fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
	fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
	fb_info->var.bits_per_pixel = XENFB_DEPTH;

	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
	fb_info->var.blue = (struct fb_bitfield){0, 8, 0};

	fb_info->var.activate = FB_ACTIVATE_NOW;
	fb_info->var.height = -1;
	fb_info->var.width = -1;
	fb_info->var.vmode = FB_VMODE_NONINTERLACED;

	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
	fb_info->fix.line_length = fb_info->var.xres * XENFB_DEPTH / 8;
	fb_info->fix.smem_start = 0;
	fb_info->fix.smem_len = fb_size;
	strcpy(fb_info->fix.id, "xen");
	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
	fb_info->fix.accel = FB_ACCEL_NONE;

	fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;

	ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
	if (ret < 0) {
		framebuffer_release(fb_info);
		xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
		goto error;
	}

	fb_info->fbdefio = &xenfb_defio;
	fb_deferred_io_init(fb_info);

	xenfb_init_shared_page(info, fb_info);

	ret = xenfb_connect_backend(dev, info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret, "xenfb_connect_backend");
		goto error_fb;
	}

	ret = register_framebuffer(fb_info);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "register_framebuffer");
		goto error_fb;
	}
	info->fb_info = fb_info;

	xenfb_make_preferred_console();
	return 0;

error_fb:
	fb_deferred_io_cleanup(fb_info);
	fb_dealloc_cmap(&fb_info->cmap);
	framebuffer_release(fb_info);
error_nomem:
	if (!ret) {
		ret = -ENOMEM;
		xenbus_dev_fatal(dev, ret, "allocating device memory");
	}
error:
	xenfb_remove(dev);
	return ret;
}

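/*
 * Unless a console was selected on the kernel command line, make tty0
 * the preferred console.
 */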
static void xenfb_make_preferred_console(void)
{
	struct console *c;

	if (console_set_on_cmdline)
		return;

	console_list_lock();
	for_each_console(c) {
		if (!strcmp(c->name, "tty") && c->index == 0)
			break;
	}
	if (c)
		console_force_preferred_locked(c);
	console_list_unlock();
}

static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev_get_drvdata(&dev->dev);

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info, info->fb_info);
	return xenfb_connect_backend(dev, info);
}

static void xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev_get_drvdata(&dev->dev);

	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		fb_deferred_io_cleanup(info->fb_info);
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->gfns);
	vfree(info->fb);
	kfree(info);
}

static unsigned long vmalloc_to_gfn(void *address)
{
	return xen_page_to_gfn(vmalloc_to_page(address));
}

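/*
 * Fill the shared page: gfns[] holds the frame numbers of the
 * framebuffer pages, the page directory (pd[]) holds the frame numbers
 * of the gfns[] pages themselves, followed by the current video mode.
 */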
static void xenfb_init_shared_page(struct xenfb_info *info,
				   struct fb_info *fb_info)
{
	int i;
	int epd = PAGE_SIZE / sizeof(info->gfns[0]);

	for (i = 0; i < info->nr_pages; i++)
		info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);

	for (i = 0; i * epd < info->nr_pages; i++)
		info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);

	info->page->width = fb_info->var.xres;
	info->page->height = fb_info->var.yres;
	info->page->depth = fb_info->var.bits_per_pixel;
	info->page->line_length = fb_info->fix.line_length;
	info->page->mem_length = fb_info->fix.smem_len;
	info->page->in_cons = info->page->in_prod = 0;
	info->page->out_cons = info->page->out_prod = 0;
}

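/*
 * Allocate an event channel, bind the event handler, advertise the
 * shared page reference, event channel, protocol and feature-update in
 * xenstore, then switch to the Initialised state.
 */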
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret, evtchn, irq;
	struct xenbus_transaction xbt;

	ret = xenbus_alloc_evtchn(dev, &evtchn);
	if (ret)
		return ret;
	irq = bind_evtchn_to_irqhandler(evtchn, xenfb_event_handler,
					0, dev->devicetype, info);
	if (irq < 0) {
		xenbus_free_evtchn(dev, evtchn);
		xenbus_dev_fatal(dev, irq, "bind_evtchn_to_irqhandler");
		return irq;
	}
again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		goto unbind_irq;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_gfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    evtchn);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		goto unbind_irq;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	info->irq = irq;
	return 0;

error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
unbind_irq:
	unbind_from_irqhandler(irq, info);
	return ret;
}

static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	/* Prevent xenfb refresh */
	info->update_wanted = 0;
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}

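/*
 * Follow the backend through the xenbus state machine and pick up its
 * request-update and feature-resize properties once it is Connected.
 */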
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev_get_drvdata(&dev->dev);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			/* no InitWait seen yet, fudge it */
			xenbus_switch_state(dev, XenbusStateConnected);

		if (xenbus_read_unsigned(info->xbdev->otherend,
					 "request-update", 0))
			info->update_wanted = 1;

		info->feature_resize = xenbus_read_unsigned(dev->otherend,
							    "feature-resize", 0);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static const struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};

static struct xenbus_driver xenfb_driver = {
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
	.not_essential = true,
};

static int __init xenfb_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&xenfb_driver);
}

static void __exit xenfb_cleanup(void)
{
	xenbus_unregister_driver(&xenfb_driver);
}

module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_DESCRIPTION("Xen virtual framebuffer device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vfb");