1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * inode.c -- user mode filesystem api for usb gadget controllers
4 *
5 * Copyright (C) 2003-2004 David Brownell
6 * Copyright (C) 2003 Agilent Technologies
7 */
8
9
10 /* #define VERBOSE_DEBUG */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/fs_context.h>
16 #include <linux/pagemap.h>
17 #include <linux/uts.h>
18 #include <linux/wait.h>
19 #include <linux/compiler.h>
20 #include <linux/uaccess.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/poll.h>
24 #include <linux/kthread.h>
25 #include <linux/aio.h>
26 #include <linux/uio.h>
27 #include <linux/refcount.h>
28 #include <linux/delay.h>
29 #include <linux/device.h>
30 #include <linux/moduleparam.h>
31
32 #include <linux/usb/gadgetfs.h>
33 #include <linux/usb/gadget.h>
34
35
36 /*
37 * The gadgetfs API maps each endpoint to a file descriptor so that you
38 * can use standard synchronous read/write calls for I/O. There's some
39 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
40 * drivers show how this works in practice. You can also use AIO to
41 * eliminate I/O gaps between requests, to help when streaming data.
42 *
43 * Key parts that must be USB-specific are protocols defining how the
44 * read/write operations relate to the hardware state machines. There
45 * are two types of files. One type is for the device, implementing ep0.
46 * The other type is for each IN or OUT endpoint. In both cases, the
47 * user mode driver must configure the hardware before using it.
48 *
49 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
50 * (by writing configuration and device descriptors). Afterwards it
51 * may serve as a source of device events, used to handle all control
52 * requests other than basic enumeration.
53 *
54 * - Then, after a SET_CONFIGURATION control request, ep_config() is
55 * called when each /dev/gadget/ep* file is configured (by writing
56 * endpoint descriptors). Afterwards these files are used to write()
57 * IN data or to read() OUT data. To halt the endpoint, a "wrong
58 * direction" request is issued (like reading an IN endpoint).
59 *
60 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
61 * not possible on all hardware. For example, precise fault handling with
62 * respect to data left in endpoint fifos after aborted operations; or
63 * selective clearing of endpoint halts, to implement SET_INTERFACE.
64 */
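
/* A minimal userspace flow looks roughly like this (a sketch only; the
 * mount point and the $CHIP name are conventions, and error handling is
 * omitted):
 *
 *	// mount -t gadgetfs gadgetfs /dev/gadget
 *	int ep0 = open("/dev/gadget/$CHIP", O_RDWR);
 *	write(ep0, descriptors, descriptors_len);	// see dev_config()
 *	for (;;) {
 *		struct usb_gadgetfs_event event;
 *		read(ep0, &event, sizeof event);	// see ep0_read()
 *		// handle GADGETFS_CONNECT, GADGETFS_SETUP, ...
 *	}
 */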
65
66 #define DRIVER_DESC "USB Gadget filesystem"
67 #define DRIVER_VERSION "24 Aug 2004"
68
69 static const char driver_desc [] = DRIVER_DESC;
70 static const char shortname [] = "gadgetfs";
71
72 MODULE_DESCRIPTION (DRIVER_DESC);
73 MODULE_AUTHOR ("David Brownell");
74 MODULE_LICENSE ("GPL");
75
76 static int ep_open(struct inode *, struct file *);
77
78
79 /*----------------------------------------------------------------------*/
80
81 #define GADGETFS_MAGIC 0xaee71ee7
82
83 /* /dev/gadget/$CHIP represents ep0 and the whole device */
84 enum ep0_state {
85 /* DISABLED is the initial state. */
86 STATE_DEV_DISABLED = 0,
87
88 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
89 * ep0/device i/o modes and binding to the controller. Driver
90 * must always write descriptors to initialize the device, then
91 * the device becomes UNCONNECTED until enumeration.
92 */
93 STATE_DEV_OPENED,
94
95 /* From then on, ep0 fd is in either of two basic modes:
96 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
97 * - SETUP: read/write will transfer control data and succeed;
98 * or if "wrong direction", performs protocol stall
99 */
100 STATE_DEV_UNCONNECTED,
101 STATE_DEV_CONNECTED,
102 STATE_DEV_SETUP,
103
104 /* UNBOUND means the driver closed ep0, so the device won't be
105 * accessible again (DEV_DISABLED) until all fds are closed.
106 */
107 STATE_DEV_UNBOUND,
108 };
109
110 /* enough for the whole queue: most events invalidate others */
111 #define N_EVENT 5
112
113 #define RBUF_SIZE 256
114
115 struct dev_data {
116 spinlock_t lock;
117 refcount_t count;
118 int udc_usage;
119 enum ep0_state state; /* P: lock */
120 struct usb_gadgetfs_event event [N_EVENT];
121 unsigned ev_next;
122 struct fasync_struct *fasync;
123 u8 current_config;
124
125 /* drivers reading ep0 MUST handle control requests (SETUP)
126 * reported that way; else the host will time out.
127 */
128 unsigned usermode_setup : 1,
129 setup_in : 1,
130 setup_can_stall : 1,
131 setup_out_ready : 1,
132 setup_out_error : 1,
133 setup_abort : 1,
134 gadget_registered : 1;
135 unsigned setup_wLength;
136
137 /* the rest is basically write-once */
138 struct usb_config_descriptor *config, *hs_config;
139 struct usb_device_descriptor *dev;
140 struct usb_request *req;
141 struct usb_gadget *gadget;
142 struct list_head epfiles;
143 void *buf;
144 wait_queue_head_t wait;
145 struct super_block *sb;
146 struct dentry *dentry;
147
148 /* except this scratch i/o buffer for ep0 */
149 u8 rbuf[RBUF_SIZE];
150 };
151
static inline void get_dev (struct dev_data *data)
153 {
154 refcount_inc (&data->count);
155 }
156
static void put_dev (struct dev_data *data)
158 {
159 if (likely (!refcount_dec_and_test (&data->count)))
160 return;
161 /* needs no more cleanup */
162 BUG_ON (waitqueue_active (&data->wait));
163 kfree (data);
164 }
165
static struct dev_data *dev_new (void)
167 {
168 struct dev_data *dev;
169
170 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
171 if (!dev)
172 return NULL;
173 dev->state = STATE_DEV_DISABLED;
174 refcount_set (&dev->count, 1);
175 spin_lock_init (&dev->lock);
176 INIT_LIST_HEAD (&dev->epfiles);
177 init_waitqueue_head (&dev->wait);
178 return dev;
179 }
180
181 /*----------------------------------------------------------------------*/
182
183 /* other /dev/gadget/$ENDPOINT files represent endpoints */
184 enum ep_state {
185 STATE_EP_DISABLED = 0,
186 STATE_EP_READY,
187 STATE_EP_ENABLED,
188 STATE_EP_UNBOUND,
189 };
190
191 struct ep_data {
192 struct mutex lock;
193 enum ep_state state;
194 refcount_t count;
195 struct dev_data *dev;
196 /* must hold dev->lock before accessing ep or req */
197 struct usb_ep *ep;
198 struct usb_request *req;
199 ssize_t status;
200 char name [16];
201 struct usb_endpoint_descriptor desc, hs_desc;
202 struct list_head epfiles;
203 wait_queue_head_t wait;
204 struct dentry *dentry;
205 };
206
static inline void get_ep (struct ep_data *data)
208 {
209 refcount_inc (&data->count);
210 }
211
static void put_ep (struct ep_data *data)
213 {
214 if (likely (!refcount_dec_and_test (&data->count)))
215 return;
216 put_dev (data->dev);
217 /* needs no more cleanup */
218 BUG_ON (!list_empty (&data->epfiles));
219 BUG_ON (waitqueue_active (&data->wait));
220 kfree (data);
221 }
222
223 /*----------------------------------------------------------------------*/
224
225 /* most "how to use the hardware" policy choices are in userspace:
226 * mapping endpoint roles (which the driver needs) to the capabilities
227 * which the usb controller has. most of those capabilities are exposed
228 * implicitly, starting with the driver name and then endpoint names.
229 */
230
231 static const char *CHIP;
232
233 /*----------------------------------------------------------------------*/
234
235 /* NOTE: don't use dev_printk calls before binding to the gadget
236 * at the end of ep0 configuration, or after unbind.
237 */
238
239 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
240 #define xprintk(d,level,fmt,args...) \
241 printk(level "%s: " fmt , shortname , ## args)
242
243 #ifdef DEBUG
244 #define DBG(dev,fmt,args...) \
245 xprintk(dev , KERN_DEBUG , fmt , ## args)
246 #else
247 #define DBG(dev,fmt,args...) \
248 do { } while (0)
249 #endif /* DEBUG */
250
251 #ifdef VERBOSE_DEBUG
252 #define VDEBUG DBG
253 #else
254 #define VDEBUG(dev,fmt,args...) \
255 do { } while (0)
256 #endif /* DEBUG */
257
258 #define ERROR(dev,fmt,args...) \
259 xprintk(dev , KERN_ERR , fmt , ## args)
260 #define INFO(dev,fmt,args...) \
261 xprintk(dev , KERN_INFO , fmt , ## args)
262
263
264 /*----------------------------------------------------------------------*/
265
266 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
267 *
268 * After opening, configure non-control endpoints. Then use normal
269 * stream read() and write() requests; and maybe ioctl() to get more
270 * precise FIFO status when recovering from cancellation.
271 */
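
/* For example, a driver recovering from a cancelled transfer might do
 * something like this from userspace (sketch only, error handling omitted;
 * ep_fd is assumed to be an already-configured endpoint file):
 *
 *	int pending = ioctl(ep_fd, GADGETFS_FIFO_STATUS);  // bytes left in fifo
 *	if (pending > 0)
 *		ioctl(ep_fd, GADGETFS_FIFO_FLUSH);
 *	ioctl(ep_fd, GADGETFS_CLEAR_HALT);	// e.g. while handling SET_INTERFACE
 */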
272
static void epio_complete (struct usb_ep *ep, struct usb_request *req)
274 {
275 struct ep_data *epdata = ep->driver_data;
276
277 if (!req->context)
278 return;
279 if (req->status)
280 epdata->status = req->status;
281 else
282 epdata->status = req->actual;
283 complete ((struct completion *)req->context);
284 }
285
/* lock the endpoint's mutex, returning once the endpoint is usable.
 * still need dev->lock to use epdata->ep.
 */
289 static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
291 {
292 int val;
293
294 if (f_flags & O_NONBLOCK) {
295 if (!mutex_trylock(&epdata->lock))
296 goto nonblock;
297 if (epdata->state != STATE_EP_ENABLED &&
298 (!is_write || epdata->state != STATE_EP_READY)) {
299 mutex_unlock(&epdata->lock);
300 nonblock:
301 val = -EAGAIN;
302 } else
303 val = 0;
304 return val;
305 }
306
307 val = mutex_lock_interruptible(&epdata->lock);
308 if (val < 0)
309 return val;
310
311 switch (epdata->state) {
312 case STATE_EP_ENABLED:
313 return 0;
314 case STATE_EP_READY: /* not configured yet */
315 if (is_write)
316 return 0;
317 fallthrough;
318 case STATE_EP_UNBOUND: /* clean disconnect */
319 break;
320 // case STATE_EP_DISABLED: /* "can't happen" */
321 default: /* error! */
322 pr_debug ("%s: ep %p not available, state %d\n",
323 shortname, epdata, epdata->state);
324 }
325 mutex_unlock(&epdata->lock);
326 return -ENODEV;
327 }
328
329 static ssize_t
ep_io (struct ep_data *epdata, void *buf, unsigned len)
331 {
332 DECLARE_COMPLETION_ONSTACK (done);
333 int value;
334
335 spin_lock_irq (&epdata->dev->lock);
336 if (likely (epdata->ep != NULL)) {
337 struct usb_request *req = epdata->req;
338
339 req->context = &done;
340 req->complete = epio_complete;
341 req->buf = buf;
342 req->length = len;
343 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
344 } else
345 value = -ENODEV;
346 spin_unlock_irq (&epdata->dev->lock);
347
348 if (likely (value == 0)) {
349 value = wait_for_completion_interruptible(&done);
350 if (value != 0) {
351 spin_lock_irq (&epdata->dev->lock);
352 if (likely (epdata->ep != NULL)) {
353 DBG (epdata->dev, "%s i/o interrupted\n",
354 epdata->name);
355 usb_ep_dequeue (epdata->ep, epdata->req);
356 spin_unlock_irq (&epdata->dev->lock);
357
358 wait_for_completion(&done);
359 if (epdata->status == -ECONNRESET)
360 epdata->status = -EINTR;
361 } else {
362 spin_unlock_irq (&epdata->dev->lock);
363
364 DBG (epdata->dev, "endpoint gone\n");
365 epdata->status = -ENODEV;
366 }
367 }
368 return epdata->status;
369 }
370 return value;
371 }
372
373 static int
ep_release (struct inode *inode, struct file *fd)
375 {
376 struct ep_data *data = fd->private_data;
377 int value;
378
379 value = mutex_lock_interruptible(&data->lock);
380 if (value < 0)
381 return value;
382
383 /* clean up if this can be reopened */
384 if (data->state != STATE_EP_UNBOUND) {
385 data->state = STATE_EP_DISABLED;
386 data->desc.bDescriptorType = 0;
387 data->hs_desc.bDescriptorType = 0;
388 usb_ep_disable(data->ep);
389 }
390 mutex_unlock(&data->lock);
391 put_ep (data);
392 return 0;
393 }
394
static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
396 {
397 struct ep_data *data = fd->private_data;
398 int status;
399
400 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
401 return status;
402
403 spin_lock_irq (&data->dev->lock);
404 if (likely (data->ep != NULL)) {
405 switch (code) {
406 case GADGETFS_FIFO_STATUS:
407 status = usb_ep_fifo_status (data->ep);
408 break;
409 case GADGETFS_FIFO_FLUSH:
410 usb_ep_fifo_flush (data->ep);
411 break;
412 case GADGETFS_CLEAR_HALT:
413 status = usb_ep_clear_halt (data->ep);
414 break;
415 default:
416 status = -ENOTTY;
417 }
418 } else
419 status = -ENODEV;
420 spin_unlock_irq (&data->dev->lock);
421 mutex_unlock(&data->lock);
422 return status;
423 }
424
425 /*----------------------------------------------------------------------*/
426
427 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
428
429 struct kiocb_priv {
430 struct usb_request *req;
431 struct ep_data *epdata;
432 struct kiocb *iocb;
433 struct mm_struct *mm;
434 struct work_struct work;
435 void *buf;
436 struct iov_iter to;
437 const void *to_free;
438 unsigned actual;
439 };
440
static int ep_aio_cancel(struct kiocb *iocb)
442 {
443 struct kiocb_priv *priv = iocb->private;
444 struct ep_data *epdata;
445 int value;
446
447 local_irq_disable();
448 epdata = priv->epdata;
449 // spin_lock(&epdata->dev->lock);
450 if (likely(epdata && epdata->ep && priv->req))
451 value = usb_ep_dequeue (epdata->ep, priv->req);
452 else
453 value = -EINVAL;
454 // spin_unlock(&epdata->dev->lock);
455 local_irq_enable();
456
457 return value;
458 }
459
static void ep_user_copy_worker(struct work_struct *work)
461 {
462 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
463 struct mm_struct *mm = priv->mm;
464 struct kiocb *iocb = priv->iocb;
465 size_t ret;
466
467 kthread_use_mm(mm);
468 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
469 kthread_unuse_mm(mm);
470 if (!ret)
471 ret = -EFAULT;
472
473 /* completing the iocb can drop the ctx and mm, don't touch mm after */
474 iocb->ki_complete(iocb, ret);
475
476 kfree(priv->buf);
477 kfree(priv->to_free);
478 kfree(priv);
479 }
480
static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
482 {
483 struct kiocb *iocb = req->context;
484 struct kiocb_priv *priv = iocb->private;
485 struct ep_data *epdata = priv->epdata;
486
487 /* lock against disconnect (and ideally, cancel) */
488 spin_lock(&epdata->dev->lock);
489 priv->req = NULL;
490 priv->epdata = NULL;
491
492 /* if this was a write or a read returning no data then we
493 * don't need to copy anything to userspace, so we can
494 * complete the aio request immediately.
495 */
496 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
497 kfree(req->buf);
498 kfree(priv->to_free);
499 kfree(priv);
500 iocb->private = NULL;
501 iocb->ki_complete(iocb,
502 req->actual ? req->actual : (long)req->status);
503 } else {
		/* the copy deferred to ep_user_copy_worker() won't report both; we hide some faults */
505 if (unlikely(0 != req->status))
506 DBG(epdata->dev, "%s fault %d len %d\n",
507 ep->name, req->status, req->actual);
508
509 priv->buf = req->buf;
510 priv->actual = req->actual;
511 INIT_WORK(&priv->work, ep_user_copy_worker);
512 schedule_work(&priv->work);
513 }
514
515 usb_ep_free_request(ep, req);
516 spin_unlock(&epdata->dev->lock);
517 put_ep(epdata);
518 }
519
static ssize_t ep_aio(struct kiocb *iocb,
521 struct kiocb_priv *priv,
522 struct ep_data *epdata,
523 char *buf,
524 size_t len)
525 {
526 struct usb_request *req;
527 ssize_t value;
528
529 iocb->private = priv;
530 priv->iocb = iocb;
531
532 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
533 get_ep(epdata);
534 priv->epdata = epdata;
535 priv->actual = 0;
536 priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
537
538 /* each kiocb is coupled to one usb_request, but we can't
539 * allocate or submit those if the host disconnected.
540 */
541 spin_lock_irq(&epdata->dev->lock);
542 value = -ENODEV;
543 if (unlikely(epdata->ep == NULL))
544 goto fail;
545
546 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
547 value = -ENOMEM;
548 if (unlikely(!req))
549 goto fail;
550
551 priv->req = req;
552 req->buf = buf;
553 req->length = len;
554 req->complete = ep_aio_complete;
555 req->context = iocb;
556 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
557 if (unlikely(0 != value)) {
558 usb_ep_free_request(epdata->ep, req);
559 goto fail;
560 }
561 spin_unlock_irq(&epdata->dev->lock);
562 return -EIOCBQUEUED;
563
564 fail:
565 spin_unlock_irq(&epdata->dev->lock);
566 kfree(priv->to_free);
567 kfree(priv);
568 put_ep(epdata);
569 return value;
570 }
571
572 static ssize_t
ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
574 {
575 struct file *file = iocb->ki_filp;
576 struct ep_data *epdata = file->private_data;
577 size_t len = iov_iter_count(to);
578 ssize_t value;
579 char *buf;
580
581 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
582 return value;
583
584 /* halt any endpoint by doing a "wrong direction" i/o call */
585 if (usb_endpoint_dir_in(&epdata->desc)) {
586 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
587 !is_sync_kiocb(iocb)) {
588 mutex_unlock(&epdata->lock);
589 return -EINVAL;
590 }
591 DBG (epdata->dev, "%s halt\n", epdata->name);
592 spin_lock_irq(&epdata->dev->lock);
593 if (likely(epdata->ep != NULL))
594 usb_ep_set_halt(epdata->ep);
595 spin_unlock_irq(&epdata->dev->lock);
596 mutex_unlock(&epdata->lock);
597 return -EBADMSG;
598 }
599
600 buf = kmalloc(len, GFP_KERNEL);
601 if (unlikely(!buf)) {
602 mutex_unlock(&epdata->lock);
603 return -ENOMEM;
604 }
605 if (is_sync_kiocb(iocb)) {
606 value = ep_io(epdata, buf, len);
607 if (value >= 0 && (copy_to_iter(buf, value, to) != value))
608 value = -EFAULT;
609 } else {
610 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
611 value = -ENOMEM;
612 if (!priv)
613 goto fail;
614 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
615 if (!priv->to_free) {
616 kfree(priv);
617 goto fail;
618 }
619 value = ep_aio(iocb, priv, epdata, buf, len);
620 if (value == -EIOCBQUEUED)
621 buf = NULL;
622 }
623 fail:
624 kfree(buf);
625 mutex_unlock(&epdata->lock);
626 return value;
627 }
628
629 static ssize_t ep_config(struct ep_data *, const char *, size_t);
630
631 static ssize_t
ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
633 {
634 struct file *file = iocb->ki_filp;
635 struct ep_data *epdata = file->private_data;
636 size_t len = iov_iter_count(from);
637 bool configured;
638 ssize_t value;
639 char *buf;
640
641 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
642 return value;
643
644 configured = epdata->state == STATE_EP_ENABLED;
645
646 /* halt any endpoint by doing a "wrong direction" i/o call */
647 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
648 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
649 !is_sync_kiocb(iocb)) {
650 mutex_unlock(&epdata->lock);
651 return -EINVAL;
652 }
653 DBG (epdata->dev, "%s halt\n", epdata->name);
654 spin_lock_irq(&epdata->dev->lock);
655 if (likely(epdata->ep != NULL))
656 usb_ep_set_halt(epdata->ep);
657 spin_unlock_irq(&epdata->dev->lock);
658 mutex_unlock(&epdata->lock);
659 return -EBADMSG;
660 }
661
662 buf = kmalloc(len, GFP_KERNEL);
663 if (unlikely(!buf)) {
664 mutex_unlock(&epdata->lock);
665 return -ENOMEM;
666 }
667
668 if (unlikely(!copy_from_iter_full(buf, len, from))) {
669 value = -EFAULT;
670 goto out;
671 }
672
673 if (unlikely(!configured)) {
674 value = ep_config(epdata, buf, len);
675 } else if (is_sync_kiocb(iocb)) {
676 value = ep_io(epdata, buf, len);
677 } else {
678 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
679 value = -ENOMEM;
680 if (priv) {
681 value = ep_aio(iocb, priv, epdata, buf, len);
682 if (value == -EIOCBQUEUED)
683 buf = NULL;
684 }
685 }
686 out:
687 kfree(buf);
688 mutex_unlock(&epdata->lock);
689 return value;
690 }
691
692 /*----------------------------------------------------------------------*/
693
694 /* used after endpoint configuration */
695 static const struct file_operations ep_io_operations = {
696 .owner = THIS_MODULE,
697
698 .open = ep_open,
699 .release = ep_release,
700 .llseek = no_llseek,
701 .unlocked_ioctl = ep_ioctl,
702 .read_iter = ep_read_iter,
703 .write_iter = ep_write_iter,
704 };
705
706 /* ENDPOINT INITIALIZATION
707 *
708 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
709 * status = write (fd, descriptors, sizeof descriptors)
710 *
711 * That write establishes the endpoint configuration, configuring
712 * the controller to process bulk, interrupt, or isochronous transfers
713 * at the right maxpacket size, and so on.
714 *
715 * The descriptors are message type 1, identified by a host order u32
716 * at the beginning of what's written. Descriptor order is: full/low
717 * speed descriptor, then optional high speed descriptor.
718 */
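
/* A userspace sketch of that sequence (illustrative only; the endpoint
 * file name and descriptor contents depend on the UDC and on the driver).
 * Exactly USB_DT_ENDPOINT_SIZE (7) bytes follow the tag for each
 * descriptor written:
 *
 *	u8 buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *	u32 tag = 1;
 *	memcpy(buf, &tag, 4);
 *	memcpy(buf + 4, &fs_ep_desc, USB_DT_ENDPOINT_SIZE);	// full speed
 *	memcpy(buf + 4 + USB_DT_ENDPOINT_SIZE, &hs_ep_desc,
 *			USB_DT_ENDPOINT_SIZE);			// high speed (optional)
 *	int fd = open("/dev/gadget/ep1in", O_RDWR);
 *	write(fd, buf, sizeof buf);	// ep_config() enables the endpoint
 *	write(fd, data, data_len);	// later writes become IN transfers
 */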
719 static ssize_t
ep_config (struct ep_data *data, const char *buf, size_t len)
721 {
722 struct usb_ep *ep;
723 u32 tag;
724 int value, length = len;
725
726 if (data->state != STATE_EP_READY) {
727 value = -EL2HLT;
728 goto fail;
729 }
730
731 value = len;
732 if (len < USB_DT_ENDPOINT_SIZE + 4)
733 goto fail0;
734
735 /* we might need to change message format someday */
736 memcpy(&tag, buf, 4);
737 if (tag != 1) {
738 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
739 goto fail0;
740 }
741 buf += 4;
742 len -= 4;
743
744 /* NOTE: audio endpoint extensions not accepted here;
745 * just don't include the extra bytes.
746 */
747
748 /* full/low speed descriptor, then high speed */
749 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
750 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
751 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
752 goto fail0;
753 if (len != USB_DT_ENDPOINT_SIZE) {
754 if (len != 2 * USB_DT_ENDPOINT_SIZE)
755 goto fail0;
756 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
757 USB_DT_ENDPOINT_SIZE);
758 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
759 || data->hs_desc.bDescriptorType
760 != USB_DT_ENDPOINT) {
761 DBG(data->dev, "config %s, bad hs length or type\n",
762 data->name);
763 goto fail0;
764 }
765 }
766
767 spin_lock_irq (&data->dev->lock);
768 if (data->dev->state == STATE_DEV_UNBOUND) {
769 value = -ENOENT;
770 goto gone;
771 } else {
772 ep = data->ep;
773 if (ep == NULL) {
774 value = -ENODEV;
775 goto gone;
776 }
777 }
778 switch (data->dev->gadget->speed) {
779 case USB_SPEED_LOW:
780 case USB_SPEED_FULL:
781 ep->desc = &data->desc;
782 break;
783 case USB_SPEED_HIGH:
784 /* fails if caller didn't provide that descriptor... */
785 ep->desc = &data->hs_desc;
786 break;
787 default:
788 DBG(data->dev, "unconnected, %s init abandoned\n",
789 data->name);
790 value = -EINVAL;
791 goto gone;
792 }
793 value = usb_ep_enable(ep);
794 if (value == 0) {
795 data->state = STATE_EP_ENABLED;
796 value = length;
797 }
798 gone:
799 spin_unlock_irq (&data->dev->lock);
800 if (value < 0) {
801 fail:
802 data->desc.bDescriptorType = 0;
803 data->hs_desc.bDescriptorType = 0;
804 }
805 return value;
806 fail0:
807 value = -EINVAL;
808 goto fail;
809 }
810
811 static int
ep_open (struct inode *inode, struct file *fd)
813 {
814 struct ep_data *data = inode->i_private;
815 int value = -EBUSY;
816
817 if (mutex_lock_interruptible(&data->lock) != 0)
818 return -EINTR;
819 spin_lock_irq (&data->dev->lock);
820 if (data->dev->state == STATE_DEV_UNBOUND)
821 value = -ENOENT;
822 else if (data->state == STATE_EP_DISABLED) {
823 value = 0;
824 data->state = STATE_EP_READY;
825 get_ep (data);
826 fd->private_data = data;
827 VDEBUG (data->dev, "%s ready\n", data->name);
828 } else
829 DBG (data->dev, "%s state %d\n",
830 data->name, data->state);
831 spin_unlock_irq (&data->dev->lock);
832 mutex_unlock(&data->lock);
833 return value;
834 }
835
836 /*----------------------------------------------------------------------*/
837
838 /* EP0 IMPLEMENTATION can be partly in userspace.
839 *
840 * Drivers that use this facility receive various events, including
841 * control requests the kernel doesn't handle. Drivers that don't
842 * use this facility may be too simple-minded for real applications.
843 */
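
/* A sketch of the userspace side (illustrative only): after reading a
 * GADGETFS_SETUP event from ep0, answer the request on the same fd.
 *
 *	struct usb_ctrlrequest *setup = &event.u.setup;
 *	if (setup->bRequestType & USB_DIR_IN)
 *		write(ep0, reply, reply_len);		// reply_len <= wLength
 *	else if (setup->wLength)
 *		read(ep0, req_data, setup->wLength);	// collect OUT data
 *	else
 *		read(ep0, NULL, 0);			// zero-length read acks it
 *
 * I/O in the "wrong direction" makes ep0 report a protocol stall instead;
 * see ep0_read() and ep0_write().
 */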
844
static inline void ep0_readable (struct dev_data *dev)
846 {
847 wake_up (&dev->wait);
848 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
849 }
850
static void clean_req (struct usb_ep *ep, struct usb_request *req)
852 {
853 struct dev_data *dev = ep->driver_data;
854
855 if (req->buf != dev->rbuf) {
856 kfree(req->buf);
857 req->buf = dev->rbuf;
858 }
859 req->complete = epio_complete;
860 dev->setup_out_ready = 0;
861 }
862
static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
864 {
865 struct dev_data *dev = ep->driver_data;
866 unsigned long flags;
867 int free = 1;
868
869 /* for control OUT, data must still get to userspace */
870 spin_lock_irqsave(&dev->lock, flags);
871 if (!dev->setup_in) {
872 dev->setup_out_error = (req->status != 0);
873 if (!dev->setup_out_error)
874 free = 0;
875 dev->setup_out_ready = 1;
876 ep0_readable (dev);
877 }
878
879 /* clean up as appropriate */
880 if (free && req->buf != &dev->rbuf)
881 clean_req (ep, req);
882 req->complete = epio_complete;
883 spin_unlock_irqrestore(&dev->lock, flags);
884 }
885
static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
887 {
888 struct dev_data *dev = ep->driver_data;
889
890 if (dev->setup_out_ready) {
891 DBG (dev, "ep0 request busy!\n");
892 return -EBUSY;
893 }
894 if (len > sizeof (dev->rbuf))
895 req->buf = kmalloc(len, GFP_ATOMIC);
896 if (req->buf == NULL) {
897 req->buf = dev->rbuf;
898 return -ENOMEM;
899 }
900 req->complete = ep0_complete;
901 req->length = len;
902 req->zero = 0;
903 return 0;
904 }
905
906 static ssize_t
ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
908 {
909 struct dev_data *dev = fd->private_data;
910 ssize_t retval;
911 enum ep0_state state;
912
913 spin_lock_irq (&dev->lock);
914 if (dev->state <= STATE_DEV_OPENED) {
915 retval = -EINVAL;
916 goto done;
917 }
918
919 /* report fd mode change before acting on it */
920 if (dev->setup_abort) {
921 dev->setup_abort = 0;
922 retval = -EIDRM;
923 goto done;
924 }
925
926 /* control DATA stage */
927 if ((state = dev->state) == STATE_DEV_SETUP) {
928
929 if (dev->setup_in) { /* stall IN */
930 VDEBUG(dev, "ep0in stall\n");
931 (void) usb_ep_set_halt (dev->gadget->ep0);
932 retval = -EL2HLT;
933 dev->state = STATE_DEV_CONNECTED;
934
935 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
936 struct usb_ep *ep = dev->gadget->ep0;
937 struct usb_request *req = dev->req;
938
939 if ((retval = setup_req (ep, req, 0)) == 0) {
940 ++dev->udc_usage;
941 spin_unlock_irq (&dev->lock);
942 retval = usb_ep_queue (ep, req, GFP_KERNEL);
943 spin_lock_irq (&dev->lock);
944 --dev->udc_usage;
945 }
946 dev->state = STATE_DEV_CONNECTED;
947
948 /* assume that was SET_CONFIGURATION */
949 if (dev->current_config) {
950 unsigned power;
951
952 if (gadget_is_dualspeed(dev->gadget)
953 && (dev->gadget->speed
954 == USB_SPEED_HIGH))
955 power = dev->hs_config->bMaxPower;
956 else
957 power = dev->config->bMaxPower;
958 usb_gadget_vbus_draw(dev->gadget, 2 * power);
959 }
960
961 } else { /* collect OUT data */
962 if ((fd->f_flags & O_NONBLOCK) != 0
963 && !dev->setup_out_ready) {
964 retval = -EAGAIN;
965 goto done;
966 }
967 spin_unlock_irq (&dev->lock);
968 retval = wait_event_interruptible (dev->wait,
969 dev->setup_out_ready != 0);
970
971 /* FIXME state could change from under us */
972 spin_lock_irq (&dev->lock);
973 if (retval)
974 goto done;
975
976 if (dev->state != STATE_DEV_SETUP) {
977 retval = -ECANCELED;
978 goto done;
979 }
980 dev->state = STATE_DEV_CONNECTED;
981
982 if (dev->setup_out_error)
983 retval = -EIO;
984 else {
985 len = min (len, (size_t)dev->req->actual);
986 ++dev->udc_usage;
987 spin_unlock_irq(&dev->lock);
988 if (copy_to_user (buf, dev->req->buf, len))
989 retval = -EFAULT;
990 else
991 retval = len;
992 spin_lock_irq(&dev->lock);
993 --dev->udc_usage;
994 clean_req (dev->gadget->ep0, dev->req);
995 /* NOTE userspace can't yet choose to stall */
996 }
997 }
998 goto done;
999 }
1000
1001 /* else normal: return event data */
1002 if (len < sizeof dev->event [0]) {
1003 retval = -EINVAL;
1004 goto done;
1005 }
1006 len -= len % sizeof (struct usb_gadgetfs_event);
1007 dev->usermode_setup = 1;
1008
1009 scan:
1010 /* return queued events right away */
1011 if (dev->ev_next != 0) {
1012 unsigned i, n;
1013
1014 n = len / sizeof (struct usb_gadgetfs_event);
1015 if (dev->ev_next < n)
1016 n = dev->ev_next;
1017
1018 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1019 for (i = 0; i < n; i++) {
1020 if (dev->event [i].type == GADGETFS_SETUP) {
1021 dev->state = STATE_DEV_SETUP;
1022 n = i + 1;
1023 break;
1024 }
1025 }
1026 spin_unlock_irq (&dev->lock);
1027 len = n * sizeof (struct usb_gadgetfs_event);
1028 if (copy_to_user (buf, &dev->event, len))
1029 retval = -EFAULT;
1030 else
1031 retval = len;
1032 if (len > 0) {
1033 /* NOTE this doesn't guard against broken drivers;
1034 * concurrent ep0 readers may lose events.
1035 */
1036 spin_lock_irq (&dev->lock);
1037 if (dev->ev_next > n) {
1038 memmove(&dev->event[0], &dev->event[n],
1039 sizeof (struct usb_gadgetfs_event)
1040 * (dev->ev_next - n));
1041 }
1042 dev->ev_next -= n;
1043 spin_unlock_irq (&dev->lock);
1044 }
1045 return retval;
1046 }
1047 if (fd->f_flags & O_NONBLOCK) {
1048 retval = -EAGAIN;
1049 goto done;
1050 }
1051
1052 switch (state) {
1053 default:
1054 DBG (dev, "fail %s, state %d\n", __func__, state);
1055 retval = -ESRCH;
1056 break;
1057 case STATE_DEV_UNCONNECTED:
1058 case STATE_DEV_CONNECTED:
1059 spin_unlock_irq (&dev->lock);
1060 DBG (dev, "%s wait\n", __func__);
1061
1062 /* wait for events */
1063 retval = wait_event_interruptible (dev->wait,
1064 dev->ev_next != 0);
1065 if (retval < 0)
1066 return retval;
1067 spin_lock_irq (&dev->lock);
1068 goto scan;
1069 }
1070
1071 done:
1072 spin_unlock_irq (&dev->lock);
1073 return retval;
1074 }
1075
1076 static struct usb_gadgetfs_event *
next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1078 {
1079 struct usb_gadgetfs_event *event;
1080 unsigned i;
1081
1082 switch (type) {
1083 /* these events purge the queue */
1084 case GADGETFS_DISCONNECT:
1085 if (dev->state == STATE_DEV_SETUP)
1086 dev->setup_abort = 1;
1087 fallthrough;
1088 case GADGETFS_CONNECT:
1089 dev->ev_next = 0;
1090 break;
1091 case GADGETFS_SETUP: /* previous request timed out */
1092 case GADGETFS_SUSPEND: /* same effect */
1093 /* these events can't be repeated */
1094 for (i = 0; i != dev->ev_next; i++) {
1095 if (dev->event [i].type != type)
1096 continue;
1097 DBG(dev, "discard old event[%d] %d\n", i, type);
1098 dev->ev_next--;
1099 if (i == dev->ev_next)
1100 break;
1101 /* indices start at zero, for simplicity */
1102 memmove (&dev->event [i], &dev->event [i + 1],
1103 sizeof (struct usb_gadgetfs_event)
1104 * (dev->ev_next - i));
1105 }
1106 break;
1107 default:
1108 BUG ();
1109 }
1110 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1111 event = &dev->event [dev->ev_next++];
1112 BUG_ON (dev->ev_next > N_EVENT);
1113 memset (event, 0, sizeof *event);
1114 event->type = type;
1115 return event;
1116 }
1117
1118 static ssize_t
ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1120 {
1121 struct dev_data *dev = fd->private_data;
1122 ssize_t retval = -ESRCH;
1123
1124 /* report fd mode change before acting on it */
1125 if (dev->setup_abort) {
1126 dev->setup_abort = 0;
1127 retval = -EIDRM;
1128
1129 /* data and/or status stage for control request */
1130 } else if (dev->state == STATE_DEV_SETUP) {
1131
1132 len = min_t(size_t, len, dev->setup_wLength);
1133 if (dev->setup_in) {
1134 retval = setup_req (dev->gadget->ep0, dev->req, len);
1135 if (retval == 0) {
1136 dev->state = STATE_DEV_CONNECTED;
1137 ++dev->udc_usage;
1138 spin_unlock_irq (&dev->lock);
1139 if (copy_from_user (dev->req->buf, buf, len))
1140 retval = -EFAULT;
1141 else {
1142 if (len < dev->setup_wLength)
1143 dev->req->zero = 1;
1144 retval = usb_ep_queue (
1145 dev->gadget->ep0, dev->req,
1146 GFP_KERNEL);
1147 }
1148 spin_lock_irq(&dev->lock);
1149 --dev->udc_usage;
1150 if (retval < 0) {
1151 clean_req (dev->gadget->ep0, dev->req);
1152 } else
1153 retval = len;
1154
1155 return retval;
1156 }
1157
1158 /* can stall some OUT transfers */
1159 } else if (dev->setup_can_stall) {
1160 VDEBUG(dev, "ep0out stall\n");
1161 (void) usb_ep_set_halt (dev->gadget->ep0);
1162 retval = -EL2HLT;
1163 dev->state = STATE_DEV_CONNECTED;
1164 } else {
1165 DBG(dev, "bogus ep0out stall!\n");
1166 }
1167 } else
1168 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1169
1170 return retval;
1171 }
1172
1173 static int
ep0_fasync (int f, struct file *fd, int on)
1175 {
1176 struct dev_data *dev = fd->private_data;
1177 // caller must F_SETOWN before signal delivery happens
1178 VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1179 return fasync_helper (f, fd, on, &dev->fasync);
1180 }
1181
1182 static struct usb_gadget_driver gadgetfs_driver;
1183
1184 static int
dev_release (struct inode *inode, struct file *fd)
1186 {
1187 struct dev_data *dev = fd->private_data;
1188
1189 /* closing ep0 === shutdown all */
1190
1191 if (dev->gadget_registered) {
1192 usb_gadget_unregister_driver (&gadgetfs_driver);
1193 dev->gadget_registered = false;
1194 }
1195
1196 /* at this point "good" hardware has disconnected the
1197 * device from USB; the host won't see it any more.
1198 * alternatively, all host requests will time out.
1199 */
1200
1201 kfree (dev->buf);
1202 dev->buf = NULL;
1203
1204 /* other endpoints were all decoupled from this device */
1205 spin_lock_irq(&dev->lock);
1206 dev->state = STATE_DEV_DISABLED;
1207 spin_unlock_irq(&dev->lock);
1208
1209 put_dev (dev);
1210 return 0;
1211 }
1212
1213 static __poll_t
ep0_poll (struct file *fd, poll_table *wait)
1215 {
1216 struct dev_data *dev = fd->private_data;
1217 __poll_t mask = 0;
1218
1219 if (dev->state <= STATE_DEV_OPENED)
1220 return DEFAULT_POLLMASK;
1221
1222 poll_wait(fd, &dev->wait, wait);
1223
1224 spin_lock_irq(&dev->lock);
1225
1226 /* report fd mode change before acting on it */
1227 if (dev->setup_abort) {
1228 dev->setup_abort = 0;
1229 mask = EPOLLHUP;
1230 goto out;
1231 }
1232
1233 if (dev->state == STATE_DEV_SETUP) {
1234 if (dev->setup_in || dev->setup_can_stall)
1235 mask = EPOLLOUT;
1236 } else {
1237 if (dev->ev_next != 0)
1238 mask = EPOLLIN;
1239 }
1240 out:
1241 spin_unlock_irq(&dev->lock);
1242 return mask;
1243 }
1244
static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1246 {
1247 struct dev_data *dev = fd->private_data;
1248 struct usb_gadget *gadget = dev->gadget;
1249 long ret = -ENOTTY;
1250
1251 spin_lock_irq(&dev->lock);
1252 if (dev->state == STATE_DEV_OPENED ||
1253 dev->state == STATE_DEV_UNBOUND) {
1254 /* Not bound to a UDC */
1255 } else if (gadget->ops->ioctl) {
1256 ++dev->udc_usage;
1257 spin_unlock_irq(&dev->lock);
1258
1259 ret = gadget->ops->ioctl (gadget, code, value);
1260
1261 spin_lock_irq(&dev->lock);
1262 --dev->udc_usage;
1263 }
1264 spin_unlock_irq(&dev->lock);
1265
1266 return ret;
1267 }
1268
1269 /*----------------------------------------------------------------------*/
1270
1271 /* The in-kernel gadget driver handles most ep0 issues, in particular
1272 * enumerating the single configuration (as provided from user space).
1273 *
1274 * Unrecognized ep0 requests may be handled in user space.
1275 */
1276
static void make_qualifier (struct dev_data *dev)
1278 {
1279 struct usb_qualifier_descriptor qual;
1280 struct usb_device_descriptor *desc;
1281
1282 qual.bLength = sizeof qual;
1283 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1284 qual.bcdUSB = cpu_to_le16 (0x0200);
1285
1286 desc = dev->dev;
1287 qual.bDeviceClass = desc->bDeviceClass;
1288 qual.bDeviceSubClass = desc->bDeviceSubClass;
1289 qual.bDeviceProtocol = desc->bDeviceProtocol;
1290
1291 /* assumes ep0 uses the same value for both speeds ... */
1292 qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1293
1294 qual.bNumConfigurations = 1;
1295 qual.bRESERVED = 0;
1296
1297 memcpy (dev->rbuf, &qual, sizeof qual);
1298 }
1299
1300 static int
config_buf (struct dev_data *dev, u8 type, unsigned index)
1302 {
1303 int len;
1304 int hs = 0;
1305
1306 /* only one configuration */
1307 if (index > 0)
1308 return -EINVAL;
1309
1310 if (gadget_is_dualspeed(dev->gadget)) {
1311 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1312 if (type == USB_DT_OTHER_SPEED_CONFIG)
1313 hs = !hs;
1314 }
1315 if (hs) {
1316 dev->req->buf = dev->hs_config;
1317 len = le16_to_cpu(dev->hs_config->wTotalLength);
1318 } else {
1319 dev->req->buf = dev->config;
1320 len = le16_to_cpu(dev->config->wTotalLength);
1321 }
1322 ((u8 *)dev->req->buf) [1] = type;
1323 return len;
1324 }
1325
1326 static int
gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1328 {
1329 struct dev_data *dev = get_gadget_data (gadget);
1330 struct usb_request *req = dev->req;
1331 int value = -EOPNOTSUPP;
1332 struct usb_gadgetfs_event *event;
1333 u16 w_value = le16_to_cpu(ctrl->wValue);
1334 u16 w_length = le16_to_cpu(ctrl->wLength);
1335
1336 if (w_length > RBUF_SIZE) {
1337 if (ctrl->bRequestType & USB_DIR_IN) {
1338 /* Cast away the const, we are going to overwrite on purpose. */
1339 __le16 *temp = (__le16 *)&ctrl->wLength;
1340
1341 *temp = cpu_to_le16(RBUF_SIZE);
1342 w_length = RBUF_SIZE;
1343 } else {
1344 return value;
1345 }
1346 }
1347
1348 spin_lock (&dev->lock);
1349 dev->setup_abort = 0;
1350 if (dev->state == STATE_DEV_UNCONNECTED) {
1351 if (gadget_is_dualspeed(gadget)
1352 && gadget->speed == USB_SPEED_HIGH
1353 && dev->hs_config == NULL) {
1354 spin_unlock(&dev->lock);
1355 ERROR (dev, "no high speed config??\n");
1356 return -EINVAL;
1357 }
1358
1359 dev->state = STATE_DEV_CONNECTED;
1360
1361 INFO (dev, "connected\n");
1362 event = next_event (dev, GADGETFS_CONNECT);
1363 event->u.speed = gadget->speed;
1364 ep0_readable (dev);
1365
1366 /* host may have given up waiting for response. we can miss control
1367 * requests handled lower down (device/endpoint status and features);
1368 * then ep0_{read,write} will report the wrong status. controller
1369 * driver will have aborted pending i/o.
1370 */
1371 } else if (dev->state == STATE_DEV_SETUP)
1372 dev->setup_abort = 1;
1373
1374 req->buf = dev->rbuf;
1375 req->context = NULL;
1376 switch (ctrl->bRequest) {
1377
1378 case USB_REQ_GET_DESCRIPTOR:
1379 if (ctrl->bRequestType != USB_DIR_IN)
1380 goto unrecognized;
1381 switch (w_value >> 8) {
1382
1383 case USB_DT_DEVICE:
1384 value = min (w_length, (u16) sizeof *dev->dev);
1385 dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1386 req->buf = dev->dev;
1387 break;
1388 case USB_DT_DEVICE_QUALIFIER:
1389 if (!dev->hs_config)
1390 break;
1391 value = min (w_length, (u16)
1392 sizeof (struct usb_qualifier_descriptor));
1393 make_qualifier (dev);
1394 break;
1395 case USB_DT_OTHER_SPEED_CONFIG:
1396 case USB_DT_CONFIG:
1397 value = config_buf (dev,
1398 w_value >> 8,
1399 w_value & 0xff);
1400 if (value >= 0)
1401 value = min (w_length, (u16) value);
1402 break;
1403 case USB_DT_STRING:
1404 goto unrecognized;
1405
1406 default: // all others are errors
1407 break;
1408 }
1409 break;
1410
1411 /* currently one config, two speeds */
1412 case USB_REQ_SET_CONFIGURATION:
1413 if (ctrl->bRequestType != 0)
1414 goto unrecognized;
1415 if (0 == (u8) w_value) {
1416 value = 0;
1417 dev->current_config = 0;
1418 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1419 // user mode expected to disable endpoints
1420 } else {
1421 u8 config, power;
1422
1423 if (gadget_is_dualspeed(gadget)
1424 && gadget->speed == USB_SPEED_HIGH) {
1425 config = dev->hs_config->bConfigurationValue;
1426 power = dev->hs_config->bMaxPower;
1427 } else {
1428 config = dev->config->bConfigurationValue;
1429 power = dev->config->bMaxPower;
1430 }
1431
1432 if (config == (u8) w_value) {
1433 value = 0;
1434 dev->current_config = config;
1435 usb_gadget_vbus_draw(gadget, 2 * power);
1436 }
1437 }
1438
1439 /* report SET_CONFIGURATION like any other control request,
1440 * except that usermode may not stall this. the next
	 * request mustn't be allowed to start until this finishes:
1442 * endpoints and threads set up, etc.
1443 *
1444 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1445 * has bad/racey automagic that prevents synchronizing here.
1446 * even kernel mode drivers often miss them.
1447 */
1448 if (value == 0) {
1449 INFO (dev, "configuration #%d\n", dev->current_config);
1450 usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1451 if (dev->usermode_setup) {
1452 dev->setup_can_stall = 0;
1453 goto delegate;
1454 }
1455 }
1456 break;
1457
1458 #ifndef CONFIG_USB_PXA25X
1459 /* PXA automagically handles this request too */
1460 case USB_REQ_GET_CONFIGURATION:
1461 if (ctrl->bRequestType != 0x80)
1462 goto unrecognized;
1463 *(u8 *)req->buf = dev->current_config;
1464 value = min (w_length, (u16) 1);
1465 break;
1466 #endif
1467
1468 default:
1469 unrecognized:
1470 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1471 dev->usermode_setup ? "delegate" : "fail",
1472 ctrl->bRequestType, ctrl->bRequest,
1473 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1474
1475 /* if there's an ep0 reader, don't stall */
1476 if (dev->usermode_setup) {
1477 dev->setup_can_stall = 1;
1478 delegate:
1479 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1480 ? 1 : 0;
1481 dev->setup_wLength = w_length;
1482 dev->setup_out_ready = 0;
1483 dev->setup_out_error = 0;
1484
1485 /* read DATA stage for OUT right away */
1486 if (unlikely (!dev->setup_in && w_length)) {
1487 value = setup_req (gadget->ep0, dev->req,
1488 w_length);
1489 if (value < 0)
1490 break;
1491
1492 ++dev->udc_usage;
1493 spin_unlock (&dev->lock);
1494 value = usb_ep_queue (gadget->ep0, dev->req,
1495 GFP_KERNEL);
1496 spin_lock (&dev->lock);
1497 --dev->udc_usage;
1498 if (value < 0) {
1499 clean_req (gadget->ep0, dev->req);
1500 break;
1501 }
1502
1503 /* we can't currently stall these */
1504 dev->setup_can_stall = 0;
1505 }
1506
1507 /* state changes when reader collects event */
1508 event = next_event (dev, GADGETFS_SETUP);
1509 event->u.setup = *ctrl;
1510 ep0_readable (dev);
1511 spin_unlock (&dev->lock);
1512 return 0;
1513 }
1514 }
1515
1516 /* proceed with data transfer and status phases? */
1517 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1518 req->length = value;
1519 req->zero = value < w_length;
1520
1521 ++dev->udc_usage;
1522 spin_unlock (&dev->lock);
1523 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1524 spin_lock(&dev->lock);
1525 --dev->udc_usage;
1526 spin_unlock(&dev->lock);
1527 if (value < 0) {
1528 DBG (dev, "ep_queue --> %d\n", value);
1529 req->status = 0;
1530 }
1531 return value;
1532 }
1533
1534 /* device stalls when value < 0 */
1535 spin_unlock (&dev->lock);
1536 return value;
1537 }
1538
static void destroy_ep_files (struct dev_data *dev)
1540 {
1541 DBG (dev, "%s %d\n", __func__, dev->state);
1542
1543 /* dev->state must prevent interference */
1544 spin_lock_irq (&dev->lock);
1545 while (!list_empty(&dev->epfiles)) {
1546 struct ep_data *ep;
1547 struct inode *parent;
1548 struct dentry *dentry;
1549
1550 /* break link to FS */
1551 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1552 list_del_init (&ep->epfiles);
1553 spin_unlock_irq (&dev->lock);
1554
1555 dentry = ep->dentry;
1556 ep->dentry = NULL;
1557 parent = d_inode(dentry->d_parent);
1558
1559 /* break link to controller */
1560 mutex_lock(&ep->lock);
1561 if (ep->state == STATE_EP_ENABLED)
1562 (void) usb_ep_disable (ep->ep);
1563 ep->state = STATE_EP_UNBOUND;
1564 usb_ep_free_request (ep->ep, ep->req);
1565 ep->ep = NULL;
1566 mutex_unlock(&ep->lock);
1567
1568 wake_up (&ep->wait);
1569 put_ep (ep);
1570
1571 /* break link to dcache */
1572 inode_lock(parent);
1573 d_delete (dentry);
1574 dput (dentry);
1575 inode_unlock(parent);
1576
1577 spin_lock_irq (&dev->lock);
1578 }
1579 spin_unlock_irq (&dev->lock);
1580 }
1581
1582
1583 static struct dentry *
1584 gadgetfs_create_file (struct super_block *sb, char const *name,
1585 void *data, const struct file_operations *fops);
1586
static int activate_ep_files (struct dev_data *dev)
1588 {
1589 struct usb_ep *ep;
1590 struct ep_data *data;
1591
1592 gadget_for_each_ep (ep, dev->gadget) {
1593
1594 data = kzalloc(sizeof(*data), GFP_KERNEL);
1595 if (!data)
1596 goto enomem0;
1597 data->state = STATE_EP_DISABLED;
1598 mutex_init(&data->lock);
1599 init_waitqueue_head (&data->wait);
1600
1601 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1602 refcount_set (&data->count, 1);
1603 data->dev = dev;
1604 get_dev (dev);
1605
1606 data->ep = ep;
1607 ep->driver_data = data;
1608
1609 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1610 if (!data->req)
1611 goto enomem1;
1612
1613 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1614 data, &ep_io_operations);
1615 if (!data->dentry)
1616 goto enomem2;
1617 list_add_tail (&data->epfiles, &dev->epfiles);
1618 }
1619 return 0;
1620
1621 enomem2:
1622 usb_ep_free_request (ep, data->req);
1623 enomem1:
1624 put_dev (dev);
1625 kfree (data);
1626 enomem0:
1627 DBG (dev, "%s enomem\n", __func__);
1628 destroy_ep_files (dev);
1629 return -ENOMEM;
1630 }
1631
1632 static void
gadgetfs_unbind (struct usb_gadget *gadget)
1634 {
1635 struct dev_data *dev = get_gadget_data (gadget);
1636
1637 DBG (dev, "%s\n", __func__);
1638
1639 spin_lock_irq (&dev->lock);
1640 dev->state = STATE_DEV_UNBOUND;
1641 while (dev->udc_usage > 0) {
1642 spin_unlock_irq(&dev->lock);
1643 usleep_range(1000, 2000);
1644 spin_lock_irq(&dev->lock);
1645 }
1646 spin_unlock_irq (&dev->lock);
1647
1648 destroy_ep_files (dev);
1649 gadget->ep0->driver_data = NULL;
1650 set_gadget_data (gadget, NULL);
1651
1652 /* we've already been disconnected ... no i/o is active */
1653 if (dev->req)
1654 usb_ep_free_request (gadget->ep0, dev->req);
1655 DBG (dev, "%s done\n", __func__);
1656 put_dev (dev);
1657 }
1658
1659 static struct dev_data *the_device;
1660
static int gadgetfs_bind(struct usb_gadget *gadget,
1662 struct usb_gadget_driver *driver)
1663 {
1664 struct dev_data *dev = the_device;
1665
1666 if (!dev)
1667 return -ESRCH;
1668 if (0 != strcmp (CHIP, gadget->name)) {
1669 pr_err("%s expected %s controller not %s\n",
1670 shortname, CHIP, gadget->name);
1671 return -ENODEV;
1672 }
1673
1674 set_gadget_data (gadget, dev);
1675 dev->gadget = gadget;
1676 gadget->ep0->driver_data = dev;
1677
1678 /* preallocate control response and buffer */
1679 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1680 if (!dev->req)
1681 goto enomem;
1682 dev->req->context = NULL;
1683 dev->req->complete = epio_complete;
1684
1685 if (activate_ep_files (dev) < 0)
1686 goto enomem;
1687
1688 INFO (dev, "bound to %s driver\n", gadget->name);
1689 spin_lock_irq(&dev->lock);
1690 dev->state = STATE_DEV_UNCONNECTED;
1691 spin_unlock_irq(&dev->lock);
1692 get_dev (dev);
1693 return 0;
1694
1695 enomem:
1696 gadgetfs_unbind (gadget);
1697 return -ENOMEM;
1698 }
1699
1700 static void
gadgetfs_disconnect (struct usb_gadget *gadget)
1702 {
1703 struct dev_data *dev = get_gadget_data (gadget);
1704 unsigned long flags;
1705
1706 spin_lock_irqsave (&dev->lock, flags);
1707 if (dev->state == STATE_DEV_UNCONNECTED)
1708 goto exit;
1709 dev->state = STATE_DEV_UNCONNECTED;
1710
1711 INFO (dev, "disconnected\n");
1712 next_event (dev, GADGETFS_DISCONNECT);
1713 ep0_readable (dev);
1714 exit:
1715 spin_unlock_irqrestore (&dev->lock, flags);
1716 }
1717
1718 static void
gadgetfs_suspend (struct usb_gadget *gadget)
1720 {
1721 struct dev_data *dev = get_gadget_data (gadget);
1722 unsigned long flags;
1723
1724 INFO (dev, "suspended from state %d\n", dev->state);
1725 spin_lock_irqsave(&dev->lock, flags);
1726 switch (dev->state) {
1727 case STATE_DEV_SETUP: // VERY odd... host died??
1728 case STATE_DEV_CONNECTED:
1729 case STATE_DEV_UNCONNECTED:
1730 next_event (dev, GADGETFS_SUSPEND);
1731 ep0_readable (dev);
1732 fallthrough;
1733 default:
1734 break;
1735 }
1736 spin_unlock_irqrestore(&dev->lock, flags);
1737 }
1738
1739 static struct usb_gadget_driver gadgetfs_driver = {
1740 .function = (char *) driver_desc,
1741 .bind = gadgetfs_bind,
1742 .unbind = gadgetfs_unbind,
1743 .setup = gadgetfs_setup,
1744 .reset = gadgetfs_disconnect,
1745 .disconnect = gadgetfs_disconnect,
1746 .suspend = gadgetfs_suspend,
1747
1748 .driver = {
1749 .name = shortname,
1750 },
1751 };
1752
1753 /*----------------------------------------------------------------------*/
1754 /* DEVICE INITIALIZATION
1755 *
1756 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1757 * status = write (fd, descriptors, sizeof descriptors)
1758 *
1759 * That write establishes the device configuration, so the kernel can
1760 * bind to the controller ... guaranteeing it can handle enumeration
1761 * at all necessary speeds. Descriptor order is:
1762 *
1763 * . message tag (u32, host order) ... for now, must be zero; it
1764 * would change to support features like multi-config devices
1765 * . full/low speed config ... all wTotalLength bytes (with interface,
1766 * class, altsetting, endpoint, and other descriptors)
1767 * . high speed config ... all descriptors, for high speed operation;
1768 * this one's optional except for high-speed hardware
1769 * . device descriptor
1770 *
1771 * Endpoints are not yet enabled. Drivers must wait until device
1772 * configuration and interface altsetting changes create
1773 * the need to configure (or unconfigure) them.
1774 *
1775 * After initialization, the device stays active for as long as that
1776 * $CHIP file is open. Events must then be read from that descriptor,
1777 * such as configuration notifications.
1778 */
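
/* A sketch of that initial write (illustrative only; buffer sizing and
 * descriptor contents are up to the userspace driver):
 *
 *	u8 buf[4096], *p = buf;
 *	u32 tag = 0;
 *	memcpy(p, &tag, 4);			p += 4;
 *	memcpy(p, fs_config, fs_total);		p += fs_total;	// wTotalLength bytes
 *	memcpy(p, hs_config, hs_total);		p += hs_total;	// optional
 *	memcpy(p, &device_desc, USB_DT_DEVICE_SIZE);
 *	p += USB_DT_DEVICE_SIZE;
 *
 *	int fd = open("/dev/gadget/$CHIP", O_RDWR);
 *	write(fd, buf, p - buf);		// see dev_config()
 *
 * On success the kernel binds to the UDC (gadgetfs_bind()); enumeration
 * and later events then show up on this same fd.
 */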
1779
static int is_valid_config(struct usb_config_descriptor *config,
1781 unsigned int total)
1782 {
1783 return config->bDescriptorType == USB_DT_CONFIG
1784 && config->bLength == USB_DT_CONFIG_SIZE
1785 && total >= USB_DT_CONFIG_SIZE
1786 && config->bConfigurationValue != 0
1787 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1788 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1789 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1790 /* FIXME check lengths: walk to end */
1791 }
1792
1793 static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1795 {
1796 struct dev_data *dev = fd->private_data;
1797 ssize_t value, length = len;
1798 unsigned total;
1799 u32 tag;
1800 char *kbuf;
1801
1802 spin_lock_irq(&dev->lock);
1803 if (dev->state > STATE_DEV_OPENED) {
1804 value = ep0_write(fd, buf, len, ptr);
1805 spin_unlock_irq(&dev->lock);
1806 return value;
1807 }
1808 spin_unlock_irq(&dev->lock);
1809
1810 if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1811 (len > PAGE_SIZE * 4))
1812 return -EINVAL;
1813
1814 /* we might need to change message format someday */
1815 if (copy_from_user (&tag, buf, 4))
1816 return -EFAULT;
1817 if (tag != 0)
1818 return -EINVAL;
1819 buf += 4;
1820 length -= 4;
1821
1822 kbuf = memdup_user(buf, length);
1823 if (IS_ERR(kbuf))
1824 return PTR_ERR(kbuf);
1825
1826 spin_lock_irq (&dev->lock);
1827 value = -EINVAL;
1828 if (dev->buf) {
1829 kfree(kbuf);
1830 goto fail;
1831 }
1832 dev->buf = kbuf;
1833
1834 /* full or low speed config */
1835 dev->config = (void *) kbuf;
1836 total = le16_to_cpu(dev->config->wTotalLength);
1837 if (!is_valid_config(dev->config, total) ||
1838 total > length - USB_DT_DEVICE_SIZE)
1839 goto fail;
1840 kbuf += total;
1841 length -= total;
1842
1843 /* optional high speed config */
1844 if (kbuf [1] == USB_DT_CONFIG) {
1845 dev->hs_config = (void *) kbuf;
1846 total = le16_to_cpu(dev->hs_config->wTotalLength);
1847 if (!is_valid_config(dev->hs_config, total) ||
1848 total > length - USB_DT_DEVICE_SIZE)
1849 goto fail;
1850 kbuf += total;
1851 length -= total;
1852 } else {
1853 dev->hs_config = NULL;
1854 }
1855
1856 /* could support multiple configs, using another encoding! */
1857
1858 /* device descriptor (tweaked for paranoia) */
1859 if (length != USB_DT_DEVICE_SIZE)
1860 goto fail;
1861 dev->dev = (void *)kbuf;
1862 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1863 || dev->dev->bDescriptorType != USB_DT_DEVICE
1864 || dev->dev->bNumConfigurations != 1)
1865 goto fail;
1866 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1867
1868 /* triggers gadgetfs_bind(); then we can enumerate. */
1869 spin_unlock_irq (&dev->lock);
1870 if (dev->hs_config)
1871 gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1872 else
1873 gadgetfs_driver.max_speed = USB_SPEED_FULL;
1874
1875 value = usb_gadget_probe_driver(&gadgetfs_driver);
1876 if (value != 0) {
1877 kfree (dev->buf);
1878 dev->buf = NULL;
1879 } else {
1880 /* at this point "good" hardware has for the first time
		 * let the USB host see us.  alternatively, if users
1882 * unplug/replug that will clear all the error state.
1883 *
1884 * note: everything running before here was guaranteed
1885 * to choke driver model style diagnostics. from here
1886 * on, they can work ... except in cleanup paths that
1887 * kick in after the ep0 descriptor is closed.
1888 */
1889 value = len;
1890 dev->gadget_registered = true;
1891 }
1892 return value;
1893
1894 fail:
1895 spin_unlock_irq (&dev->lock);
1896 pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1897 kfree (dev->buf);
1898 dev->buf = NULL;
1899 return value;
1900 }
1901
1902 static int
dev_open (struct inode *inode, struct file *fd)
1904 {
1905 struct dev_data *dev = inode->i_private;
1906 int value = -EBUSY;
1907
1908 spin_lock_irq(&dev->lock);
1909 if (dev->state == STATE_DEV_DISABLED) {
1910 dev->ev_next = 0;
1911 dev->state = STATE_DEV_OPENED;
1912 fd->private_data = dev;
1913 get_dev (dev);
1914 value = 0;
1915 }
1916 spin_unlock_irq(&dev->lock);
1917 return value;
1918 }
1919
1920 static const struct file_operations ep0_operations = {
1921 .llseek = no_llseek,
1922
1923 .open = dev_open,
1924 .read = ep0_read,
1925 .write = dev_config,
1926 .fasync = ep0_fasync,
1927 .poll = ep0_poll,
1928 .unlocked_ioctl = dev_ioctl,
1929 .release = dev_release,
1930 };
1931
1932 /*----------------------------------------------------------------------*/
1933
1934 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1935 *
1936 * Mounting the filesystem creates a controller file, used first for
1937 * device configuration then later for event monitoring.
1938 */
1939
1940
1941 /* FIXME PAM etc could set this security policy without mount options
 * if epfiles inherited ownership and permissions from ep0 ...
1943 */
1944
1945 static unsigned default_uid;
1946 static unsigned default_gid;
1947 static unsigned default_perm = S_IRUSR | S_IWUSR;
1948
1949 module_param (default_uid, uint, 0644);
1950 module_param (default_gid, uint, 0644);
1951 module_param (default_perm, uint, 0644);
1952
1953
1954 static struct inode *
gadgetfs_make_inode (struct super_block *sb,
1956 void *data, const struct file_operations *fops,
1957 int mode)
1958 {
1959 struct inode *inode = new_inode (sb);
1960
1961 if (inode) {
1962 inode->i_ino = get_next_ino();
1963 inode->i_mode = mode;
1964 inode->i_uid = make_kuid(&init_user_ns, default_uid);
1965 inode->i_gid = make_kgid(&init_user_ns, default_gid);
1966 inode->i_atime = inode->i_mtime = inode->i_ctime
1967 = current_time(inode);
1968 inode->i_private = data;
1969 inode->i_fop = fops;
1970 }
1971 return inode;
1972 }
1973
1974 /* creates in fs root directory, so non-renamable and non-linkable.
1975 * so inode and dentry are paired, until device reconfig.
1976 */
1977 static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
1979 void *data, const struct file_operations *fops)
1980 {
1981 struct dentry *dentry;
1982 struct inode *inode;
1983
1984 dentry = d_alloc_name(sb->s_root, name);
1985 if (!dentry)
1986 return NULL;
1987
1988 inode = gadgetfs_make_inode (sb, data, fops,
1989 S_IFREG | (default_perm & S_IRWXUGO));
1990 if (!inode) {
1991 dput(dentry);
1992 return NULL;
1993 }
1994 d_add (dentry, inode);
1995 return dentry;
1996 }
1997
1998 static const struct super_operations gadget_fs_operations = {
1999 .statfs = simple_statfs,
2000 .drop_inode = generic_delete_inode,
2001 };
2002
2003 static int
gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
2005 {
2006 struct inode *inode;
2007 struct dev_data *dev;
2008
2009 if (the_device)
2010 return -ESRCH;
2011
2012 CHIP = usb_get_gadget_udc_name();
2013 if (!CHIP)
2014 return -ENODEV;
2015
2016 /* superblock */
2017 sb->s_blocksize = PAGE_SIZE;
2018 sb->s_blocksize_bits = PAGE_SHIFT;
2019 sb->s_magic = GADGETFS_MAGIC;
2020 sb->s_op = &gadget_fs_operations;
2021 sb->s_time_gran = 1;
2022
2023 /* root inode */
2024 inode = gadgetfs_make_inode (sb,
2025 NULL, &simple_dir_operations,
2026 S_IFDIR | S_IRUGO | S_IXUGO);
2027 if (!inode)
2028 goto Enomem;
2029 inode->i_op = &simple_dir_inode_operations;
2030 if (!(sb->s_root = d_make_root (inode)))
2031 goto Enomem;
2032
2033 /* the ep0 file is named after the controller we expect;
2034 * user mode code can use it for sanity checks, like we do.
2035 */
2036 dev = dev_new ();
2037 if (!dev)
2038 goto Enomem;
2039
2040 dev->sb = sb;
2041 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2042 if (!dev->dentry) {
2043 put_dev(dev);
2044 goto Enomem;
2045 }
2046
2047 /* other endpoint files are available after hardware setup,
2048 * from binding to a controller.
2049 */
2050 the_device = dev;
2051 return 0;
2052
2053 Enomem:
2054 kfree(CHIP);
2055 CHIP = NULL;
2056
2057 return -ENOMEM;
2058 }
2059
2060 /* "mount -t gadgetfs path /dev/gadget" ends up here */
static int gadgetfs_get_tree(struct fs_context *fc)
2062 {
2063 return get_tree_single(fc, gadgetfs_fill_super);
2064 }
2065
2066 static const struct fs_context_operations gadgetfs_context_ops = {
2067 .get_tree = gadgetfs_get_tree,
2068 };
2069
static int gadgetfs_init_fs_context(struct fs_context *fc)
2071 {
2072 fc->ops = &gadgetfs_context_ops;
2073 return 0;
2074 }
2075
2076 static void
gadgetfs_kill_sb (struct super_block *sb)
2078 {
2079 kill_litter_super (sb);
2080 if (the_device) {
2081 put_dev (the_device);
2082 the_device = NULL;
2083 }
2084 kfree(CHIP);
2085 CHIP = NULL;
2086 }
2087
2088 /*----------------------------------------------------------------------*/
2089
2090 static struct file_system_type gadgetfs_type = {
2091 .owner = THIS_MODULE,
2092 .name = shortname,
2093 .init_fs_context = gadgetfs_init_fs_context,
2094 .kill_sb = gadgetfs_kill_sb,
2095 };
2096 MODULE_ALIAS_FS("gadgetfs");
2097
2098 /*----------------------------------------------------------------------*/
2099
static int __init init (void)
2101 {
2102 int status;
2103
2104 status = register_filesystem (&gadgetfs_type);
2105 if (status == 0)
2106 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2107 shortname, driver_desc);
2108 return status;
2109 }
2110 module_init (init);
2111
static void __exit cleanup (void)
2113 {
2114 pr_debug ("unregister %s\n", shortname);
2115 unregister_filesystem (&gadgetfs_type);
2116 }
2117 module_exit (cleanup);
2118
2119