// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/wwan.h>
#include <net/rtnetlink.h>
#include <uapi/linux/wwan.h>

/* Maximum number of minors in use */
#define WWAN_MAX_MINORS		(1 << MINORBITS)

static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;
static struct dentry *wwan_debugfs_dir;

#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)

/* WWAN port flags */
#define WWAN_PORT_TX_OFF	0

/**
 * struct wwan_device - The structure that defines a WWAN device
 *
 * @id: WWAN device unique ID.
 * @dev: Underlying device.
 * @port_id: Current available port ID to pick.
 * @ops: wwan device ops
 * @ops_ctxt: context to pass to ops
 * @debugfs_dir: WWAN device debugfs dir
 */
struct wwan_device {
	unsigned int id;
	struct device dev;
	atomic_t port_id;
	const struct wwan_ops *ops;
	void *ops_ctxt;
#ifdef CONFIG_WWAN_DEBUGFS
	struct dentry *debugfs_dir;
#endif
};

/**
 * struct wwan_port - The structure that defines a WWAN port
 * @type: Port type
 * @start_count: Port start counter
 * @flags: Store port state and capabilities
 * @ops: Pointer to WWAN port operations
 * @ops_lock: Protect port ops
 * @dev: Underlying device
 * @rxq: Buffer inbound queue
 * @waitqueue: The waitqueue for port fops (read/write/poll)
 * @data_lock: Port specific data access serialization
 * @at_data: AT port specific data
 */
struct wwan_port {
	enum wwan_port_type type;
	unsigned int start_count;
	unsigned long flags;
	const struct wwan_port_ops *ops;
	struct mutex ops_lock; /* Serialize ops + protect against removal */
	struct device dev;
	struct sk_buff_head rxq;
	wait_queue_head_t waitqueue;
	struct mutex data_lock;	/* Port specific data access serialization */
	union {
		struct {
			struct ktermios termios;
			int mdmbits;
		} at_data;
	};
};

static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct wwan_device *wwan = to_wwan_dev(dev);

	return sprintf(buf, "%d\n", wwan->id);
}
static DEVICE_ATTR_RO(index);

static struct attribute *wwan_dev_attrs[] = {
	&dev_attr_index.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_dev);

static void wwan_dev_destroy(struct device *dev)
{
	struct wwan_device *wwandev = to_wwan_dev(dev);

	ida_free(&wwan_dev_ids, wwandev->id);
	kfree(wwandev);
}

static const struct device_type wwan_dev_type = {
	.name = "wwan_dev",
	.release = wwan_dev_destroy,
	.groups = wwan_dev_groups,
};

static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
	return (dev->type == &wwan_dev_type &&
		(dev->parent == parent || dev == parent));
}

static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

static int wwan_dev_name_match(struct device *dev, const void *name)
{
	return dev->type == &wwan_dev_type &&
	       strcmp(dev_name(dev), name) == 0;
}

static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *wwan_get_debugfs_dir(struct device *parent)
{
	struct wwan_device *wwandev;

	wwandev = wwan_dev_get_by_parent(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);

static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
{
	struct wwan_device *wwandev;

	if (dev->type != &wwan_dev_type)
		return 0;

	wwandev = to_wwan_dev(dev);

	return wwandev->debugfs_dir == dir;
}

static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

void wwan_put_debugfs_dir(struct dentry *dir)
{
	struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);

	if (WARN_ON(IS_ERR(wwandev)))
		return;

	/* wwan_dev_get_by_debugfs() also got a reference */
	put_device(&wwandev->dev);
	put_device(&wwandev->dev);
}
EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif
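
/*
 * Illustrative sketch (not part of the original file): a hypothetical driver
 * that wants its own debugfs entries under the shared wwanX directory could
 * pair the two helpers above roughly as follows, assuming CONFIG_WWAN_DEBUGFS
 * is enabled and "drv"/"drv_state_fops" are driver-private placeholders:
 *
 *	struct dentry *dir = wwan_get_debugfs_dir(parent_dev);
 *
 *	if (!IS_ERR(dir))
 *		debugfs_create_file("state", 0444, dir, drv, &drv_state_fops);
 *	...
 *	wwan_put_debugfs_dir(dir);	// drops the reference taken above
 */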

/* This function allocates and registers a new WWAN device OR, if a WWAN device
 * already exists for the given parent, it gets a reference and returns it.
 * This function is not exported (for now), it is called indirectly via
 * wwan_create_port().
 */
static struct wwan_device *wwan_create_dev(struct device *parent)
{
	struct wwan_device *wwandev;
	int err, id;

	/* The 'find-alloc-register' operation must be protected against
	 * concurrent execution, a WWAN device is possibly shared between
	 * multiple callers or concurrently unregistered from wwan_remove_dev().
	 */
	mutex_lock(&wwan_register_lock);

	/* If wwandev already exists, return it */
	wwandev = wwan_dev_get_by_parent(parent);
	if (!IS_ERR(wwandev))
		goto done_unlock;

	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
	if (id < 0) {
		wwandev = ERR_PTR(id);
		goto done_unlock;
	}

	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
	if (!wwandev) {
		wwandev = ERR_PTR(-ENOMEM);
		ida_free(&wwan_dev_ids, id);
		goto done_unlock;
	}

	wwandev->dev.parent = parent;
	wwandev->dev.class = wwan_class;
	wwandev->dev.type = &wwan_dev_type;
	wwandev->id = id;
	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);

	err = device_register(&wwandev->dev);
	if (err) {
		put_device(&wwandev->dev);
		wwandev = ERR_PTR(err);
		goto done_unlock;
	}

#ifdef CONFIG_WWAN_DEBUGFS
	wwandev->debugfs_dir =
			debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
					   wwan_debugfs_dir);
#endif

done_unlock:
	mutex_unlock(&wwan_register_lock);

	return wwandev;
}

static int is_wwan_child(struct device *dev, void *data)
{
	return dev->class == wwan_class;
}

static void wwan_remove_dev(struct wwan_device *wwandev)
{
	int ret;

	/* Prevent concurrent picking from wwan_create_dev */
	mutex_lock(&wwan_register_lock);

	/* WWAN device is created and registered (get+add) along with its first
	 * child port, and subsequent port registrations only grab a reference
	 * (get). The WWAN device must then be unregistered (del+put) along with
	 * its last port, and reference simply dropped (put) otherwise. In the
	 * same fashion, we must not unregister it when the ops are still there.
	 */
	if (wwandev->ops)
		ret = 1;
	else
		ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);

	if (!ret) {
#ifdef CONFIG_WWAN_DEBUGFS
		debugfs_remove_recursive(wwandev->debugfs_dir);
#endif
		device_unregister(&wwandev->dev);
	} else {
		put_device(&wwandev->dev);
	}

	mutex_unlock(&wwan_register_lock);
}

/* ------- WWAN port management ------- */

static const struct {
	const char * const name;	/* Port type name */
	const char * const devsuf;	/* Port device name suffix */
} wwan_port_types[WWAN_PORT_MAX + 1] = {
	[WWAN_PORT_AT] = {
		.name = "AT",
		.devsuf = "at",
	},
	[WWAN_PORT_MBIM] = {
		.name = "MBIM",
		.devsuf = "mbim",
	},
	[WWAN_PORT_QMI] = {
		.name = "QMI",
		.devsuf = "qmi",
	},
	[WWAN_PORT_QCDM] = {
		.name = "QCDM",
		.devsuf = "qcdm",
	},
	[WWAN_PORT_FIREHOSE] = {
		.name = "FIREHOSE",
		.devsuf = "firehose",
	},
	[WWAN_PORT_XMMRPC] = {
		.name = "XMMRPC",
		.devsuf = "xmmrpc",
	},
};

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct wwan_port *port = to_wwan_port(dev);

	return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
}
static DEVICE_ATTR_RO(type);

static struct attribute *wwan_port_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_port);

static void wwan_port_destroy(struct device *dev)
{
	struct wwan_port *port = to_wwan_port(dev);

	ida_free(&minors, MINOR(port->dev.devt));
	mutex_destroy(&port->data_lock);
	mutex_destroy(&port->ops_lock);
	kfree(port);
}

static const struct device_type wwan_port_dev_type = {
	.name = "wwan_port",
	.release = wwan_port_destroy,
	.groups = wwan_port_groups,
};

static int wwan_port_minor_match(struct device *dev, const void *minor)
{
	return (dev->type == &wwan_port_dev_type &&
		MINOR(dev->devt) == *(unsigned int *)minor);
}

static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_port(dev);
}

/* Allocate and set unique name based on passed format
 *
 * Name allocation approach is highly inspired by the __dev_alloc_name()
 * function.
 *
 * To avoid name collisions, the caller must prevent the new port device
 * registration as well as concurrent invocation of this function.
 */
static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
	const unsigned int max_ports = PAGE_SIZE * 8;
	struct class_dev_iter iter;
	unsigned long *idmap;
	struct device *dev;
	char buf[0x20];
	int id;

	idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!idmap)
		return -ENOMEM;

	/* Collect ids of same name format ports */
	class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
	while ((dev = class_dev_iter_next(&iter))) {
		if (dev->parent != &wwandev->dev)
			continue;
		if (sscanf(dev_name(dev), fmt, &id) != 1)
			continue;
		if (id < 0 || id >= max_ports)
			continue;
		set_bit(id, idmap);
	}
	class_dev_iter_exit(&iter);

	/* Allocate unique id */
	id = find_first_zero_bit(idmap, max_ports);
	free_page((unsigned long)idmap);

	snprintf(buf, sizeof(buf), fmt, id);	/* Name generation */

	dev = device_find_child_by_name(&wwandev->dev, buf);
	if (dev) {
		put_device(dev);
		return -ENFILE;
	}

	return dev_set_name(&port->dev, buf);
}

struct wwan_port *wwan_create_port(struct device *parent,
				   enum wwan_port_type type,
				   const struct wwan_port_ops *ops,
				   void *drvdata)
{
	struct wwan_device *wwandev;
	struct wwan_port *port;
	char namefmt[0x20];
	int minor, err;

	if (type > WWAN_PORT_MAX || !ops)
		return ERR_PTR(-EINVAL);

	/* A port is always a child of a WWAN device, retrieve (allocate or
	 * pick) the WWAN device based on the provided parent device.
	 */
	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* A port is exposed as a character device, get a minor */
	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
	if (minor < 0) {
		err = minor;
		goto error_wwandev_remove;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		ida_free(&minors, minor);
		goto error_wwandev_remove;
	}

	port->type = type;
	port->ops = ops;
	mutex_init(&port->ops_lock);
	skb_queue_head_init(&port->rxq);
	init_waitqueue_head(&port->waitqueue);
	mutex_init(&port->data_lock);

	port->dev.parent = &wwandev->dev;
	port->dev.class = wwan_class;
	port->dev.type = &wwan_port_dev_type;
	port->dev.devt = MKDEV(wwan_major, minor);
	dev_set_drvdata(&port->dev, drvdata);

	/* allocate unique name based on wwan device id, port type and number */
	snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
		 wwan_port_types[port->type].devsuf);

	/* Serialize ports registration */
	mutex_lock(&wwan_register_lock);

	__wwan_port_dev_assign_name(port, namefmt);
	err = device_register(&port->dev);

	mutex_unlock(&wwan_register_lock);

	if (err)
		goto error_put_device;

	return port;

error_put_device:
	put_device(&port->dev);
error_wwandev_remove:
	wwan_remove_dev(wwandev);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(wwan_create_port);
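
/*
 * Illustrative sketch (assumption, not taken from this file): a hypothetical
 * modem driver exposing an AT command port would typically do something like:
 *
 *	static const struct wwan_port_ops foo_at_ops = {
 *		.start = foo_at_start,
 *		.stop  = foo_at_stop,
 *		.tx    = foo_at_tx,
 *	};
 *
 *	port = wwan_create_port(&pdev->dev, WWAN_PORT_AT, &foo_at_ops, foo);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 *
 * The "foo_*" names are placeholders. The resulting character device shows up
 * as /dev/wwan<X>at<Y>, and the port must later be torn down with
 * wwan_remove_port().
 */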

void wwan_remove_port(struct wwan_port *port)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);

	mutex_lock(&port->ops_lock);
	if (port->start_count)
		port->ops->stop(port);
	port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
	mutex_unlock(&port->ops_lock);

	wake_up_interruptible(&port->waitqueue);

	skb_queue_purge(&port->rxq);
	dev_set_drvdata(&port->dev, NULL);
	device_unregister(&port->dev);

	/* Release related wwan device */
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_remove_port);

void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
	skb_queue_tail(&port->rxq, skb);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_rx);
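
/*
 * Illustrative sketch (assumption): in a hypothetical driver's receive path,
 * each chunk of modem data destined for this port is handed over as an skb;
 * the core queues it and wakes any reader blocked in read()/poll():
 *
 *	skb = alloc_skb(len, GFP_ATOMIC);
 *	if (skb) {
 *		skb_put_data(skb, data, len);
 *		wwan_port_rx(port, skb);
 *	}
 */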

void wwan_port_txon(struct wwan_port *port)
{
	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_txon);

void wwan_port_txoff(struct wwan_port *port)
{
	set_bit(WWAN_PORT_TX_OFF, &port->flags);
}
EXPORT_SYMBOL_GPL(wwan_port_txoff);
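
/*
 * Illustrative sketch (assumption): a hypothetical driver uses the two helpers
 * above for TX flow control, e.g. around a full hardware FIFO
 * ("foo_hw_fifo_full" is a placeholder):
 *
 *	if (foo_hw_fifo_full(hw))
 *		wwan_port_txoff(port);	// writers now block (or get -EAGAIN)
 *	...
 *	// later, from the TX-done interrupt/completion path:
 *	wwan_port_txon(port);		// unblock pending writers
 */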

void *wwan_port_get_drvdata(struct wwan_port *port)
{
	return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);

static int wwan_port_op_start(struct wwan_port *port)
{
	int ret = 0;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* If port is already started, don't start again */
	if (!port->start_count)
		ret = port->ops->start(port);

	if (!ret)
		port->start_count++;

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static void wwan_port_op_stop(struct wwan_port *port)
{
	mutex_lock(&port->ops_lock);
	port->start_count--;
	if (!port->start_count) {
		if (port->ops)
			port->ops->stop(port);
		skb_queue_purge(&port->rxq);
	}
	mutex_unlock(&port->ops_lock);
}

static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
			   bool nonblock)
{
	int ret;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	if (nonblock || !port->ops->tx_blocking)
		ret = port->ops->tx(port, skb);
	else
		ret = port->ops->tx_blocking(port, skb);

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static bool is_read_blocked(struct wwan_port *port)
{
	return skb_queue_empty(&port->rxq) && port->ops;
}

static bool is_write_blocked(struct wwan_port *port)
{
	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
}

static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
{
	if (!is_read_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
{
	if (!is_write_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_port_fops_open(struct inode *inode, struct file *file)
{
	struct wwan_port *port;
	int err = 0;

	port = wwan_port_get_by_minor(iminor(inode));
	if (IS_ERR(port))
		return PTR_ERR(port);

	file->private_data = port;
	stream_open(inode, file);

	err = wwan_port_op_start(port);
	if (err)
		put_device(&port->dev);

	return err;
}

static int wwan_port_fops_release(struct inode *inode, struct file *filp)
{
	struct wwan_port *port = filp->private_data;

	wwan_port_op_stop(port);
	put_device(&port->dev);

	return 0;
}

static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	size_t copied;
	int ret;

	ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = skb_dequeue(&port->rxq);
	if (!skb)
		return -EIO;

	copied = min_t(size_t, count, skb->len);
	if (copy_to_user(buf, skb->data, copied)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	skb_pull(skb, copied);

	/* skb is not fully consumed, keep it in the queue */
	if (skb->len)
		skb_queue_head(&port->rxq, skb);
	else
		consume_skb(skb);

	return copied;
}

static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
				    size_t count, loff_t *offp)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	int ret;

	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = alloc_skb(count, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return count;
}

static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
{
	struct wwan_port *port = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &port->waitqueue, wait);

	mutex_lock(&port->ops_lock);
	if (port->ops && port->ops->tx_poll)
		mask |= port->ops->tx_poll(port, filp, wait);
	else if (!is_write_blocked(port))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (!is_read_blocked(port))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!port->ops)
		mask |= EPOLLHUP | EPOLLERR;
	mutex_unlock(&port->ops_lock);

	return mask;
}

/* Implements minimalistic stub terminal IOCTLs support */
static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
				    unsigned long arg)
{
	int ret = 0;

	mutex_lock(&port->data_lock);

	switch (cmd) {
	case TCFLSH:
		break;

	case TCGETS:
		if (copy_to_user((void __user *)arg, &port->at_data.termios,
				 sizeof(struct termios)))
			ret = -EFAULT;
		break;

	case TCSETS:
	case TCSETSW:
	case TCSETSF:
		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
				   sizeof(struct termios)))
			ret = -EFAULT;
		break;

#ifdef TCGETS2
	case TCGETS2:
		if (copy_to_user((void __user *)arg, &port->at_data.termios,
				 sizeof(struct termios2)))
			ret = -EFAULT;
		break;

	case TCSETS2:
	case TCSETSW2:
	case TCSETSF2:
		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
				   sizeof(struct termios2)))
			ret = -EFAULT;
		break;
#endif

	case TIOCMGET:
		ret = put_user(port->at_data.mdmbits, (int __user *)arg);
		break;

	case TIOCMSET:
	case TIOCMBIC:
	case TIOCMBIS: {
		int mdmbits;

		if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
			ret = -EFAULT;
			break;
		}
		if (cmd == TIOCMBIC)
			port->at_data.mdmbits &= ~mdmbits;
		else if (cmd == TIOCMBIS)
			port->at_data.mdmbits |= mdmbits;
		else
			port->at_data.mdmbits = mdmbits;
		break;
	}

	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&port->data_lock);

	return ret;
}

static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct wwan_port *port = filp->private_data;
	int res;

	if (port->type == WWAN_PORT_AT) {	/* AT port specific IOCTLs */
		res = wwan_port_fops_at_ioctl(port, cmd, arg);
		if (res != -ENOIOCTLCMD)
			return res;
	}

	switch (cmd) {
	case TIOCINQ: {	/* aka SIOCINQ aka FIONREAD */
		unsigned long flags;
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_irqsave(&port->rxq.lock, flags);
		skb_queue_walk(&port->rxq, skb)
			amount += skb->len;
		spin_unlock_irqrestore(&port->rxq.lock, flags);

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations wwan_port_fops = {
	.owner = THIS_MODULE,
	.open = wwan_port_fops_open,
	.release = wwan_port_fops_release,
	.read = wwan_port_fops_read,
	.write = wwan_port_fops_write,
	.poll = wwan_port_fops_poll,
	.unlocked_ioctl = wwan_port_fops_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ptr_ioctl,
#endif
	.llseek = noop_llseek,
};
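
/*
 * Illustrative sketch (assumption): from userspace a WWAN control port is just
 * a character device backed by the fops above, so a minimal AT exchange could
 * look like:
 *
 *	int fd = open("/dev/wwan0at0", O_RDWR);
 *
 *	write(fd, "AT\r", 3);
 *	n = read(fd, buf, sizeof(buf));	// typically returns "OK"
 *	close(fd);
 *
 * The device node name is only an example; the actual index depends on the
 * modem and on how many ports of that type were already registered.
 */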

static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!tb[IFLA_PARENT_DEV_NAME])
		return -EINVAL;

	if (!data[IFLA_WWAN_LINK_ID])
		return -EINVAL;

	return 0;
}

static struct device_type wwan_type = { .name = "wwan" };

static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
					  const char *ifname,
					  unsigned char name_assign_type,
					  unsigned int num_tx_queues,
					  unsigned int num_rx_queues)
{
	const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
	struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
	struct net_device *dev;
	unsigned int priv_size;

	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* only supported if ops were registered (not just ports) */
	if (!wwandev->ops) {
		dev = ERR_PTR(-EOPNOTSUPP);
		goto out;
	}

	priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
	dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
			       wwandev->ops->setup, num_tx_queues, num_rx_queues);

	if (dev) {
		SET_NETDEV_DEV(dev, &wwandev->dev);
		SET_NETDEV_DEVTYPE(dev, &wwan_type);
	}

out:
	/* release the reference */
	put_device(&wwandev->dev);
	return dev;
}

static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
	u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
	struct wwan_netdev_priv *priv = netdev_priv(dev);
	int ret;

	if (IS_ERR(wwandev))
		return PTR_ERR(wwandev);

	/* shouldn't have a netdev (left) with us as parent so WARN */
	if (WARN_ON(!wwandev->ops)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	priv->link_id = link_id;
	if (wwandev->ops->newlink)
		ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
					    link_id, extack);
	else
		ret = register_netdevice(dev);

out:
	/* release the reference */
	put_device(&wwandev->dev);
	return ret;
}

static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);

	if (IS_ERR(wwandev))
		return;

	/* shouldn't have a netdev (left) with us as parent so WARN */
	if (WARN_ON(!wwandev->ops))
		goto out;

	if (wwandev->ops->dellink)
		wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
	else
		unregister_netdevice_queue(dev, head);

out:
	/* release the reference */
	put_device(&wwandev->dev);
}

static size_t wwan_rtnl_get_size(const struct net_device *dev)
{
	return
		nla_total_size(4) +	/* IFLA_WWAN_LINK_ID */
		0;
}

static int wwan_rtnl_fill_info(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct wwan_netdev_priv *priv = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
	[IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
};

static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
	.kind = "wwan",
	.maxtype = __IFLA_WWAN_MAX,
	.alloc = wwan_rtnl_alloc,
	.validate = wwan_rtnl_validate,
	.newlink = wwan_rtnl_newlink,
	.dellink = wwan_rtnl_dellink,
	.get_size = wwan_rtnl_get_size,
	.fill_info = wwan_rtnl_fill_info,
	.policy = wwan_rtnl_policy,
};

static void wwan_create_default_link(struct wwan_device *wwandev,
				     u32 def_link_id)
{
	struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
	struct nlattr *data[IFLA_WWAN_MAX + 1];
	struct net_device *dev;
	struct nlmsghdr *nlh;
	struct sk_buff *msg;

	/* Forge attributes required to create a WWAN netdev. We first
	 * build a netlink message and then parse it. This looks
	 * odd, but such an approach is less error prone.
	 */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (WARN_ON(!msg))
		return;
	nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
	if (WARN_ON(!nlh))
		goto free_attrs;

	if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
		goto free_attrs;
	tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
	if (!tb[IFLA_LINKINFO])
		goto free_attrs;
	linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
	if (!linkinfo[IFLA_INFO_DATA])
		goto free_attrs;
	if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
		goto free_attrs;
	nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
	nla_nest_end(msg, tb[IFLA_LINKINFO]);

	nlmsg_end(msg, nlh);

	/* The next three parsing calls cannot fail */
	nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
	nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
				    NULL, NULL);
	nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
				    linkinfo[IFLA_INFO_DATA], NULL, NULL);

	rtnl_lock();

	dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
			       &wwan_rtnl_link_ops, tb, NULL);
	if (WARN_ON(IS_ERR(dev)))
		goto unlock;

	if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) {
		free_netdev(dev);
		goto unlock;
	}

	rtnl_configure_link(dev, NULL, 0, NULL); /* Link initialized, notify new link */

unlock:
	rtnl_unlock();

free_attrs:
	nlmsg_free(msg);
}

/**
 * wwan_register_ops - register WWAN device ops
 * @parent: Device to use as parent and shared by all WWAN ports and
 *	created netdevs
 * @ops: operations to register
 * @ctxt: context to pass to operations
 * @def_link_id: id of the default link that will be automatically created by
 *	the WWAN core for the WWAN device. The default link will not be created
 *	if the passed value is WWAN_NO_DEFAULT_LINK.
 *
 * Returns: 0 on success, a negative error code on failure
 */
int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
		      void *ctxt, u32 def_link_id)
{
	struct wwan_device *wwandev;

	if (WARN_ON(!parent || !ops || !ops->setup))
		return -EINVAL;

	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return PTR_ERR(wwandev);

	if (WARN_ON(wwandev->ops)) {
		wwan_remove_dev(wwandev);
		return -EBUSY;
	}

	wwandev->ops = ops;
	wwandev->ops_ctxt = ctxt;

	/* NB: we do not abort ops registration in case of default link
	 * creation failure. Link ops is the management interface, while the
	 * default link creation is a service option. And we should not prevent
	 * a user from manually creating a link later if the service option
	 * failed now.
	 */
	if (def_link_id != WWAN_NO_DEFAULT_LINK)
		wwan_create_default_link(wwandev, def_link_id);

	return 0;
}
EXPORT_SYMBOL_GPL(wwan_register_ops);
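
/*
 * Illustrative sketch (assumption): a hypothetical modem driver that also
 * provides network interfaces would register its link ops once per modem:
 *
 *	static const struct wwan_ops foo_wwan_ops = {
 *		.priv_size = sizeof(struct foo_link),
 *		.setup     = foo_netdev_setup,
 *		.newlink   = foo_newlink,
 *		.dellink   = foo_dellink,
 *	};
 *
 *	ret = wwan_register_ops(&pdev->dev, &foo_wwan_ops, foo, 0);
 *
 * Passing a link id of 0 asks the core to create a default "wwan%d" netdev;
 * pass WWAN_NO_DEFAULT_LINK to skip that. All "foo_*" names are placeholders.
 */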

/* Enqueue child netdev deletion */
static int wwan_child_dellink(struct device *dev, void *data)
{
	struct list_head *kill_list = data;

	if (dev->type == &wwan_type)
		wwan_rtnl_dellink(to_net_dev(dev), kill_list);

	return 0;
}

/**
 * wwan_unregister_ops - remove WWAN device ops
 * @parent: Device to use as parent and shared by all WWAN ports and
 *	created netdevs
 */
void wwan_unregister_ops(struct device *parent)
{
	struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
	LIST_HEAD(kill_list);

	if (WARN_ON(IS_ERR(wwandev)))
		return;
	if (WARN_ON(!wwandev->ops)) {
		put_device(&wwandev->dev);
		return;
	}

	/* put the reference obtained by wwan_dev_get_by_parent(),
	 * we should still have one (that the owner is giving back
	 * now) due to the ops being assigned.
	 */
	put_device(&wwandev->dev);

	rtnl_lock();	/* Prevent concurrent netdev(s) creation/destroying */

	/* Remove all child netdev(s), using batch removing */
	device_for_each_child(&wwandev->dev, &kill_list,
			      wwan_child_dellink);
	unregister_netdevice_many(&kill_list);

	wwandev->ops = NULL;	/* Finally remove ops */

	rtnl_unlock();

	wwandev->ops_ctxt = NULL;
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_unregister_ops);
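
/*
 * Illustrative sketch (assumption): on modem removal the hypothetical driver
 * undoes its registrations; the core then releases the shared wwanX device
 * once its last user (port or ops) is gone:
 *
 *	wwan_remove_port(foo->at_port);
 *	wwan_unregister_ops(&pdev->dev);
 */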

static int __init wwan_init(void)
{
	int err;

	err = rtnl_link_register(&wwan_rtnl_link_ops);
	if (err)
		return err;

	wwan_class = class_create(THIS_MODULE, "wwan");
	if (IS_ERR(wwan_class)) {
		err = PTR_ERR(wwan_class);
		goto unregister;
	}

	/* chrdev used for wwan ports */
	wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
				       &wwan_port_fops);
	if (wwan_major < 0) {
		err = wwan_major;
		goto destroy;
	}

#ifdef CONFIG_WWAN_DEBUGFS
	wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
#endif

	return 0;

destroy:
	class_destroy(wwan_class);
unregister:
	rtnl_link_unregister(&wwan_rtnl_link_ops);
	return err;
}

static void __exit wwan_exit(void)
{
	debugfs_remove_recursive(wwan_debugfs_dir);
	__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
	rtnl_link_unregister(&wwan_rtnl_link_ops);
	class_destroy(wwan_class);
}

module_init(wwan_init);
module_exit(wwan_exit);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("WWAN core");
MODULE_LICENSE("GPL v2");