// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
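
/*
 * A rough sketch of how a function driver typically uses this layer.
 * The gether_*() helpers are the ones exported below; everything else
 * (gadget, port, host_mac) is hypothetical driver state:
 *
 *	dev = gether_setup_name(gadget, NULL, NULL, host_mac,
 *				QMULT_DEFAULT, "usb");	// registers "usb0"
 *	...
 *	net = gether_connect(&port);	// from set_alt(): data path live
 *	...
 *	gether_disconnect(&port);	// from disable(): carrier off
 *	gether_cleanup(dev);		// at unbind time: free everything
 */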

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		qmult;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	bool			no_skb_reserve;
	bool			ifname_set;
	u8			host_mac[ETH_ALEN];
	u8			dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed >= USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
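
/*
 * Worked example (illustrative): with the default qmult of 5 from
 * u_ether.h, a high or super speed link queues 5 * DEFAULT_QLEN == 10
 * requests per direction, while a full speed link keeps the plain
 * double buffering of 2.
 */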

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

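/*
 * Sizing example (illustrative, not tied to any particular UDC): a CDC
 * ECM link with header_len == 0 and a 1500 byte MTU asks for
 * 14 + 1500 + RX_EXTRA == 1534 bytes; a controller that sets
 * quirk_ep_out_aligned_size and uses 512 byte bulk packets rounds this
 * up to 1536.
 */
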
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding;
	 * some hardware doesn't like to write zlps, so pad with one
	 * extra byte instead.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

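/*
 * Framing note, by example: if a transfer of exactly N * maxpacket
 * bytes (say 512) is queued with req->zero set but the UDC cannot send
 * zlps (dev->zlp is false), the length bump above queues 513 bytes, so
 * the host still sees a short packet terminating the transfer.
 */
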
/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
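
/*
 * Accepted spellings, for example: "aa:bb:cc:dd:ee:ff",
 * "aa.bb.cc.dd.ee.ff", or "aabbccddeeff" (a single '.' or ':' before
 * each byte is skipped); the result must also satisfy
 * is_valid_ether_addr(), i.e. be neither multicast nor all-zero.
 */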

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};

/*
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with this link
 * @dev_addr: NULL, or the link-level address for the device end
 * @host_addr: NULL, or the link-level address for the host end
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @qmult: queue length multiplier at high/super speed
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;
	u8			addr[ETH_ALEN];

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	eth_hw_addr_set(net, addr);
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device	*net;
	struct eth_dev		*dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);
	pr_warn("using random %s ethernet address\n", "self");
	eth_random_addr(dev->host_mac);
	pr_warn("using random %s ethernet address\n", "host");

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);
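
/*
 * A minimal sketch of the two-stage flow the *_default() variant
 * enables (configfs-style function drivers; error handling elided,
 * surrounding variables hypothetical):
 *
 *	net = gether_setup_name_default("usb");	// no netdev registered yet
 *	gether_set_gadget(net, gadget);		// once the UDC is known
 *	status = gether_register_netdev(net);	// now "usb0" appears
 */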

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;

	net->addr_assign_type = NET_ADDR_RANDOM;
	eth_hw_addr_set(net, dev->dev_mac);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
		INFO(dev, "MAC %pM\n", dev->dev_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	/* "%pm" (lowercase) prints the MAC without separators: 12 hex
	 * digits plus NUL, which is why 13 bytes suffice here.
	 */
	snprintf(host_addr, len, "%pm", dev->host_mac);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	if (name[len - 1] == '\n')
		len--;

	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	strncpy(net->name, tmp, sizeof(net->name));
	dev->ifname_set = true;

	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);
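
/*
 * Example (hypothetical value): writing "myeth%d\n" through a
 * configfs-backed "ifname" attribute lands here; it passes the checks
 * above and the device later binds as "myeth0".  A template without
 * exactly one "%d" is rejected with -EINVAL.
 */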

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify the returned net_device pointer using IS_ERR().  Unless it
 * reports an error code (negative errno), the ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);
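
/*
 * Typical call site, sketched (a function driver's set_alt(), irqs
 * blocked; the port wiring shown is hypothetical):
 *
 *	link->in_ep = ...;		// endpoints already claimed
 *	link->out_ep = ...;
 *	net = gether_connect(link);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);	// endpoints left disabled
 */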

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");