Lines matching refs: net
232 static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_login_response() argument
236 struct tb_xdomain *xd = net->xd; in tbnet_login_response()
242 memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN); in tbnet_login_response()
249 static int tbnet_login_request(struct tbnet *net, u8 sequence) in tbnet_login_request() argument
253 struct tb_xdomain *xd = net->xd; in tbnet_login_request()
258 atomic_inc_return(&net->command_id)); in tbnet_login_request()
261 request.transmit_path = net->local_transmit_path; in tbnet_login_request()
269 static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_logout_response() argument
273 struct tb_xdomain *xd = net->xd; in tbnet_logout_response()
278 atomic_inc_return(&net->command_id)); in tbnet_logout_response()
283 static int tbnet_logout_request(struct tbnet *net) in tbnet_logout_request() argument
287 struct tb_xdomain *xd = net->xd; in tbnet_logout_request()
292 atomic_inc_return(&net->command_id)); in tbnet_logout_request()
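The fragments above come from the four ThunderboltIP control-message helpers. As a minimal sketch of the pattern they share, here is a reconstruction of tbnet_login_request(); tbnet_fill_header(), TBIP_LOGIN, TBIP_LOGIN_PROTO_VERSION, TBNET_LOGIN_TIMEOUT and the struct layouts are driver-local names assumed here, while the command_id, transmit_path and xd usage is taken from the listing.

static int tbnet_login_request(struct tbnet *net, u8 sequence)
{
        struct thunderbolt_ip_login_response reply;
        struct thunderbolt_ip_login request;
        struct tb_xdomain *xd = net->xd;

        memset(&request, 0, sizeof(request));
        /* Route, sequence number and a fresh command ID go into the
         * common ThunderboltIP header (helper name assumed).
         */
        tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
                          xd->remote_uuid, TBIP_LOGIN, sizeof(request),
                          atomic_inc_return(&net->command_id));

        request.proto_version = TBIP_LOGIN_PROTO_VERSION;
        /* Tell the peer which HopID we expect it to transmit to */
        request.transmit_path = net->local_transmit_path;

        /* Sent as an XDomain control request; the peer answers with a
         * login response carrying its receiver MAC address.
         */
        return tb_xdomain_request(xd, &request, sizeof(request),
                                  TB_CFG_PKG_XDOMAIN_RESP, &reply,
                                  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
                                  TBNET_LOGIN_TIMEOUT);
}

The logout request at line 283 follows the same shape with a TBIP_LOGOUT header and no transmit_path field.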
300 static void start_login(struct tbnet *net) in start_login() argument
302 mutex_lock(&net->connection_lock); in start_login()
303 net->login_sent = false; in start_login()
304 net->login_received = false; in start_login()
305 mutex_unlock(&net->connection_lock); in start_login()
307 queue_delayed_work(system_long_wq, &net->login_work, in start_login()
311 static void stop_login(struct tbnet *net) in stop_login() argument
313 cancel_delayed_work_sync(&net->login_work); in stop_login()
314 cancel_work_sync(&net->connected_work); in stop_login()
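Reconstructed from the fragments at 300-314, the login start/stop pair looks roughly like the sketch below; the initial queueing delay is an assumption, since the listing cuts the queue_delayed_work() call short.

static void start_login(struct tbnet *net)
{
        /* Reset the handshake state under the connection lock ... */
        mutex_lock(&net->connection_lock);
        net->login_sent = false;
        net->login_received = false;
        mutex_unlock(&net->connection_lock);

        /* ... and kick the delayed login work (delay value assumed) */
        queue_delayed_work(system_long_wq, &net->login_work,
                           msecs_to_jiffies(1000));
}

static void stop_login(struct tbnet *net)
{
        /* Make sure no login or "connected" work runs after this point */
        cancel_delayed_work_sync(&net->login_work);
        cancel_work_sync(&net->connected_work);
}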
358 static void tbnet_tear_down(struct tbnet *net, bool send_logout) in tbnet_tear_down() argument
360 netif_carrier_off(net->dev); in tbnet_tear_down()
361 netif_stop_queue(net->dev); in tbnet_tear_down()
363 stop_login(net); in tbnet_tear_down()
365 mutex_lock(&net->connection_lock); in tbnet_tear_down()
367 if (net->login_sent && net->login_received) { in tbnet_tear_down()
371 ret = tbnet_logout_request(net); in tbnet_tear_down()
376 tb_ring_stop(net->rx_ring.ring); in tbnet_tear_down()
377 tb_ring_stop(net->tx_ring.ring); in tbnet_tear_down()
378 tbnet_free_buffers(&net->rx_ring); in tbnet_tear_down()
379 tbnet_free_buffers(&net->tx_ring); in tbnet_tear_down()
381 ret = tb_xdomain_disable_paths(net->xd, in tbnet_tear_down()
382 net->local_transmit_path, in tbnet_tear_down()
383 net->rx_ring.ring->hop, in tbnet_tear_down()
384 net->remote_transmit_path, in tbnet_tear_down()
385 net->tx_ring.ring->hop); in tbnet_tear_down()
387 netdev_warn(net->dev, "failed to disable DMA paths\n"); in tbnet_tear_down()
389 tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path); in tbnet_tear_down()
390 net->remote_transmit_path = 0; in tbnet_tear_down()
393 net->login_retries = 0; in tbnet_tear_down()
394 net->login_sent = false; in tbnet_tear_down()
395 net->login_received = false; in tbnet_tear_down()
397 mutex_unlock(&net->connection_lock); in tbnet_tear_down()
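A sketch of tbnet_tear_down() assembled from the fragments at 358-397. The logout retry loop, TBNET_LOGOUT_RETRIES and the -ETIMEDOUT check are assumptions; the carrier/queue shutdown, ring teardown, path disable and state reset are taken from the listing.

static void tbnet_tear_down(struct tbnet *net, bool send_logout)
{
        netif_carrier_off(net->dev);
        netif_stop_queue(net->dev);

        stop_login(net);

        mutex_lock(&net->connection_lock);

        /* Only a fully established connection (both logins seen) has
         * rings and DMA paths to unwind.
         */
        if (net->login_sent && net->login_received) {
                int ret, retries = TBNET_LOGOUT_RETRIES;

                /* Retry loop is assumed; the listing only shows the
                 * single tbnet_logout_request() call at line 371.
                 */
                while (send_logout && retries-- > 0) {
                        ret = tbnet_logout_request(net);
                        if (ret != -ETIMEDOUT)
                                break;
                }

                tb_ring_stop(net->rx_ring.ring);
                tb_ring_stop(net->tx_ring.ring);
                tbnet_free_buffers(&net->rx_ring);
                tbnet_free_buffers(&net->tx_ring);

                ret = tb_xdomain_disable_paths(net->xd,
                                               net->local_transmit_path,
                                               net->rx_ring.ring->hop,
                                               net->remote_transmit_path,
                                               net->tx_ring.ring->hop);
                if (ret)
                        netdev_warn(net->dev, "failed to disable DMA paths\n");

                tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
                net->remote_transmit_path = 0;
        }

        net->login_retries = 0;
        net->login_sent = false;
        net->login_received = false;

        mutex_unlock(&net->connection_lock);
}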
403 struct tbnet *net = data; in tbnet_handle_packet() local
412 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
414 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
419 if (route != net->xd->route) in tbnet_handle_packet()
428 if (!netif_running(net->dev)) in tbnet_handle_packet()
431 ret = tbnet_login_response(net, route, sequence, in tbnet_handle_packet()
434 mutex_lock(&net->connection_lock); in tbnet_handle_packet()
435 net->login_received = true; in tbnet_handle_packet()
436 net->remote_transmit_path = pkg->transmit_path; in tbnet_handle_packet()
442 if (net->login_retries >= TBNET_LOGIN_RETRIES || in tbnet_handle_packet()
443 !net->login_sent) { in tbnet_handle_packet()
444 net->login_retries = 0; in tbnet_handle_packet()
446 &net->login_work, 0); in tbnet_handle_packet()
448 mutex_unlock(&net->connection_lock); in tbnet_handle_packet()
450 queue_work(system_long_wq, &net->connected_work); in tbnet_handle_packet()
455 ret = tbnet_logout_response(net, route, sequence, command_id); in tbnet_handle_packet()
457 queue_work(system_long_wq, &net->disconnect_work); in tbnet_handle_packet()
465 netdev_warn(net->dev, "failed to send ThunderboltIP response\n"); in tbnet_handle_packet()
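The control-packet handler can be reconstructed from lines 403-465 as the sketch below. tbip_route() and tbip_sequence() are placeholder helpers standing in for the inline header parsing, and the size check and struct field names outside the listed lines are assumptions; the UUID/route filtering and the login/logout dispatch follow the listing.

static int tbnet_handle_packet(const void *buf, size_t size, void *data)
{
        const struct thunderbolt_ip_login *pkg = buf;
        struct tbnet *net = data;
        u32 command_id, sequence;
        u64 route;
        int ret = 0;

        /* Ignore packets that are not addressed to this interface */
        if (size < sizeof(struct thunderbolt_ip_header))
                return 0;
        if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
                return 0;
        if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
                return 0;

        /* Placeholder helpers for route/sequence extraction */
        route = tbip_route(&pkg->hdr);
        if (route != net->xd->route)
                return 0;

        sequence = tbip_sequence(&pkg->hdr);
        command_id = pkg->hdr.command_id;

        switch (pkg->hdr.type) {
        case TBIP_LOGIN:
                if (!netif_running(net->dev))
                        break;

                ret = tbnet_login_response(net, route, sequence,
                                           pkg->hdr.command_id);
                if (!ret) {
                        mutex_lock(&net->connection_lock);
                        net->login_received = true;
                        net->remote_transmit_path = pkg->transmit_path;

                        /* If the peer logged in before we managed to, or
                         * our own attempts ran out, restart our login.
                         */
                        if (net->login_retries >= TBNET_LOGIN_RETRIES ||
                            !net->login_sent) {
                                net->login_retries = 0;
                                queue_delayed_work(system_long_wq,
                                                   &net->login_work, 0);
                        }
                        mutex_unlock(&net->connection_lock);

                        queue_work(system_long_wq, &net->connected_work);
                }
                break;

        case TBIP_LOGOUT:
                ret = tbnet_logout_response(net, route, sequence, command_id);
                if (!ret)
                        queue_work(system_long_wq, &net->disconnect_work);
                break;

        default:
                return 0;
        }

        if (ret)
                netdev_warn(net->dev, "failed to send ThunderboltIP response\n");

        return 1;
}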
475 static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers) in tbnet_alloc_rx_buffers() argument
477 struct tbnet_ring *ring = &net->rx_ring; in tbnet_alloc_rx_buffers()
507 tf->dev = net->dev; in tbnet_alloc_rx_buffers()
521 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) in tbnet_get_tx_buffer() argument
523 struct tbnet_ring *ring = &net->tx_ring; in tbnet_get_tx_buffer()
546 struct tbnet *net = netdev_priv(tf->dev); in tbnet_tx_callback() local
549 net->tx_ring.prod++; in tbnet_tx_callback()
551 if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2) in tbnet_tx_callback()
552 netif_wake_queue(net->dev); in tbnet_tx_callback()
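The Tx completion callback at 546-552 is small enough to sketch in full; the callback signature is the standard ring_frame callback and is assumed here rather than shown in the listing.

static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                              bool canceled)
{
        struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
        struct tbnet *net = netdev_priv(tf->dev);

        /* The frame has been transmitted, return it to the Tx ring */
        net->tx_ring.prod++;

        /* Restart the queue once at least half of the ring is free again */
        if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
                netif_wake_queue(net->dev);
}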
555 static int tbnet_alloc_tx_buffers(struct tbnet *net) in tbnet_alloc_tx_buffers() argument
557 struct tbnet_ring *ring = &net->tx_ring; in tbnet_alloc_tx_buffers()
580 tf->dev = net->dev; in tbnet_alloc_tx_buffers()
595 struct tbnet *net = container_of(work, typeof(*net), connected_work); in tbnet_connected_work() local
599 if (netif_carrier_ok(net->dev)) in tbnet_connected_work()
602 mutex_lock(&net->connection_lock); in tbnet_connected_work()
603 connected = net->login_sent && net->login_received; in tbnet_connected_work()
604 mutex_unlock(&net->connection_lock); in tbnet_connected_work()
609 ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path); in tbnet_connected_work()
610 if (ret != net->remote_transmit_path) { in tbnet_connected_work()
611 netdev_err(net->dev, "failed to allocate Rx HopID\n"); in tbnet_connected_work()
618 ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path, in tbnet_connected_work()
619 net->rx_ring.ring->hop, in tbnet_connected_work()
620 net->remote_transmit_path, in tbnet_connected_work()
621 net->tx_ring.ring->hop); in tbnet_connected_work()
623 netdev_err(net->dev, "failed to enable DMA paths\n"); in tbnet_connected_work()
627 tb_ring_start(net->tx_ring.ring); in tbnet_connected_work()
628 tb_ring_start(net->rx_ring.ring); in tbnet_connected_work()
630 ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE); in tbnet_connected_work()
634 ret = tbnet_alloc_tx_buffers(net); in tbnet_connected_work()
638 netif_carrier_on(net->dev); in tbnet_connected_work()
639 netif_start_queue(net->dev); in tbnet_connected_work()
643 tbnet_free_buffers(&net->rx_ring); in tbnet_connected_work()
645 tb_ring_stop(net->rx_ring.ring); in tbnet_connected_work()
646 tb_ring_stop(net->tx_ring.ring); in tbnet_connected_work()
647 tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path); in tbnet_connected_work()
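Lines 595-647 outline the work item that brings the link up once both sides have logged in. A sketch of the full sequence follows; the error-path labels and the exact unwinding order past line 643 are assumptions, the rest mirrors the listing.

static void tbnet_connected_work(struct work_struct *work)
{
        struct tbnet *net = container_of(work, typeof(*net), connected_work);
        bool connected;
        int ret;

        if (netif_carrier_ok(net->dev))
                return;

        mutex_lock(&net->connection_lock);
        connected = net->login_sent && net->login_received;
        mutex_unlock(&net->connection_lock);

        if (!connected)
                return;

        /* Reserve the HopID the peer said it will transmit to */
        ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
        if (ret != net->remote_transmit_path) {
                netdev_err(net->dev, "failed to allocate Rx HopID\n");
                return;
        }

        /* Both logins succeeded, so enable the high-speed DMA paths and
         * start the network device queue.
         */
        ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
                                      net->rx_ring.ring->hop,
                                      net->remote_transmit_path,
                                      net->tx_ring.ring->hop);
        if (ret) {
                netdev_err(net->dev, "failed to enable DMA paths\n");
                goto err_release_hopid;
        }

        tb_ring_start(net->tx_ring.ring);
        tb_ring_start(net->rx_ring.ring);

        ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
        if (ret)
                goto err_stop_rings;

        ret = tbnet_alloc_tx_buffers(net);
        if (ret)
                goto err_free_rx_buffers;

        netif_carrier_on(net->dev);
        netif_start_queue(net->dev);
        return;

err_free_rx_buffers:
        tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
        tb_ring_stop(net->rx_ring.ring);
        tb_ring_stop(net->tx_ring.ring);
err_release_hopid:
        tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}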
652 struct tbnet *net = container_of(work, typeof(*net), login_work.work); in tbnet_login_work() local
656 if (netif_carrier_ok(net->dev)) in tbnet_login_work()
659 ret = tbnet_login_request(net, net->login_retries % 4); in tbnet_login_work()
661 if (net->login_retries++ < TBNET_LOGIN_RETRIES) { in tbnet_login_work()
662 queue_delayed_work(system_long_wq, &net->login_work, in tbnet_login_work()
665 netdev_info(net->dev, "ThunderboltIP login timed out\n"); in tbnet_login_work()
668 net->login_retries = 0; in tbnet_login_work()
670 mutex_lock(&net->connection_lock); in tbnet_login_work()
671 net->login_sent = true; in tbnet_login_work()
672 mutex_unlock(&net->connection_lock); in tbnet_login_work()
674 queue_work(system_long_wq, &net->connected_work); in tbnet_login_work()
680 struct tbnet *net = container_of(work, typeof(*net), disconnect_work); in tbnet_disconnect_work() local
682 tbnet_tear_down(net, false); in tbnet_disconnect_work()
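The login retry work (652-674) and the disconnect work (680-682) reconstruct as follows; TBNET_LOGIN_DELAY is a driver constant assumed here, everything else tracks the listing.

static void tbnet_login_work(struct work_struct *work)
{
        struct tbnet *net = container_of(work, typeof(*net), login_work.work);
        unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
        int ret;

        if (netif_carrier_ok(net->dev))
                return;

        /* Sequence number cycles 0..3 over the retries */
        ret = tbnet_login_request(net, net->login_retries % 4);
        if (ret) {
                if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
                        queue_delayed_work(system_long_wq, &net->login_work,
                                           delay);
                } else {
                        netdev_info(net->dev, "ThunderboltIP login timed out\n");
                }
        } else {
                net->login_retries = 0;

                mutex_lock(&net->connection_lock);
                net->login_sent = true;
                mutex_unlock(&net->connection_lock);

                queue_work(system_long_wq, &net->connected_work);
        }
}

static void tbnet_disconnect_work(struct work_struct *work)
{
        struct tbnet *net = container_of(work, typeof(*net), disconnect_work);

        /* Peer initiated the logout, so do not send another one back */
        tbnet_tear_down(net, false);
}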
685 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, in tbnet_check_frame() argument
692 net->stats.rx_crc_errors++; in tbnet_check_frame()
695 net->stats.rx_over_errors++; in tbnet_check_frame()
702 net->stats.rx_length_errors++; in tbnet_check_frame()
712 net->stats.rx_length_errors++; in tbnet_check_frame()
719 if (net->skb && net->rx_hdr.frame_count) { in tbnet_check_frame()
721 if (frame_count != net->rx_hdr.frame_count) { in tbnet_check_frame()
722 net->stats.rx_length_errors++; in tbnet_check_frame()
729 if (frame_index != net->rx_hdr.frame_index + 1 || in tbnet_check_frame()
730 frame_id != net->rx_hdr.frame_id) { in tbnet_check_frame()
731 net->stats.rx_missed_errors++; in tbnet_check_frame()
735 if (net->skb->len + frame_size > TBNET_MAX_MTU) { in tbnet_check_frame()
736 net->stats.rx_length_errors++; in tbnet_check_frame()
745 net->stats.rx_length_errors++; in tbnet_check_frame()
749 net->stats.rx_missed_errors++; in tbnet_check_frame()
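Lines 685-749 show only the statistics updates of the Rx frame validator; a sketch of the surrounding checks is below. The hardware error flags come from the ring descriptor, and the exact size bounds (including the frame_count upper limit) are assumptions; the per-error counters match the listing.

static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
                              const struct thunderbolt_ip_frame_header *hdr)
{
        u32 frame_count, frame_size, frame_index, frame_id;

        /* Hardware-reported receive problems */
        if (tf->frame.flags & RING_DESC_CRC_ERROR) {
                net->stats.rx_crc_errors++;
                return false;
        }
        if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
                net->stats.rx_over_errors++;
                return false;
        }

        frame_count = le32_to_cpu(hdr->frame_count);
        frame_size = le32_to_cpu(hdr->frame_size);
        frame_index = le16_to_cpu(hdr->frame_index);
        frame_id = le16_to_cpu(hdr->frame_id);

        /* The frame must carry data and the size must fit the buffer
         * (tbnet_frame_size() is a driver helper assumed here).
         */
        if (!frame_size || frame_size > tbnet_frame_size(tf) - sizeof(*hdr)) {
                net->stats.rx_length_errors++;
                return false;
        }

        if (net->skb && net->rx_hdr.frame_count) {
                /* Middle of a packet: the fragment must belong to the
                 * packet currently being assembled.
                 */
                if (frame_count != net->rx_hdr.frame_count) {
                        net->stats.rx_length_errors++;
                        return false;
                }
                if (frame_index != net->rx_hdr.frame_index + 1 ||
                    frame_id != net->rx_hdr.frame_id) {
                        net->stats.rx_missed_errors++;
                        return false;
                }
                if (net->skb->len + frame_size > TBNET_MAX_MTU) {
                        net->stats.rx_length_errors++;
                        return false;
                }
                return true;
        }

        /* Start of a new packet: index must be zero and the advertised
         * fragment count sane (bound assumed).
         */
        if (!frame_count || frame_count > TBNET_RING_SIZE / 4) {
                net->stats.rx_length_errors++;
                return false;
        }
        if (frame_index != 0) {
                net->stats.rx_missed_errors++;
                return false;
        }

        return true;
}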
758 struct tbnet *net = container_of(napi, struct tbnet, napi); in tbnet_poll() local
759 unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring); in tbnet_poll()
760 struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring); in tbnet_poll()
778 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
782 frame = tb_ring_poll(net->rx_ring.ring); in tbnet_poll()
793 net->rx_ring.cons++; in tbnet_poll()
797 if (!tbnet_check_frame(net, tf, hdr)) { in tbnet_poll()
799 dev_kfree_skb_any(net->skb); in tbnet_poll()
800 net->skb = NULL; in tbnet_poll()
806 skb = net->skb; in tbnet_poll()
812 net->stats.rx_errors++; in tbnet_poll()
819 net->skb = skb; in tbnet_poll()
826 net->rx_hdr.frame_size = frame_size; in tbnet_poll()
827 net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count); in tbnet_poll()
828 net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index); in tbnet_poll()
829 net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id); in tbnet_poll()
830 last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1; in tbnet_poll()
833 net->stats.rx_bytes += frame_size; in tbnet_poll()
836 skb->protocol = eth_type_trans(skb, net->dev); in tbnet_poll()
837 napi_gro_receive(&net->napi, skb); in tbnet_poll()
838 net->skb = NULL; in tbnet_poll()
842 net->stats.rx_packets += rx_packets; in tbnet_poll()
845 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
852 tb_ring_poll_complete(net->rx_ring.ring); in tbnet_poll()
859 struct tbnet *net = data; in tbnet_start_poll() local
861 napi_schedule(&net->napi); in tbnet_start_poll()
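The ring's start_poll hook at 859-861 is trivial and reconstructs directly from the two listed lines:

static void tbnet_start_poll(void *data)
{
        struct tbnet *net = data;

        /* Called by the Thunderbolt Rx ring when frames are pending;
         * the ring interrupt stays masked until tbnet_poll() calls
         * tb_ring_poll_complete().
         */
        napi_schedule(&net->napi);
}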
866 struct tbnet *net = netdev_priv(dev); in tbnet_open() local
867 struct tb_xdomain *xd = net->xd; in tbnet_open()
880 net->tx_ring.ring = ring; in tbnet_open()
885 tb_ring_free(net->tx_ring.ring); in tbnet_open()
886 net->tx_ring.ring = NULL; in tbnet_open()
889 net->local_transmit_path = hopid; in tbnet_open()
896 tbnet_start_poll, net); in tbnet_open()
899 tb_ring_free(net->tx_ring.ring); in tbnet_open()
900 net->tx_ring.ring = NULL; in tbnet_open()
903 net->rx_ring.ring = ring; in tbnet_open()
905 napi_enable(&net->napi); in tbnet_open()
906 start_login(net); in tbnet_open()
913 struct tbnet *net = netdev_priv(dev); in tbnet_stop() local
915 napi_disable(&net->napi); in tbnet_stop()
917 cancel_work_sync(&net->disconnect_work); in tbnet_stop()
918 tbnet_tear_down(net, true); in tbnet_stop()
920 tb_ring_free(net->rx_ring.ring); in tbnet_stop()
921 net->rx_ring.ring = NULL; in tbnet_stop()
923 tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path); in tbnet_stop()
924 tb_ring_free(net->tx_ring.ring); in tbnet_stop()
925 net->tx_ring.ring = NULL; in tbnet_stop()
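tbnet_stop() (913-925) reconstructs almost verbatim from the listing and mirrors the resources set up in tbnet_open() above; only the return value is assumed.

static int tbnet_stop(struct net_device *dev)
{
        struct tbnet *net = netdev_priv(dev);

        napi_disable(&net->napi);

        cancel_work_sync(&net->disconnect_work);
        /* Send a logout and unwind rings, buffers and DMA paths */
        tbnet_tear_down(net, true);

        tb_ring_free(net->rx_ring.ring);
        net->rx_ring.ring = NULL;

        tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
        tb_ring_free(net->tx_ring.ring);
        net->tx_ring.ring = NULL;

        return 0;
}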
930 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, in tbnet_xmit_csum_and_map() argument
934 struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring); in tbnet_xmit_csum_and_map()
1042 struct tbnet *net = netdev_priv(dev); in tbnet_start_xmit() local
1044 u16 frame_id = atomic_read(&net->frame_id); in tbnet_start_xmit()
1056 if (tbnet_available_buffers(&net->tx_ring) < nframes) { in tbnet_start_xmit()
1057 netif_stop_queue(net->dev); in tbnet_start_xmit()
1061 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1111 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1149 if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1)) in tbnet_start_xmit()
1153 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame); in tbnet_start_xmit()
1155 if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID) in tbnet_start_xmit()
1156 atomic_inc(&net->frame_id); in tbnet_start_xmit()
1158 net->stats.tx_packets++; in tbnet_start_xmit()
1159 net->stats.tx_bytes += skb->len; in tbnet_start_xmit()
1167 net->tx_ring.cons -= frame_index; in tbnet_start_xmit()
1170 net->stats.tx_errors++; in tbnet_start_xmit()
1178 struct tbnet *net = netdev_priv(dev); in tbnet_get_stats64() local
1180 stats->tx_packets = net->stats.tx_packets; in tbnet_get_stats64()
1181 stats->rx_packets = net->stats.rx_packets; in tbnet_get_stats64()
1182 stats->tx_bytes = net->stats.tx_bytes; in tbnet_get_stats64()
1183 stats->rx_bytes = net->stats.rx_bytes; in tbnet_get_stats64()
1184 stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors + in tbnet_get_stats64()
1185 net->stats.rx_over_errors + net->stats.rx_crc_errors + in tbnet_get_stats64()
1186 net->stats.rx_missed_errors; in tbnet_get_stats64()
1187 stats->tx_errors = net->stats.tx_errors; in tbnet_get_stats64()
1188 stats->rx_length_errors = net->stats.rx_length_errors; in tbnet_get_stats64()
1189 stats->rx_over_errors = net->stats.rx_over_errors; in tbnet_get_stats64()
1190 stats->rx_crc_errors = net->stats.rx_crc_errors; in tbnet_get_stats64()
1191 stats->rx_missed_errors = net->stats.rx_missed_errors; in tbnet_get_stats64()
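The statistics callback at 1178-1191 is fully visible in the listing; written out it is simply a copy of the driver's private counters into the rtnl stats, with the aggregate rx_errors summed from the individual error classes:

static void tbnet_get_stats64(struct net_device *dev,
                              struct rtnl_link_stats64 *stats)
{
        struct tbnet *net = netdev_priv(dev);

        stats->tx_packets = net->stats.tx_packets;
        stats->rx_packets = net->stats.rx_packets;
        stats->tx_bytes = net->stats.tx_bytes;
        stats->rx_bytes = net->stats.rx_bytes;
        stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
                net->stats.rx_over_errors + net->stats.rx_crc_errors +
                net->stats.rx_missed_errors;
        stats->tx_errors = net->stats.tx_errors;
        stats->rx_length_errors = net->stats.rx_length_errors;
        stats->rx_over_errors = net->stats.rx_over_errors;
        stats->rx_crc_errors = net->stats.rx_crc_errors;
        stats->rx_missed_errors = net->stats.rx_missed_errors;
}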
1203 const struct tbnet *net = netdev_priv(dev); in tbnet_generate_mac() local
1204 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac()
1224 struct tbnet *net; in tbnet_probe() local
1227 dev = alloc_etherdev(sizeof(*net)); in tbnet_probe()
1233 net = netdev_priv(dev); in tbnet_probe()
1234 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); in tbnet_probe()
1235 INIT_WORK(&net->connected_work, tbnet_connected_work); in tbnet_probe()
1236 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); in tbnet_probe()
1237 mutex_init(&net->connection_lock); in tbnet_probe()
1238 atomic_set(&net->command_id, 0); in tbnet_probe()
1239 atomic_set(&net->frame_id, 0); in tbnet_probe()
1240 net->svc = svc; in tbnet_probe()
1241 net->dev = dev; in tbnet_probe()
1242 net->xd = xd; in tbnet_probe()
1267 netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT); in tbnet_probe()
1273 net->handler.uuid = &tbnet_svc_uuid; in tbnet_probe()
1274 net->handler.callback = tbnet_handle_packet; in tbnet_probe()
1275 net->handler.data = net; in tbnet_probe()
1276 tb_register_protocol_handler(&net->handler); in tbnet_probe()
1278 tb_service_set_drvdata(svc, net); in tbnet_probe()
1282 tb_unregister_protocol_handler(&net->handler); in tbnet_probe()
1292 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_remove() local
1294 unregister_netdev(net->dev); in tbnet_remove()
1295 tb_unregister_protocol_handler(&net->handler); in tbnet_remove()
1296 free_netdev(net->dev); in tbnet_remove()
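Probe and remove (1224-1296) reconstruct as the sketch below. tb_service_parent(), SET_NETDEV_DEV and the error handling around register_netdev() are assumptions, and the netdev_ops/feature/MTU setup present in the driver is elided; the work/lock/atomic initialisation and handler registration follow the listing.

static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
        struct tb_xdomain *xd = tb_service_parent(svc);
        struct net_device *dev;
        struct tbnet *net;
        int ret;

        dev = alloc_etherdev(sizeof(*net));
        if (!dev)
                return -ENOMEM;

        SET_NETDEV_DEV(dev, &svc->dev);

        net = netdev_priv(dev);
        INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
        INIT_WORK(&net->connected_work, tbnet_connected_work);
        INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
        mutex_init(&net->connection_lock);
        atomic_set(&net->command_id, 0);
        atomic_set(&net->frame_id, 0);
        net->svc = svc;
        net->dev = dev;
        net->xd = xd;

        tbnet_generate_mac(dev);

        /* netdev_ops, feature flags and MTU limits are configured here
         * in the driver; elided from this sketch.
         */

        netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT);

        /* Receive ThunderboltIP control packets for this service */
        net->handler.uuid = &tbnet_svc_uuid;
        net->handler.callback = tbnet_handle_packet;
        net->handler.data = net;
        tb_register_protocol_handler(&net->handler);

        tb_service_set_drvdata(svc, net);

        ret = register_netdev(dev);
        if (ret) {
                tb_unregister_protocol_handler(&net->handler);
                free_netdev(dev);
                return ret;
        }

        return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
        struct tbnet *net = tb_service_get_drvdata(svc);

        unregister_netdev(net->dev);
        tb_unregister_protocol_handler(&net->handler);
        free_netdev(net->dev);
}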
1307 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_suspend() local
1309 stop_login(net); in tbnet_suspend()
1310 if (netif_running(net->dev)) { in tbnet_suspend()
1311 netif_device_detach(net->dev); in tbnet_suspend()
1312 tbnet_tear_down(net, true); in tbnet_suspend()
1315 tb_unregister_protocol_handler(&net->handler); in tbnet_suspend()
1322 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_resume() local
1324 tb_register_protocol_handler(&net->handler); in tbnet_resume()
1326 netif_carrier_off(net->dev); in tbnet_resume()
1327 if (netif_running(net->dev)) { in tbnet_resume()
1328 netif_device_attach(net->dev); in tbnet_resume()
1329 start_login(net); in tbnet_resume()
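The suspend/resume hooks at 1307-1329 tear the connection down before sleep and restart the login handshake after wakeup. A sketch, assuming these are dev_pm_ops callbacks that recover the service with tb_to_service():

static int __maybe_unused tbnet_suspend(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tbnet *net = tb_service_get_drvdata(svc);

        stop_login(net);
        if (netif_running(net->dev)) {
                netif_device_detach(net->dev);
                /* Log out from the peer and release rings/paths */
                tbnet_tear_down(net, true);
        }

        tb_unregister_protocol_handler(&net->handler);

        return 0;
}

static int __maybe_unused tbnet_resume(struct device *dev)
{
        struct tb_service *svc = tb_to_service(dev);
        struct tbnet *net = tb_service_get_drvdata(svc);

        tb_register_protocol_handler(&net->handler);

        netif_carrier_off(net->dev);
        if (netif_running(net->dev)) {
                netif_device_attach(net->dev);
                /* Renegotiate the connection from scratch */
                start_login(net);
        }

        return 0;
}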