/*
 * Copyright (c) 2013 Corey Tabaka
 * Copyright (c) 2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */

#include <lk/reg.h>
#include <lk/err.h>
#include <pcnet.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <assert.h>
#include <arch/x86.h>
#include <platform/pc.h>
#include <platform/pcnet.h>
#include <platform/interrupts.h>
#include <kernel/thread.h>
#include <kernel/mutex.h>
#include <kernel/event.h>
#include <dev/class/netif.h>
#include <dev/bus/pci.h>
#include <stdlib.h>
#include <malloc.h>
#include <string.h>
#include <lwip/pbuf.h>
#include <lk/init.h>

#define LOCAL_TRACE 0

#define PCNET_INIT_TIMEOUT 20000
#define MAX_PACKET_SIZE 1518

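/*
 * Some QEMU configurations appear to deliver the PCnet interrupt on IRQ 15
 * rather than on the line reported in PCI config space, so when this
 * workaround is enabled the handler is hooked to both lines.
 */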
#define QEMU_IRQ_BUG_WORKAROUND 1

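/*
 * Per-device driver state: the I/O base and IRQ discovered from PCI config
 * space, the 32-bit (style 3) descriptor rings along with shadow arrays of
 * the pbufs backing each slot, ring head/tail accounting, and the event used
 * to hand interrupt work off to the bottom-half thread.
 */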
struct pcnet_state {
    int irq;
    addr_t base;

    uint8_t padr[6];

    struct init_block_32 *ib;

    struct rd_style3 *rd;
    struct td_style3 *td;

    struct pbuf **rx_buffers;
    struct pbuf **tx_buffers;

    /* queue accounting */
    int rd_head;
    int td_head;
    int td_tail;

    int rd_count;
    int td_count;

    int tx_pending;

    mutex_t tx_lock;

    /* bottom half state */
    event_t event;
    event_t initialized;
    bool done;

    struct netstack_state *netstack_state;
};

static status_t pcnet_init(struct device *dev);
static status_t pcnet_read_pci_config(struct device *dev, pci_location_t *loc);

static enum handler_return pcnet_irq_handler(void *arg);

static int pcnet_thread(void *arg);
static bool pcnet_service_tx(struct device *dev);
static bool pcnet_service_rx(struct device *dev);

static status_t pcnet_set_state(struct device *dev, struct netstack_state *state);
static ssize_t pcnet_get_hwaddr(struct device *dev, void *buf, size_t max_len);
static ssize_t pcnet_get_mtu(struct device *dev);

static status_t pcnet_output(struct device *dev, struct pbuf *p);

static struct netif_ops pcnet_ops = {
    .std = {
        .init = pcnet_init,
    },

    .set_state = pcnet_set_state,
    .get_hwaddr = pcnet_get_hwaddr,
    .get_mtu = pcnet_get_mtu,

    .output = pcnet_output,
};

DRIVER_EXPORT(netif, &pcnet_ops.std);

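/*
 * The controller's CSRs and BCRs are accessed indirectly: write the register
 * index to RAP (the register address port), then read or write the data
 * through RDP (CSR data port) or BDP (BCR data port).
 */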
static inline uint32_t pcnet_read_csr(struct device *dev, uint8_t rap) {
    struct pcnet_state *state = dev->state;

    outpd(state->base + REG_RAP, rap);
    return inpd(state->base + REG_RDP);
}

static inline void pcnet_write_csr(struct device *dev, uint8_t rap, uint16_t data) {
    struct pcnet_state *state = dev->state;

    outpd(state->base + REG_RAP, rap);
    outpd(state->base + REG_RDP, data);
}

static inline uint32_t pcnet_read_bcr(struct device *dev, uint8_t rap) {
    struct pcnet_state *state = dev->state;

    outpd(state->base + REG_RAP, rap);
    return inpd(state->base + REG_BDP);
}

static inline void pcnet_write_bcr(struct device *dev, uint8_t rap, uint16_t data) {
    struct pcnet_state *state = dev->state;

    outpd(state->base + REG_RAP, rap);
    outpd(state->base + REG_BDP, data);
}

static status_t pcnet_init(struct device *dev) {
    status_t res = NO_ERROR;
    pci_location_t loc;
    int i;

    const struct platform_pcnet_config *config = dev->config;

    if (!config)
        return ERR_NOT_CONFIGURED;

    if (pci_find_pci_device(&loc, config->device_id, config->vendor_id, config->index) != _PCI_SUCCESSFUL) {
        TRACEF("device not found\n");
        return ERR_NOT_FOUND;
    }

    struct pcnet_state *state = calloc(1, sizeof(struct pcnet_state));
    if (!state)
        return ERR_NO_MEMORY;

    dev->state = state;

    res = pcnet_read_pci_config(dev, &loc);
    if (res)
        goto error;

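    /* the first bytes of the I/O aperture mirror the station address PROM
     * (APROM), which holds the factory-assigned MAC address */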
    for (i=0; i < 6; i++)
        state->padr[i] = inp(state->base + i);

    LTRACEF("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", state->padr[0], state->padr[1], state->padr[2],
            state->padr[3], state->padr[4], state->padr[5]);

    /* put the controller into 32bit wide mode by performing a 32bit write to CSR0 */
    outpd(state->base + 0, 0);

    /* stop the controller for configuration */
    pcnet_write_csr(dev, 0, CSR0_STOP);

    /* setup 32bit (style 3) structures, burst, all CSR4 bits valid, TDM1[29] is ADD_FCS */
    pcnet_write_csr(dev, 58, 3);

    /* DMA plus enable */
    pcnet_write_csr(dev, 4, pcnet_read_csr(dev, 4) | CSR4_DMAPLUS);

    /* allocate 128 tx and 128 rx descriptor rings */
    state->td_count = 128;
    state->rd_count = 128;
    state->td = memalign(16, state->td_count * DESC_SIZE);
    state->rd = memalign(16, state->rd_count * DESC_SIZE);

    state->rx_buffers = calloc(state->rd_count, sizeof(struct pbuf *));
    state->tx_buffers = calloc(state->td_count, sizeof(struct pbuf *));

    state->tx_pending = 0;

    if (!state->td || !state->rd || !state->tx_buffers || !state->rx_buffers) {
        res = ERR_NO_MEMORY;
        goto error;
    }

    memset(state->td, 0, state->td_count * DESC_SIZE);
    memset(state->rd, 0, state->rd_count * DESC_SIZE);

    /* allocate temporary init block space */
    state->ib = memalign(4, sizeof(struct init_block_32));
    if (!state->ib) {
        res = ERR_NO_MEMORY;
        goto error;
    }

    LTRACEF("Init block addr: %p\n", state->ib);

    /* setup init block */
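    /* TLEN/RLEN encode the ring sizes as a power of two (7 selects 128
     * entries), and ladr of ~0 sets every bit of the logical address filter
     * so all multicast frames are accepted */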
    state->ib->tlen = 7; // 128 descriptors
    state->ib->rlen = 7; // 128 descriptors
    state->ib->mode = 0;

    state->ib->ladr = ~0;
    state->ib->tdra = (uint32_t) state->td;
    state->ib->rdra = (uint32_t) state->rd;

    memcpy(state->ib->padr, state->padr, 6);

    /* load the init block address */
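    /* CSR1/CSR2 take the low and high 16 bits of the init block's bus
     * address; like the descriptor ring pointers above, this assumes the
     * kernel virtual addresses used here are also valid DMA addresses */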
    pcnet_write_csr(dev, 1, (uint32_t) state->ib);
    pcnet_write_csr(dev, 2, (uint32_t) state->ib >> 16);

    /* setup receive descriptors */
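    /* descriptor conventions: BCNT holds the two's complement of the buffer
     * length, the ONES field must be written as 0xf, and setting OWN hands
     * the descriptor to the controller */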
    for (i=0; i < state->rd_count; i++) {
        //LTRACEF("Allocating pbuf %d\n", i);
        struct pbuf *p = pbuf_alloc(PBUF_RAW, MAX_PACKET_SIZE, PBUF_RAM);

        state->rd[i].rbadr = (uint32_t) p->payload;
        state->rd[i].bcnt = -p->tot_len;
        state->rd[i].ones = 0xf;
        state->rd[i].own = 1;

        state->rx_buffers[i] = p;
    }

    mutex_init(&state->tx_lock);

    state->done = false;
    event_init(&state->event, false, EVENT_FLAG_AUTOUNSIGNAL);
    event_init(&state->initialized, false, 0);

    /* start up a thread to process packet activity */
    thread_resume(thread_create("[pcnet bh]", pcnet_thread, dev, DEFAULT_PRIORITY,
                                DEFAULT_STACK_SIZE));

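    /* hook the PCI interrupt line; the handler just masks the line and
     * signals the bottom-half thread, which does the real servicing */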
    register_int_handler(state->irq, pcnet_irq_handler, dev);
    unmask_interrupt(state->irq);

#if QEMU_IRQ_BUG_WORKAROUND
    register_int_handler(INT_BASE + 15, pcnet_irq_handler, dev);
    unmask_interrupt(INT_BASE + 15);
#endif

    /* wait for initialization to complete */
    res = event_wait_timeout(&state->initialized, PCNET_INIT_TIMEOUT);
    if (res) {
        /* TODO: cancel bottom half thread and tear down device instance */
        LTRACEF("Failed to wait for IDON: %d\n", res);
        return res;
    }

    LTRACE_EXIT;
    return res;

error:
    LTRACEF("Error: %d\n", res);

    if (state) {
        free(state->td);
        free(state->rd);
        free(state->ib);
        free(state->tx_buffers);
        free(state->rx_buffers);
    }

    free(state);

    return res;
}

static status_t pcnet_read_pci_config(struct device *dev, pci_location_t *loc) {
    status_t res = NO_ERROR;
    pci_config_t config;
    uint8_t *buf = (uint8_t *) &config;
    unsigned i;

    DEBUG_ASSERT(dev->state);

    struct pcnet_state *state = dev->state;

    for (i=0; i < sizeof(config); i++)
        pci_read_config_byte(loc, i, buf + i);

    LTRACEF("Resources:\n");

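    /* bit 0 of a BAR distinguishes I/O space (1) from memory space (0);
     * take the first I/O BAR as the register base */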
    for (i=0; i < countof(config.base_addresses); i++) {
        if (config.base_addresses[i] & 0x1) {
            LTRACEF(" BAR %d I/O REG: %04x\n", i, config.base_addresses[i] & ~0x3);

            state->base = config.base_addresses[i] & ~0x3;
            break;
        }
    }

    if (!state->base) {
        res = ERR_NOT_CONFIGURED;
        goto error;
    }

    if (config.interrupt_line != 0xff) {
        LTRACEF(" IRQ %u\n", config.interrupt_line);

        state->irq = config.interrupt_line + INT_BASE;
    } else {
        res = ERR_NOT_CONFIGURED;
        goto error;
    }

    LTRACEF("Command: %04x\n", config.command);
    LTRACEF("Status: %04x\n", config.status);

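    /* enable I/O decoding and bus mastering; memory space decoding is not
     * needed since the driver only uses the I/O BAR */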
    pci_write_config_half(loc, PCI_CONFIG_COMMAND,
                          (config.command | PCI_COMMAND_IO_EN | PCI_COMMAND_BUS_MASTER_EN) & ~PCI_COMMAND_MEM_EN);

error:
    return res;
}

static enum handler_return pcnet_irq_handler(void *arg) {
    struct device *dev = arg;
    struct pcnet_state *state = dev->state;

    mask_interrupt(state->irq);

#if QEMU_IRQ_BUG_WORKAROUND
    mask_interrupt(INT_BASE + 15);
#endif

    event_signal(&state->event, false);

    return INT_RESCHEDULE;
}

static int pcnet_thread(void *arg) {
    DEBUG_ASSERT(arg);

    struct device *dev = arg;
    struct pcnet_state *state = dev->state;

    /* kick off init, enable ints, and start operation */
    pcnet_write_csr(dev, 0, CSR0_INIT | CSR0_IENA | CSR0_STRT);

    while (!state->done) {
        LTRACEF("Waiting for event.\n");
        //event_wait_timeout(&state->event, 5000);
        event_wait(&state->event);

        int csr0 = pcnet_read_csr(dev, 0);

        /* disable interrupts at the controller */
        pcnet_write_csr(dev, 0, csr0 & ~CSR0_IENA);

        LTRACEF("CSR0 = %04x\n", csr0);

#if LOCAL_TRACE
        if (csr0 & CSR0_RINT) TRACEF("RINT\n");
        if (csr0 & CSR0_TINT) TRACEF("TINT\n");
#endif

        if (csr0 & CSR0_IDON) {
            LTRACEF("IDON\n");

            /* free the init block that we no longer need */
            free(state->ib);
            state->ib = NULL;

            event_signal(&state->initialized, true);
        }

        if (csr0 & CSR0_ERR) {
            LTRACEF("ERR\n");

            /* TODO: handle errors, though not many need it */

            /* clear flags, preserve necessary enables */
            pcnet_write_csr(dev, 0, csr0 & (CSR0_TXON | CSR0_RXON | CSR0_IENA));
        }

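        /* keep draining both rings until neither side makes progress; the
         * bitwise OR (rather than ||) ensures both service routines run on
         * every pass */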
        bool again = !!(csr0 & (CSR0_RINT | CSR0_TINT));
        while (again) {
            again = pcnet_service_tx(dev) | pcnet_service_rx(dev);
        }

        /* enable interrupts at the controller */
        pcnet_write_csr(dev, 0, CSR0_IENA);
        unmask_interrupt(state->irq);

#if QEMU_IRQ_BUG_WORKAROUND
        unmask_interrupt(INT_BASE + 15);
#endif
    }

    return 0;
}

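/*
 * Retire at most one completed transmit descriptor at td_tail, freeing the
 * pbuf that backed it. Returns true if a descriptor was reclaimed so the
 * caller knows to keep polling.
 */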
static bool pcnet_service_tx(struct device *dev) {
    LTRACE_ENTRY;

    struct pcnet_state *state = dev->state;

    mutex_acquire(&state->tx_lock);

    struct td_style3 *td = &state->td[state->td_tail];

    if (state->tx_pending && td->own == 0) {
        struct pbuf *p = state->tx_buffers[state->td_tail];
        DEBUG_ASSERT(p);

        state->tx_buffers[state->td_tail] = NULL;

        LTRACEF("Retiring packet: td_tail=%d p=%p tot_len=%u\n", state->td_tail, p, p->tot_len);

        state->tx_pending--;
        state->td_tail = (state->td_tail + 1) % state->td_count;

        if (td->err) {
            LTRACEF("Descriptor error status encountered\n");
            hexdump8(td, sizeof(*td));
        }

        mutex_release(&state->tx_lock);

        pbuf_free(p);

        LTRACE_EXIT;
        return true;
    } else {
        mutex_release(&state->tx_lock);

#if 0
        LTRACEF("Nothing to do for TX.\n");
        for (int i=0; i < state->td_count; i++)
            printf("%d ", state->td[i].own);
        printf("\n");
#endif

        LTRACE_EXIT;
        return false;
    }
}

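/*
 * Process at most one received frame at rd_head: pass the pbuf up to the
 * network stack, allocate a fresh buffer, and return the descriptor to the
 * controller. Returns true if a descriptor was consumed.
 */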
static bool pcnet_service_rx(struct device *dev) {
    LTRACE_ENTRY;

    struct pcnet_state *state = dev->state;

    struct rd_style3 *rd = &state->rd[state->rd_head];

    if (rd->own == 0) {
        struct pbuf *p = state->rx_buffers[state->rd_head];
        DEBUG_ASSERT(p);

        LTRACEF("Processing RX descriptor %d\n", state->rd_head);

        if (rd->err) {
            LTRACEF("Descriptor error status encountered\n");
            hexdump8(rd, sizeof(*rd));
        } else {
            if (rd->mcnt <= p->tot_len) {

                pbuf_realloc(p, rd->mcnt);

#if LOCAL_TRACE
                LTRACEF("payload=%p len=%u\n", p->payload, p->tot_len);
                hexdump8(p->payload, p->tot_len);
#endif

                class_netstack_input(dev, state->netstack_state, p);

                p = state->rx_buffers[state->rd_head] = pbuf_alloc(PBUF_RAW, MAX_PACKET_SIZE, PBUF_RAM);
            } else {
                LTRACEF("RX packet size error: mcnt = %u, buf len = %u\n", rd->mcnt, p->tot_len);
            }
        }

        memset(rd, 0, sizeof(*rd));
        memset(p->payload, 0, p->tot_len);

        rd->rbadr = (uint32_t) p->payload;
        rd->bcnt = -p->tot_len;
        rd->ones = 0xf;
        rd->own = 1;

        state->rd_head = (state->rd_head + 1) % state->rd_count;

        LTRACE_EXIT;
        return true;
    } else {
#if 0
        LTRACEF("Nothing to do for RX: rd_head=%d.\n", state->rd_head);
        for (int i=0; i < state->rd_count; i++)
            printf("%d ", state->rd[i].own);
        printf("\n");
#endif
    }

    LTRACE_EXIT;
    return false;
}

static status_t pcnet_set_state(struct device *dev, struct netstack_state *netstack_state) {
    if (!dev)
        return ERR_INVALID_ARGS;

    if (!dev->state)
        return ERR_NOT_CONFIGURED;

    struct pcnet_state *state = dev->state;

    state->netstack_state = netstack_state;

    return NO_ERROR;
}

static ssize_t pcnet_get_hwaddr(struct device *dev, void *buf, size_t max_len) {
    if (!dev || !buf)
        return ERR_INVALID_ARGS;

    if (!dev->state)
        return ERR_NOT_CONFIGURED;

    struct pcnet_state *state = dev->state;

    memcpy(buf, state->padr, MIN(sizeof(state->padr), max_len));

    return sizeof(state->padr);
}

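/* the advertised MTU is the standard Ethernet payload size; MAX_PACKET_SIZE
 * (1518) additionally covers the 14-byte header and 4-byte FCS */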
static ssize_t pcnet_get_mtu(struct device *dev) {
    if (!dev)
        return ERR_INVALID_ARGS;

    return 1500;
}

static status_t pcnet_output(struct device *dev, struct pbuf *p) {
    LTRACE_ENTRY;

    if (!dev || !p)
        return ERR_INVALID_ARGS;

    if (!dev->state)
        return ERR_NOT_CONFIGURED;

    status_t res = NO_ERROR;
    struct pcnet_state *state = dev->state;

    mutex_acquire(&state->tx_lock);

    struct td_style3 *td = &state->td[state->td_head];

    if (td->own) {
        LTRACEF("TX descriptor ring full\n");
        res = ERR_NOT_READY; // maybe this should be ERR_NOT_ENOUGH_BUFFER?
        goto done;
    }

    pbuf_ref(p);
    p = pbuf_coalesce(p, PBUF_RAW);

#if LOCAL_TRACE
    LTRACEF("Queuing packet: td_head=%d p=%p tot_len=%u\n", state->td_head, p, p->tot_len);
    hexdump8(p->payload, p->tot_len);
#endif

    /* clear flags */
    memset(td, 0, sizeof(*td));

    td->tbadr = (uint32_t) p->payload;
    td->bcnt = -p->tot_len;
    td->stp = 1;
    td->enp = 1;
    td->add_no_fcs = 1;
    td->ones = 0xf;

    state->tx_buffers[state->td_head] = p;
    state->tx_pending++;

    state->td_head = (state->td_head + 1) % state->td_count;

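    /* hand the descriptor to the controller only after every other field has
     * been filled in, then nudge it with TDMD to start transmission */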
    td->own = 1;

    /* trigger tx */
    pcnet_write_csr(dev, 0, CSR0_TDMD);

done:
    mutex_release(&state->tx_lock);
    LTRACE_EXIT;
    return res;
}

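/* vendor 0x1022, device 0x2000 is the AMD PCnet-PCI family (e.g. Am79C970A),
 * which is what QEMU's "pcnet" NIC model emulates */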
static const struct platform_pcnet_config pcnet0_config = {
    .vendor_id = 0x1022,
    .device_id = 0x2000,
    .index = 0,
};

DEVICE_INSTANCE(netif, pcnet0, &pcnet0_config, 0);

static void pcnet_init_hook(uint level) {
    status_t err = device_init(device_get_by_name(netif, pcnet0));
    if (err < 0)
        return;

    class_netif_add(device_get_by_name(netif, pcnet0));
}

LK_INIT_HOOK(pcnet, &pcnet_init_hook, LK_INIT_LEVEL_PLATFORM);