1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PCIe host bridge driver for Apple system-on-chips.
4 *
5 * The HW is ECAM compliant, so once the controller is initialized,
6 * the driver mostly deals MSI mapping and handling of per-port
7 * interrupts (INTx, management and error signals).
8 *
9 * Initialization requires enabling power and clocks, along with a
10 * number of register pokes.
11 *
12 * Copyright (C) 2021 Alyssa Rosenzweig <alyssa@rosenzweig.io>
13 * Copyright (C) 2021 Google LLC
14 * Copyright (C) 2021 Corellium LLC
15 * Copyright (C) 2021 Mark Kettenis <kettenis@openbsd.org>
16 *
17 * Author: Alyssa Rosenzweig <alyssa@rosenzweig.io>
18 * Author: Marc Zyngier <maz@kernel.org>
19 */
20
21 #include <linux/gpio/consumer.h>
22 #include <linux/kernel.h>
23 #include <linux/iopoll.h>
24 #include <linux/irqchip/chained_irq.h>
25 #include <linux/irqdomain.h>
26 #include <linux/list.h>
27 #include <linux/module.h>
28 #include <linux/msi.h>
29 #include <linux/notifier.h>
30 #include <linux/of_irq.h>
31 #include <linux/pci-ecam.h>
32
33 #define CORE_RC_PHYIF_CTL 0x00024
34 #define CORE_RC_PHYIF_CTL_RUN BIT(0)
35 #define CORE_RC_PHYIF_STAT 0x00028
36 #define CORE_RC_PHYIF_STAT_REFCLK BIT(4)
37 #define CORE_RC_CTL 0x00050
38 #define CORE_RC_CTL_RUN BIT(0)
39 #define CORE_RC_STAT 0x00058
40 #define CORE_RC_STAT_READY BIT(0)
41 #define CORE_FABRIC_STAT 0x04000
42 #define CORE_FABRIC_STAT_MASK 0x001F001F
43 #define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
44 #define CORE_LANE_CFG_REFCLK0REQ BIT(0)
45 #define CORE_LANE_CFG_REFCLK1 BIT(1)
46 #define CORE_LANE_CFG_REFCLK0ACK BIT(2)
47 #define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
48 #define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
49 #define CORE_LANE_CTL_CFGACC BIT(15)
50
51 #define PORT_LTSSMCTL 0x00080
52 #define PORT_LTSSMCTL_START BIT(0)
53 #define PORT_INTSTAT 0x00100
54 #define PORT_INT_TUNNEL_ERR 31
55 #define PORT_INT_CPL_TIMEOUT 23
56 #define PORT_INT_RID2SID_MAPERR 22
57 #define PORT_INT_CPL_ABORT 21
58 #define PORT_INT_MSI_BAD_DATA 19
59 #define PORT_INT_MSI_ERR 18
60 #define PORT_INT_REQADDR_GT32 17
61 #define PORT_INT_AF_TIMEOUT 15
62 #define PORT_INT_LINK_DOWN 14
63 #define PORT_INT_LINK_UP 12
64 #define PORT_INT_LINK_BWMGMT 11
65 #define PORT_INT_AER_MASK (15 << 4)
66 #define PORT_INT_PORT_ERR 4
67 #define PORT_INT_INTx(i) i
68 #define PORT_INT_INTx_MASK 15
69 #define PORT_INTMSK 0x00104
70 #define PORT_INTMSKSET 0x00108
71 #define PORT_INTMSKCLR 0x0010c
72 #define PORT_MSICFG 0x00124
73 #define PORT_MSICFG_EN BIT(0)
74 #define PORT_MSICFG_L2MSINUM_SHIFT 4
75 #define PORT_MSIBASE 0x00128
76 #define PORT_MSIBASE_1_SHIFT 16
77 #define PORT_MSIADDR 0x00168
78 #define PORT_LINKSTS 0x00208
79 #define PORT_LINKSTS_UP BIT(0)
80 #define PORT_LINKSTS_BUSY BIT(2)
81 #define PORT_LINKCMDSTS 0x00210
82 #define PORT_OUTS_NPREQS 0x00284
83 #define PORT_OUTS_NPREQS_REQ BIT(24)
84 #define PORT_OUTS_NPREQS_CPL BIT(16)
85 #define PORT_RXWR_FIFO 0x00288
86 #define PORT_RXWR_FIFO_HDR GENMASK(15, 10)
87 #define PORT_RXWR_FIFO_DATA GENMASK(9, 0)
88 #define PORT_RXRD_FIFO 0x0028C
89 #define PORT_RXRD_FIFO_REQ GENMASK(6, 0)
90 #define PORT_OUTS_CPLS 0x00290
91 #define PORT_OUTS_CPLS_SHRD GENMASK(14, 8)
92 #define PORT_OUTS_CPLS_WAIT GENMASK(6, 0)
93 #define PORT_APPCLK 0x00800
94 #define PORT_APPCLK_EN BIT(0)
95 #define PORT_APPCLK_CGDIS BIT(8)
96 #define PORT_STATUS 0x00804
97 #define PORT_STATUS_READY BIT(0)
98 #define PORT_REFCLK 0x00810
99 #define PORT_REFCLK_EN BIT(0)
100 #define PORT_REFCLK_CGDIS BIT(8)
101 #define PORT_PERST 0x00814
102 #define PORT_PERST_OFF BIT(0)
103 #define PORT_RID2SID(i16) (0x00828 + 4 * (i16))
104 #define PORT_RID2SID_VALID BIT(31)
105 #define PORT_RID2SID_SID_SHIFT 16
106 #define PORT_RID2SID_BUS_SHIFT 8
107 #define PORT_RID2SID_DEV_SHIFT 3
108 #define PORT_RID2SID_FUNC_SHIFT 0
109 #define PORT_OUTS_PREQS_HDR 0x00980
110 #define PORT_OUTS_PREQS_HDR_MASK GENMASK(9, 0)
111 #define PORT_OUTS_PREQS_DATA 0x00984
112 #define PORT_OUTS_PREQS_DATA_MASK GENMASK(15, 0)
113 #define PORT_TUNCTRL 0x00988
114 #define PORT_TUNCTRL_PERST_ON BIT(0)
115 #define PORT_TUNCTRL_PERST_ACK_REQ BIT(1)
116 #define PORT_TUNSTAT 0x0098c
117 #define PORT_TUNSTAT_PERST_ON BIT(0)
118 #define PORT_TUNSTAT_PERST_ACK_PEND BIT(1)
119 #define PORT_PREFMEM_ENABLE 0x00994
120
121 #define MAX_RID2SID 64
122
123 /*
124 * The doorbell address is set to 0xfffff000, which by convention
125 * matches what MacOS does, and it is possible to use any other
126 * address (in the bottom 4GB, as the base register is only 32bit).
127 * However, it has to be excluded from the IOVA range, and the DART
128 * driver has to know about it.
129 */
130 #define DOORBELL_ADDR CONFIG_PCIE_APPLE_MSI_DOORBELL_ADDR
131
/* Per-controller state, one instance per "apple,pcie" node. */
struct apple_pcie {
	struct mutex lock;		/* serializes MSI bitmap and RID2SID map updates */
	struct device *dev;		/* platform device backing the bridge */
	void __iomem *base;		/* core (root complex) register block */
	struct irq_domain *domain;	/* PCI/MSI irq domain */
	unsigned long *bitmap;		/* MSI vector allocation bitmap (nvecs bits) */
	struct list_head ports;		/* list of struct apple_pcie_port */
	struct completion event;	/* completed on link-up interrupt */
	struct irq_fwspec fwspec;	/* parent (wired) interrupt spec for MSIs */
	u32 nvecs;			/* number of MSI vectors (from "msi-ranges") */
};
143
/* Per-root-port state, one instance per child node of the controller. */
struct apple_pcie_port {
	struct apple_pcie *pcie;	/* owning controller */
	struct device_node *np;		/* port's device-tree node */
	void __iomem *base;		/* per-port register block */
	struct irq_domain *domain;	/* per-port irq domain (INTx + management) */
	struct list_head entry;		/* link in pcie->ports */
	DECLARE_BITMAP(sid_map, MAX_RID2SID);	/* in-use RID2SID entries */
	int sid_map_sz;			/* number of writable RID2SID entries */
	int idx;			/* port index (from first "reg" entry) */
};
154
/* Read-modify-write: set bits @set in the 32-bit register at @addr. */
static void rmw_set(u32 set, void __iomem *addr)
{
	u32 val = readl_relaxed(addr);

	writel_relaxed(val | set, addr);
}
159
/* Read-modify-write: clear bits @clr in the 32-bit register at @addr. */
static void rmw_clear(u32 clr, void __iomem *addr)
{
	u32 val = readl_relaxed(addr);

	writel_relaxed(val & ~clr, addr);
}
164
/* Mask an MSI both at the PCI device level and in the parent chip. */
static void apple_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
170
/* Unmask an MSI both at the PCI device level and in the parent chip. */
static void apple_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
176
/*
 * Top-level (PCI-facing) MSI irqchip; mask/unmask act on both the PCI
 * device and the parent hierarchy, everything else is passed down.
 */
static struct irq_chip apple_msi_top_chip = {
	.name			= "PCIe MSI",
	.irq_mask		= apple_msi_top_irq_mask,
	.irq_unmask		= apple_msi_top_irq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
};
185
apple_msi_compose_msg(struct irq_data * data,struct msi_msg * msg)186 static void apple_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
187 {
188 msg->address_hi = upper_32_bits(DOORBELL_ADDR);
189 msg->address_lo = lower_32_bits(DOORBELL_ADDR);
190 msg->data = data->hwirq;
191 }
192
/* Inner MSI irqchip: forwards everything to the parent, composes the message. */
static struct irq_chip apple_msi_bottom_chip = {
	.name			= "MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_compose_msi_msg	= apple_msi_compose_msg,
};
202
apple_msi_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)203 static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
204 unsigned int nr_irqs, void *args)
205 {
206 struct apple_pcie *pcie = domain->host_data;
207 struct irq_fwspec fwspec = pcie->fwspec;
208 unsigned int i;
209 int ret, hwirq;
210
211 mutex_lock(&pcie->lock);
212
213 hwirq = bitmap_find_free_region(pcie->bitmap, pcie->nvecs,
214 order_base_2(nr_irqs));
215
216 mutex_unlock(&pcie->lock);
217
218 if (hwirq < 0)
219 return -ENOSPC;
220
221 fwspec.param[1] += hwirq;
222
223 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
224 if (ret)
225 return ret;
226
227 for (i = 0; i < nr_irqs; i++) {
228 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
229 &apple_msi_bottom_chip,
230 domain->host_data);
231 }
232
233 return 0;
234 }
235
/*
 * Free a block of MSI vectors: return the corresponding region to the
 * shared allocation bitmap. The region base is the hwirq of the first
 * descriptor, matching what apple_msi_domain_alloc() reserved.
 */
static void apple_msi_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct apple_pcie *pcie = domain->host_data;

	mutex_lock(&pcie->lock);

	bitmap_release_region(pcie->bitmap, d->hwirq, order_base_2(nr_irqs));

	mutex_unlock(&pcie->lock);
}
248
/* Ops for the inner MSI domain (vector bitmap management). */
static const struct irq_domain_ops apple_msi_domain_ops = {
	.alloc	= apple_msi_domain_alloc,
	.free	= apple_msi_domain_free,
};
253
/* PCI/MSI domain info: supports multi-MSI and MSI-X. */
static struct msi_domain_info apple_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &apple_msi_top_chip,
};
259
apple_port_irq_mask(struct irq_data * data)260 static void apple_port_irq_mask(struct irq_data *data)
261 {
262 struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
263
264 writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKSET);
265 }
266
apple_port_irq_unmask(struct irq_data * data)267 static void apple_port_irq_unmask(struct irq_data *data)
268 {
269 struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
270
271 writel_relaxed(BIT(data->hwirq), port->base + PORT_INTMSKCLR);
272 }
273
hwirq_is_intx(unsigned int hwirq)274 static bool hwirq_is_intx(unsigned int hwirq)
275 {
276 return BIT(hwirq) & PORT_INT_INTx_MASK;
277 }
278
apple_port_irq_ack(struct irq_data * data)279 static void apple_port_irq_ack(struct irq_data *data)
280 {
281 struct apple_pcie_port *port = irq_data_get_irq_chip_data(data);
282
283 if (!hwirq_is_intx(data->hwirq))
284 writel_relaxed(BIT(data->hwirq), port->base + PORT_INTSTAT);
285 }
286
apple_port_irq_set_type(struct irq_data * data,unsigned int type)287 static int apple_port_irq_set_type(struct irq_data *data, unsigned int type)
288 {
289 /*
290 * It doesn't seem that there is any way to configure the
291 * trigger, so assume INTx have to be level (as per the spec),
292 * and the rest is edge (which looks likely).
293 */
294 if (hwirq_is_intx(data->hwirq) ^ !!(type & IRQ_TYPE_LEVEL_MASK))
295 return -EINVAL;
296
297 irqd_set_trigger_type(data, type);
298 return 0;
299 }
300
/* irqchip for the per-port interrupts (INTx and management signals). */
static struct irq_chip apple_port_irqchip = {
	.name		= "PCIe",
	.irq_ack	= apple_port_irq_ack,
	.irq_mask	= apple_port_irq_mask,
	.irq_unmask	= apple_port_irq_unmask,
	.irq_set_type	= apple_port_irq_set_type,
};
308
apple_port_irq_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)309 static int apple_port_irq_domain_alloc(struct irq_domain *domain,
310 unsigned int virq, unsigned int nr_irqs,
311 void *args)
312 {
313 struct apple_pcie_port *port = domain->host_data;
314 struct irq_fwspec *fwspec = args;
315 int i;
316
317 for (i = 0; i < nr_irqs; i++) {
318 irq_flow_handler_t flow = handle_edge_irq;
319 unsigned int type = IRQ_TYPE_EDGE_RISING;
320
321 if (hwirq_is_intx(fwspec->param[0] + i)) {
322 flow = handle_level_irq;
323 type = IRQ_TYPE_LEVEL_HIGH;
324 }
325
326 irq_domain_set_info(domain, virq + i, fwspec->param[0] + i,
327 &apple_port_irqchip, port, flow,
328 NULL, NULL);
329
330 irq_set_irq_type(virq + i, type);
331 }
332
333 return 0;
334 }
335
apple_port_irq_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)336 static void apple_port_irq_domain_free(struct irq_domain *domain,
337 unsigned int virq, unsigned int nr_irqs)
338 {
339 int i;
340
341 for (i = 0; i < nr_irqs; i++) {
342 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
343
344 irq_set_handler(virq + i, NULL);
345 irq_domain_reset_irq_data(d);
346 }
347 }
348
/* Ops for the per-port irq domain; one-cell DT interrupt specifiers. */
static const struct irq_domain_ops apple_port_irq_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= apple_port_irq_domain_alloc,
	.free		= apple_port_irq_domain_free,
};
354
apple_port_irq_handler(struct irq_desc * desc)355 static void apple_port_irq_handler(struct irq_desc *desc)
356 {
357 struct apple_pcie_port *port = irq_desc_get_handler_data(desc);
358 struct irq_chip *chip = irq_desc_get_chip(desc);
359 unsigned long stat;
360 int i;
361
362 chained_irq_enter(chip, desc);
363
364 stat = readl_relaxed(port->base + PORT_INTSTAT);
365
366 for_each_set_bit(i, &stat, 32)
367 generic_handle_domain_irq(port->domain, i);
368
369 chained_irq_exit(chip, desc);
370 }
371
/*
 * Set up a port's interrupt infrastructure: map the summary interrupt
 * (taken from the controller node, indexed by port), create the
 * 32-entry per-port irq domain, install the chained handler, and
 * program the MSI doorbell/config registers.
 *
 * Returns 0 on success, -ENXIO if the summary interrupt cannot be
 * mapped, or -ENOMEM if the irq domain cannot be created.
 */
static int apple_pcie_port_setup_irq(struct apple_pcie_port *port)
{
	struct fwnode_handle *fwnode = &port->np->fwnode;
	unsigned int irq;

	/* FIXME: consider moving each interrupt under each port */
	irq = irq_of_parse_and_map(to_of_node(dev_fwnode(port->pcie->dev)),
				   port->idx);
	if (!irq)
		return -ENXIO;

	port->domain = irq_domain_create_linear(fwnode, 32,
						&apple_port_irq_domain_ops,
						port);
	if (!port->domain)
		return -ENOMEM;

	/* Disable all interrupts */
	writel_relaxed(~0, port->base + PORT_INTMSKSET);
	/* ... and clear any stale pending status (write-1-to-clear) */
	writel_relaxed(~0, port->base + PORT_INTSTAT);

	irq_set_chained_handler_and_data(irq, apple_port_irq_handler, port);

	/* Configure MSI base address */
	BUILD_BUG_ON(upper_32_bits(DOORBELL_ADDR));
	writel_relaxed(lower_32_bits(DOORBELL_ADDR), port->base + PORT_MSIADDR);

	/* Enable MSIs, shared between all ports */
	writel_relaxed(0, port->base + PORT_MSIBASE);
	writel_relaxed((ilog2(port->pcie->nvecs) << PORT_MSICFG_L2MSINUM_SHIFT) |
		       PORT_MSICFG_EN, port->base + PORT_MSICFG);

	return 0;
}
406
apple_pcie_port_irq(int irq,void * data)407 static irqreturn_t apple_pcie_port_irq(int irq, void *data)
408 {
409 struct apple_pcie_port *port = data;
410 unsigned int hwirq = irq_domain_get_irq_data(port->domain, irq)->hwirq;
411
412 switch (hwirq) {
413 case PORT_INT_LINK_UP:
414 dev_info_ratelimited(port->pcie->dev, "Link up on %pOF\n",
415 port->np);
416 complete_all(&port->pcie->event);
417 break;
418 case PORT_INT_LINK_DOWN:
419 dev_info_ratelimited(port->pcie->dev, "Link down on %pOF\n",
420 port->np);
421 break;
422 default:
423 return IRQ_NONE;
424 }
425
426 return IRQ_HANDLED;
427 }
428
apple_pcie_port_register_irqs(struct apple_pcie_port * port)429 static int apple_pcie_port_register_irqs(struct apple_pcie_port *port)
430 {
431 static struct {
432 unsigned int hwirq;
433 const char *name;
434 } port_irqs[] = {
435 { PORT_INT_LINK_UP, "Link up", },
436 { PORT_INT_LINK_DOWN, "Link down", },
437 };
438 int i;
439
440 for (i = 0; i < ARRAY_SIZE(port_irqs); i++) {
441 struct irq_fwspec fwspec = {
442 .fwnode = &port->np->fwnode,
443 .param_count = 1,
444 .param = {
445 [0] = port_irqs[i].hwirq,
446 },
447 };
448 unsigned int irq;
449 int ret;
450
451 irq = irq_domain_alloc_irqs(port->domain, 1, NUMA_NO_NODE,
452 &fwspec);
453 if (WARN_ON(!irq))
454 continue;
455
456 ret = request_irq(irq, apple_pcie_port_irq, 0,
457 port_irqs[i].name, port);
458 WARN_ON(ret);
459 }
460
461 return 0;
462 }
463
/*
 * Bring up the reference clock for a port: wait for the PHY refclk,
 * run the REFCLK0 request/ack handshake and REFCLK1 enable through
 * the lane config registers (under CFGACC), then enable the clock
 * output towards the port. Each poll times out after 50ms and the
 * first failure is returned to the caller.
 */
static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
				   struct apple_pcie_port *port)
{
	u32 stat;
	int res;

	/* Wait for the PHY to report its reference clock */
	res = readl_relaxed_poll_timeout(pcie->base + CORE_RC_PHYIF_STAT, stat,
					 stat & CORE_RC_PHYIF_STAT_REFCLK,
					 100, 50000);
	if (res < 0)
		return res;

	/* Open the lane config access window for this port */
	rmw_set(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));
	rmw_set(CORE_LANE_CFG_REFCLK0REQ, pcie->base + CORE_LANE_CFG(port->idx));

	/* Wait for the REFCLK0 request to be acknowledged */
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK0ACK,
					 100, 50000);
	if (res < 0)
		return res;

	rmw_set(CORE_LANE_CFG_REFCLK1, pcie->base + CORE_LANE_CFG(port->idx));
	res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
					 stat, stat & CORE_LANE_CFG_REFCLK1,
					 100, 50000);

	if (res < 0)
		return res;

	/* Handshake done, close the config access window again */
	rmw_clear(CORE_LANE_CTL_CFGACC, pcie->base + CORE_LANE_CTL(port->idx));

	rmw_set(CORE_LANE_CFG_REFCLKEN, pcie->base + CORE_LANE_CFG(port->idx));
	rmw_set(PORT_REFCLK_EN, port->base + PORT_REFCLK);

	return 0;
}
500
/*
 * Program RID2SID mapping entry @idx with @val and return the value
 * read back (which also serves to post the write).
 */
static u32 apple_pcie_rid2sid_write(struct apple_pcie_port *port,
				    int idx, u32 val)
{
	void __iomem *reg = port->base + PORT_RID2SID(idx);

	writel_relaxed(val, reg);
	/* Read back to ensure completion of the write */
	return readl_relaxed(reg);
}
508
apple_pcie_setup_port(struct apple_pcie * pcie,struct device_node * np)509 static int apple_pcie_setup_port(struct apple_pcie *pcie,
510 struct device_node *np)
511 {
512 struct platform_device *platform = to_platform_device(pcie->dev);
513 struct apple_pcie_port *port;
514 struct gpio_desc *reset;
515 u32 stat, idx;
516 int ret, i;
517
518 reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
519 GPIOD_OUT_LOW, "PERST#");
520 if (IS_ERR(reset))
521 return PTR_ERR(reset);
522
523 port = devm_kzalloc(pcie->dev, sizeof(*port), GFP_KERNEL);
524 if (!port)
525 return -ENOMEM;
526
527 ret = of_property_read_u32_index(np, "reg", 0, &idx);
528 if (ret)
529 return ret;
530
531 /* Use the first reg entry to work out the port index */
532 port->idx = idx >> 11;
533 port->pcie = pcie;
534 port->np = np;
535
536 port->base = devm_platform_ioremap_resource(platform, port->idx + 2);
537 if (IS_ERR(port->base))
538 return PTR_ERR(port->base);
539
540 rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
541
542 /* Assert PERST# before setting up the clock */
543 gpiod_set_value(reset, 1);
544
545 ret = apple_pcie_setup_refclk(pcie, port);
546 if (ret < 0)
547 return ret;
548
549 /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
550 usleep_range(100, 200);
551
552 /* Deassert PERST# */
553 rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
554 gpiod_set_value(reset, 0);
555
556 /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
557 msleep(100);
558
559 ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
560 stat & PORT_STATUS_READY, 100, 250000);
561 if (ret < 0) {
562 dev_err(pcie->dev, "port %pOF ready wait timeout\n", np);
563 return ret;
564 }
565
566 ret = apple_pcie_port_setup_irq(port);
567 if (ret)
568 return ret;
569
570 /* Reset all RID/SID mappings, and check for RAZ/WI registers */
571 for (i = 0; i < MAX_RID2SID; i++) {
572 if (apple_pcie_rid2sid_write(port, i, 0xbad1d) != 0xbad1d)
573 break;
574 apple_pcie_rid2sid_write(port, i, 0);
575 }
576
577 dev_dbg(pcie->dev, "%pOF: %d RID/SID mapping entries\n", np, i);
578
579 port->sid_map_sz = i;
580
581 list_add_tail(&port->entry, &pcie->ports);
582 init_completion(&pcie->event);
583
584 ret = apple_pcie_port_register_irqs(port);
585 WARN_ON(ret);
586
587 writel_relaxed(PORT_LTSSMCTL_START, port->base + PORT_LTSSMCTL);
588
589 if (!wait_for_completion_timeout(&pcie->event, HZ / 10))
590 dev_warn(pcie->dev, "%pOF link didn't come up\n", np);
591
592 return 0;
593 }
594
apple_msi_init(struct apple_pcie * pcie)595 static int apple_msi_init(struct apple_pcie *pcie)
596 {
597 struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
598 struct of_phandle_args args = {};
599 struct irq_domain *parent;
600 int ret;
601
602 ret = of_parse_phandle_with_args(to_of_node(fwnode), "msi-ranges",
603 "#interrupt-cells", 0, &args);
604 if (ret)
605 return ret;
606
607 ret = of_property_read_u32_index(to_of_node(fwnode), "msi-ranges",
608 args.args_count + 1, &pcie->nvecs);
609 if (ret)
610 return ret;
611
612 of_phandle_args_to_fwspec(args.np, args.args, args.args_count,
613 &pcie->fwspec);
614
615 pcie->bitmap = devm_bitmap_zalloc(pcie->dev, pcie->nvecs, GFP_KERNEL);
616 if (!pcie->bitmap)
617 return -ENOMEM;
618
619 parent = irq_find_matching_fwspec(&pcie->fwspec, DOMAIN_BUS_WIRED);
620 if (!parent) {
621 dev_err(pcie->dev, "failed to find parent domain\n");
622 return -ENXIO;
623 }
624
625 parent = irq_domain_create_hierarchy(parent, 0, pcie->nvecs, fwnode,
626 &apple_msi_domain_ops, pcie);
627 if (!parent) {
628 dev_err(pcie->dev, "failed to create IRQ domain\n");
629 return -ENOMEM;
630 }
631 irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
632
633 pcie->domain = pci_msi_create_irq_domain(fwnode, &apple_msi_info,
634 parent);
635 if (!pcie->domain) {
636 dev_err(pcie->dev, "failed to create MSI domain\n");
637 irq_domain_remove(parent);
638 return -ENOMEM;
639 }
640
641 return 0;
642 }
643
apple_pcie_get_port(struct pci_dev * pdev)644 static struct apple_pcie_port *apple_pcie_get_port(struct pci_dev *pdev)
645 {
646 struct pci_config_window *cfg = pdev->sysdata;
647 struct apple_pcie *pcie = cfg->priv;
648 struct pci_dev *port_pdev;
649 struct apple_pcie_port *port;
650
651 /* Find the root port this device is on */
652 port_pdev = pcie_find_root_port(pdev);
653
654 /* If finding the port itself, nothing to do */
655 if (WARN_ON(!port_pdev) || pdev == port_pdev)
656 return NULL;
657
658 list_for_each_entry(port, &pcie->ports, entry) {
659 if (port->idx == PCI_SLOT(port_pdev->devfn))
660 return port;
661 }
662
663 return NULL;
664 }
665
/*
 * Install a RID->SID translation for a newly added PCI device: look up
 * the device's stream ID via the "iommu-map" property and program it
 * into a free RID2SID entry of the port.
 *
 * Returns 0 on success, -ENOSPC if all RID2SID entries are in use, or
 * the of_map_id() error.
 */
static int apple_pcie_add_device(struct apple_pcie_port *port,
				 struct pci_dev *pdev)
{
	u32 sid, rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
	int idx, err;

	dev_dbg(&pdev->dev, "added to bus %s, index %d\n",
		pci_name(pdev->bus->self), port->idx);

	err = of_map_id(port->pcie->dev->of_node, rid, "iommu-map",
			"iommu-map-mask", NULL, &sid);
	if (err)
		return err;

	/* pcie->lock protects the sid_map bitmap and the RID2SID registers */
	mutex_lock(&port->pcie->lock);

	idx = bitmap_find_free_region(port->sid_map, port->sid_map_sz, 0);
	if (idx >= 0) {
		apple_pcie_rid2sid_write(port, idx,
					 PORT_RID2SID_VALID |
					 (sid << PORT_RID2SID_SID_SHIFT) | rid);

		dev_dbg(&pdev->dev, "mapping RID%x to SID%x (index %d)\n",
			rid, sid, idx);
	}

	mutex_unlock(&port->pcie->lock);

	return idx >= 0 ? 0 : -ENOSPC;
}
696
/*
 * Undo apple_pcie_add_device(): find the RID2SID entry whose RID field
 * (low 16 bits) matches the departing device, clear the hardware entry
 * and release its slot in the sid_map bitmap.
 */
static void apple_pcie_release_device(struct apple_pcie_port *port,
				      struct pci_dev *pdev)
{
	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
	int idx;

	mutex_lock(&port->pcie->lock);

	for_each_set_bit(idx, port->sid_map, port->sid_map_sz) {
		u32 val;

		val = readl_relaxed(port->base + PORT_RID2SID(idx));
		if ((val & 0xffff) == rid) {
			apple_pcie_rid2sid_write(port, idx, 0);
			bitmap_release_region(port->sid_map, idx, 0);
			dev_dbg(&pdev->dev, "Released %x (%d)\n", val, idx);
			break;
		}
	}

	mutex_unlock(&port->pcie->lock);
}
719
apple_pcie_bus_notifier(struct notifier_block * nb,unsigned long action,void * data)720 static int apple_pcie_bus_notifier(struct notifier_block *nb,
721 unsigned long action,
722 void *data)
723 {
724 struct device *dev = data;
725 struct pci_dev *pdev = to_pci_dev(dev);
726 struct apple_pcie_port *port;
727 int err;
728
729 /*
730 * This is a bit ugly. We assume that if we get notified for
731 * any PCI device, we must be in charge of it, and that there
732 * is no other PCI controller in the whole system. It probably
733 * holds for now, but who knows for how long?
734 */
735 port = apple_pcie_get_port(pdev);
736 if (!port)
737 return NOTIFY_DONE;
738
739 switch (action) {
740 case BUS_NOTIFY_ADD_DEVICE:
741 err = apple_pcie_add_device(port, pdev);
742 if (err)
743 return notifier_from_errno(err);
744 break;
745 case BUS_NOTIFY_DEL_DEVICE:
746 apple_pcie_release_device(port, pdev);
747 break;
748 default:
749 return NOTIFY_DONE;
750 }
751
752 return NOTIFY_OK;
753 }
754
/* Registered on the PCI bus type in apple_pcie_probe(). */
static struct notifier_block apple_pcie_nb = {
	.notifier_call = apple_pcie_bus_notifier,
};
758
/*
 * ECAM init hook: allocate the controller state, map the core register
 * block (platform resource 1; resource 0 is the ECAM window handled by
 * the ECAM core), bring up every port described in the device tree and
 * finally set up the MSI domains.
 */
static int apple_pcie_init(struct pci_config_window *cfg)
{
	struct device *dev = cfg->parent;
	struct platform_device *platform = to_platform_device(dev);
	struct device_node *of_port;
	struct apple_pcie *pcie;
	int ret;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = dev;

	mutex_init(&pcie->lock);

	pcie->base = devm_platform_ioremap_resource(platform, 1);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	cfg->priv = pcie;
	INIT_LIST_HEAD(&pcie->ports);

	for_each_child_of_node(dev->of_node, of_port) {
		ret = apple_pcie_setup_port(pcie, of_port);
		if (ret) {
			dev_err(pcie->dev, "Port %pOF setup fail: %d\n", of_port, ret);
			/* Drop the iterator's reference before bailing out */
			of_node_put(of_port);
			return ret;
		}
	}

	return apple_msi_init(pcie);
}
793
apple_pcie_probe(struct platform_device * pdev)794 static int apple_pcie_probe(struct platform_device *pdev)
795 {
796 int ret;
797
798 ret = bus_register_notifier(&pci_bus_type, &apple_pcie_nb);
799 if (ret)
800 return ret;
801
802 ret = pci_host_common_probe(pdev);
803 if (ret)
804 bus_unregister_notifier(&pci_bus_type, &apple_pcie_nb);
805
806 return ret;
807 }
808
/* ECAM ops: hardware is ECAM compliant, so generic accessors suffice. */
static const struct pci_ecam_ops apple_pcie_cfg_ecam_ops = {
	.init		= apple_pcie_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};
817
/* Device-tree match table; .data is consumed by pci_host_common_probe(). */
static const struct of_device_id apple_pcie_of_match[] = {
	{ .compatible = "apple,pcie", .data = &apple_pcie_cfg_ecam_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, apple_pcie_of_match);
823
/* No .remove: unbinding is suppressed, the notifier stays registered. */
static struct platform_driver apple_pcie_driver = {
	.probe		= apple_pcie_probe,
	.driver	= {
		.name			= "pcie-apple",
		.of_match_table		= apple_pcie_of_match,
		.suppress_bind_attrs	= true,
	},
};
module_platform_driver(apple_pcie_driver);
833
834 MODULE_LICENSE("GPL v2");
835