1 /*
2 * xen/arch/arm/gic-v2.c
3 *
4 * ARM Generic Interrupt Controller support v2
5 *
6 * Tim Deegan <tim@xen.org>
7 * Copyright (c) 2011 Citrix Systems.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20 #include <xen/lib.h>
21 #include <xen/init.h>
22 #include <xen/mm.h>
23 #include <xen/vmap.h>
24 #include <xen/irq.h>
25 #include <xen/iocap.h>
26 #include <xen/sched.h>
27 #include <xen/errno.h>
28 #include <xen/softirq.h>
29 #include <xen/list.h>
30 #include <xen/device_tree.h>
31 #include <xen/libfdt/libfdt.h>
32 #include <xen/sizes.h>
33 #include <xen/acpi.h>
34 #include <acpi/actables.h>
35 #include <asm/p2m.h>
36 #include <asm/domain.h>
37 #include <asm/platform.h>
38 #include <asm/device.h>
39
40 #include <asm/io.h>
41 #include <asm/gic.h>
42 #include <asm/acpi.h>
43
44 /*
45 * LR register definitions are GIC v2 specific.
46 * Moved these definitions from header file to here
47 */
48 #define GICH_V2_LR_VIRTUAL_MASK 0x3ff
49 #define GICH_V2_LR_VIRTUAL_SHIFT 0
50 #define GICH_V2_LR_PHYSICAL_MASK 0x3ff
51 #define GICH_V2_LR_PHYSICAL_SHIFT 10
52 #define GICH_V2_LR_STATE_MASK 0x3
53 #define GICH_V2_LR_STATE_SHIFT 28
54 #define GICH_V2_LR_PRIORITY_SHIFT 23
55 #define GICH_V2_LR_PRIORITY_MASK 0x1f
56 #define GICH_V2_LR_HW_SHIFT 31
57 #define GICH_V2_LR_HW_MASK 0x1
58 #define GICH_V2_LR_GRP_SHIFT 30
59 #define GICH_V2_LR_GRP_MASK 0x1
60 #define GICH_V2_LR_MAINTENANCE_IRQ (1<<19)
61 #define GICH_V2_LR_GRP1 (1<<30)
62 #define GICH_V2_LR_HW (1<<31)
63 #define GICH_V2_LR_CPUID_SHIFT 9
64 #define GICH_V2_VTR_NRLRGS 0x3f
65
66 #define GICH_V2_VMCR_PRIORITY_MASK 0x1f
67 #define GICH_V2_VMCR_PRIORITY_SHIFT 27
68
69 /* GICv2m extension register definitions. */
70 /*
71 * MSI_TYPER:
72 * [31:26] Reserved
73 * [25:16] lowest SPI assigned to MSI
74 * [15:10] Reserved
75 * [9:0] Number of SPIs assigned to MSI
76 */
77 #define V2M_MSI_TYPER 0x008
78 #define V2M_MSI_TYPER_BASE_SHIFT 16
79 #define V2M_MSI_TYPER_BASE_MASK 0x3FF
80 #define V2M_MSI_TYPER_NUM_MASK 0x3FF
81 #define V2M_MSI_SETSPI_NS 0x040
82 #define V2M_MIN_SPI 32
83 #define V2M_MAX_SPI 1019
84 #define V2M_MSI_IIDR 0xFCC
85
86 #define V2M_MSI_TYPER_BASE_SPI(x) \
87 (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)
88
89 #define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)
90
/*
 * Book-keeping for a single GICv2m register frame (MSI extension).
 * One entry per frame, linked on the global gicv2m_info list.
 */
struct v2m_data {
    struct list_head entry;
    /* Pointer to the DT node representing the v2m frame */
    const struct dt_device_node *dt_node;
    paddr_t addr; /* Register frame base */
    paddr_t size; /* Register frame size */
    u32 spi_start; /* The SPI number that MSIs start */
    u32 nr_spis; /* The number of SPIs for MSIs */
};
100
101 /* v2m extension register frame information list */
102 static LIST_HEAD(gicv2m_info);
103
104 /* Global state */
105 static struct {
106 void __iomem * map_dbase; /* IO mapped Address of distributor registers */
107 void __iomem * map_cbase; /* IO mapped Address of CPU interface registers */
108 void __iomem * map_hbase; /* IO Address of virtual interface registers */
109 spinlock_t lock;
110 } gicv2;
111
112 static struct gic_info gicv2_info;
113
114 /* The GIC mapping of CPU interfaces does not necessarily match the
115 * logical CPU numbering. Let's use mapping as returned by the GIC
116 * itself
117 */
118 static DEFINE_PER_CPU(u8, gic_cpu_id);
119
120 /* Maximum cpu interface per GIC */
121 #define NR_GIC_CPU_IF 8
122
/* Write an 8-bit value to the distributor (GICD) register at byte @offset. */
static inline void writeb_gicd(uint8_t val, unsigned int offset)
{
    writeb_relaxed(val, gicv2.map_dbase + offset);
}
127
/* Write a 32-bit value to the distributor (GICD) register at byte @offset. */
static inline void writel_gicd(uint32_t val, unsigned int offset)
{
    writel_relaxed(val, gicv2.map_dbase + offset);
}
132
/* Read the 32-bit distributor (GICD) register at byte @offset. */
static inline uint32_t readl_gicd(unsigned int offset)
{
    return readl_relaxed(gicv2.map_dbase + offset);
}
137
/* Write a 32-bit value to the CPU interface (GICC) register at byte @offset. */
static inline void writel_gicc(uint32_t val, unsigned int offset)
{
    writel_relaxed(val, gicv2.map_cbase + offset);
}
142
/* Read the 32-bit CPU interface (GICC) register at byte @offset. */
static inline uint32_t readl_gicc(unsigned int offset)
{
    return readl_relaxed(gicv2.map_cbase + offset);
}
147
/* Write a 32-bit value to the virtual interface (GICH) register at @offset. */
static inline void writel_gich(uint32_t val, unsigned int offset)
{
    writel_relaxed(val, gicv2.map_hbase + offset);
}
152
readl_gich(int unsigned offset)153 static inline uint32_t readl_gich(int unsigned offset)
154 {
155 return readl_relaxed(gicv2.map_hbase + offset);
156 }
157
gicv2_cpu_mask(const cpumask_t * cpumask)158 static unsigned int gicv2_cpu_mask(const cpumask_t *cpumask)
159 {
160 unsigned int cpu;
161 unsigned int mask = 0;
162 cpumask_t possible_mask;
163
164 cpumask_and(&possible_mask, cpumask, &cpu_possible_map);
165 for_each_cpu( cpu, &possible_mask )
166 {
167 ASSERT(cpu < NR_GIC_CPU_IF);
168 mask |= per_cpu(gic_cpu_id, cpu);
169 }
170
171 return mask;
172 }
173
/*
 * Save the virtual interface (GICH) context of vCPU @v on context switch
 * out: list registers, active priority and VMCR. The virtual interface is
 * then disabled until the next vCPU is scheduled in.
 */
static void gicv2_save_state(struct vcpu *v)
{
    int i;

    /* No need for spinlocks here because interrupts are disabled around
     * this call and it only accesses struct vcpu fields that cannot be
     * accessed simultaneously by another pCPU.
     */
    for ( i = 0; i < gicv2_info.nr_lrs; i++ )
        v->arch.gic.v2.lr[i] = readl_gich(GICH_LR + i * 4);

    v->arch.gic.v2.apr = readl_gich(GICH_APR);
    v->arch.gic.v2.vmcr = readl_gich(GICH_VMCR);
    /* Disable until next VCPU scheduled */
    writel_gich(0, GICH_HCR);
}
190
/*
 * Restore the virtual interface (GICH) context of vCPU @v on context
 * switch in, then re-enable the virtual interface (GICH_HCR_EN).
 */
static void gicv2_restore_state(const struct vcpu *v)
{
    int i;

    for ( i = 0; i < gicv2_info.nr_lrs; i++ )
        writel_gich(v->arch.gic.v2.lr[i], GICH_LR + i * 4);

    writel_gich(v->arch.gic.v2.apr, GICH_APR);
    writel_gich(v->arch.gic.v2.vmcr, GICH_VMCR);
    /* Enable last, after the LR/APR/VMCR state is in place. */
    writel_gich(GICH_HCR_EN, GICH_HCR);
}
202
/*
 * Debug dump of the list registers for vCPU @v: read them live from
 * hardware when @v is the currently running vCPU, otherwise print the
 * saved copies from the vCPU state.
 */
static void gicv2_dump_state(const struct vcpu *v)
{
    int i;

    if ( v == current )
    {
        for ( i = 0; i < gicv2_info.nr_lrs; i++ )
            printk("   HW_LR[%d]=%x\n", i,
                   readl_gich(GICH_LR + i * 4));
    }
    else
    {
        for ( i = 0; i < gicv2_info.nr_lrs; i++ )
            printk("   VCPU_LR[%d]=%x\n", i, v->arch.gic.v2.lr[i]);
    }
}
219
/* Signal end-of-interrupt for @irqd: drops the running priority only
 * (deactivation is separate, see gicv2_dir_irq). */
static void gicv2_eoi_irq(struct irq_desc *irqd)
{
    int irq = irqd->irq;
    /* Lower the priority */
    writel_gicc(irq, GICC_EOIR);
}
226
/* Deactivate @irqd via GICC_DIR (used with split priority-drop/deactivate). */
static void gicv2_dir_irq(struct irq_desc *irqd)
{
    /* Deactivate */
    writel_gicc(irqd->irq, GICC_DIR);
}
232
/* Acknowledge the highest priority pending interrupt and return its ID. */
static unsigned int gicv2_read_irq(void)
{
    return (readl_gicc(GICC_IAR) & GICC_IA_IRQ);
}
237
/*
 * Configure the trigger type (edge/level) of @desc's IRQ in GICD_ICFGR.
 * Some interrupts have a fixed type in hardware; the configuration is
 * read back and, if the write did not stick, desc->arch.type is updated
 * to reflect what the hardware actually enforces.
 */
static void gicv2_set_irq_type(struct irq_desc *desc, unsigned int type)
{
    uint32_t cfg, actual, edgebit;
    unsigned int irq = desc->irq;

    spin_lock(&gicv2.lock);
    /* Set edge / level: 2 config bits per interrupt, 16 per register. */
    cfg = readl_gicd(GICD_ICFGR + (irq / 16) * 4);
    edgebit = 2u << (2 * (irq % 16));
    if ( type & IRQ_TYPE_LEVEL_MASK )
        cfg &= ~edgebit;
    else if ( type & IRQ_TYPE_EDGE_BOTH )
        cfg |= edgebit;
    writel_gicd(cfg, GICD_ICFGR + (irq / 16) * 4);

    /* Read back: the edge bit may be RO for this interrupt. */
    actual = readl_gicd(GICD_ICFGR + (irq / 16) * 4);
    if ( ( cfg & edgebit ) ^ ( actual & edgebit ) )
    {
        printk(XENLOG_WARNING "GICv2: WARNING: "
               "CPU%d: Failed to configure IRQ%u as %s-triggered. "
               "H/w forces to %s-triggered.\n",
               smp_processor_id(), desc->irq,
               cfg & edgebit ? "Edge" : "Level",
               actual & edgebit ? "Edge" : "Level");
        desc->arch.type = actual & edgebit ?
            IRQ_TYPE_EDGE_RISING :
            IRQ_TYPE_LEVEL_HIGH;
    }

    spin_unlock(&gicv2.lock);
}
269
/* Set the hardware priority of @desc's IRQ (one byte per IRQ in
 * GICD_IPRIORITYR, so a byte write at offset irq suffices). */
static void gicv2_set_irq_priority(struct irq_desc *desc,
                                   unsigned int priority)
{
    unsigned int irq = desc->irq;

    spin_lock(&gicv2.lock);

    /* Set priority */
    writeb_gicd(priority, GICD_IPRIORITYR + irq);

    spin_unlock(&gicv2.lock);
}
282
/*
 * Boot-time initialisation of the GIC distributor: disable it, discover
 * the number of interrupt lines and CPU interfaces, then give all global
 * (SPI) interrupts a sane default configuration (level-triggered, routed
 * to the boot CPU, default priority, disabled) before re-enabling.
 */
static void __init gicv2_dist_init(void)
{
    uint32_t type;
    uint32_t cpumask;
    uint32_t gic_cpus;
    unsigned int nr_lines;
    int i;

    /* Replicate this CPU's interface mask into all four target bytes. */
    cpumask = readl_gicd(GICD_ITARGETSR) & 0xff;
    cpumask |= cpumask << 8;
    cpumask |= cpumask << 16;

    /* Disable the distributor */
    writel_gicd(0, GICD_CTLR);

    type = readl_gicd(GICD_TYPER);
    /* ITLinesNumber encodes (lines/32 - 1). */
    nr_lines = 32 * ((type & GICD_TYPE_LINES) + 1);
    gic_cpus = 1 + ((type & GICD_TYPE_CPUS) >> 5);
    printk("GICv2: %d lines, %d cpu%s%s (IID %8.8x).\n",
           nr_lines, gic_cpus, (gic_cpus == 1) ? "" : "s",
           (type & GICD_TYPE_SEC) ? ", secure" : "",
           readl_gicd(GICD_IIDR));

    /* Default all global IRQs to level, active low */
    for ( i = 32; i < nr_lines; i += 16 )
        writel_gicd(0x0, GICD_ICFGR + (i / 16) * 4);

    /* Route all global IRQs to this CPU */
    for ( i = 32; i < nr_lines; i += 4 )
        writel_gicd(cpumask, GICD_ITARGETSR + (i / 4) * 4);

    /* Default priority for global interrupts */
    for ( i = 32; i < nr_lines; i += 4 )
        writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
                    GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
                    GICD_IPRIORITYR + (i / 4) * 4);

    /* Disable all global interrupts */
    for ( i = 32; i < nr_lines; i += 32 )
        writel_gicd(~0x0, GICD_ICENABLER + (i / 32) * 4);

    /* Only 1020 interrupts are supported */
    gicv2_info.nr_lines = min(1020U, nr_lines);

    /* Turn on the distributor */
    writel_gicd(GICD_CTL_ENABLE, GICD_CTLR);
}
330
/*
 * Per-CPU GIC initialisation: record this CPU's interface ID, set up the
 * banked SGI/PPI configuration, and enable interrupt delivery through the
 * CPU interface.
 */
static void gicv2_cpu_init(void)
{
    int i;

    /* GICD_ITARGETSR0 is banked and reads back this CPU's interface mask. */
    this_cpu(gic_cpu_id) = readl_gicd(GICD_ITARGETSR) & 0xff;

    /* The first 32 interrupts (PPI and SGI) are banked per-cpu, so
     * even though they are controlled with GICD registers, they must
     * be set up here with the other per-cpu state. */
    writel_gicd(0xffff0000, GICD_ICENABLER); /* Disable all PPI */
    writel_gicd(0x0000ffff, GICD_ISENABLER); /* Enable all SGI */

    /* Set SGI priorities */
    for ( i = 0; i < 16; i += 4 )
        writel_gicd(GIC_PRI_IPI << 24 | GIC_PRI_IPI << 16 |
                    GIC_PRI_IPI << 8 | GIC_PRI_IPI,
                    GICD_IPRIORITYR + (i / 4) * 4);

    /* Set PPI priorities */
    for ( i = 16; i < 32; i += 4 )
        writel_gicd(GIC_PRI_IRQ << 24 | GIC_PRI_IRQ << 16 |
                    GIC_PRI_IRQ << 8 | GIC_PRI_IRQ,
                    GICD_IPRIORITYR + (i / 4) * 4);

    /* Local settings: interface controller */
    /* Don't mask by priority */
    writel_gicc(0xff, GICC_PMR);
    /* Finest granularity of priority */
    writel_gicc(0x0, GICC_BPR);
    /* Turn on delivery */
    writel_gicc(GICC_CTL_ENABLE|GICC_CTL_EOI, GICC_CTLR);
}
363
/* Disable interrupt delivery through this CPU's interface. */
static void gicv2_cpu_disable(void)
{
    writel_gicc(0x0, GICC_CTLR);
}
368
/* Discover the number of list registers from GICH_VTR (field encodes
 * count - 1) and record it for the LR save/restore paths. */
static void gicv2_hyp_init(void)
{
    uint32_t vtr;
    uint8_t nr_lrs;

    vtr = readl_gich(GICH_VTR);
    nr_lrs  = (vtr & GICH_V2_VTR_NRLRGS) + 1;
    gicv2_info.nr_lrs = nr_lrs;
}
378
/* Disable the virtual (hypervisor) interface on this CPU. */
static void gicv2_hyp_disable(void)
{
    writel_gich(0, GICH_HCR);
}
383
/* Bring up the GIC CPU and hypervisor interfaces on a secondary CPU.
 * Always returns 0. */
static int gicv2_secondary_cpu_init(void)
{
    spin_lock(&gicv2.lock);

    gicv2_cpu_init();
    gicv2_hyp_init();

    spin_unlock(&gicv2.lock);

    return 0;
}
395
/*
 * Send a software generated interrupt @sgi. @irqmode selects the target
 * filter: all-but-self, self, or the explicit CPU list in @cpu_mask
 * (restricted to online CPUs before being converted to a GIC target mask).
 */
static void gicv2_send_SGI(enum gic_sgi sgi, enum gic_sgi_mode irqmode,
                           const cpumask_t *cpu_mask)
{
    unsigned int mask = 0;
    cpumask_t online_mask;

    switch ( irqmode )
    {
    case SGI_TARGET_OTHERS:
        writel_gicd(GICD_SGI_TARGET_OTHERS | sgi, GICD_SGIR);
        break;
    case SGI_TARGET_SELF:
        writel_gicd(GICD_SGI_TARGET_SELF | sgi, GICD_SGIR);
        break;
    case SGI_TARGET_LIST:
        cpumask_and(&online_mask, cpu_mask, &cpu_online_map);
        mask = gicv2_cpu_mask(&online_mask);
        writel_gicd(GICD_SGI_TARGET_LIST |
                    (mask << GICD_SGI_TARGET_SHIFT) | sgi,
                    GICD_SGIR);
        break;
    default:
        BUG();
    }
}
421
/* Shut down the per-CPU GIC interface */
static void gicv2_disable_interface(void)
{
    spin_lock(&gicv2.lock);
    gicv2_cpu_disable();
    gicv2_hyp_disable();
    spin_unlock(&gicv2.lock);
}
430
gicv2_update_lr(int lr,const struct pending_irq * p,unsigned int state)431 static void gicv2_update_lr(int lr, const struct pending_irq *p,
432 unsigned int state)
433 {
434 uint32_t lr_reg;
435
436 BUG_ON(lr >= gicv2_info.nr_lrs);
437 BUG_ON(lr < 0);
438
439 lr_reg = (((state & GICH_V2_LR_STATE_MASK) << GICH_V2_LR_STATE_SHIFT) |
440 ((GIC_PRI_TO_GUEST(p->priority) & GICH_V2_LR_PRIORITY_MASK)
441 << GICH_V2_LR_PRIORITY_SHIFT) |
442 ((p->irq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT));
443
444 if ( p->desc != NULL )
445 lr_reg |= GICH_V2_LR_HW | ((p->desc->irq & GICH_V2_LR_PHYSICAL_MASK )
446 << GICH_V2_LR_PHYSICAL_SHIFT);
447
448 writel_gich(lr_reg, GICH_LR + lr * 4);
449 }
450
/* Clear (invalidate) list register @lr. */
static void gicv2_clear_lr(int lr)
{
    writel_gich(0, GICH_LR + lr * 4);
}
455
/* Read list register @lr and unpack its fields into @lr_reg. */
static void gicv2_read_lr(int lr, struct gic_lr *lr_reg)
{
    uint32_t lrv;

    lrv          = readl_gich(GICH_LR + lr * 4);
    lr_reg->pirq = (lrv >> GICH_V2_LR_PHYSICAL_SHIFT) & GICH_V2_LR_PHYSICAL_MASK;
    lr_reg->virq = (lrv >> GICH_V2_LR_VIRTUAL_SHIFT) & GICH_V2_LR_VIRTUAL_MASK;
    lr_reg->priority = (lrv >> GICH_V2_LR_PRIORITY_SHIFT) & GICH_V2_LR_PRIORITY_MASK;
    lr_reg->state     = (lrv >> GICH_V2_LR_STATE_SHIFT) & GICH_V2_LR_STATE_MASK;
    lr_reg->hw_status = (lrv >> GICH_V2_LR_HW_SHIFT) & GICH_V2_LR_HW_MASK;
    lr_reg->grp       = (lrv >> GICH_V2_LR_GRP_SHIFT) & GICH_V2_LR_GRP_MASK;
}
468
gicv2_write_lr(int lr,const struct gic_lr * lr_reg)469 static void gicv2_write_lr(int lr, const struct gic_lr *lr_reg)
470 {
471 uint32_t lrv = 0;
472
473 lrv = ( ((lr_reg->pirq & GICH_V2_LR_PHYSICAL_MASK) << GICH_V2_LR_PHYSICAL_SHIFT) |
474 ((lr_reg->virq & GICH_V2_LR_VIRTUAL_MASK) << GICH_V2_LR_VIRTUAL_SHIFT) |
475 ((uint32_t)(lr_reg->priority & GICH_V2_LR_PRIORITY_MASK)
476 << GICH_V2_LR_PRIORITY_SHIFT) |
477 ((uint32_t)(lr_reg->state & GICH_V2_LR_STATE_MASK)
478 << GICH_V2_LR_STATE_SHIFT) |
479 ((uint32_t)(lr_reg->hw_status & GICH_V2_LR_HW_MASK)
480 << GICH_V2_LR_HW_SHIFT) |
481 ((uint32_t)(lr_reg->grp & GICH_V2_LR_GRP_MASK) << GICH_V2_LR_GRP_SHIFT) );
482
483 writel_gich(lrv, GICH_LR + lr * 4);
484 }
485
/* Set (@status true) or clear (@status false) bit(s) @flag in GICH_HCR. */
static void gicv2_hcr_status(uint32_t flag, bool status)
{
    uint32_t hcr;

    hcr = readl_gich(GICH_HCR);
    hcr = status ? (hcr | flag) : (hcr & ~flag);
    writel_gich(hcr, GICH_HCR);
}
497
gicv2_read_vmcr_priority(void)498 static unsigned int gicv2_read_vmcr_priority(void)
499 {
500 return ((readl_gich(GICH_VMCR) >> GICH_V2_VMCR_PRIORITY_SHIFT)
501 & GICH_V2_VMCR_PRIORITY_MASK);
502 }
503
/* Return the active priorities register. @apr_reg is ignored: GICv2 has
 * a single GICH_APR (the multi-register parameter exists for the common
 * interface shared with GICv3). */
static unsigned int gicv2_read_apr(int apr_reg)
{
   return readl_gich(GICH_APR);
}
508
/*
 * Enable routing of @desc's IRQ. Caller must hold desc->lock; the
 * _IRQ_DISABLED flag is cleared before the hardware enable so that the
 * IRQ is never delivered while still marked disabled.
 */
static void gicv2_irq_enable(struct irq_desc *desc)
{
    unsigned long flags;
    int irq = desc->irq;

    ASSERT(spin_is_locked(&desc->lock));

    spin_lock_irqsave(&gicv2.lock, flags);
    clear_bit(_IRQ_DISABLED, &desc->status);
    /* Make the flag update visible before unmasking the IRQ in hardware. */
    dsb(sy);
    /* Enable routing */
    writel_gicd((1u << (irq % 32)), GICD_ISENABLER + (irq / 32) * 4);
    spin_unlock_irqrestore(&gicv2.lock, flags);
}
523
/* Disable routing of @desc's IRQ and mark it disabled.
 * Caller must hold desc->lock. */
static void gicv2_irq_disable(struct irq_desc *desc)
{
    unsigned long flags;
    int irq = desc->irq;

    ASSERT(spin_is_locked(&desc->lock));

    spin_lock_irqsave(&gicv2.lock, flags);
    /* Disable routing */
    writel_gicd(1u << (irq % 32), GICD_ICENABLER + (irq / 32) * 4);
    set_bit(_IRQ_DISABLED, &desc->status);
    spin_unlock_irqrestore(&gicv2.lock, flags);
}
537
/* hw_irq_controller .startup hook: just enable the IRQ. Returns 0. */
static unsigned int gicv2_irq_startup(struct irq_desc *desc)
{
    gicv2_irq_enable(desc);

    return 0;
}
544
/* hw_irq_controller .shutdown hook: just disable the IRQ. */
static void gicv2_irq_shutdown(struct irq_desc *desc)
{
    gicv2_irq_disable(desc);
}
549
/* hw_irq_controller .ack hook: nothing to do, see comment below. */
static void gicv2_irq_ack(struct irq_desc *desc)
{
    /* No ACK -- reading IAR has done this for us */
}
554
/* End-of-interrupt for Xen-handled IRQs: priority drop then deactivate. */
static void gicv2_host_irq_end(struct irq_desc *desc)
{
    /* Lower the priority */
    gicv2_eoi_irq(desc);
    /* Deactivate */
    gicv2_dir_irq(desc);
}
562
/* End-of-interrupt for guest-routed IRQs: only drop the priority here;
 * the guest's own EOI (via GICV) performs the deactivation. */
static void gicv2_guest_irq_end(struct irq_desc *desc)
{
    /* Lower the priority of the IRQ */
    gicv2_eoi_irq(desc);
    /* Deactivation happens in maintenance interrupt / via GICV */
}
569
/* Retarget @desc's IRQ to the CPUs in @cpu_mask (must be non-empty). */
static void gicv2_irq_set_affinity(struct irq_desc *desc, const cpumask_t *cpu_mask)
{
    unsigned int mask;

    ASSERT(!cpumask_empty(cpu_mask));

    spin_lock(&gicv2.lock);

    mask = gicv2_cpu_mask(cpu_mask);

    /* Set target CPU mask (RAZ/WI on uniprocessor) */
    writeb_gicd(mask, GICD_ITARGETSR + desc->irq);

    spin_unlock(&gicv2.lock);
}
585
/*
 * Map all discovered GICv2m frames into domain @d: the MMIO register
 * frame itself plus every MSI SPI of each frame (configured edge-rising,
 * routed to, and reserved for, the domain). Returns 0 on success or the
 * first error encountered. (Note: "hwdown" in the name is a historical
 * typo for "hwdom"; it is part of the interface and kept as-is.)
 */
static int gicv2_map_hwdown_extra_mappings(struct domain *d)
{
    const struct v2m_data *v2m_data;

    /* For the moment, we'll assign all v2m frames to the hardware domain. */
    list_for_each_entry( v2m_data, &gicv2m_info, entry )
    {
        int ret;
        u32 spi;

        printk("GICv2: Mapping v2m frame to d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n",
               d->domain_id, v2m_data->addr, v2m_data->size,
               v2m_data->spi_start, v2m_data->nr_spis);

        ret = map_mmio_regions(d, gaddr_to_gfn(v2m_data->addr),
                               PFN_UP(v2m_data->size),
                               maddr_to_mfn(v2m_data->addr));
        if ( ret )
        {
            printk(XENLOG_ERR "GICv2: Map v2m frame to d%d failed.\n",
                   d->domain_id);
            return ret;
        }

        /*
         * Map all SPIs that are allocated to MSIs for the frame to the
         * domain.
         */
        for ( spi = v2m_data->spi_start;
              spi < (v2m_data->spi_start + v2m_data->nr_spis); spi++ )
        {
            /*
             * MSIs are always edge-triggered. Configure the associated SPIs
             * to be edge-rising as default type.
             */
            ret = irq_set_spi_type(spi, IRQ_TYPE_EDGE_RISING);
            if ( ret )
            {
                printk(XENLOG_ERR
                       "GICv2: Failed to set v2m MSI SPI[%d] type.\n", spi);
                return ret;
            }

            /* Route a SPI that is allocated to MSI to the domain. */
            ret = route_irq_to_guest(d, spi, spi, "v2m");
            if ( ret )
            {
                printk(XENLOG_ERR
                       "GICv2: Failed to route v2m MSI SPI[%d] to Dom%d.\n",
                       spi, d->domain_id);
                return ret;
            }

            /* Reserve a SPI that is allocated to MSI for the domain. */
            if ( !vgic_reserve_virq(d, spi) )
            {
                printk(XENLOG_ERR
                       "GICv2: Failed to reserve v2m MSI SPI[%d] for Dom%d.\n",
                       spi, d->domain_id);
                return -EINVAL;
            }
        }
    }

    return 0;
}
652
653 /*
654 * Set up gic v2m DT sub-node.
655 * Please refer to the binding document:
656 * https://www.kernel.org/doc/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
657 */
gicv2m_make_dt_node(const struct domain * d,const struct dt_device_node * gic,void * fdt)658 static int gicv2m_make_dt_node(const struct domain *d,
659 const struct dt_device_node *gic,
660 void *fdt)
661 {
662 u32 len;
663 int res;
664 const void *prop = NULL;
665 const struct dt_device_node *v2m = NULL;
666 const struct v2m_data *v2m_data;
667
668 /* It is not necessary to create the node if there are not GICv2m frames */
669 if ( list_empty(&gicv2m_info) )
670 return 0;
671
672 /* The sub-nodes require the ranges property */
673 prop = dt_get_property(gic, "ranges", &len);
674 if ( !prop )
675 {
676 printk(XENLOG_ERR "Can't find ranges property for the gic node\n");
677 return -FDT_ERR_XEN(ENOENT);
678 }
679
680 res = fdt_property(fdt, "ranges", prop, len);
681 if ( res )
682 return res;
683
684 list_for_each_entry( v2m_data, &gicv2m_info, entry )
685 {
686 v2m = v2m_data->dt_node;
687
688 printk("GICv2: Creating v2m DT node for d%d: addr=0x%"PRIpaddr" size=0x%"PRIpaddr" spi_base=%u num_spis=%u\n",
689 d->domain_id, v2m_data->addr, v2m_data->size,
690 v2m_data->spi_start, v2m_data->nr_spis);
691
692 res = fdt_begin_node(fdt, v2m->name);
693 if ( res )
694 return res;
695
696 res = fdt_property_string(fdt, "compatible", "arm,gic-v2m-frame");
697 if ( res )
698 return res;
699
700 res = fdt_property(fdt, "msi-controller", NULL, 0);
701 if ( res )
702 return res;
703
704 if ( v2m->phandle )
705 {
706 res = fdt_property_cell(fdt, "phandle", v2m->phandle);
707 if ( res )
708 return res;
709 }
710
711 /* Use the same reg regions as v2m node in host DTB. */
712 prop = dt_get_property(v2m, "reg", &len);
713 if ( !prop )
714 {
715 printk(XENLOG_ERR "GICv2: Can't find v2m reg property.\n");
716 res = -FDT_ERR_XEN(ENOENT);
717 return res;
718 }
719
720 res = fdt_property(fdt, "reg", prop, len);
721 if ( res )
722 return res;
723
724 /*
725 * The properties msi-base-spi and msi-num-spis are used to override
726 * the hardware settings. Therefore it is fine to always write them
727 * in the guest DT.
728 */
729 res = fdt_property_u32(fdt, "arm,msi-base-spi", v2m_data->spi_start);
730 if ( res )
731 {
732 printk(XENLOG_ERR
733 "GICv2: Failed to create v2m msi-base-spi in Guest DT.\n");
734 return res;
735 }
736
737 res = fdt_property_u32(fdt, "arm,msi-num-spis", v2m_data->nr_spis);
738 if ( res )
739 {
740 printk(XENLOG_ERR
741 "GICv2: Failed to create v2m msi-num-spis in Guest DT.\n");
742 return res;
743 }
744
745 fdt_end_node(fdt);
746 }
747
748 return res;
749 }
750
/*
 * Build the hardware domain's GIC device-tree node: copy the host's
 * "compatible" string, expose only the first two reg entries (GICD and
 * GICC — the virtualisation regions stay with Xen), then append any
 * v2m sub-nodes. Returns 0 on success, a libfdt/-FDT_ERR_XEN code on
 * failure.
 */
static int gicv2_make_hwdom_dt_node(const struct domain *d,
                                    const struct dt_device_node *gic,
                                    void *fdt)
{
    const void *compatible = NULL;
    u32 len;
    const __be32 *regs;
    int res = 0;

    compatible = dt_get_property(gic, "compatible", &len);
    if ( !compatible )
    {
        dprintk(XENLOG_ERR, "Can't find compatible property for the gic node\n");
        return -FDT_ERR_XEN(ENOENT);
    }

    res = fdt_property(fdt, "compatible", compatible, len);
    if ( res )
        return res;

    /*
     * DTB provides up to 4 regions to handle virtualization
     * (in order GICD, GICC, GICH and GICV interfaces)
     * however dom0 just needs GICD and GICC provided by Xen.
     */
    regs = dt_get_property(gic, "reg", &len);
    if ( !regs )
    {
        dprintk(XENLOG_ERR, "Can't find reg property for the gic node\n");
        return -FDT_ERR_XEN(ENOENT);
    }

    /* Truncate "reg" to the first two (address, size) pairs: GICD + GICC. */
    len = dt_cells_to_size(dt_n_addr_cells(gic) + dt_n_size_cells(gic));
    len *= 2;

    res = fdt_property(fdt, "reg", regs, len);
    if ( res )
        return res;

    res = gicv2m_make_dt_node(d, gic, fdt);

    return res;
}
794
/* XXX different for level vs edge */
/* Controller callbacks for IRQs handled by Xen itself. */
static hw_irq_controller gicv2_host_irq_type = {
    .typename     = "gic-v2",
    .startup      = gicv2_irq_startup,
    .shutdown     = gicv2_irq_shutdown,
    .enable       = gicv2_irq_enable,
    .disable      = gicv2_irq_disable,
    .ack          = gicv2_irq_ack,
    .end          = gicv2_host_irq_end,
    .set_affinity = gicv2_irq_set_affinity,
};
806
/* Controller callbacks for IRQs routed to guests: identical to the host
 * variant except .end, which leaves deactivation to the guest's EOI. */
static hw_irq_controller gicv2_guest_irq_type = {
    .typename     = "gic-v2",
    .startup      = gicv2_irq_startup,
    .shutdown     = gicv2_irq_shutdown,
    .enable       = gicv2_irq_enable,
    .disable      = gicv2_irq_disable,
    .ack          = gicv2_irq_ack,
    .end          = gicv2_guest_irq_end,
    .set_affinity = gicv2_irq_set_affinity,
};
817
gicv2_is_aliased(paddr_t cbase,paddr_t csize)818 static bool gicv2_is_aliased(paddr_t cbase, paddr_t csize)
819 {
820 uint32_t val_low, val_high;
821
822 if ( csize != SZ_128K )
823 return false;
824
825 /*
826 * Verify that we have the first 4kB of a GIC400
827 * aliased over the first 64kB by checking the
828 * GICC_IIDR register on both ends.
829 */
830 val_low = readl_gicc(GICC_IIDR);
831 val_high = readl_gicc(GICC_IIDR + 0xf000);
832
833 return ((val_low & 0xfff0fff) == 0x0202043B && val_low == val_high);
834 }
835
/*
 * Record a GICv2m frame at (@addr, @size) on the global gicv2m_info list.
 * If @spi_start/@nr_spis are zero (no DT/ACPI override) the values are
 * read from the frame's MSI_TYPER register. Panics on invalid SPI ranges
 * or allocation failure — this runs at boot, before domains exist.
 * @v2m may be NULL when there is no DT node (e.g. ACPI boot).
 */
static void gicv2_add_v2m_frame_to_list(paddr_t addr, paddr_t size,
                                        u32 spi_start, u32 nr_spis,
                                        const struct dt_device_node *v2m)
{
    struct v2m_data *v2m_data;

    /*
     * If the hardware setting hasn't been overridden by DT or ACPI, we have
     * to read base_spi and num_spis from hardware registers to reserve irqs.
     */
    if ( !spi_start || !nr_spis )
    {
        u32 msi_typer;
        void __iomem *base;

        base = ioremap_nocache(addr, size);
        if ( !base )
            panic("GICv2: Cannot remap v2m register frame");

        msi_typer = readl_relaxed(base + V2M_MSI_TYPER);
        spi_start = V2M_MSI_TYPER_BASE_SPI(msi_typer);
        nr_spis = V2M_MSI_TYPER_NUM_SPI(msi_typer);

        iounmap(base);
    }

    if ( spi_start < V2M_MIN_SPI )
        panic("GICv2: Invalid v2m base SPI:%u\n", spi_start);

    if ( ( nr_spis == 0 ) || ( spi_start + nr_spis > V2M_MAX_SPI ) )
        panic("GICv2: Number of v2m SPIs (%u) exceed maximum (%u)\n",
              nr_spis, V2M_MAX_SPI - V2M_MIN_SPI + 1);

    /* Allocate an entry to record new v2m frame information. */
    v2m_data = xzalloc_bytes(sizeof(struct v2m_data));
    if ( !v2m_data )
        panic("GICv2: Cannot allocate memory for v2m frame");

    INIT_LIST_HEAD(&v2m_data->entry);
    v2m_data->addr = addr;
    v2m_data->size = size;
    v2m_data->spi_start = spi_start;
    v2m_data->nr_spis = nr_spis;
    v2m_data->dt_node = v2m;

    printk("GICv2m extension register frame:\n"
           "        gic_v2m_addr=%"PRIpaddr"\n"
           "        gic_v2m_size=%"PRIpaddr"\n"
           "        gic_v2m_spi_base=%u\n"
           "        gic_v2m_num_spis=%u\n",
           v2m_data->addr, v2m_data->size,
           v2m_data->spi_start, v2m_data->nr_spis);

    list_add_tail(&v2m_data->entry, &gicv2m_info);
}
891
/*
 * Scan the GIC DT node's children for "arm,gic-v2m-frame" sub-nodes and
 * register each frame found. DT may optionally supply msi-base-spi /
 * msi-num-spis to override the frame's MSI_TYPER hardware values.
 */
static void gicv2_extension_dt_init(const struct dt_device_node *node)
{
    const struct dt_device_node *v2m = NULL;

    /*
     * Check whether this GIC implements the v2m extension. If so,
     * add v2m register frames to gicv2m_info.
     */
    dt_for_each_child_node(node, v2m)
    {
        u32 spi_start = 0, nr_spis = 0;
        paddr_t addr, size;

        if ( !dt_device_is_compatible(v2m, "arm,gic-v2m-frame") )
            continue;

        /* Get register frame resource from DT. */
        if ( dt_device_get_address(v2m, 0, &addr, &size) )
            panic("GICv2: Cannot find a valid v2m frame address");

        /*
         * Check whether DT uses msi-base-spi and msi-num-spis properties to
         * override the hardware setting.
         */
        if ( dt_property_read_u32(v2m, "arm,msi-base-spi", &spi_start) &&
             dt_property_read_u32(v2m, "arm,msi-num-spis", &nr_spis) )
            printk("GICv2: DT overriding v2m hardware setting (base:%u, num:%u)\n",
                   spi_start, nr_spis);

        /* Add this v2m frame information to list. */
        gicv2_add_v2m_frame_to_list(addr, size, spi_start, nr_spis, v2m);
    }
}
925
926 static paddr_t __initdata hbase, dbase, cbase, csize, vbase;
927
/*
 * Parse the GIC addresses (GICD, GICC, GICH, GICV) and the maintenance
 * IRQ from the device tree, applying fixups for DTs that advertise an
 * undersized (4kB, GICv1-style) CPU interface. Panics on any missing or
 * inconsistent resource — the GIC is essential.
 */
static void __init gicv2_dt_init(void)
{
    int res;
    paddr_t vsize;
    const struct dt_device_node *node = gicv2_info.node;

    res = dt_device_get_address(node, 0, &dbase, NULL);
    if ( res )
        panic("GICv2: Cannot find a valid address for the distributor");

    res = dt_device_get_address(node, 1, &cbase, &csize);
    if ( res )
        panic("GICv2: Cannot find a valid address for the CPU");

    res = dt_device_get_address(node, 2, &hbase, NULL);
    if ( res )
        panic("GICv2: Cannot find a valid address for the hypervisor");

    res = dt_device_get_address(node, 3, &vbase, &vsize);
    if ( res )
        panic("GICv2: Cannot find a valid address for the virtual CPU");

    res = platform_get_irq(node, 0);
    if ( res < 0 )
        panic("GICv2: Cannot find the maintenance IRQ");
    gicv2_info.maintenance_irq = res;

    /* TODO: Add check on distributor */

    /*
     * The GICv2 CPU interface should at least be 8KB. Although, most of the DT
     * don't correctly set it and use the GICv1 CPU interface size (i.e 4KB).
     * Warn and then fixup.
     */
    if ( csize < SZ_8K )
    {
        printk(XENLOG_WARNING "GICv2: WARNING: "
               "The GICC size is too small: %#"PRIx64" expected %#x\n",
               csize, SZ_8K);
        if ( platform_has_quirk(PLATFORM_QUIRK_GIC_64K_STRIDE) )
        {
            printk(XENLOG_WARNING "GICv2: enable platform quirk: 64K stride\n");
            vsize = csize = SZ_128K;
        } else
            csize = SZ_8K;
    }

    /*
     * Check if the CPU interface and virtual CPU interface have the
     * same size.
     */
    if ( csize != vsize )
        panic("GICv2: Sizes of GICC (%#"PRIpaddr") and GICV (%#"PRIpaddr") don't match\n",
               csize, vsize);

    /*
     * Check whether this GIC implements the v2m extension. If so,
     * add v2m register frames to gicv2m_info.
     */
    gicv2_extension_dt_init(node);
}
989
/*
 * Forbid domain @d direct MMIO access to the GIC register frames:
 * one page each for GICD and GICH, and the full csize range for GICC
 * and GICV (the two interfaces are required to be the same size, see
 * gicv2_dt_init). Returns 0 or the first iomem_deny_access() error.
 */
static int gicv2_iomem_deny_access(const struct domain *d)
{
    int rc;
    unsigned long mfn, nr;

    mfn = dbase >> PAGE_SHIFT;
    rc = iomem_deny_access(d, mfn, mfn + 1);
    if ( rc )
        return rc;

    mfn = hbase >> PAGE_SHIFT;
    rc = iomem_deny_access(d, mfn, mfn + 1);
    if ( rc )
        return rc;

    mfn = cbase >> PAGE_SHIFT;
    nr = DIV_ROUND_UP(csize, PAGE_SIZE);
    rc = iomem_deny_access(d, mfn, mfn + nr);
    if ( rc )
        return rc;

    /* GICV spans the same number of pages as GICC. */
    mfn = vbase >> PAGE_SHIFT;
    return iomem_deny_access(d, mfn, mfn + nr);
}
1014
/* GICv2 adds no extra MADT entries for the hardware domain. */
static unsigned long gicv2_get_hwdom_extra_madt_size(const struct domain *d)
{
    return 0;
}
1019
1020 #ifdef CONFIG_ACPI
/*
 * Emit one MADT GICC (Generic Interrupt) entry per vCPU of domain @d at
 * @offset into its EFI ACPI table, cloned from the host's first GICC
 * entry with per-vCPU ids/MPIDR and the virtualisation fields zeroed
 * (the guest sees no GICH/GICV). Returns the number of bytes written,
 * or -EINVAL if the host MADT lacks a GICC entry.
 */
static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset)
{
    struct acpi_subtable_header *header;
    struct acpi_madt_generic_interrupt *host_gicc, *gicc;
    u32 i, size, table_len = 0;
    u8 *base_ptr = d->arch.efi_acpi_table + offset;

    header = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
    if ( !header )
    {
        printk("Can't get GICC entry");
        return -EINVAL;
    }

    host_gicc = container_of(header, struct acpi_madt_generic_interrupt,
                             header);
    size = sizeof(struct acpi_madt_generic_interrupt);
    /* Add Generic Interrupt */
    for ( i = 0; i < d->max_vcpus; i++ )
    {
        gicc = (struct acpi_madt_generic_interrupt *)(base_ptr + table_len);
        memcpy(gicc, host_gicc, size);
        gicc->cpu_interface_number = i;
        gicc->uid = i;
        gicc->flags = ACPI_MADT_ENABLED;
        gicc->arm_mpidr = vcpuid_to_vaffinity(i);
        /* Strip host-only / virtualisation-related fields. */
        gicc->parking_version = 0;
        gicc->performance_interrupt = 0;
        gicc->gicv_base_address = 0;
        gicc->gich_base_address = 0;
        gicc->vgic_interrupt = 0;
        table_len += size;
    }

    return table_len;
}
1057
/*
 * MADT GICC entry callback, invoked once per Generic Interrupt entry.
 *
 * The first valid entry supplies the host CPU interface, hypervisor
 * interface and virtual interface base addresses plus the maintenance
 * interrupt; GICv2 banks these frames per-CPU at a single physical
 * address, so every later entry must report identical values or the
 * table is rejected with -EINVAL.
 */
static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
                        const unsigned long end)
{
    /* Set once the first valid GICC entry has populated the globals. */
    static int cpu_base_assigned = 0;
    struct acpi_madt_generic_interrupt *processor =
        container_of(header, struct acpi_madt_generic_interrupt, header);

    if ( BAD_MADT_ENTRY(processor, end) )
        return -EINVAL;

    /* Read from APIC table and fill up the GIC variables */
    if ( cpu_base_assigned == 0 )
    {
        cbase = processor->base_address;
        /* The MADT carries no GICC size; assume the 8KB frame (covers
         * GICC_DIR) — matches the csize handling in gicv2_init(). */
        csize = SZ_8K;
        hbase = processor->gich_base_address;
        vbase = processor->gicv_base_address;
        gicv2_info.maintenance_irq = processor->vgic_interrupt;

        /* Program the maintenance IRQ trigger mode declared by firmware. */
        if ( processor->flags & ACPI_MADT_VGIC_IRQ_MODE )
            irq_set_type(gicv2_info.maintenance_irq, IRQ_TYPE_EDGE_BOTH);
        else
            irq_set_type(gicv2_info.maintenance_irq, IRQ_TYPE_LEVEL_MASK);

        cpu_base_assigned = 1;
    }
    else
    {
        /* Subsequent entries must agree with the first one exactly. */
        if ( cbase != processor->base_address
             || hbase != processor->gich_base_address
             || vbase != processor->gicv_base_address
             || gicv2_info.maintenance_irq != processor->vgic_interrupt )
        {
            printk("GICv2: GICC entries are not same in MADT table\n");
            return -EINVAL;
        }
    }

    return 0;
}
1099
1100 static int __init
gic_acpi_parse_madt_distributor(struct acpi_subtable_header * header,const unsigned long end)1101 gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
1102 const unsigned long end)
1103 {
1104 struct acpi_madt_generic_distributor *dist =
1105 container_of(header, struct acpi_madt_generic_distributor, header);
1106
1107 if ( BAD_MADT_ENTRY(dist, end) )
1108 return -EINVAL;
1109
1110 dbase = dist->base_address;
1111
1112 return 0;
1113 }
1114
gicv2_acpi_init(void)1115 static void __init gicv2_acpi_init(void)
1116 {
1117 acpi_status status;
1118 struct acpi_table_header *table;
1119 int count;
1120
1121 status = acpi_get_table(ACPI_SIG_MADT, 0, &table);
1122
1123 if ( ACPI_FAILURE(status) )
1124 {
1125 const char *msg = acpi_format_exception(status);
1126
1127 panic("GICv2: Failed to get MADT table, %s", msg);
1128 }
1129
1130 /* Collect CPU base addresses */
1131 count = acpi_parse_entries(ACPI_SIG_MADT, sizeof(struct acpi_table_madt),
1132 gic_acpi_parse_madt_cpu, table,
1133 ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
1134 if ( count <= 0 )
1135 panic("GICv2: No valid GICC entries exists");
1136
1137 /*
1138 * Find distributor base address. We expect one distributor entry since
1139 * ACPI 5.0 spec neither support multi-GIC instances nor GIC cascade.
1140 */
1141 count = acpi_parse_entries(ACPI_SIG_MADT, sizeof(struct acpi_table_madt),
1142 gic_acpi_parse_madt_distributor, table,
1143 ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
1144 if ( count <= 0 )
1145 panic("GICv2: No valid GICD entries exists");
1146 }
1147 #else
/* !CONFIG_ACPI stubs: probing is DT-only and no MADT is built, so the
 * hardware domain gets zero extra MADT bytes. */
static void __init gicv2_acpi_init(void) { }
static int gicv2_make_hwdom_madt(const struct domain *d, u32 offset)
{
    return 0;
}
1153 #endif
1154
/*
 * One-time boot-CPU initialisation of the host GICv2: discover the
 * register frame addresses (DT or ACPI), map them, hand the layout to
 * the vGICv2 emulation, then program the distributor, CPU interface
 * and hypervisor interface.
 */
static int __init gicv2_init(void)
{
    uint32_t aliased_offset = 0;

    /* Populate dbase/cbase/csize/hbase/vbase and the maintenance IRQ. */
    if ( acpi_disabled )
        gicv2_dt_init();
    else
        gicv2_acpi_init();

    printk("GICv2 initialization:\n"
           "        gic_dist_addr=%"PRIpaddr"\n"
           "        gic_cpu_addr=%"PRIpaddr"\n"
           "        gic_hyp_addr=%"PRIpaddr"\n"
           "        gic_vcpu_addr=%"PRIpaddr"\n"
           "        gic_maintenance_irq=%u\n",
           dbase, cbase, hbase, vbase,
           gicv2_info.maintenance_irq);

    /* The mappings below (and the vGICv2) need page-granular frames. */
    if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) ||
         (hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) )
        panic("GICv2 interfaces not page aligned");

    gicv2.map_dbase = ioremap_nocache(dbase, PAGE_SIZE);
    if ( !gicv2.map_dbase )
        panic("GICv2: Failed to ioremap for GIC distributor\n");

    gicv2.map_cbase = ioremap_nocache(cbase, csize);
    if ( !gicv2.map_cbase )
        panic("GICv2: Failed to ioremap for GIC CPU interface\n");

    if ( gicv2_is_aliased(cbase, csize) )
    {
        /*
         * Move the base up by 60kB, so that we have a 8kB contiguous
         * region, which allows us to use GICC_DIR at its
         * normal offset.
         * Note the variable cbase is not updated as we need the original
         * value for the vGICv2 emulation.
         */
        aliased_offset = 0xf000;

        gicv2.map_cbase += aliased_offset;

        printk(XENLOG_WARNING
               "GICv2: Adjusting CPU interface base to %#"PRIx64"\n",
               cbase + aliased_offset);
    } else if ( csize == SZ_128K )
        printk(XENLOG_WARNING
               "GICv2: GICC size=%#"PRIx64" but not aliased\n",
               csize);

    gicv2.map_hbase = ioremap_nocache(hbase, PAGE_SIZE);
    if ( !gicv2.map_hbase )
        panic("GICv2: Failed to ioremap for GIC Virtual interface\n");

    /* Hand the physical layout (including any alias fixup) to the vGIC. */
    vgic_v2_setup_hw(dbase, cbase, csize, vbase, aliased_offset);

    /* Global settings: interrupt distributor */
    spin_lock_init(&gicv2.lock);
    spin_lock(&gicv2.lock);

    gicv2_dist_init();
    gicv2_cpu_init();
    gicv2_hyp_init();

    spin_unlock(&gicv2.lock);

    return 0;
}
1224
/* LPI handler stub: LPIs only exist from GICv3/ITS onwards, so reaching
 * this on GICv2 hardware indicates a driver bug. */
static void gicv2_do_LPI(unsigned int lpi)
{
    /* No LPIs in a GICv2 */
    BUG();
}
1230
1231 const static struct gic_hw_operations gicv2_ops = {
1232 .info = &gicv2_info,
1233 .init = gicv2_init,
1234 .secondary_init = gicv2_secondary_cpu_init,
1235 .save_state = gicv2_save_state,
1236 .restore_state = gicv2_restore_state,
1237 .dump_state = gicv2_dump_state,
1238 .gic_host_irq_type = &gicv2_host_irq_type,
1239 .gic_guest_irq_type = &gicv2_guest_irq_type,
1240 .eoi_irq = gicv2_eoi_irq,
1241 .deactivate_irq = gicv2_dir_irq,
1242 .read_irq = gicv2_read_irq,
1243 .set_irq_type = gicv2_set_irq_type,
1244 .set_irq_priority = gicv2_set_irq_priority,
1245 .send_SGI = gicv2_send_SGI,
1246 .disable_interface = gicv2_disable_interface,
1247 .update_lr = gicv2_update_lr,
1248 .update_hcr_status = gicv2_hcr_status,
1249 .clear_lr = gicv2_clear_lr,
1250 .read_lr = gicv2_read_lr,
1251 .write_lr = gicv2_write_lr,
1252 .read_vmcr_priority = gicv2_read_vmcr_priority,
1253 .read_apr = gicv2_read_apr,
1254 .make_hwdom_dt_node = gicv2_make_hwdom_dt_node,
1255 .make_hwdom_madt = gicv2_make_hwdom_madt,
1256 .get_hwdom_extra_madt_size = gicv2_get_hwdom_extra_madt_size,
1257 .map_hwdom_extra_mappings = gicv2_map_hwdown_extra_mappings,
1258 .iomem_deny_access = gicv2_iomem_deny_access,
1259 .do_LPI = gicv2_do_LPI,
1260 };
1261
1262 /* Set up the GIC */
/* DT probe hook: record the driver version and DT node, plug the GICv2
 * ops into the common GIC layer, and route DT interrupt specifier
 * translation through the generic helper.  gicv2_init() does the real
 * hardware setup later. */
static int __init gicv2_dt_preinit(struct dt_device_node *node,
                                   const void *data)
{
    gicv2_info.hw_version = GIC_V2;
    gicv2_info.node = node;
    register_gic_ops(&gicv2_ops);
    dt_irq_xlate = gic_irq_xlate;

    return 0;
}
1273
/* Device-tree compatible strings claimed by this driver. */
static const struct dt_device_match gicv2_dt_match[] __initconst =
{
    DT_MATCH_GIC_V2,
    { /* sentinel */ },
};

/* Register the driver with the DT device framework. */
DT_DEVICE_START(gicv2, "GICv2", DEVICE_GIC)
        .dt_match = gicv2_dt_match,
        .init = gicv2_dt_preinit,
DT_DEVICE_END
1284
#ifdef CONFIG_ACPI
/* Set up the GIC */
/* ACPI probe hook: as gicv2_dt_preinit(), but there is no DT node to
 * record — the register frames come from the MADT instead. */
static int __init gicv2_acpi_preinit(const void *data)
{
    gicv2_info.hw_version = GIC_V2;
    register_gic_ops(&gicv2_ops);

    return 0;
}

/* Match MADT-declared GICv2 controllers to this driver. */
ACPI_DEVICE_START(agicv2, "GICv2", DEVICE_GIC)
        .class_type = ACPI_MADT_GIC_VERSION_V2,
        .init = gicv2_acpi_preinit,
ACPI_DEVICE_END
#endif
1300 /*
1301 * Local variables:
1302 * mode: C
1303 * c-file-style: "BSD"
1304 * c-basic-offset: 4
1305 * indent-tabs-mode: nil
1306 * End:
1307 */
1308