1 /*
2  * xen/arch/arm/gic.c
3  *
4  * ARM Generic Interrupt Controller support
5  *
6  * Tim Deegan <tim@xen.org>
7  * Copyright (c) 2011 Citrix Systems.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19 
20 #include <xen/lib.h>
21 #include <xen/init.h>
22 #include <xen/mm.h>
23 #include <xen/irq.h>
24 #include <xen/sched.h>
25 #include <xen/errno.h>
26 #include <xen/softirq.h>
27 #include <xen/list.h>
28 #include <xen/device_tree.h>
29 #include <xen/acpi.h>
30 #include <xen/cpu.h>
31 #include <xen/notifier.h>
32 #include <asm/p2m.h>
33 #include <asm/domain.h>
34 #include <asm/platform.h>
35 #include <asm/device.h>
36 #include <asm/io.h>
37 #include <asm/gic.h>
38 #include <asm/vgic.h>
39 #include <asm/acpi.h>
40 
/*
 * Per-pCPU mask of GIC list registers (LRs); saved into / restored from
 * v->arch.lr_mask on context switch.  Presumably one bit per LR marking
 * it as in use -- the LR allocator lives in the vGIC code, not here.
 */
DEFINE_PER_CPU(uint64_t, lr_mask);

#undef GIC_DEBUG

/* Operations table of the registered hardware GIC driver (GICv2/GICv3). */
const struct gic_hw_operations *gic_hw_ops;
46 
/*
 * Compile-time sanity checks only; never called at runtime
 * (__maybe_unused silences the unused-function warning).
 */
static void __init __maybe_unused build_assertions(void)
{
    /* Check our enum gic_sgi only covers SGIs */
    BUILD_BUG_ON(GIC_SGI_MAX > NR_GIC_SGI);
}
52 
/*
 * Install the hardware-specific operations table backing all the gic_*
 * wrappers in this file.  Must happen before any wrapper dereferences
 * gic_hw_ops.
 */
void register_gic_ops(const struct gic_hw_operations *ops)
{
    gic_hw_ops = ops;
}
57 
/* Reset the current pCPU's LR mask, i.e. mark all list registers free. */
static void clear_cpu_lr_mask(void)
{
    this_cpu(lr_mask) = 0ULL;
}
62 
gic_hw_version(void)63 enum gic_version gic_hw_version(void)
64 {
65    return gic_hw_ops->info->hw_version;
66 }
67 
/* Number of interrupt lines supported by the hardware GIC. */
unsigned int gic_number_lines(void)
{
    return gic_hw_ops->info->nr_lines;
}
72 
/*
 * Save the pCPU's GIC virtual-interface state into @v when switching it
 * out.  Interrupts must be disabled; never called for the idle vCPU.
 */
void gic_save_state(struct vcpu *v)
{
    ASSERT(!local_irq_is_enabled());
    ASSERT(!is_idle_vcpu(v));

    /* No need for spinlocks here because interrupts are disabled around
     * this call and it only accesses struct vcpu fields that cannot be
     * accessed simultaneously by another pCPU.
     */
    v->arch.lr_mask = this_cpu(lr_mask);
    gic_hw_ops->save_state(v);
    /* Synchronize the context before the switch continues. */
    isb();
}
86 
/*
 * Restore @v's GIC virtual-interface state on the current pCPU when
 * switching it in.  Mirror of gic_save_state(); same locking rules.
 */
void gic_restore_state(struct vcpu *v)
{
    ASSERT(!local_irq_is_enabled());
    ASSERT(!is_idle_vcpu(v));

    this_cpu(lr_mask) = v->arch.lr_mask;
    gic_hw_ops->restore_state(v);

    /* Ensure the restored state is visible before resuming the vCPU. */
    isb();
}
97 
/*
 * Program the trigger type (level/edge) of an interrupt.
 *
 * desc->irq needs to be disabled before calling this function and
 * desc->lock must be held.
 */
void gic_set_irq_type(struct irq_desc *desc, unsigned int type)
{
    /*
     * IRQ must be disabled before configuring it (see 4.3.13 in ARM IHI
     * 0048B.b). We rely on the caller to do it.
     */
    ASSERT(test_bit(_IRQ_DISABLED, &desc->status));
    ASSERT(spin_is_locked(&desc->lock));
    ASSERT(type != IRQ_TYPE_INVALID);

    gic_hw_ops->set_irq_type(desc, type);
}
111 
/* Program the hardware priority of an interrupt via the GIC driver. */
static void gic_set_irq_priority(struct irq_desc *desc, unsigned int priority)
{
    gic_hw_ops->set_irq_priority(desc, priority);
}
116 
/* Program the GIC to route an interrupt to the host (i.e. Xen)
 * - needs to be called with desc.lock held
 * - the IRQ must be disabled (asserted below) so the type can be set
 */
void gic_route_irq_to_xen(struct irq_desc *desc, unsigned int priority)
{
    ASSERT(priority <= 0xff);     /* Only 8 bits of priority */
    ASSERT(desc->irq < gic_number_lines());/* Can't route interrupts that don't exist */
    ASSERT(test_bit(_IRQ_DISABLED, &desc->status));
    ASSERT(spin_is_locked(&desc->lock));

    /* Xen handles this interrupt itself from now on. */
    desc->handler = gic_hw_ops->gic_host_irq_type;

    gic_set_irq_type(desc, desc->arch.type);
    gic_set_irq_priority(desc, priority);
}
132 
/* Program the GIC to route an interrupt to a guest
 *   - desc.lock must be held
 *
 * @d:        target domain
 * @virq:     virtual SPI number the guest will see
 * @desc:     descriptor of the physical interrupt
 * @priority: hardware priority to program
 *
 * Returns 0 on success, -EBUSY if the domain has already finished
 * creation, or the error from vgic_connect_hw_irq().
 */
int gic_route_irq_to_guest(struct domain *d, unsigned int virq,
                           struct irq_desc *desc, unsigned int priority)
{
    int ret;

    ASSERT(spin_is_locked(&desc->lock));
    /* Caller has already checked that the IRQ is an SPI */
    ASSERT(virq >= 32);
    ASSERT(virq < vgic_num_irqs(d));
    ASSERT(!is_lpi(virq));

    /*
     * When routing an IRQ to a guest, the virtual state is not synced
     * back to the physical IRQ.  To prevent the two from getting out of
     * sync, only allow routing while the domain is still being created.
     */
    if ( d->creation_finished )
        return -EBUSY;

    /* Wire the physical IRQ to the domain's vGIC. */
    ret = vgic_connect_hw_irq(d, NULL, virq, desc, true);
    if ( ret )
        return ret;

    desc->handler = gic_hw_ops->gic_guest_irq_type;
    set_bit(_IRQ_GUEST, &desc->status);

    /* Some domains configure the trigger type themselves (see
     * irq_type_set_by_domain()); only program it here otherwise. */
    if ( !irq_type_set_by_domain(d) )
        gic_set_irq_type(desc, desc->arch.type);
    gic_set_irq_priority(desc, priority);

    return 0;
}
168 
/* This function only works with SPIs for now */
/*
 * Undo gic_route_irq_to_guest(): detach physical interrupt @desc from
 * virtual IRQ @virq of domain @d.
 *
 * - desc.lock must be held and the IRQ must currently be guest-routed.
 * - Only permitted once the domain is dying (-EBUSY otherwise), since
 *   removal while the domain runs may confuse the vGIC emulation.
 */
int gic_remove_irq_from_guest(struct domain *d, unsigned int virq,
                              struct irq_desc *desc)
{
    int ret;

    ASSERT(spin_is_locked(&desc->lock));
    ASSERT(test_bit(_IRQ_GUEST, &desc->status));
    ASSERT(!is_lpi(virq));

    /*
     * Removing an interrupt while the domain is running may have
     * undesirable effect on the vGIC emulation.
     */
    if ( !d->is_dying )
        return -EBUSY;

    /* Mask/shut down the interrupt at the hardware level first. */
    desc->handler->shutdown(desc);

    /* EOI the IRQ if it has not been done by the guest */
    if ( test_bit(_IRQ_INPROGRESS, &desc->status) )
        gic_hw_ops->deactivate_irq(desc);
    clear_bit(_IRQ_INPROGRESS, &desc->status);

    /* Disconnect from the vGIC; on failure the IRQ stays shut down. */
    ret = vgic_connect_hw_irq(d, NULL, virq, desc, false);
    if ( ret )
        return ret;

    clear_bit(_IRQ_GUEST, &desc->status);
    desc->handler = &no_irq_type;

    return 0;
}
202 
gic_irq_xlate(const u32 * intspec,unsigned int intsize,unsigned int * out_hwirq,unsigned int * out_type)203 int gic_irq_xlate(const u32 *intspec, unsigned int intsize,
204                   unsigned int *out_hwirq,
205                   unsigned int *out_type)
206 {
207     if ( intsize < 3 )
208         return -EINVAL;
209 
210     /* Get the interrupt number and add 16 to skip over SGIs */
211     *out_hwirq = intspec[1] + 16;
212 
213     /* For SPIs, we need to add 16 more to get the GIC irq ID number */
214     if ( !intspec[0] )
215         *out_hwirq += 16;
216 
217     if ( out_type )
218         *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
219 
220     return 0;
221 }
222 
223 /* Map extra GIC MMIO, irqs and other hw stuffs to the hardware domain. */
gic_map_hwdom_extra_mappings(struct domain * d)224 int gic_map_hwdom_extra_mappings(struct domain *d)
225 {
226     if ( gic_hw_ops->map_hwdom_extra_mappings )
227         return gic_hw_ops->map_hwdom_extra_mappings(d);
228 
229     return 0;
230 }
231 
/*
 * Scan the device tree for an interrupt controller, initialise the first
 * one a driver accepts and make it the primary interrupt controller.
 * Panics if no compatible GIC is found.
 */
static void __init gic_dt_preinit(void)
{
    int rc;
    struct dt_device_node *node;
    uint8_t num_gics = 0;

    dt_for_each_device_node( dt_host, node )
    {
        if ( !dt_get_property(node, "interrupt-controller", NULL) )
            continue;

        /* Skip nodes without a parent (e.g. the root node itself). */
        if ( !dt_get_parent(node) )
            continue;

        rc = device_init(node, DEVICE_GIC, NULL);
        if ( !rc )
        {
            /* NOTE: Only one GIC is supported */
            num_gics = 1;
            break;
        }
    }
    if ( !num_gics )
        panic("Unable to find compatible GIC in the device tree\n");

    /* Set the GIC as the primary interrupt controller */
    dt_interrupt_controller = node;
    dt_device_set_used_by(node, DOMID_XEN);
}
261 
#ifdef CONFIG_ACPI
/*
 * Locate the GIC distributor entry in the ACPI MADT and initialise the
 * matching driver.  Panics if no GICD entry or no compatible driver exists.
 */
static void __init gic_acpi_preinit(void)
{
    struct acpi_subtable_header *header;
    struct acpi_madt_generic_distributor *dist;

    header = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
    if ( !header )
        panic("No valid GICD entries exists\n");

    dist = container_of(header, struct acpi_madt_generic_distributor, header);

    /* The GICD version field selects which driver to initialise. */
    if ( acpi_device_init(DEVICE_GIC, NULL, dist->version) )
        panic("Unable to find compatible GIC in the ACPI table\n");
}
277 #else
/* !CONFIG_ACPI: nothing to probe. */
static void __init gic_acpi_preinit(void) { }
279 #endif
280 
281 /* Find the interrupt controller and set up the callback to translate
282  * device tree or ACPI IRQ.
283  */
gic_preinit(void)284 void __init gic_preinit(void)
285 {
286     if ( acpi_disabled )
287         gic_dt_preinit();
288     else
289         gic_acpi_preinit();
290 }
291 
/* Set up the GIC (boot CPU); panics if the driver fails to initialise. */
void __init gic_init(void)
{
    if ( gic_hw_ops->init() )
        panic("Failed to initialize the GIC drivers\n");
    /* Clear LR mask for cpu0 */
    clear_cpu_lr_mask();
}
300 
/* Send SGI @sgi to every CPU in @cpumask. */
void send_SGI_mask(const cpumask_t *cpumask, enum gic_sgi sgi)
{
    gic_hw_ops->send_SGI(sgi, SGI_TARGET_LIST, cpumask);
}
305 
/* Send SGI @sgi to the single CPU @cpu. */
void send_SGI_one(unsigned int cpu, enum gic_sgi sgi)
{
    send_SGI_mask(cpumask_of(cpu), sgi);
}
310 
/* Send SGI @sgi to the local CPU only. */
void send_SGI_self(enum gic_sgi sgi)
{
    gic_hw_ops->send_SGI(sgi, SGI_TARGET_SELF, NULL);
}
315 
send_SGI_allbutself(enum gic_sgi sgi)316 void send_SGI_allbutself(enum gic_sgi sgi)
317 {
318    gic_hw_ops->send_SGI(sgi, SGI_TARGET_OTHERS, NULL);
319 }
320 
/* Ask CPU @cpu (via IPI) to dump its execution state. */
void smp_send_state_dump(unsigned int cpu)
{
    send_SGI_one(cpu, GIC_SGI_DUMP_STATE);
}
325 
/* Set up the per-CPU parts of the GIC for a secondary CPU */
void gic_init_secondary_cpu(void)
{
    gic_hw_ops->secondary_init();
    /* Clear LR mask for secondary cpus */
    clear_cpu_lr_mask();
}
333 
/* Shut down the per-CPU GIC interface (interrupts must be disabled). */
void gic_disable_cpu(void)
{
    ASSERT(!local_irq_is_enabled());

    gic_hw_ops->disable_interface();
}
341 
/* Handle an inter-processor interrupt (SGI 0-15) raised by send_SGI_*. */
static void do_sgi(struct cpu_user_regs *regs, enum gic_sgi sgi)
{
    struct irq_desc *desc = irq_to_desc(sgi);

    perfc_incr(ipis);

    /* Lower the priority (priority drop only; the interrupt is not
     * deactivated until deactivate_irq() at the end). */
    gic_hw_ops->eoi_irq(desc);

    /*
     * Ensure any shared data written by the CPU sending
     * the IPI is read after we've read the ACK register on the GIC.
     * Matches the write barrier in send_SGI_* helpers.
     */
    smp_rmb();

    switch (sgi)
    {
    case GIC_SGI_EVENT_CHECK:
        /* Nothing to do, will check for events on return path */
        break;
    case GIC_SGI_DUMP_STATE:
        dump_execstate(regs);
        break;
    case GIC_SGI_CALL_FUNCTION:
        smp_call_function_interrupt();
        break;
    default:
        /* An SGI Xen never sends indicates a serious bug somewhere. */
        panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id());
        break;
    }

    /* Deactivate */
    gic_hw_ops->deactivate_irq(desc);
}
377 
/* Accept an interrupt from the GIC and dispatch its handler.
 * Loops draining pending interrupts until a special/spurious ID is read.
 */
void gic_interrupt(struct cpu_user_regs *regs, int is_fiq)
{
    unsigned int irq;

    do  {
        /* Reading IRQ will ACK it */
        irq = gic_hw_ops->read_irq();

        /* 16-1019: PPIs and SPIs, the common case. */
        if ( likely(irq >= 16 && irq < 1020) )
        {
            isb();
            do_IRQ(regs, irq, is_fiq);
        }
        else if ( is_lpi(irq) )
        {
            isb();
            gic_hw_ops->do_LPI(irq);
        }
        /* 0-15: SGIs, i.e. inter-processor interrupts. */
        else if ( unlikely(irq < 16) )
        {
            do_sgi(regs, irq);
        }
        else
        {
            /*
             * Special ID (1020-1023, e.g. spurious): nothing left to
             * service; leave IRQs masked and exit the loop.
             */
            local_irq_disable();
            break;
        }
    } while (1);
}
408 
/* Handler for the GIC maintenance interrupt; parameters are unused. */
static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
    /*
     * This is a dummy interrupt handler.
     * Receiving the interrupt is going to cause gic_inject to be called
     * on return to guest that is going to clear the old LRs and inject
     * new interrupts.
     *
     * Do not add code here: maintenance interrupts caused by setting
     * GICH_HCR_UIE, might read as spurious interrupts (1023) because
     * GICH_HCR_UIE is cleared before reading GICC_IAR. As a consequence
     * this handler is not called.
     */
    perfc_incr(maintenance_irqs);
}
424 
/* Dump @v's LR mask and the driver's view of its GIC state to the console. */
void gic_dump_info(struct vcpu *v)
{
    printk("GICH_LRs (vcpu %d) mask=%"PRIx64"\n", v->vcpu_id, v->arch.lr_mask);
    gic_hw_ops->dump_state(v);
}
430 
/* Request the maintenance interrupt on the current CPU.
 * Reverted by cpu_gic_callback() when the CPU goes offline. */
void init_maintenance_interrupt(void)
{
    request_irq(gic_hw_ops->info->maintenance_irq, 0, maintenance_interrupt,
                "irq-maintenance", NULL);
}
436 
/* Emit the GIC node into the hardware domain's device tree (@fdt).
 * @gic must be the primary interrupt controller found at preinit. */
int gic_make_hwdom_dt_node(const struct domain *d,
                           const struct dt_device_node *gic,
                           void *fdt)
{
    ASSERT(gic == dt_interrupt_controller);

    return gic_hw_ops->make_hwdom_dt_node(d, gic, fdt);
}
445 
/* Build the GIC part of the hardware domain's MADT at @offset. */
int gic_make_hwdom_madt(const struct domain *d, u32 offset)
{
    return gic_hw_ops->make_hwdom_madt(d, offset);
}
450 
gic_get_hwdom_madt_size(const struct domain * d)451 unsigned long gic_get_hwdom_madt_size(const struct domain *d)
452 {
453     unsigned long madt_size;
454 
455     madt_size = sizeof(struct acpi_table_madt)
456                 + sizeof(struct acpi_madt_generic_interrupt) * d->max_vcpus
457                 + sizeof(struct acpi_madt_generic_distributor)
458                 + gic_hw_ops->get_hwdom_extra_madt_size(d);
459 
460     return madt_size;
461 }
462 
/* Deny domain @d access to the GIC's MMIO regions. */
int gic_iomem_deny_access(const struct domain *d)
{
    return gic_hw_ops->iomem_deny_access(d);
}
467 
cpu_gic_callback(struct notifier_block * nfb,unsigned long action,void * hcpu)468 static int cpu_gic_callback(struct notifier_block *nfb,
469                             unsigned long action,
470                             void *hcpu)
471 {
472     switch ( action )
473     {
474     case CPU_DYING:
475         /* This is reverting the work done in init_maintenance_interrupt */
476         release_irq(gic_hw_ops->info->maintenance_irq, NULL);
477         break;
478     default:
479         break;
480     }
481 
482     return NOTIFY_DONE;
483 }
484 
/* Hook cpu_gic_callback into the CPU hotplug notification chain. */
static struct notifier_block cpu_gic_nfb = {
    .notifier_call = cpu_gic_callback,
};

/* Register the CPU notifier once at boot. */
static int __init cpu_gic_notifier_init(void)
{
    register_cpu_notifier(&cpu_gic_nfb);

    return 0;
}
__initcall(cpu_gic_notifier_init);
496 
497 /*
498  * Local variables:
499  * mode: C
500  * c-file-style: "BSD"
501  * c-basic-offset: 4
502  * indent-tabs-mode: nil
503  * End:
504  */
505