/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
 * Copyright 2021, HENSOLDT Cyber
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <machine/registerset.h>
#include <machine/timer.h>
#include <arch/machine.h>
#include <arch/smp/ipi.h>

#ifndef CONFIG_KERNEL_MCS
#define RESET_CYCLES ((TIMER_CLOCK_HZ / MS_IN_S) * CONFIG_TIMER_TICK_MS)
#endif /* !CONFIG_KERNEL_MCS */

#define IS_IRQ_VALID(X) (((X)) <= maxIRQ && (X) != irqInvalid)
getRestartPC(tcb_t * thread)22 word_t PURE getRestartPC(tcb_t *thread)
23 {
24     return getRegister(thread, FaultIP);
25 }
26 
/* Set the address at which the given thread continues execution on its next
 * return to user level, by writing the NextIP register.
 */
void setNextPC(tcb_t *thread, word_t v)
{
    setRegister(thread, NextIP, v);
}

map_kernel_devices(void)32 BOOT_CODE void map_kernel_devices(void)
33 {
34     /* If there are no kernel device frames at all, then kernel_device_frames is
35      * NULL. Thus we can't use ARRAY_SIZE(kernel_device_frames) here directly,
36      * but have to use NUM_KERNEL_DEVICE_FRAMES that is defined accordingly.
37      */
38     for (int i = 0; i < NUM_KERNEL_DEVICE_FRAMES; i++) {
39         const kernel_frame_t *frame = &kernel_device_frames[i];
40         map_kernel_frame(frame->paddr, frame->pptr, VMKernelOnly);
41         if (!frame->userAvailable) {
42             reserve_region((p_region_t) {
43                 .start = frame->paddr,
44                 .end   = frame->paddr + BIT(seL4_LargePageBits)
45             });
46         }
47     }
48 }
49 
/*
 * The following assumes familiarity with RISC-V interrupt delivery and the PLIC.
 * See the RISC-V privileged specification v1.10 and the comment in
 * include/plat/spike/plat/machine.h for more information.
 * RISC-V IRQ handling on seL4 works as follows:
 *
 * On other architectures the kernel masks interrupts between delivering them to
 * user level and receiving the acknowledgment invocation. This strategy doesn't
 * work on RISC-V as an IRQ is implicitly masked when it is claimed, until the
 * claim is acknowledged. If we mask and unmask the interrupt at the PLIC while
 * a claim is in progress we sometimes experience IRQ sources not being masked
 * and unmasked as expected. Because of this, we don't mask and unmask IRQs that
 * are for user level, and also call plic_complete_claim for seL4_IRQHandler_Ack.
 */

/* Per-core record of the interrupt that is currently claimed/active. */
static irq_t active_irq[CONFIG_MAX_NUM_NODES];

/**
 * Gets the active irq. Returns the same irq if called again before ackInterrupt.
 *
 * This function is called by the kernel to get the interrupt that is currently
 * active. If no interrupt is currently active, it will try to find one and
 * put it in the active state. If no interrupt is found, irqInvalid is returned.
 * It can't be assumed that if isIRQPending() returned true, there will always
 * be an active interrupt when this is called. It may hold in many cases, but
 * there are corner cases with level-triggered interrupts or on multicore
 * systems.
 * This function can be called multiple times during one kernel entry. It must
 * guarantee that once one interrupt is reported as active, this interrupt is
 * always returned until ackInterrupt() is called eventually.
 *
 * @return     The active irq or irqInvalid.
 */
static inline irq_t getActiveIRQ(void)
{
    irq_t *active_irq_slot = &active_irq[CURRENT_CPU_INDEX()];

    /* If an interrupt is currently active, then return it. */
    irq_t irq = *active_irq_slot;
    if (IS_IRQ_VALID(irq)) {
        return irq;
    }

    /* No interrupt currently active, find a new one from the sources. The
     * priorities are: external -> software -> timer.
     */
    word_t sip = read_sip();
    if (sip & BIT(SIP_SEIP)) {
        /* Even if we see an external interrupt as pending, the PLIC may not
         * return any pending interrupt here in some corner cases. A level
         * triggered interrupt might have been deasserted again or another hart
         * has claimed it in a multicore system.
         */
        irq = plic_get_claim();
#ifdef ENABLE_SMP_SUPPORT
    } else if (sip & BIT(SIP_SSIP)) {
        sbi_clear_ipi();
        irq = ipi_get_irq();
#endif
    } else if (sip & BIT(SIP_STIP)) {
        irq = KERNEL_TIMER_IRQ;
    } else {
        /* Seems none of the known sources has a pending interrupt. This can
         * happen e.g. if another hart context has claimed the interrupt
         * already.
         */
        irq = irqInvalid;
    }

    /* There is no guarantee that there is a new interrupt. */
    if (!IS_IRQ_VALID(irq)) {
        /* Sanity check: the slot can't hold an interrupt either. */
        assert(!IS_IRQ_VALID(*active_irq_slot));
        return irqInvalid;
    }

    /* A new interrupt is active, remember it. */
    *active_irq_slot = irq;
    return irq;
}

#ifdef HAVE_SET_TRIGGER
/**
 * Sets the irq trigger.
 *
 * setIRQTrigger can change the trigger between edge and level at the PLIC for
 * external interrupts. Whether the PLIC has support for this operation is
 * implementation specific.
 *
 * @param[in]  irq             The irq
 * @param[in]  edge_triggered  edge triggered otherwise level triggered
 */
void setIRQTrigger(irq_t irq, bool_t edge_triggered)
{
    /* Delegate directly to the PLIC driver. */
    plic_irq_set_trigger(irq, edge_triggered);
}
#endif

149 /* isIRQPending is used to determine whether to preempt long running
150  * operations at various preemption points throughout the kernel. If this
151  * returns true, it means that if the Kernel were to return to user mode, it
152  * would then immediately take an interrupt. We check the SIP register for if
153  * either a timer interrupt (STIP) or an external interrupt (SEIP) is pending.
154  * We don't check software generated interrupts. These are used to perform cross
155  * core signalling which isn't currently supported.
156  * TODO: Add SSIP check when SMP support is added.
157  */
isIRQPending(void)158 static inline bool_t isIRQPending(void)
159 {
160     word_t sip = read_sip();
161     return (sip & (BIT(SIP_STIP) | BIT(SIP_SEIP)));
162 }
163 
164 /**
165  * Disable or enable IRQs.
166  *
167  * maskInterrupt disables and enables IRQs. When an IRQ is disabled, it should
168  * not raise an interrupt on the Kernel's HART context. This either masks the
169  * core timer on the sie register or masks an external IRQ at the plic.
170  *
171  * @param[in]  disable  The disable
172  * @param[in]  irq      The irq
173  */
maskInterrupt(bool_t disable,irq_t irq)174 static inline void maskInterrupt(bool_t disable, irq_t irq)
175 {
176     assert(IS_IRQ_VALID(irq));
177     if (irq == KERNEL_TIMER_IRQ) {
178         if (disable) {
179             clear_sie_mask(BIT(SIE_STIE));
180         } else {
181             set_sie_mask(BIT(SIE_STIE));
182         }
183 #ifdef ENABLE_SMP_SUPPORT
184     } else if (irq == irq_reschedule_ipi || irq == irq_remote_call_ipi) {
185         return;
186 #endif
187     } else {
188         plic_mask_irq(disable, irq);
189     }
190 }
191 
192 /**
193  * Kernel has dealt with the pending interrupt getActiveIRQ can return next IRQ.
194  *
195  * ackInterrupt is used by the kernel to indicate it has processed the interrupt
196  * delivery and getActiveIRQ is now able to return a different IRQ number. Note
197  * that this is called after a notification has been signalled to user level,
198  * but before user level has handled the cause.
199  *
200  * @param[in]  irq   The irq
201  */
ackInterrupt(irq_t irq)202 static inline void ackInterrupt(irq_t irq)
203 {
204     assert(IS_IRQ_VALID(irq));
205     active_irq[CURRENT_CPU_INDEX()] = irqInvalid;
206 
207     if (irq == KERNEL_TIMER_IRQ) {
208         /* Reprogramming the timer has cleared the interrupt. */
209         return;
210     }
211 #ifdef ENABLE_SMP_SUPPORT
212     if (irq == irq_reschedule_ipi || irq == irq_remote_call_ipi) {
213         ipi_clear_irq(irq);
214     }
215 #endif
216 }
217 
#ifndef CONFIG_KERNEL_MCS
resetTimer(void)219 void resetTimer(void)
220 {
221     uint64_t target;
222     // repeatedly try and set the timer in a loop as otherwise there is a race and we
223     // may set a timeout in the past, resulting in it never getting triggered
224     do {
225         target = riscv_read_time() + RESET_CYCLES;
226         sbi_set_timer(target);
227     } while (riscv_read_time() > target);
228 }
229 
230 /**
231    DONT_TRANSLATE
232  */
initTimer(void)233 BOOT_CODE void initTimer(void)
234 {
235     sbi_set_timer(riscv_read_time() + RESET_CYCLES);
236 }
#endif /* !CONFIG_KERNEL_MCS */

initLocalIRQController(void)239 BOOT_CODE void initLocalIRQController(void)
240 {
241     printf("Init local IRQ\n");
242 
243     /* Init per-hart PLIC */
244     plic_init_hart();
245 
246     /* Enable timer and external interrupt. If SMP is enabled, then enable the
247      * software interrupt also, it is used as IPI between cores. */
248     set_sie_mask(BIT(SIE_SEIE) | BIT(SIE_STIE) | SMP_TERNARY(BIT(SIE_SSIE), 0));
249 }
250 
initIRQController(void)251 BOOT_CODE void initIRQController(void)
252 {
253     printf("Initializing PLIC...\n");
254 
255     /* Initialize active_irq[] properly to stick to the semantics and play safe.
256      * Effectively this is not needed if irqInvalid is zero (which is currently
257      * the case) and the array is in the BSS, that is filled with zeros (which
258      * the a kernel loader is supposed to do and which the ELF-Loader does).
259      */
260     for (word_t i = 0; i < ARRAY_SIZE(active_irq); i++) {
261         active_irq[i] = irqInvalid;
262     }
263 
264     plic_init_controller();
265 }
266 
/**
 * Handle an interrupt delivery with no identifiable source.
 *
 * There is nothing to actually handle here; the event is only logged for
 * debugging, together with the current SIP register contents. The previous
 * message read "Superior IRQ!!", a typo for "Spurious".
 */
static inline void handleSpuriousIRQ(void)
{
    printf("Spurious IRQ!! SIP %lx\n", read_sip());
}