/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <drivers/timer/arm_generic.h>

BOOT_CODE void initGenericTimer(void)
{
    if (config_set(CONFIG_DEBUG_BUILD)) {
        /* check the frequency is correct */
        word_t gpt_cntfrq = 0;
        SYSTEM_READ_WORD(CNTFRQ, gpt_cntfrq);
        /* The CNTFRQ register is a 32-bit register, so its value can safely
         * be compared with TIMER_CLOCK_HZ.
         */
        if ((gpt_cntfrq != 0) && (gpt_cntfrq != TIMER_CLOCK_HZ)) {
            /* TIMER_CLOCK_HZ is defined as an unsigned long long constant on
             * every architecture. */
            printf("Warning: gpt_cntfrq %"SEL4_PRIu_word", expected %llu\n",
                   gpt_cntfrq, TIMER_CLOCK_HZ);
        }
    }

#ifdef CONFIG_KERNEL_MCS
    /* This sets the deadline (timer compare value) to UINT64_MAX. */
    ackDeadlineIRQ();
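    /* Bit 0 of the timer control register is the ENABLE bit; leaving the
     * IMASK bit clear keeps the timer interrupt unmasked. */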
    SYSTEM_WRITE_WORD(CNT_CTL, BIT(0));
#else /* !CONFIG_KERNEL_MCS */
    resetTimer();
#endif /* CONFIG_KERNEL_MCS */
}

/*
 * The exynos5 platforms require custom hardware initialisation before the
 * generic timer is usable. They need to override initTimer before calling
 * initGenericTimer because of this. We cannot use a `weak` symbol definition
 * in this case because the kernel is built as a single file and multiple
 * symbol definitions with the same name are not allowed. We therefore resort
 * to ifdef'ing out this initTimer definition for exynos5 platforms.
 */
#ifndef CONFIG_PLAT_EXYNOS5
BOOT_CODE void initTimer(void)
{
    initGenericTimer();
}
#endif
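
/* For illustration only: an exynos5 override is expected to look roughly
 * like the sketch below, where the hardware-setup hook name is hypothetical
 * and the real definition lives in the exynos5 platform code:
 *
 *     BOOT_CODE void initTimer(void)
 *     {
 *         initExynosTimerHardware();  // hypothetical platform-specific setup
 *         initGenericTimer();
 *     }
 */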

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <armv/vcpu.h>

/** MODIFIES: */
/** DONT_TRANSLATE */
static inline uint64_t read_cntpct(void)
{
    uint64_t val;
    SYSTEM_READ_64(CNTPCT, val);
    return val;
}

static void save_virt_timer(vcpu_t *vcpu)
{
    /* Save control register */
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTV_CTL);
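    /* Disable the virtual timer while this VCPU is not running. */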
    vcpu_hw_write_reg(seL4_VCPUReg_CNTV_CTL, 0);
    /* Save Compare Value and Offset registers */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTV_CVAL);
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTVOFF);
    vcpu_save_reg(vcpu, seL4_VCPUReg_CNTKCTL_EL1);
    check_export_arch_timer();
#else
    uint64_t cval = get_cntv_cval_64();
    uint64_t cntvoff = get_cntv_off_64();
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTV_CVALhigh, (word_t)(cval >> 32));
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTV_CVALlow, (word_t)cval);
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFhigh, (word_t)(cntvoff >> 32));
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFlow, (word_t)cntvoff);
#endif
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Save counter value at the time the vcpu is disabled */
    vcpu->virtTimer.last_pcount = read_cntpct();
#endif
}

static void restore_virt_timer(vcpu_t *vcpu)
{
    /* Restore virtual timer state */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTV_CVAL);
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTKCTL_EL1);
#else
    uint32_t cval_high = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTV_CVALhigh);
    uint32_t cval_low = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTV_CVALlow);
    uint64_t cval = ((uint64_t)cval_high << 32) | (uint64_t)cval_low;
    set_cntv_cval_64(cval);
#endif

    /* Set virtual timer offset */
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
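    /* Physical counter ticks that elapsed while this VCPU was switched out. */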
    uint64_t pcount_delta;
    uint64_t current_cntpct = read_cntpct();
    pcount_delta = current_cntpct - vcpu->virtTimer.last_pcount;
#endif
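    /* When CONFIG_VTIMER_UPDATE_VOFFSET is set, advance CNTVOFF by the
     * elapsed physical ticks so the guest does not observe the time this
     * VCPU spent switched out. */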
#ifdef CONFIG_ARCH_AARCH64
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    uint64_t offset = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTVOFF);
    offset += pcount_delta;
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFF, offset);
#endif
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTVOFF);
#else
    uint32_t offset_high = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTVOFFhigh);
    uint32_t offset_low = vcpu_read_reg(vcpu, seL4_VCPUReg_CNTVOFFlow);
    uint64_t offset = ((uint64_t)offset_high << 32) | (uint64_t)offset_low;
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    offset += pcount_delta;
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFhigh, (word_t)(offset >> 32));
    vcpu_write_reg(vcpu, seL4_VCPUReg_CNTVOFFlow, (word_t)offset);
#endif
    set_cntv_off_64(offset);
#endif
    /* For verification, we need to ensure we don't unmask an inactive
     * interrupt; the virtual timer interrupt should never get disabled, but
     * that knowledge is not available at this point. */
    /* Restore interrupt mask state */
    if (likely(isIRQActive(CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT)))) {
        maskInterrupt(vcpu->vppi_masked[irqVPPIEventIndex(CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT))],
                      CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT));
    }
    /* Restore virtual timer control register */
    vcpu_restore_reg(vcpu, seL4_VCPUReg_CNTV_CTL);
}

#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */