/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <api/types.h>
#include <arch/machine.h>
#include <arch/machine/hardware.h>
#include <arch/machine/l2c_310.h>

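/* LINE_START rounds an address down to the start of its cache line;
 * LINE_INDEX converts an address to its cache line number. For example,
 * assuming 64-byte lines (L1_CACHE_LINE_SIZE_BITS == 6; the actual value
 * is platform dependent), LINE_START(0x1007) is 0x1000 and
 * LINE_INDEX(0x1007) is 0x40. */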
#define LINE_START(a) ROUND_DOWN(a, L1_CACHE_LINE_SIZE_BITS)
#define LINE_INDEX(a) (LINE_START(a)>>L1_CACHE_LINE_SIZE_BITS)

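/* Clean (write back) the D-cache lines covering [start, end] to the Point
 * of Coherence. 'end' is inclusive; pstart is the physical address
 * corresponding to start, as the maintenance operations take both a
 * virtual and a physical address. */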
static void cleanCacheRange_PoC(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanByVA(line, pstart + (line - start));
    }
}

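/* Clean and invalidate [start, end] in both the L1 and L2 caches, so the
 * data reaches RAM and no copies remain cached. */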
void cleanInvalidateCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end, id)" */

    /* First clean the L1 range */
    cleanCacheRange_PoC(start, end, pstart);

    /* ensure the operation completes and is visible in L2 */
    dsb();

    /* Now clean and invalidate the L2 range */
    plat_cleanInvalidateL2Range(pstart, pstart + (end - start));

    /* Finally clean and invalidate the L1 range. The extra clean is only strictly necessary
     * in a multiprocessor environment to prevent a write being lost if another core is
     * attempting a store at the same time. As the range should already be clean, asking
     * it to clean again should not affect performance */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanInvalByVA(line, pstart + (line - start));
    }
    /* ensure clean and invalidate complete */
    dsb();
}

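/* Clean (write back) [start, end] all the way to RAM: first L1 to L2, then
 * L2 to memory. The cached copies remain valid. */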
void cleanCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* clean l1 to l2 */
    cleanCacheRange_PoC(start, end, pstart);

    /* ensure cache operation completes before cleaning l2 */
    dsb();

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* now clean l2 to RAM */
    plat_cleanL2Range(pstart, pstart + (end - start));
}

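/* Clean [start, end] to the Point of Unification, making prior data-side
 * writes visible to instruction fetches, e.g. after writing code. */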
void cleanCacheRange_PoU(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        cleanByVA_PoU(line, pstart + (line - start));
    }
}

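/* Invalidate [start, end] in both cache levels so that subsequent reads
 * come from RAM, for instance after a device has written to the memory. */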
void invalidateCacheRange_RAM(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    /* If the start and end are not aligned to a cache line boundary
     * then we need to clean the line first to prevent invalidating
     * bytes we didn't mean to. Calling the functions in this way is
     * not the most efficient method, but we assume the user will
     * rarely be this silly */
    if (start != LINE_START(start)) {
        cleanCacheRange_RAM(start, start, pstart);
    }
    if (end + 1 != LINE_START(end + 1)) {
        line = LINE_START(end);
        cleanCacheRange_RAM(line, line, pstart + (line - start));
    }

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Invalidate L2 range. Invalidating the L2 before the L1 is the order
     * given in the l2c_310 manual, as an L1 line might be allocated from the L2
     * before the L2 can be invalidated. */
    plat_invalidateL2Range(pstart, pstart + (end - start));

    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end
        \<and> \<acute>pstart <= \<acute>pstart + (\<acute>end - \<acute>start), id)" */

    /* Now invalidate L1 range */
    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        invalidateByVA(line, pstart + (line - start));
    }
    /* Ensure invalidate completes */
    dsb();
}

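/* Invalidate the I-cache lines covering [start, end]. */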
void invalidateCacheRange_I(vptr_t start, vptr_t end, paddr_t pstart)
{
#if defined(CONFIG_ARM_ICACHE_VIPT) && defined(CONFIG_ARM_HYPERVISOR_SUPPORT)
    /* When the hypervisor is supported, the virtual addresses passed
     * to this function are kernel aliases for the underlying physical memory
     * rather than the virtual addresses in the actual vspace. This works fine
     * when the cache is PIPT, as the cache line is indexed by physical address,
     * and the alias maps to the same physical address. On VIPT this is not the
     * case, and it is not possible to correctly index using an aliased address.
     * As the only possible fallback, the entire cache is invalidated in this
     * case
     */
    invalidate_I_PoU();
#else
    vptr_t line;
    word_t index;

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        invalidateByVA_I(line, pstart + (line - start));
    }
#endif
}

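/* Invalidate branch predictor entries for each cache line in [start, end]. */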
void branchFlushRange(vptr_t start, vptr_t end, paddr_t pstart)
{
    vptr_t line;
    word_t index;

    for (index = LINE_INDEX(start); index < LINE_INDEX(end) + 1; index++) {
        line = index << L1_CACHE_LINE_SIZE_BITS;
        branchFlush(line, pstart + (line - start));
    }
}

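/* Clean the entire D-cache to the Point of Unification and invalidate the
 * entire I-cache; the dsb()s order each maintenance operation against the
 * next. */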
void cleanCaches_PoU(void)
{
    dsb();
    clean_D_PoU();
    dsb();
    invalidate_I_PoU();
    dsb();
}

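/* Clean and invalidate the entire D-cache to the Point of Coherence, then
 * invalidate the entire I-cache. */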
void cleanInvalidateL1Caches(void)
{
    dsb();
    cleanInvalidate_D_PoC();
    dsb();
    invalidate_I_PoU();
    dsb();
}

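/* Clean and invalidate all cache levels: L1 to the Point of Unification,
 * then the outer L2 cache, then L1 again to the Point of Coherence. */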
void arch_clean_invalidate_caches(void)
{
    cleanCaches_PoU();
    plat_cleanInvalidateL2Cache();
    cleanInvalidateL1Caches();
    isb();
}

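/* Clean and invalidate the L1 caches selected by the 'type' bitmask:
 * BIT(1) selects the D-cache, BIT(0) the I-cache. */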
void arch_clean_invalidate_L1_caches(word_t type)
{
    dsb();
    if (type & BIT(1)) {
        cleanInvalidate_L1D();
        dsb();
    }
    if (type & BIT(0)) {
        invalidate_I_PoU();
        dsb();
        isb();
    }
}