/*
 * Copyright 2014, General Dynamics C4 Systems
 * Copyright 2020, HENSOLDT Cyber GmbH
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <arch/machine/l2c_310.h>

#define L2_LINE_SIZE_BITS 5
#define L2_LINE_SIZE BIT(L2_LINE_SIZE_BITS) /* 32 byte line size */

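/* L2_LINE_START rounds an address down to a 32-byte L2 line boundary; for
 * example, assuming ROUND_DOWN(a, bits) clears the low `bits` bits of `a`,
 * L2_LINE_START(0x1003) == 0x1000 and L2_LINE_START(0x1020) == 0x1020. */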
#define L2_LINE_START(a) ROUND_DOWN(a, L2_LINE_SIZE_BITS)

compile_assert(l2_l1_same_line_size, L2_LINE_SIZE_BITS == L1_CACHE_LINE_SIZE_BITS)

/* MSHIELD Control */
#define MSHIELD_SMC_ROM_CTRL_CTRL         0x102
#define MSHIELD_SMC_ROM_CTRL_AUX          0x109
#define MSHIELD_SMC_ROM_CTRL_LATENCY      0x112
/* MSHIELD Address Filter */
#define MSHIELD_SMC_ROM_ADDR_FILT_START   /* ? */
#define MSHIELD_SMC_ROM_ADDR_FILT_END     /* ? */
/* MSHIELD Control 2 */
#define MSHIELD_SMC_ROM_CTRL2_DEBUG       0x100
#define MSHIELD_SMC_ROM_CTRL2_PREFETCH    0x113 /* ? */
#define MSHIELD_SMC_ROM_CTRL2_POWER       /* ? */
/* MSHIELD Cache maintenance */
#define MSHIELD_SMC_ROM_MAINT_INVALIDATE  0x101


/* Cache ID */
#define PL310_LOCKDOWN_BY_MASK            (0xf<<25)
#define PL310_LOCKDOWN_BY_MASTER          (0xe<<25)
#define PL310_LOCKDOWN_BY_LINE            (0xf<<25)
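/* Matched against bits [28:25] of the cache_type register in initL2Cache()
 * below to determine which lockdown scheme the controller implements. */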

/* Primary control */
#define CTRL_CTRL_EN BIT(0)

/* Auxiliary control */
#define CTRL_AUX_EARLY_BRESP_EN                            BIT(30)
#define CTRL_AUX_IPREFETCH_EN                              BIT(29)
#define CTRL_AUX_DPREFETCH_EN                              BIT(28)
#define CTRL_AUX_NSECURE_INT_ACCESS                        BIT(27)
#define CTRL_AUX_NSECURE_LOCKDOWN_EN                       BIT(26)
#define CTRL_AUX_REPLACEMENT_POLICY                        BIT(25)
#define CTRL_AUX_FORCE_WR_ALLOC(X)            (((X)&0x3) * BIT(23))
#define CTRL_AUX_SHARED_ATTRIB_OVERRIDE_EN                 BIT(22)
#define CTRL_AUX_PARITY_EN                                 BIT(21)
#define CTRL_AUX_EVENT_MONITOR_BUS_EN                      BIT(20)
#define CTRL_AUX_WAYSIZE(X)                   (((X)&0x7) * BIT(17))
#define CTRL_AUX_ASSOCIATIVITY                             BIT(16)
#define CTRL_AUX_SHARED_ATTRIB_INVALIDATE_EN               BIT(13)
#define CTRL_AUX_EXCLUSIVE_CACHE_CONFIG                    BIT(12)
#define CTRL_AUX_STOREBUFDEV_LIMIT_EN                      BIT(11)
#define CTRL_AUX_HIGH_PRIO_SODEV_EN                        BIT(10)
#define CTRL_AUX_FULL_LINE_ZEROS_ENABLE                    BIT(0)

#define CTRL_AUX_WAYSIZE_16K         CTRL_AUX_WAYSIZE(1)
#define CTRL_AUX_WAYSIZE_32K         CTRL_AUX_WAYSIZE(2)
#define CTRL_AUX_WAYSIZE_64K         CTRL_AUX_WAYSIZE(3)
#define CTRL_AUX_WAYSIZE_128K        CTRL_AUX_WAYSIZE(4)
#define CTRL_AUX_WAYSIZE_256K        CTRL_AUX_WAYSIZE(5)
#define CTRL_AUX_WAYSIZE_512K        CTRL_AUX_WAYSIZE(6)

#define CTRL_AUX_ASSOCIATIVITY_8WAY   (0 * CTRL_AUX_ASSOCIATIVITY)
#define CTRL_AUX_ASSOCIATIVITY_16WAY  (1 * CTRL_AUX_ASSOCIATIVITY)

#define CTRL_AUS_REPLPOLICY_RROBIN    (0 * CTRL_AUX_REPLACEMENT_POLICY)
#define CTRL_AUS_REPLPOLICY_PRAND     (1 * CTRL_AUX_REPLACEMENT_POLICY)
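/* The 0/1 multipliers above select between the two encodings of a single
 * control bit, e.g. CTRL_AUX_ASSOCIATIVITY_16WAY sets bit 16 while the 8-way
 * and round-robin variants leave their respective bits clear. */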

/* Latency */
#define CTRL_RAM_LATENCY_SET(X,S)    (((X)&0x7) * BIT(S))
#define CTRL_RAM_LATENCY_SETUP(X)    CTRL_RAM_LATENCY_SET(X, 0)
#define CTRL_RAM_LATENCY_READ(X)     CTRL_RAM_LATENCY_SET(X, 4)
#define CTRL_RAM_LATENCY_WRITE(X)    CTRL_RAM_LATENCY_SET(X, 8)

#define CTRL_RAM_LATENCY(W,R,S)    ( CTRL_RAM_LATENCY_SETUP(S) \
                                   | CTRL_RAM_LATENCY_READ(R)  \
                                   | CTRL_RAM_LATENCY_WRITE(W) )
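/* Worked example, derived purely from the macros above: CTRL_RAM_LATENCY(1, 2, 1)
 * places setup latency 1 in bits [2:0], read latency 2 in bits [6:4] and write
 * latency 1 in bits [10:8], giving the register value 0x121. Note the argument
 * order is (write, read, setup). */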


/* Maintenance */
#define MAINTENANCE_PENDING          BIT(0)

/* POWER */
#define CTRL2_PWR_DYNAMIC_CLK_EN     BIT(1)
#define CTRL2_PWR_STANDBY_ON         BIT(0)

/* PREFETCH */
#define CTRL2_PFET_DBL_LINEFILL_EN              BIT(30)
#define CTRL2_PFET_INST_PREFETCH_EN             BIT(29)
#define CTRL2_PFET_DATA_PREFETCH_EN             BIT(28)
#define CTRL2_PFET_DBL_LINEFILL_ON_WRAP_EN      BIT(27)
#define CTRL2_PFET_PREFETCH_DROP_EN             BIT(24)
#define CTRL2_PFET_INCR_DBL_LINEFILL_EN         BIT(23)
#define CTRL2_PFET_NOT_SAME_ID_ON_EXCL_SEQ_EN   BIT(21)
#define CTRL2_PFET_PREFETCH_OFFSET(X)    ((X) * BIT(0))


struct l2cc_map {

    struct {
        uint32_t cache_id;              /* 0x000 */
        uint32_t cache_type;            /* 0x004 */
        uint32_t res[62];
    } id /* reg0 */;

    struct {
        uint32_t control;               /* 0x100 */
        uint32_t aux_control;           /* 0x104 */
        uint32_t tag_ram_control;       /* 0x108 */
        uint32_t data_ram_control;      /* 0x10C */
        uint32_t res[60];
    } control /* reg1 */;

    struct {
        uint32_t ev_counter_ctrl;       /* 0x200 */
        uint32_t ev_counter1_cfg;       /* 0x204 */
        uint32_t ev_counter0_cfg;       /* 0x208 */
        uint32_t ev_counter1;           /* 0x20C */
        uint32_t ev_counter0;           /* 0x210 */
        uint32_t int_mask;              /* 0x214 */
        uint32_t int_mask_status;       /* 0x218 */
        uint32_t int_raw_status;        /* 0x21C */
        uint32_t int_clear;             /* 0x220 */
        uint32_t res[55];
    } interrupt /* reg2 */;

    struct {
        uint32_t res[64];
    } reg3;
    struct {
        uint32_t res[64];
    } reg4;
    struct {
        uint32_t res[64];
    } reg5;
    struct {
        uint32_t res[64];
    } reg6;

    struct {
        uint32_t res[12];
        uint32_t cache_sync;            /* 0x730 */
        uint32_t res1[15];
        uint32_t inv_pa;                /* 0x770 */
        uint32_t res2[2];
        uint32_t inv_way;               /* 0x77C */
        uint32_t res3[12];
        uint32_t clean_pa;              /* 0x7B0 */
        uint32_t res4[1];
        uint32_t clean_index;           /* 0x7B8 */
        uint32_t clean_way;             /* 0x7BC */
        uint32_t res5[12];
        uint32_t clean_inv_pa;          /* 0x7F0 */
        uint32_t res6[1];
        uint32_t clean_inv_index;       /* 0x7F8 */
        uint32_t clean_inv_way;         /* 0x7FC */
    } maintenance /* reg7 */;

    struct {
        uint32_t res[64];
    } reg8;

    struct {
        uint32_t d_lockdown0;           /* 0x900 */
        uint32_t i_lockdown0;           /* 0x904 */
        uint32_t d_lockdown1;           /* 0x908 */
        uint32_t i_lockdown1;           /* 0x90C */
        uint32_t d_lockdown2;           /* 0x910 */
        uint32_t i_lockdown2;           /* 0x914 */
        uint32_t d_lockdown3;           /* 0x918 */
        uint32_t i_lockdown3;           /* 0x91C */
        uint32_t d_lockdown4;           /* 0x920 */
        uint32_t i_lockdown4;           /* 0x924 */
        uint32_t d_lockdown5;           /* 0x928 */
        uint32_t i_lockdown5;           /* 0x92C */
        uint32_t d_lockdown6;           /* 0x930 */
        uint32_t i_lockdown6;           /* 0x934 */
        uint32_t d_lockdown7;           /* 0x938 */
        uint32_t i_lockdown7;           /* 0x93C */
        uint32_t res[4];
        uint32_t lock_line_eng;         /* 0x950 */
        uint32_t unlock_wayg;           /* 0x954 */
        uint32_t res1[42];
    } lockdown /* reg9 */;

    struct {
        uint32_t res[64];
    } reg10;
    struct {
        uint32_t res[64];
    } reg11;

    struct {
        uint32_t addr_filtering_start;  /* 0xC00 */
        uint32_t addr_filtering_end;    /* 0xC04 */
        uint32_t res[62];
    } filter /* reg12 */;

    struct {
        uint32_t res[64];
    } reg13;
    struct {
        uint32_t res[64];
    } reg14;

    struct {
        uint32_t res[16];
        uint32_t debug_ctrl;            /* 0xF40 */
        uint32_t res1[7];
        uint32_t prefetch_ctrl;         /* 0xF60 */
        uint32_t res2[7];
        uint32_t power_ctrl;            /* 0xF80 */
        uint32_t res3[31];
    } control2 /* reg15 */;
};
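
/* A minimal layout sanity check (sketch): every register block above is padded
 * out to 64 words (0x100 bytes), so the map should span exactly the 4 KiB
 * PL310 register space. */
compile_assert(l2cc_map_4k, sizeof(struct l2cc_map) == 0x1000)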


#ifndef L2CC_L2C310_PPTR
#error L2CC_L2C310_PPTR must be defined for virtual memory access to the L2 cache controller
#else  /* L2CC_L2C310_PPTR */
volatile struct l2cc_map *const l2cc
    = (volatile struct l2cc_map *)L2CC_L2C310_PPTR;
#endif /* L2CC_L2C310_PPTR */


#ifdef TI_MSHIELD
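/* Security Middleware Call wrapper: the ROM service ID goes in r12 and up to
 * two arguments in r0/r1; a dsb is issued before smc #0, and r2-r12/lr are
 * preserved around the call. For example, mshield_smc(MSHIELD_SMC_ROM_CTRL_AUX,
 * aux, 0) below asks the secure ROM to write the L2 auxiliary control
 * register. */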
BOOT_CODE static void mshield_smc(uint32_t callid, uint32_t arg1, uint32_t arg2)
{
    register uint32_t _arg1 asm("r0") = arg1;
    register uint32_t _arg2 asm("r1") = arg2;
    register uint32_t _callid asm("r12") = callid;
    asm volatile("push {r2-r12, lr}\n"
                 "dsb\n"
                 "smc #0\n"
                 "pop {r2-r12, lr}"
                 :: "r"(_callid), "r"(_arg1), "r"(_arg2));
}
#endif /* TI_MSHIELD */

BOOT_CODE void initL2Cache(void)
{
#ifndef CONFIG_DEBUG_DISABLE_L2_CACHE
    uint32_t aux;
    uint32_t tag_ram;
    uint32_t data_ram;
    uint32_t prefetch;

    /* L2 cache must be disabled during initialisation */
#ifndef TI_MSHIELD
    l2cc->control.control &= ~CTRL_CTRL_EN;
#endif

    prefetch = CTRL2_PFET_INST_PREFETCH_EN | CTRL2_PFET_DATA_PREFETCH_EN;
#if defined(CONFIG_PLAT_IMX6)
    tag_ram  = CTRL_RAM_LATENCY(1, 2, 1);
    data_ram = CTRL_RAM_LATENCY(1, 2, 1);
#else
    tag_ram  = CTRL_RAM_LATENCY(1, 1, 0);
    data_ram = CTRL_RAM_LATENCY(1, 2, 0);
#endif

    aux      = 0
               | CTRL_AUX_IPREFETCH_EN
               | CTRL_AUX_DPREFETCH_EN
               | CTRL_AUX_NSECURE_INT_ACCESS
               | CTRL_AUX_NSECURE_LOCKDOWN_EN
               | CTRL_AUX_ASSOCIATIVITY_16WAY
               | CTRL_AUS_REPLPOLICY_RROBIN;

#if defined(CONFIG_PLAT_IMX6SX)
    aux |= CTRL_AUX_WAYSIZE_16K;
#elif defined(CONFIG_PLAT_EXYNOS4) || defined(CONFIG_PLAT_IMX6) || defined(CONFIG_PLAT_ZYNQ7000) || defined(CONFIG_PLAT_ALLWINNERA20)
    aux |= CTRL_AUX_WAYSIZE_64K;
#elif defined(OMAP4)
    aux |= CTRL_AUX_WAYSIZE_32K;
#else /* unsupported platform */
#error Unknown platform for L2C-310
#endif /* platform-specific way size */

#ifdef TI_MSHIELD
    /* Access secure registers through Security Middleware Call */
    /* 1: Write the aux control, Tag RAM latency, Data RAM latency, prefetch and power control registers */
    mshield_smc(MSHIELD_SMC_ROM_CTRL_CTRL, 0, 0);
    mshield_smc(MSHIELD_SMC_ROM_CTRL_AUX, aux, 0);
    mshield_smc(MSHIELD_SMC_ROM_CTRL_LATENCY, tag_ram, data_ram);

#else /* !TI_MSHIELD */
    /* Direct register access */
    /* 1: Write the aux control, Tag RAM latency, Data RAM latency, prefetch and power control registers */
    l2cc->control.aux_control      = aux;
    l2cc->control.tag_ram_control  = tag_ram;
    l2cc->control.data_ram_control = data_ram;
    l2cc->control2.prefetch_ctrl   = prefetch;

#endif /* TI_MSHIELD */

    /* 2: Invalidate by way. */
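    /* Writing 0xffff requests an invalidate of all 16 ways; the controller
     * clears each way's bit as its invalidation completes, so the loop below
     * spins until all way bits read back as zero. */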
    l2cc->maintenance.inv_way = 0xffff;
    while (l2cc->maintenance.inv_way & 0xffff);

    /* 3: write to lockdown D & I reg9 if required */
    if ((l2cc->id.cache_type & PL310_LOCKDOWN_BY_MASK) == PL310_LOCKDOWN_BY_MASTER) {
        /* disable lockdown */
        l2cc->lockdown.d_lockdown0 = 0;
        l2cc->lockdown.i_lockdown0 = 0;
        l2cc->lockdown.d_lockdown1 = 0;
        l2cc->lockdown.i_lockdown1 = 0;
        l2cc->lockdown.d_lockdown2 = 0;
        l2cc->lockdown.i_lockdown2 = 0;
        l2cc->lockdown.d_lockdown3 = 0;
        l2cc->lockdown.i_lockdown3 = 0;
        l2cc->lockdown.d_lockdown4 = 0;
        l2cc->lockdown.i_lockdown4 = 0;
        l2cc->lockdown.d_lockdown5 = 0;
        l2cc->lockdown.i_lockdown5 = 0;
        l2cc->lockdown.d_lockdown6 = 0;
        l2cc->lockdown.i_lockdown6 = 0;
        l2cc->lockdown.d_lockdown7 = 0;
        l2cc->lockdown.i_lockdown7 = 0;
    }
    if ((l2cc->id.cache_type & PL310_LOCKDOWN_BY_MASK) == PL310_LOCKDOWN_BY_LINE) {
        /* disable lockdown */
        l2cc->lockdown.lock_line_eng = 0;
    }

    /* 4: write to the interrupt mask register as desired; writing 0 here masks
     *    (disables) all L2CC interrupts */
    l2cc->interrupt.int_mask  = 0x0;
    /* 5: write to the interrupt clear register to clear any residual raw
     *    interrupts */
    l2cc->interrupt.int_clear = MASK(9);

    /* 6: Enable the L2 cache */
#ifdef TI_MSHIELD
    /* Access secure registers through Security Middleware Call */
    mshield_smc(MSHIELD_SMC_ROM_CTRL_CTRL, 1, 0);
#else /* !TI_MSHIELD */
    /* Direct register access */
    l2cc->control.control |= CTRL_CTRL_EN;
#endif /* TI_MSHIELD */

#if defined(CONFIG_ARM_CORTEX_A9) && defined(CONFIG_ENABLE_A9_PREFETCHER)
    /* Set bit 1 in the ACTLR, which on the cortex-a9 is the l2 prefetch enable
     * bit. See section 4.3.10 of the Cortex-A9 Technical Reference Manual */
    setACTLR(getACTLR() | BIT(1));
#endif

#endif /* !CONFIG_DEBUG_DISABLE_L2_CACHE */
}

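/* Drain any outstanding L2 maintenance: issue a cache sync and spin until the
 * controller no longer reports it as pending. The dmb() orders prior writes
 * before the sync is issued. */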
static inline void L2_cacheSync(void)
{
    dmb();
    l2cc->maintenance.cache_sync = 0;
    while (l2cc->maintenance.cache_sync & MAINTENANCE_PENDING);
}

void plat_cleanInvalidateL2Cache(void)
{
    if (!config_set(CONFIG_DEBUG_DISABLE_L2_CACHE)) {
        l2cc->maintenance.clean_way = 0xffff;
        while (l2cc->maintenance.clean_way);
        L2_cacheSync();
        l2cc->maintenance.inv_way = 0xffff;
        while (l2cc->maintenance.inv_way);
        L2_cacheSync();
    }
}

void plat_cleanCache(void)
{
#ifndef CONFIG_DEBUG_DISABLE_L2_CACHE
    /* Clean by way. */
    l2cc->maintenance.clean_way = 0xffff;
    while (l2cc->maintenance.clean_way & 0xffff);
    L2_cacheSync();
#endif /* !CONFIG_DEBUG_DISABLE_L2_CACHE */
}

void plat_cleanL2Range(paddr_t start, paddr_t end)
{
#ifndef CONFIG_DEBUG_DISABLE_L2_CACHE
    /* Documentation specifies this as the only possible line size */
    assert(((l2cc->id.cache_type >> 12) & 0x3) == 0x0);

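    /* The loop below walks cache lines from the one containing 'start' up to
     * and including the one containing 'end', i.e. 'end' is an inclusive
     * bound; the same pattern is used by the invalidate and clean+invalidate
     * range functions below. */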
    for (start = L2_LINE_START(start);
         start != L2_LINE_START(end + L2_LINE_SIZE);
         start += L2_LINE_SIZE) {
        l2cc->maintenance.clean_pa = start;
        /* no need to wait after each clean, the L2C-310 performs it atomically */
    }
    L2_cacheSync();
#endif /* !CONFIG_DEBUG_DISABLE_L2_CACHE */
}

void plat_invalidateL2Range(paddr_t start, paddr_t end)
{
#ifndef CONFIG_DEBUG_DISABLE_L2_CACHE
    /* Documentation specifies this as the only possible line size */
    assert(((l2cc->id.cache_type >> 12) & 0x3) == 0x0);

    /* We assume that, for any partial line, the caller has already done the
     * clean, so we just blindly invalidate all the lines */

    for (start = L2_LINE_START(start);
         start != L2_LINE_START(end + L2_LINE_SIZE);
         start += L2_LINE_SIZE) {
        l2cc->maintenance.inv_pa = start;
        /* do not need to wait for every invalidate as 310 is atomic */
    }
    L2_cacheSync();
#endif /* !CONFIG_DEBUG_DISABLE_L2_CACHE */
}

void plat_cleanInvalidateL2Range(paddr_t start, paddr_t end)
{
#ifndef CONFIG_DEBUG_DISABLE_L2_CACHE
    /* Documentation specifies this as the only possible line size */
    assert(((l2cc->id.cache_type >> 12) & 0x3) == 0x0);

    for (start = L2_LINE_START(start);
         start != L2_LINE_START(end + L2_LINE_SIZE);
         start += L2_LINE_SIZE) {
        /* Work around an erratum by issuing the clean and the invalidate separately */
        l2cc->maintenance.clean_pa = start;
        dmb();
        l2cc->maintenance.inv_pa = start;
        /* do not need to wait for every invalidate as 310 is atomic */
    }
    L2_cacheSync();
#endif /* !CONFIG_DEBUG_DISABLE_L2_CACHE */
}