/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of data cache by set/way.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9, x12: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	ubfx	x2, x6,  #0,  #3	/* x2 <- log2(cache line size)-4 */
	ubfx	x3, x6,  #3, #10	/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #13, #15	/* x4 <- number of cache sets - 1 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	clz	w5, w3			/* bit position of #ways */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

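	/*
	 * The loops below build the set/way operand for dc isw/cisw:
	 * the cache level sits in bits [3:1] (x12, already shifted left
	 * by one), the set number is shifted up by log2(line size)
	 * (x4 << x2), and the way number is shifted into the top bits
	 * of the word (x6 << x5).
	 */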
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data caches by set/way, level by level.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	ubfx	x11, x10, #24, #3	/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

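	/*
	 * clidr_el1 holds a 3-bit cache type field per level (Ctype<n> at
	 * bits [3n+2:3n]): 0 = no cache, 1 = I-cache only, 2 = D-cache only,
	 * 3 = separate I- and D-caches, 4 = unified cache.  Only levels with
	 * a data or unified cache (type >= 2) are processed below.
	 */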
loop_level:
	add	x12, x0, x0, lsl #1	/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

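/*
 * void __asm_flush_dcache_all(void)
 *
 * clean & invalidate all data caches by set/way.
 */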
.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

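/*
 * void __asm_invalidate_dcache_all(void)
 *
 * invalidate all data caches by set/way, without cleaning them first.
 */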
.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
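/*
 * Note: the smallest data cache line size in the system is derived from
 * ctr_el0.DminLine (log2 of the line size in words), i.e. 4 << DminLine
 * bytes, so that every line touched by the range is covered.
 */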
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* x3 <- ctr_el0.DminLine */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start down to a line boundary */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range [start, end)
 *
 * x0: start address
 * x1: end address (exclusive)
 */
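/*
 * Note: dc ivac operates on whole cache lines; if start or end is not
 * cache-line aligned, data sharing a line with the invalidated range may
 * be discarded as well.  Callers are expected to pass aligned buffers.
 */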
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* x3 <- ctr_el0.DminLine */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start down to a line boundary */
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis			/* invalidate I-cache to PoU, Inner Shareable */
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

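/*
 * Weak stubs for the L3 cache hooks below: platforms with an external L3
 * or system cache override them; by default they do nothing and return 0
 * for success.
 */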
.pushsection .text.__asm_invalidate_l3_dcache, "ax"
WEAK(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
WEAK(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
WEAK(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 *
 * x0: new value of ttbr0 at the current exception level
 * x1~x4, x9: clobbered
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Clear CR_M, CR_C and CR_I in SCTLR to disable the MMU and caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection