/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2011
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 */

#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/mmdebug.h>
#include <linux/types.h>
#include <linux/bug.h>

void __xchg_called_with_bad_pointer(void);

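/*
 * __xchg() implements an atomic exchange for 1-, 2-, 4- and 8-byte operands.
 * The 4- and 8-byte cases map directly onto COMPARE AND SWAP (cs/csg).  For
 * 1- and 2-byte operands the address is rounded down to the containing
 * aligned 4-byte word and the new value is shifted into place; the cs loop
 * then retries until the word is swapped without disturbing the bytes
 * outside the masked field.
 */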
static __always_inline unsigned long __xchg(unsigned long x,
					    unsigned long address, int size)
{
	unsigned long old;
	int shift;

	switch (size) {
	case 1:
		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		asm volatile(
			"       l       %0,%1\n"
			"0:     lr      0,%0\n"
			"       nr      0,%3\n"
			"       or      0,%2\n"
			"       cs      %0,0,%1\n"
			"       jl      0b\n"
			: "=&d" (old), "+Q" (*(int *) address)
			: "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
			: "memory", "cc", "0");
		return old >> shift;
	case 2:
		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		asm volatile(
			"       l       %0,%1\n"
			"0:     lr      0,%0\n"
			"       nr      0,%3\n"
			"       or      0,%2\n"
			"       cs      %0,0,%1\n"
			"       jl      0b\n"
			: "=&d" (old), "+Q" (*(int *) address)
			: "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
			: "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"       l       %0,%1\n"
			"0:     cs      %0,%2,%1\n"
			"       jl      0b\n"
			: "=&d" (old), "+Q" (*(int *) address)
			: "d" (x)
			: "memory", "cc");
		return old;
	case 8:
		asm volatile(
			"       lg      %0,%1\n"
			"0:     csg     %0,%2,%1\n"
			"       jl      0b\n"
			: "=&d" (old), "+QS" (*(long *) address)
			: "d" (x)
			: "memory", "cc");
		return old;
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define arch_xchg(ptr, x)						\
({									\
	__typeof__(*(ptr)) __ret;					\
									\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((unsigned long)(x), (unsigned long)(ptr),	\
		       sizeof(*(ptr)));					\
	__ret;								\
})
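
/*
 * Usage sketch (illustrative only; the variable names are hypothetical):
 *
 *	static unsigned int lock_word;
 *	unsigned int prev = arch_xchg(&lock_word, 1);
 *
 * The returned value is the previous contents of *ptr; the read of the old
 * value and the store of the new one are performed as a single atomic
 * operation.
 */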

void __cmpxchg_called_with_bad_pointer(void);

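/*
 * __cmpxchg() implements an atomic compare-and-exchange for 1-, 2-, 4- and
 * 8-byte operands.  The 4- and 8-byte cases are single cs/csg instructions.
 * For 1- and 2-byte operands the containing aligned 4-byte word is operated
 * on: the expected and new values are shifted into place and the cs loop is
 * retried as long as only bytes outside the masked field have changed, so a
 * concurrent update of a neighbouring byte does not cause a spurious failure.
 */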
static __always_inline unsigned long __cmpxchg(unsigned long address,
					       unsigned long old,
					       unsigned long new, int size)
{
	switch (size) {
	case 1: {
		unsigned int prev, shift, mask;

		shift = (3 ^ (address & 3)) << 3;
		address ^= address & 3;
		old = (old & 0xff) << shift;
		new = (new & 0xff) << shift;
		mask = ~(0xff << shift);
		asm volatile(
			"       l       %[prev],%[address]\n"
			"       nr      %[prev],%[mask]\n"
			"       xilf    %[mask],0xffffffff\n"
			"       or      %[new],%[prev]\n"
			"       or      %[prev],%[tmp]\n"
			"0:     lr      %[tmp],%[prev]\n"
			"       cs      %[prev],%[new],%[address]\n"
			"       jnl     1f\n"
			"       xr      %[tmp],%[prev]\n"
			"       xr      %[new],%[tmp]\n"
			"       nr      %[tmp],%[mask]\n"
			"       jz      0b\n"
			"1:"
			: [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (old),
			  [new] "+&d" (new),
			  [mask] "+&d" (mask)
			:: "memory", "cc");
		return prev >> shift;
	}
	case 2: {
		unsigned int prev, shift, mask;

		shift = (2 ^ (address & 2)) << 3;
		address ^= address & 2;
		old = (old & 0xffff) << shift;
		new = (new & 0xffff) << shift;
		mask = ~(0xffff << shift);
		asm volatile(
			"       l       %[prev],%[address]\n"
			"       nr      %[prev],%[mask]\n"
			"       xilf    %[mask],0xffffffff\n"
			"       or      %[new],%[prev]\n"
			"       or      %[prev],%[tmp]\n"
			"0:     lr      %[tmp],%[prev]\n"
			"       cs      %[prev],%[new],%[address]\n"
			"       jnl     1f\n"
			"       xr      %[tmp],%[prev]\n"
			"       xr      %[new],%[tmp]\n"
			"       nr      %[tmp],%[mask]\n"
			"       jz      0b\n"
			"1:"
			: [prev] "=&d" (prev),
			  [address] "+Q" (*(int *)address),
			  [tmp] "+&d" (old),
			  [new] "+&d" (new),
			  [mask] "+&d" (mask)
			:: "memory", "cc");
		return prev >> shift;
	}
	case 4: {
		unsigned int prev = old;

		asm volatile(
			"       cs      %[prev],%[new],%[address]\n"
			: [prev] "+&d" (prev),
			  [address] "+Q" (*(int *)address)
			: [new] "d" (new)
			: "memory", "cc");
		return prev;
	}
	case 8: {
		unsigned long prev = old;

		asm volatile(
			"       csg     %[prev],%[new],%[address]\n"
			: [prev] "+&d" (prev),
			  [address] "+QS" (*(long *)address)
			: [new] "d" (new)
			: "memory", "cc");
		return prev;
	}
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define arch_cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
									\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg((unsigned long)(ptr), (unsigned long)(o),	\
			  (unsigned long)(n), sizeof(*(ptr)));		\
	__ret;								\
})
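
/*
 * Usage sketch (illustrative only; "counter" is hypothetical):
 *
 *	static unsigned int counter;
 *	unsigned int old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg(&counter, old, new) != old);
 *
 * arch_cmpxchg() returns the value found at *ptr; the exchange took place
 * if and only if that value equals the expected value "o".
 */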
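/*
 * The 64-bit and "local" variants alias arch_cmpxchg(): on s390 the same
 * cs/csg based implementation is used in all cases, so there is no cheaper
 * cpu-local form to fall back to.
 */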
#define arch_cmpxchg64		arch_cmpxchg
#define arch_cmpxchg_local	arch_cmpxchg
#define arch_cmpxchg64_local	arch_cmpxchg

#define system_has_cmpxchg_double()	1

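/*
 * __cmpxchg_double() compares and exchanges two adjacent 8-byte values as
 * one 16-byte unit using COMPARE DOUBLE AND SWAP (cdsg).  The old and new
 * values are passed in even/odd register pairs; the condition code is
 * extracted with ipm/srl and the function returns 1 on success, 0 on
 * failure.
 */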
static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
					    unsigned long o1, unsigned long o2,
					    unsigned long n1, unsigned long n2)
{
	union register_pair old = { .even = o1, .odd = o2, };
	union register_pair new = { .even = n1, .odd = n2, };
	int cc;

	asm volatile(
		"       cdsg    %[old],%[new],%[ptr]\n"
		"       ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		: [cc] "=&d" (cc), [old] "+&d" (old.pair)
		: [new] "d" (new.pair),
		  [ptr] "QS" (*(unsigned long *)p1), "Q" (*(unsigned long *)p2)
		: "memory", "cc");
	return !cc;
}

#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)			\
({									\
	typeof(p1) __p1 = (p1);						\
	typeof(p2) __p2 = (p2);						\
									\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\
	__cmpxchg_double((unsigned long)__p1, (unsigned long)__p2,	\
			 (unsigned long)(o1), (unsigned long)(o2),	\
			 (unsigned long)(n1), (unsigned long)(n2));	\
})
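
/*
 * Usage sketch (illustrative only; the struct and variable names are
 * hypothetical).  Both operands must be long sized, contiguous in memory
 * and suitably aligned for cdsg:
 *
 *	struct {
 *		unsigned long lo;
 *		unsigned long hi;
 *	} __aligned(16) pair;
 *
 *	int ok = arch_cmpxchg_double(&pair.lo, &pair.hi,
 *				     old_lo, old_hi, new_lo, new_hi);
 *
 * "ok" is 1 if both words matched the expected values and were replaced
 * atomically, 0 otherwise.
 */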

#endif /* __ASM_CMPXCHG_H */