/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014, 2017, 2019 The Linux Foundation. All rights reserved.
 */

#ifndef __ADRENO_GPU_H__
#define __ADRENO_GPU_H__

#include <linux/firmware.h>
#include <linux/iopoll.h>

#include "msm_gpu.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"

extern bool snapshot_debugbus;
extern bool allow_vram_carveout;

enum {
	ADRENO_FW_PM4 = 0,
	ADRENO_FW_SQE = 0, /* a6xx */
	ADRENO_FW_PFP = 1,
	ADRENO_FW_GMU = 1, /* a6xx */
	ADRENO_FW_GPMU = 2,
	ADRENO_FW_MAX,
};

/*
 * Quirks are or'd into adreno_info::quirks and tested with '&', so each
 * one must be a distinct bit (1 | 2 would collide with a value of 3):
 */
enum adreno_quirks {
	ADRENO_QUIRK_TWO_PASS_USE_WFI = (1 << 0),
	ADRENO_QUIRK_FAULT_DETECT_MASK = (1 << 1),
	ADRENO_QUIRK_LMLOADKILL_DISABLE = (1 << 2),
};

struct adreno_rev {
	uint8_t core;
	uint8_t major;
	uint8_t minor;
	uint8_t patchid;
};

#define ANY_ID 0xff

#define ADRENO_REV(core, major, minor, patchid) \
	((struct adreno_rev){ core, major, minor, patchid })
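
/*
 * Example (illustrative): ADRENO_REV(5, 3, 0, ANY_ID) describes any
 * a530 patch revision; adreno_cmp_rev() treats ANY_ID in its first
 * argument's fields as a wildcard (hence the argument-order note at
 * adreno_is_7c3() below).
 */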

struct adreno_gpu_funcs {
	struct msm_gpu_funcs base;
	int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};

struct adreno_reglist {
	u32 offset;
	u32 value;
};

extern const struct adreno_reglist a630_hwcg[], a640_hwcg[], a650_hwcg[], a660_hwcg[];

struct adreno_info {
	struct adreno_rev rev;
	uint32_t revn;
	const char *name;
	const char *fw[ADRENO_FW_MAX];
	uint32_t gmem;
	enum adreno_quirks quirks;
	struct msm_gpu *(*init)(struct drm_device *dev);
	const char *zapfw;
	u32 inactive_period;
	const struct adreno_reglist *hwcg;
};

const struct adreno_info *adreno_info(struct adreno_rev rev);

struct adreno_gpu {
	struct msm_gpu base;
	struct adreno_rev rev;
	const struct adreno_info *info;
	uint32_t gmem;  /* actual gmem size */
	uint32_t revn;  /* numeric revision name */
	const struct adreno_gpu_funcs *funcs;

	/* interesting register offsets to dump: */
	const unsigned int *registers;

	/*
	 * Are we loading fw from legacy path? Prior to addition
	 * of gpu firmware to linux-firmware, the fw files were
	 * placed in toplevel firmware directory, following qcom's
	 * android kernel. But linux-firmware preferred they be
	 * placed in a 'qcom' subdirectory.
	 *
	 * For backwards compatibility, we try first to load from
	 * the new path, using request_firmware_direct() to avoid
	 * any potential timeout waiting for usermode helper, then
	 * fall back to the old path (also with direct load). And
	 * finally fall back to request_firmware() with the new
	 * path to allow the usermode helper.
	 */
	enum {
		FW_LOCATION_UNKNOWN = 0,
		FW_LOCATION_NEW,     /* /lib/firmware/qcom/$fwfile */
		FW_LOCATION_LEGACY,  /* /lib/firmware/$fwfile */
		FW_LOCATION_HELPER,
	} fwloc;

	/* firmware: */
	const struct firmware *fw[ADRENO_FW_MAX];

	/*
	 * Register offsets are different between some GPUs.
	 * GPU specific offsets will be exported by GPU specific
	 * code (a3xx_gpu.c) and stored in this common location.
	 */
	const unsigned int *reg_offsets;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)

struct adreno_ocmem {
	struct ocmem *ocmem;
	unsigned long base;
	void *hdl;
};

/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
	struct adreno_rev rev;
};

#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)

#define spin_until(X) ({ \
	int __ret = -ETIMEDOUT; \
	unsigned long __t = jiffies + ADRENO_IDLE_TIMEOUT; \
	do { \
		if (X) { \
			__ret = 0; \
			break; \
		} \
	} while (time_before(jiffies, __t)); \
	__ret; \
})
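
/*
 * Usage sketch (illustrative; REG_SOMETHING is a placeholder, not a
 * real register define): poll until a condition holds or
 * ADRENO_IDLE_TIMEOUT expires, then check the -ETIMEDOUT result:
 *
 *	if (spin_until(gpu_read(gpu, REG_SOMETHING) & BIT(0)))
 *		DRM_ERROR("timed out waiting for REG_SOMETHING\n");
 */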

bool adreno_cmp_rev(struct adreno_rev rev1, struct adreno_rev rev2);

static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
{
	return (gpu->revn < 300);
}

static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
{
	return (gpu->revn < 210);
}

static inline bool adreno_is_a225(struct adreno_gpu *gpu)
{
	return gpu->revn == 225;
}

static inline bool adreno_is_a305(struct adreno_gpu *gpu)
{
	return gpu->revn == 305;
}

static inline bool adreno_is_a306(struct adreno_gpu *gpu)
{
	/* yes, 307, because a305c is 306 */
	return gpu->revn == 307;
}

static inline bool adreno_is_a320(struct adreno_gpu *gpu)
{
	return gpu->revn == 320;
}

static inline bool adreno_is_a330(struct adreno_gpu *gpu)
{
	return gpu->revn == 330;
}

static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
{
	return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
}

static inline bool adreno_is_a405(struct adreno_gpu *gpu)
{
	return gpu->revn == 405;
}

static inline bool adreno_is_a420(struct adreno_gpu *gpu)
{
	return gpu->revn == 420;
}

static inline bool adreno_is_a430(struct adreno_gpu *gpu)
{
	return gpu->revn == 430;
}

static inline bool adreno_is_a508(struct adreno_gpu *gpu)
{
	return gpu->revn == 508;
}

static inline bool adreno_is_a509(struct adreno_gpu *gpu)
{
	return gpu->revn == 509;
}

static inline bool adreno_is_a510(struct adreno_gpu *gpu)
{
	return gpu->revn == 510;
}

static inline bool adreno_is_a512(struct adreno_gpu *gpu)
{
	return gpu->revn == 512;
}

static inline bool adreno_is_a530(struct adreno_gpu *gpu)
{
	return gpu->revn == 530;
}

static inline bool adreno_is_a540(struct adreno_gpu *gpu)
{
	return gpu->revn == 540;
}

static inline bool adreno_is_a618(struct adreno_gpu *gpu)
{
	return gpu->revn == 618;
}

static inline bool adreno_is_a630(struct adreno_gpu *gpu)
{
	return gpu->revn == 630;
}

static inline bool adreno_is_a640_family(struct adreno_gpu *gpu)
{
	return (gpu->revn == 640) || (gpu->revn == 680);
}

static inline bool adreno_is_a650(struct adreno_gpu *gpu)
{
	return gpu->revn == 650;
}

static inline bool adreno_is_7c3(struct adreno_gpu *gpu)
{
	/* The order of args is important here to handle ANY_ID correctly */
	return adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), gpu->rev);
}

static inline bool adreno_is_a660(struct adreno_gpu *gpu)
{
	return gpu->revn == 660;
}

static inline bool adreno_is_a660_family(struct adreno_gpu *gpu)
{
	return adreno_is_a660(gpu) || adreno_is_7c3(gpu);
}

/* check for a650, a660, or any derivatives */
static inline bool adreno_is_a650_family(struct adreno_gpu *gpu)
{
	return gpu->revn == 650 || gpu->revn == 620 ||
		adreno_is_a660_family(gpu);
}

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
		const char *fwname);
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);

int adreno_gpu_ocmem_init(struct device *dev, struct adreno_gpu *adreno_gpu,
		struct adreno_ocmem *ocmem);
void adreno_gpu_ocmem_cleanup(struct adreno_ocmem *ocmem);

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
		int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
int adreno_load_fw(struct adreno_gpu *adreno_gpu);

void adreno_gpu_state_destroy(struct msm_gpu_state *state);

int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);

/*
 * Common helper function to initialize the default address space for arm-smmu
 * attached targets
 */
struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev);

void adreno_set_llc_attributes(struct iommu_domain *iommu);

/*
 * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
 * out of secure mode
 */
int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);

/* ringbuffer helpers (the parts that are adreno specific) */

static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}

/* no-op packet: */
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
	adreno_wait_ring(ring, 1);
	OUT_RING(ring, CP_TYPE2_PKT);
}

static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt+1);
	OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}

static inline u32 PM4_PARITY(u32 val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}
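
/*
 * Note (editorial): the XOR chain folds all eight nibbles of 'val' into
 * one; 0x9669 is then a 16-entry lookup table whose bit i is set when i
 * has an even number of 1 bits, so PM4_PARITY() returns the bit that
 * gives 'val' odd overall parity.
 */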

/* Maximum number of values that can be executed for one opcode */
#define TYPE4_MAX_PAYLOAD 127

#define PKT4(_reg, _cnt) \
	(CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
	 (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))

static inline void
OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, PKT4(regindx, cnt));
}

static inline void
OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
	adreno_wait_ring(ring, cnt + 1);
	OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
		((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
}
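
/*
 * Usage sketch (illustrative): emit a type-7 packet with one payload
 * dword; the helper reserves ring space for header plus payload before
 * writing the header:
 *
 *	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 *	OUT_RING(ring, CACHE_FLUSH_TS);
 */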

struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);

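/* Current write pointer, as a dword index into the ringbuffer: */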
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
	return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
}

/*
 * Given a register and a count, return a value to program into
 * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
 * registers starting at _reg.
 *
 * The register base needs to be a multiple of the length. If it is not, the
 * hardware will quietly mask off the bits for you and shift the size. For
 * example, if you intend the protection to start at 0x07 for a length of 4
 * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
 * expose registers you intended to protect!
 */
#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
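
/*
 * Worked example (illustrative): ADRENO_PROTECT_RW(0x880, 32) sets the
 * read-protect (bit 30) and write-protect (bit 29) bits, encodes
 * ilog2(32) = 5 as the range size in bits 24-28, and places the shifted
 * base (0x880 << 2) in the low 20 bits.
 */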

/*
 * Same as above, but allow reads over the range. For areas of mixed use (such
 * as performance counters) this allows us to protect a much larger range with a
 * single register
 */
#define ADRENO_PROTECT_RDONLY(_reg, _len) \
	((1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))


#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
	readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
		interval, timeout)
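
/*
 * Usage sketch (illustrative; REG_SOMETHING is a placeholder): wait up
 * to 100us for a register bit to clear, polling every 1us:
 *
 *	u32 val;
 *	int ret = gpu_poll_timeout(gpu, REG_SOMETHING, val,
 *			!(val & BIT(0)), 1, 100);
 */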

#endif /* __ADRENO_GPU_H__ */