/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
#include "vcn_v1_0.h"

#define mmUVD_RBC_XX_IB_REG_CHECK_1_0			0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX		1
#define mmUVD_REG_XX_MASK_1_0				0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX			1

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);

/**
 * vcn_v1_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	jpeg_v1_0_early_init(handle);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					&adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	/* Override the work func */
	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	if (amdgpu_vcnfw_log) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

		fw_shared->present_flag_0 = 0;
		amdgpu_vcn_fwlog_init(adev->vcn.inst);
	}

	r = jpeg_v1_0_sw_init(handle);

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	jpeg_v1_0_sw_fini(handle);

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

	ring = &adev->jpeg.inst->ring_dec;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool idle_work_unexecuted;

	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (idle_work_unexecuted) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
	}

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
}

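/**
 * vcn_v1_0_mc_resume_dpg_mode - memory controller programming for DPG mode
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets, programmed through
 * the DPG indirect register interface
 */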
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			     0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			     0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			     0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
			     0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
			     0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on SUVD clock gating */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

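/**
 * vcn_v1_0_clock_gating_dpg_mode - set VCN clock gating for DPG mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: SRAM select for the DPG indirect register writes
 *
 * Program the JPEG, UVD and SUVD clock gating controls through the
 * DPG indirect register interface
 */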
static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK |
		UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}

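/**
 * vcn_1_0_disable_static_power_gating - power up VCN hardware blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Program the PGFSM to power up the UVD sub-blocks and wait for the
 * power status to confirm they are on
 */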
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

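/**
 * vcn_1_0_enable_static_power_gating - power down VCN hardware blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Program the PGFSM to power down the UVD sub-blocks when power
 * gating is supported, and wait for the power status to confirm it
 */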
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
	}
}

/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	jpeg_v1_0_start(adev, 0);

	return 0;
}

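/**
 * vcn_v1_0_start_dpg_mode - start VCN block in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block with dynamic power gating enabled
 */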
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
			0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
			UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
			UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	jpeg_v1_0_start(adev, 1);

	return 0;
}

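/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Start the VCN block in either SPG or DPG mode depending on the
 * power gating flags
 */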
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
		vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
}

/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* stall UMC channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}

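/**
 * vcn_v1_0_stop_dpg_mode - stop VCN block in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the rings to drain and disable dynamic power gating mode
 */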
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

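/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block in either SPG or DPG mode depending on the
 * power gating flags
 */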
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}

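/**
 * vcn_v1_0_pause_dpg_mode - pause/unpause dynamic power gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance index
 * @new_state: requested pause state for the non-JPEG and JPEG paths
 *
 * Pause or unpause the non-JPEG and JPEG paths of dynamic power
 * gating, reprogramming the ring registers when entering the paused
 * state
 */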
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,
			adev->vcn.inst[inst_idx].pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,
			adev->vcn.inst[inst_idx].pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* Make sure JRBC Snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
						UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->jpeg.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
						UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
						UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
						lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
						upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
						UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}

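/**
 * vcn_v1_0_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true if the VCN block reports idle status
 */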
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

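/**
 * vcn_v1_0_wait_for_idle - wait for VCN block to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for UVD_STATUS to report idle, returns 0 on success or an
 * error on timeout
 */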
static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE);

	return ret;
}

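/**
 * vcn_v1_0_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to set
 *
 * Enable or disable VCN clock gating; the block must be idle before
 * gating can be enabled
 */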
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

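/**
 * vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: mask to apply to the register value
 *
 * Emit commands that make the decode firmware poll a register until
 * the masked value matches
 */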
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

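/**
 * vcn_v1_0_dec_ring_emit_vm_flush - emit a VM flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM id to flush
 * @pd_addr: page directory address
 *
 * Emit a GPU TLB flush and wait for the page table base address
 * register write to land
 */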
vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned vmid,uint64_t pd_addr)1548 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
1549 unsigned vmid, uint64_t pd_addr)
1550 {
1551 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
1552 uint32_t data0, data1, mask;
1553
1554 pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1555
1556 /* wait for register write */
1557 data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
1558 data1 = lower_32_bits(pd_addr);
1559 mask = 0xffffffff;
1560 vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
1561 }

static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address at which to write the fence sequence number
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}
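
/*
 * For reference: the enc fence + trap sequence is five dwords (FENCE,
 * address low/high, seq, TRAP), matching the "5 + 5" fence budget in
 * vcn_v1_0_enc_ring_vm_funcs.emit_frame_size below.
 */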

static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
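
/*
 * For reference: the enc IB execute is five dwords (IB command, vmid,
 * address low/high, size), matching .emit_ib_size = 5 in
 * vcn_v1_0_enc_ring_vm_funcs below.
 */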

static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124: /* VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT */
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119: /* VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE */
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120: /* VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY */
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	/*
	 * A dec NOP is a two-dword PACKET0/value pair, so both the write
	 * pointer and the requested count must stay even.
	 */
	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
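
/*
 * For reference: within this file, the gate transition is requested from
 * the idle worker below and the ungate transition from
 * vcn_v1_0_set_pg_for_begin_use(), both through the same entry point,
 * e.g.:
 *
 *	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
 *					       AMD_PG_STATE_UNGATE);
 */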

static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
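
/*
 * A sketch of the power lifecycle tying the idle worker above to the
 * begin_use/end_use helpers below: every submission path calls begin_use,
 * which cancels the idle worker and ungates/unpauses as needed; end_use
 * re-arms the worker, which gates the block again once all dec/enc/jpeg
 * fences have signalled:
 *
 *	begin_use -> cancel_delayed_work_sync() + ungate/unpause
 *	end_use   -> schedule_delayed_work(VCN_IDLE_TIMEOUT)
 *	idle work -> gate when amdgpu_fence_count_emitted() == 0
 */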

static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);

	if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
		DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");

	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}

void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
{
	struct amdgpu_device *adev = ring->adev;

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0, i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}
}

void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

/*
 * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
 * CHIP_RAVEN series ASICs. Move such a GTT TMZ buffer to the VRAM domain
 * before command submission as a workaround.
 */
static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
				struct amdgpu_job *job,
				uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	bo = mapping->bo_va->base.bo;
	if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
		return 0;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
		return r;
	}

	return r;
}

static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	int i, r;

	if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
			r = vcn_v1_0_validate_bo(p, job,
						 ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}
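
/*
 * Illustrative example (hypothetical dwords inside a secure dec IB) of
 * the pattern the scan above matches: a message-buffer address arrives
 * as a DATA0/DATA1 register-write pair followed by a CMD write, e.g.
 *
 *	PACKET0(data0), lower_32_bits(msg_addr),
 *	PACKET0(data1), upper_32_bits(msg_addr),
 *	PACKET0(cmd),   <decode command>
 *
 * at which point the reassembled 64-bit address is handed to
 * vcn_v1_0_validate_bo().
 */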

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};