/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
#include "umc_v8_7.h"

#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "gfxhub_v2_1.h"
#include "mmhub_v2_0.h"
#include "mmhub_v2_3.h"
#include "athub_v2_0.h"
#include "athub_v2_1.h"

#if 0
static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};
#endif

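/*
 * No per-state register programming is needed for the ECC interrupt on
 * GMC v10, so this callback only has to report success.
 */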
static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *src,
					 unsigned type,
					 enum amdgpu_interrupt_state state)
{
	return 0;
}

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
		/* GFX HUB */
		amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	bool write_fault = !!(entry->src_data[1] & 0x20);
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	struct amdgpu_task_info task_info;
	uint32_t status = 0;
	u64 addr;

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (entry->ih == &adev->irq.ih) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
			return 1;
	}

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (!printk_ratelimit())
		return 0;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
		"for process %s pid %d thread %s pid %d)\n",
		entry->vmid_src ? "mmhub" : "gfxhub",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	if (!amdgpu_sriov_vf(adev))
		hub->vmhub_funcs->print_l2_protection_fault_status(adev,
								   status);

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v10_0_ecc_funcs = {
	.set = gmc_v10_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gmc.ecc_irq.num_types = 1;
		adev->gmc.ecc_irq.funcs = &gmc_v10_0_ecc_funcs;
	}
}

/**
 * gmc_v10_0_use_invalidate_semaphore - judge whether to use semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * Decide whether the invalidation semaphore workaround is needed for @vmhub.
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}

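/*
 * Read the ATC_VMID<vmid>_PASID_MAPPING register: store the mapped PASID in
 * @p_pasid and return whether the mapping is currently valid.
 */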
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 maps the physical GPU address space used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

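/*
 * Flush the TLB of one VM hub for @vmid by writing the invalidation request
 * to the GART invalidation engine (engine 17) and polling for the ACK.
 */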
static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;
	unsigned char hub_ip = 0;

	hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
		 GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle, so acquire a semaphore before the invalidation
	 * and release it afterwards to avoid entering a power-gated state
	 * while the invalidation is in flight.
	 */

	/* TODO: the semaphore workaround still needs debugging for the GFXHUB as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
						hub->eng_distance * eng, hub_ip);

			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
			  hub->eng_distance * eng,
			  inv_req, hub_ip);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if ((vmhub == AMDGPU_GFXHUB_0) &&
	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
		RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
				  hub->eng_distance * eng, hub_ip);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);

		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: the semaphore workaround still needs debugging for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 means semaphore release
		 */
		WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
				  hub->eng_distance * eng, 0, hub_ip);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub);
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: vmhub type
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;

	int r;

	/* flush hdp cache */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* During SRIOV runtime the driver shouldn't access registers through
	 * MMIO; use the KIQ to do the VM invalidation instead.
	 */
	if (adev->gfx.kiq.ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_sem)) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);

		up_read(&adev->reset_sem);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    amdgpu_in_reset(adev) ||
	    !ring->sched.ready) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
				     &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: Used with PACKET3_INVALIDATE_TLBS_ALL_HUB()
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					      pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}

	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {

		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
								&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
								i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
							AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

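/*
 * Emit the VM flush on a ring: program the page directory base for @vmid and
 * issue the invalidation request, wrapped in a semaphore acquire/release when
 * the power-gating workaround is in use.
 */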
static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The GPU may lose the GPUVM invalidate acknowledge state across a
	 * power-gating cycle, so acquire a semaphore before the invalidation
	 * and release it afterwards to avoid entering a power-gated state
	 * while the invalidation is in flight.
	 */

	/* TODO: the semaphore workaround still needs debugging for the GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: the semaphore workaround still needs debugging for the GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}

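/*
 * Emit a write that updates the IH VMID-to-PASID LUT for @vmid, using the
 * GFX or MM variant of the LUT depending on which hub the ring belongs to.
 */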
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 63:59 reserved
 * 58 reserved and for sienna_cichlid is used for MALL noalloc
 * 57 reserved
 * 56 F
 * 55 L
 * 54 reserved
 * 53:52 SW
 * 51 T
 * 50:48 mtype
 * 47:12 4k physical page base address
 * 11:7 fragment
 * 6 write
 * 5 read
 * 4 exe
 * 3 Z
 * 2 snooped
 * 1 system
 * 0 valid
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 58:55 reserved
 * 54 P
 * 53:48 reserved
 * 47:6 physical base address of PD or PTE
 * 5:3 reserved
 * 2 C
 * 1 system
 * 0 valid
 */

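/*
 * Translate an AMDGPU_VM_MTYPE_* flag into the NV10 PTE mtype field
 * (bits 50:48 in the PTE layout above), defaulting to NC.
 */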
static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

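/*
 * Convert a PDE address from the MC address space into the physical layout
 * the hardware expects, and set the block-fragment-size or translate-further
 * bits when 4-level translation is enabled.
 */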
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}

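/*
 * Fill in the per-mapping PTE flags: carry over the executable and mtype
 * bits from the mapping, and apply the special PRT bits (snooped, log,
 * system, not valid) for PRT mappings.
 */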
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

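/*
 * Return the size of the framebuffer reserved by the VBIOS: the fixed VGA
 * allocation when VGA mode is enabled, otherwise the active surface size
 * (viewport height * pitch * 4 bytes per pixel).
 */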
static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	return size;
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 7, 0):
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
		adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
		adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
		break;
	default:
		break;
	}
}

static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 4, 0):
		adev->mmhub.funcs = &mmhub_v2_3_funcs;
		break;
	default:
		adev->mmhub.funcs = &mmhub_v2_0_funcs;
		break;
	}
}

static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 3):
		adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
		break;
	default:
		adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
		break;
	}
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_mmhub_funcs(adev);
	gmc_v10_0_set_gfxhub_funcs(adev);
	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);
	gmc_v10_0_set_umc_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

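/*
 * Place VRAM, GART and the AGP aperture in the GPU's physical address space,
 * accounting for the XGMI offset of this node where applicable.
 */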
static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = adev->gfxhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);
	amdgpu_gmc_agp_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* add the xgmi offset of the physical node */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}


/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports the VRAM size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1)
		adev->gmc.gart_size = 512ULL << 20;
	else
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

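/*
 * Set up the GART table: initialize the common GART structure, size the
 * table (8 bytes per GPU page) and allocate it in VRAM with uncached,
 * executable PTE flags.
 */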
static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	if ((adev->flags & AMD_IS_APU) && amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
		adev->gmc.vram_width = 64;
	} else if (amdgpu_emu_mode == 1) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6;
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
						      &vram_width, &vram_type, &vram_vendor);
		adev->gmc.vram_width = vram_width;

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 3):
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page table support, set the VM size to
		 * 256TB (48 bits), the maximum for Navi10/Navi14/Navi12,
		 * with a block size of 512 (9 bits).
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);

	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);
	amdgpu_gmc_get_reserved_allocation(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->init_registers(adev);

	/* Flush HDP after it is initialized */
	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	adev->mmhub.funcs->set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	/*
	 * The harvestable groups in gc_utcl2 need to be programmed before any
	 * GFX block register setup within GMC, or the system hangs when
	 * harvesting SAs.
	 */
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
		adev->gfxhub.funcs->utcl2_harvest(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = adev->mmhub.funcs->set_clockgating(adev, state);
	if (r)
		return r;

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		return athub_v2_1_set_clockgating(adev, state);
	else
		return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
		athub_v2_1_get_clockgating(adev, flags);
	else
		athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};