1 /*
2 * Copyright 2018 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30
31 #include "soc15.h"
32 #include "gfx_v9_0.h"
33 #include "gmc_v9_0.h"
34 #include "df_v1_7.h"
35 #include "df_v3_6.h"
36 #include "nbio_v6_1.h"
37 #include "nbio_v7_0.h"
38 #include "nbio_v7_4.h"
39 #include "hdp_v4_0.h"
40 #include "vega10_ih.h"
41 #include "vega20_ih.h"
42 #include "sdma_v4_0.h"
43 #include "uvd_v7_0.h"
44 #include "vce_v4_0.h"
45 #include "vcn_v1_0.h"
46 #include "vcn_v2_5.h"
47 #include "jpeg_v2_5.h"
48 #include "smuio_v9_0.h"
49 #include "gmc_v10_0.h"
50 #include "gfxhub_v2_0.h"
51 #include "mmhub_v2_0.h"
52 #include "nbio_v2_3.h"
53 #include "nbio_v7_2.h"
54 #include "hdp_v5_0.h"
55 #include "nv.h"
56 #include "navi10_ih.h"
57 #include "gfx_v10_0.h"
58 #include "sdma_v5_0.h"
59 #include "sdma_v5_2.h"
60 #include "vcn_v2_0.h"
61 #include "jpeg_v2_0.h"
62 #include "vcn_v3_0.h"
63 #include "jpeg_v3_0.h"
64 #include "amdgpu_vkms.h"
65 #include "mes_v10_1.h"
66 #include "smuio_v11_0.h"
67 #include "smuio_v11_0_6.h"
68 #include "smuio_v13_0.h"
69
70 MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
71
72 #define mmRCC_CONFIG_MEMSIZE 0xde3
73 #define mmMM_INDEX 0x0
74 #define mmMM_INDEX_HI 0x6
75 #define mmMM_DATA 0x1
76
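/* Printable names for the hardware IDs found in the discovery table,
 * indexed by HW_ID.  Only used for the debug output below.
 */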
77 static const char *hw_id_names[HW_ID_MAX] = {
78 [MP1_HWID] = "MP1",
79 [MP2_HWID] = "MP2",
80 [THM_HWID] = "THM",
81 [SMUIO_HWID] = "SMUIO",
82 [FUSE_HWID] = "FUSE",
83 [CLKA_HWID] = "CLKA",
84 [PWR_HWID] = "PWR",
85 [GC_HWID] = "GC",
86 [UVD_HWID] = "UVD",
87 [AUDIO_AZ_HWID] = "AUDIO_AZ",
88 [ACP_HWID] = "ACP",
89 [DCI_HWID] = "DCI",
90 [DMU_HWID] = "DMU",
91 [DCO_HWID] = "DCO",
92 [DIO_HWID] = "DIO",
93 [XDMA_HWID] = "XDMA",
94 [DCEAZ_HWID] = "DCEAZ",
95 [DAZ_HWID] = "DAZ",
96 [SDPMUX_HWID] = "SDPMUX",
97 [NTB_HWID] = "NTB",
98 [IOHC_HWID] = "IOHC",
99 [L2IMU_HWID] = "L2IMU",
100 [VCE_HWID] = "VCE",
101 [MMHUB_HWID] = "MMHUB",
102 [ATHUB_HWID] = "ATHUB",
103 [DBGU_NBIO_HWID] = "DBGU_NBIO",
104 [DFX_HWID] = "DFX",
105 [DBGU0_HWID] = "DBGU0",
106 [DBGU1_HWID] = "DBGU1",
107 [OSSSYS_HWID] = "OSSSYS",
108 [HDP_HWID] = "HDP",
109 [SDMA0_HWID] = "SDMA0",
110 [SDMA1_HWID] = "SDMA1",
111 [SDMA2_HWID] = "SDMA2",
112 [SDMA3_HWID] = "SDMA3",
113 [ISP_HWID] = "ISP",
114 [DBGU_IO_HWID] = "DBGU_IO",
115 [DF_HWID] = "DF",
116 [CLKB_HWID] = "CLKB",
117 [FCH_HWID] = "FCH",
118 [DFX_DAP_HWID] = "DFX_DAP",
119 [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
120 [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
121 [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
122 [L1IMU3_HWID] = "L1IMU3",
123 [L1IMU4_HWID] = "L1IMU4",
124 [L1IMU5_HWID] = "L1IMU5",
125 [L1IMU6_HWID] = "L1IMU6",
126 [L1IMU7_HWID] = "L1IMU7",
127 [L1IMU8_HWID] = "L1IMU8",
128 [L1IMU9_HWID] = "L1IMU9",
129 [L1IMU10_HWID] = "L1IMU10",
130 [L1IMU11_HWID] = "L1IMU11",
131 [L1IMU12_HWID] = "L1IMU12",
132 [L1IMU13_HWID] = "L1IMU13",
133 [L1IMU14_HWID] = "L1IMU14",
134 [L1IMU15_HWID] = "L1IMU15",
135 [WAFLC_HWID] = "WAFLC",
136 [FCH_USB_PD_HWID] = "FCH_USB_PD",
137 [PCIE_HWID] = "PCIE",
138 [PCS_HWID] = "PCS",
139 [DDCL_HWID] = "DDCL",
140 [SST_HWID] = "SST",
141 [IOAGR_HWID] = "IOAGR",
142 [NBIF_HWID] = "NBIF",
143 [IOAPIC_HWID] = "IOAPIC",
144 [SYSTEMHUB_HWID] = "SYSTEMHUB",
145 [NTBCCP_HWID] = "NTBCCP",
146 [UMC_HWID] = "UMC",
147 [SATA_HWID] = "SATA",
148 [USB_HWID] = "USB",
149 [CCXSEC_HWID] = "CCXSEC",
150 [XGMI_HWID] = "XGMI",
151 [XGBE_HWID] = "XGBE",
152 [MP0_HWID] = "MP0",
153 };
154
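/* Map the driver's internal HWIP enumeration onto the HW_IDs used by the
 * discovery table, so discovered register bases and versions can be stored
 * per HWIP.
 */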
155 static int hw_id_map[MAX_HWIP] = {
156 [GC_HWIP] = GC_HWID,
157 [HDP_HWIP] = HDP_HWID,
158 [SDMA0_HWIP] = SDMA0_HWID,
159 [SDMA1_HWIP] = SDMA1_HWID,
160 [SDMA2_HWIP] = SDMA2_HWID,
161 [SDMA3_HWIP] = SDMA3_HWID,
162 [MMHUB_HWIP] = MMHUB_HWID,
163 [ATHUB_HWIP] = ATHUB_HWID,
164 [NBIO_HWIP] = NBIF_HWID,
165 [MP0_HWIP] = MP0_HWID,
166 [MP1_HWIP] = MP1_HWID,
167 [UVD_HWIP] = UVD_HWID,
168 [VCE_HWIP] = VCE_HWID,
169 [DF_HWIP] = DF_HWID,
170 [DCE_HWIP] = DMU_HWID,
171 [OSSSYS_HWIP] = OSSSYS_HWID,
172 [SMUIO_HWIP] = SMUIO_HWID,
173 [PWR_HWIP] = PWR_HWID,
174 [NBIF_HWIP] = NBIF_HWID,
175 [THM_HWIP] = THM_HWID,
176 [CLK_HWIP] = CLKA_HWID,
177 [UMC_HWIP] = UMC_HWID,
178 [XGMI_HWIP] = XGMI_HWID,
179 [DCI_HWIP] = DCI_HWID,
180 };
181
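/* The IP discovery binary is written by platform firmware into a reserved
 * region near the top of VRAM.  RCC_CONFIG_MEMSIZE reports the VRAM size in
 * MB (hence the << 20), and the blob starts DISCOVERY_TMR_OFFSET bytes below
 * the end of VRAM.
 */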
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
183 {
184 uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
185 uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
186
187 amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
188 adev->mman.discovery_tmr_size, false);
189 return 0;
190 }
191
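/* The discovery tables are protected by a simple checksum: a byte-wise sum
 * of the covered region, truncated to 16 bits, which must match the value
 * stored in the corresponding header.
 */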
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
193 {
194 uint16_t checksum = 0;
195 int i;
196
197 for (i = 0; i < size; i++)
198 checksum += data[i];
199
200 return checksum;
201 }
202
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
205 {
206 return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
207 }
208
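/* Load and validate the discovery blob: allocate a DISCOVERY_TMR_SIZE buffer,
 * fill it either from the optional amdgpu/ip_discovery.bin firmware file
 * (amdgpu_discovery=2) or from VRAM, then check the binary signature and
 * checksum as well as the checksums of the IP discovery and GC tables.
 */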
static int amdgpu_discovery_init(struct amdgpu_device *adev)
210 {
211 struct table_info *info;
212 struct binary_header *bhdr;
213 struct ip_discovery_header *ihdr;
214 struct gpu_info_header *ghdr;
215 const struct firmware *fw;
216 uint16_t offset;
217 uint16_t size;
218 uint16_t checksum;
219 int r;
220
221 adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
222 adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
223 if (!adev->mman.discovery_bin)
224 return -ENOMEM;
225
226 if (amdgpu_discovery == 2) {
227 r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
228 if (r)
229 goto get_from_vram;
230 dev_info(adev->dev, "Using IP discovery from file\n");
231 memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
232 adev->mman.discovery_tmr_size);
233 release_firmware(fw);
234 } else {
235 get_from_vram:
236 r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
237 if (r) {
238 DRM_ERROR("failed to read ip discovery binary\n");
239 goto out;
240 }
241 }
242
243 bhdr = (struct binary_header *)adev->mman.discovery_bin;
244
245 if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
246 DRM_ERROR("invalid ip discovery binary signature\n");
247 r = -EINVAL;
248 goto out;
249 }
250
251 offset = offsetof(struct binary_header, binary_checksum) +
252 sizeof(bhdr->binary_checksum);
253 size = le16_to_cpu(bhdr->binary_size) - offset;
254 checksum = le16_to_cpu(bhdr->binary_checksum);
255
256 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
257 size, checksum)) {
258 DRM_ERROR("invalid ip discovery binary checksum\n");
259 r = -EINVAL;
260 goto out;
261 }
262
263 info = &bhdr->table_list[IP_DISCOVERY];
264 offset = le16_to_cpu(info->offset);
265 checksum = le16_to_cpu(info->checksum);
266 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
267
268 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
269 DRM_ERROR("invalid ip discovery data table signature\n");
270 r = -EINVAL;
271 goto out;
272 }
273
274 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
275 le16_to_cpu(ihdr->size), checksum)) {
276 DRM_ERROR("invalid ip discovery data table checksum\n");
277 r = -EINVAL;
278 goto out;
279 }
280
281 info = &bhdr->table_list[GC];
282 offset = le16_to_cpu(info->offset);
283 checksum = le16_to_cpu(info->checksum);
284 ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
285
286 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
287 le32_to_cpu(ghdr->size), checksum)) {
288 DRM_ERROR("invalid gc data table checksum\n");
289 r = -EINVAL;
290 goto out;
291 }
292
293 return 0;
294
295 out:
296 kfree(adev->mman.discovery_bin);
297 adev->mman.discovery_bin = NULL;
298
299 return r;
300 }
301
void amdgpu_discovery_fini(struct amdgpu_device *adev)
303 {
304 kfree(adev->mman.discovery_bin);
305 adev->mman.discovery_bin = NULL;
306 }
307
static int amdgpu_discovery_validate_ip(const struct ip *ip)
309 {
310 if (ip->number_instance >= HWIP_MAX_INSTANCE) {
311 DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
312 ip->number_instance);
313 return -EINVAL;
314 }
315 if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
316 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
317 le16_to_cpu(ip->hw_id));
318 return -EINVAL;
319 }
320
321 return 0;
322 }
323
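/* Walk every die and every IP instance in the discovery table, record the
 * per-instance register base addresses and IP versions in adev->reg_offset
 * and adev->ip_versions, and count the VCN/SDMA instances found.  The bases
 * recorded here are what the SOC15-style register offset macros are expected
 * to index at runtime (roughly adev->reg_offset[hwip][inst][seg] + reg).
 */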
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
325 {
326 struct binary_header *bhdr;
327 struct ip_discovery_header *ihdr;
328 struct die_header *dhdr;
329 struct ip *ip;
330 uint16_t die_offset;
331 uint16_t ip_offset;
332 uint16_t num_dies;
333 uint16_t num_ips;
334 uint8_t num_base_address;
335 int hw_ip;
336 int i, j, k;
337 int r;
338
339 r = amdgpu_discovery_init(adev);
340 if (r) {
341 DRM_ERROR("amdgpu_discovery_init failed\n");
342 return r;
343 }
344
345 bhdr = (struct binary_header *)adev->mman.discovery_bin;
346 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
347 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
348 num_dies = le16_to_cpu(ihdr->num_dies);
349
350 DRM_DEBUG("number of dies: %d\n", num_dies);
351
352 for (i = 0; i < num_dies; i++) {
353 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
354 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
355 num_ips = le16_to_cpu(dhdr->num_ips);
356 ip_offset = die_offset + sizeof(*dhdr);
357
358 if (le16_to_cpu(dhdr->die_id) != i) {
359 DRM_ERROR("invalid die id %d, expected %d\n",
360 le16_to_cpu(dhdr->die_id), i);
361 return -EINVAL;
362 }
363
364 DRM_DEBUG("number of hardware IPs on die%d: %d\n",
365 le16_to_cpu(dhdr->die_id), num_ips);
366
367 for (j = 0; j < num_ips; j++) {
368 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
369
370 if (amdgpu_discovery_validate_ip(ip))
371 goto next_ip;
372
373 num_base_address = ip->num_base_address;
374
375 DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
376 hw_id_names[le16_to_cpu(ip->hw_id)],
377 le16_to_cpu(ip->hw_id),
378 ip->number_instance,
379 ip->major, ip->minor,
380 ip->revision);
381
382 if (le16_to_cpu(ip->hw_id) == VCN_HWID)
383 adev->vcn.num_vcn_inst++;
384 if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
385 le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
386 le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
387 le16_to_cpu(ip->hw_id) == SDMA3_HWID)
388 adev->sdma.num_instances++;
389
390 for (k = 0; k < num_base_address; k++) {
391 /*
392 * convert the endianness of base addresses in place,
393 * so that we don't need to convert them when accessing adev->reg_offset.
394 */
395 ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
396 DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
397 }
398
399 for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
400 if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
401 DRM_DEBUG("set register base offset for %s\n",
402 hw_id_names[le16_to_cpu(ip->hw_id)]);
403 adev->reg_offset[hw_ip][ip->number_instance] =
404 ip->base_address;
405 /* Instance support is somewhat inconsistent.
406 * SDMA is a good example. Sienna cichlid has 4 total
407 * SDMA instances, each enumerated separately (HWIDs
408 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
409 * but they are enumerated as multiple instances of the
410 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
411 * example. On most chips there are multiple instances
412 * with the same HWID.
413 */
414 adev->ip_versions[hw_ip][ip->number_instance] =
415 IP_VERSION(ip->major, ip->minor, ip->revision);
416 }
417 }
418
419 next_ip:
420 ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
421 }
422 }
423
424 return 0;
425 }
426
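/* Linear search of the discovery table for a given hw_id/instance pair;
 * returns 0 and fills in major/minor/revision on a match, -EINVAL if the IP
 * is not present or discovery has not been initialized.  The VCN wrapper
 * below, for example, calls this with VCN_HWID and the VCN instance number.
 */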
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
				    int *major, int *minor, int *revision)
429 {
430 struct binary_header *bhdr;
431 struct ip_discovery_header *ihdr;
432 struct die_header *dhdr;
433 struct ip *ip;
434 uint16_t die_offset;
435 uint16_t ip_offset;
436 uint16_t num_dies;
437 uint16_t num_ips;
438 int i, j;
439
440 if (!adev->mman.discovery_bin) {
441 DRM_ERROR("ip discovery uninitialized\n");
442 return -EINVAL;
443 }
444
445 bhdr = (struct binary_header *)adev->mman.discovery_bin;
446 ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
447 le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
448 num_dies = le16_to_cpu(ihdr->num_dies);
449
450 for (i = 0; i < num_dies; i++) {
451 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
452 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
453 num_ips = le16_to_cpu(dhdr->num_ips);
454 ip_offset = die_offset + sizeof(*dhdr);
455
456 for (j = 0; j < num_ips; j++) {
457 ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
458
459 if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
460 if (major)
461 *major = ip->major;
462 if (minor)
463 *minor = ip->minor;
464 if (revision)
465 *revision = ip->revision;
466 return 0;
467 }
468 ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
469 }
470 }
471
472 return -EINVAL;
473 }
474
475
int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
				     int *major, int *minor, int *revision)
478 {
479 return amdgpu_discovery_get_ip_version(adev, VCN_HWID,
480 vcn_instance, major, minor, revision);
481 }
482
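/* Apply the harvest table: mark harvested VCN instances and harvested display
 * (DMU), then work around tables that are known to be incomplete, including
 * some Navy Flounder parts and a few SKUs identified by PCI device/revision
 * ID that ship with VCN/JPEG fused off.
 */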
void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
484 {
485 struct binary_header *bhdr;
486 struct harvest_table *harvest_info;
487 int i, vcn_harvest_count = 0;
488
489 bhdr = (struct binary_header *)adev->mman.discovery_bin;
490 harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
491 le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
492
493 for (i = 0; i < 32; i++) {
494 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
495 break;
496
497 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
498 case VCN_HWID:
499 vcn_harvest_count++;
500 if (harvest_info->list[i].number_instance == 0)
501 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
502 else
503 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
504 break;
505 case DMU_HWID:
506 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
507 break;
508 default:
509 break;
510 }
511 }
512 /* some IP discovery tables on Navy Flounder don't have this set correctly */
513 if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
514 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
515 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
516 if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
517 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
518 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
519 }
520 if ((adev->pdev->device == 0x731E &&
521 (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) ||
522 (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) ||
523 (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) {
524 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
525 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
526 }
527 }
528
529 union gc_info {
530 struct gc_info_v1_0 v1;
531 struct gc_info_v2_0 v2;
532 };
533
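/* Fill adev->gfx.config/cu_info from the GC table.  v1.x tables describe
 * WGP-based parts, so CUs per SH are derived as 2 * (wgp0 + wgp1) per SA;
 * v2.x tables report CU counts directly.
 */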
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
535 {
536 struct binary_header *bhdr;
537 union gc_info *gc_info;
538
539 if (!adev->mman.discovery_bin) {
540 DRM_ERROR("ip discovery uninitialized\n");
541 return -EINVAL;
542 }
543
544 bhdr = (struct binary_header *)adev->mman.discovery_bin;
545 gc_info = (union gc_info *)(adev->mman.discovery_bin +
546 le16_to_cpu(bhdr->table_list[GC].offset));
547 switch (gc_info->v1.header.version_major) {
548 case 1:
549 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
550 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
551 le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
552 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
553 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
554 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
555 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
556 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
557 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
558 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
559 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
560 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
561 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
562 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
563 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
564 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
565 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
566 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
567 break;
568 case 2:
569 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
570 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
571 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
572 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
573 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
574 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
575 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
576 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
577 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
578 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
579 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
580 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
581 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
582 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
583 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
584 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
585 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
586 break;
587 default:
588 dev_err(adev->dev,
589 "Unhandled GC info table %d.%d\n",
590 gc_info->v1.header.version_major,
591 gc_info->v1.header.version_minor);
592 return -EINVAL;
593 }
594 return 0;
595 }
596
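/* The helpers below translate the discovered (or hard-coded) IP versions into
 * the corresponding ip_block implementations and register them with the
 * device in the required order.
 */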
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
598 {
599 /* what IP to use for this? */
600 switch (adev->ip_versions[GC_HWIP][0]) {
601 case IP_VERSION(9, 0, 1):
602 case IP_VERSION(9, 1, 0):
603 case IP_VERSION(9, 2, 1):
604 case IP_VERSION(9, 2, 2):
605 case IP_VERSION(9, 3, 0):
606 case IP_VERSION(9, 4, 0):
607 case IP_VERSION(9, 4, 1):
608 case IP_VERSION(9, 4, 2):
609 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
610 break;
611 case IP_VERSION(10, 1, 10):
612 case IP_VERSION(10, 1, 1):
613 case IP_VERSION(10, 1, 2):
614 case IP_VERSION(10, 1, 3):
615 case IP_VERSION(10, 3, 0):
616 case IP_VERSION(10, 3, 1):
617 case IP_VERSION(10, 3, 2):
618 case IP_VERSION(10, 3, 3):
619 case IP_VERSION(10, 3, 4):
620 case IP_VERSION(10, 3, 5):
621 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
622 break;
623 default:
624 dev_err(adev->dev,
625 "Failed to add common ip block(GC_HWIP:0x%x)\n",
626 adev->ip_versions[GC_HWIP][0]);
627 return -EINVAL;
628 }
629 return 0;
630 }
631
static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
633 {
634 /* use GC or MMHUB IP version */
635 switch (adev->ip_versions[GC_HWIP][0]) {
636 case IP_VERSION(9, 0, 1):
637 case IP_VERSION(9, 1, 0):
638 case IP_VERSION(9, 2, 1):
639 case IP_VERSION(9, 2, 2):
640 case IP_VERSION(9, 3, 0):
641 case IP_VERSION(9, 4, 0):
642 case IP_VERSION(9, 4, 1):
643 case IP_VERSION(9, 4, 2):
644 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
645 break;
646 case IP_VERSION(10, 1, 10):
647 case IP_VERSION(10, 1, 1):
648 case IP_VERSION(10, 1, 2):
649 case IP_VERSION(10, 1, 3):
650 case IP_VERSION(10, 3, 0):
651 case IP_VERSION(10, 3, 1):
652 case IP_VERSION(10, 3, 2):
653 case IP_VERSION(10, 3, 3):
654 case IP_VERSION(10, 3, 4):
655 case IP_VERSION(10, 3, 5):
656 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
657 break;
658 default:
659 dev_err(adev->dev,
660 "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
661 adev->ip_versions[GC_HWIP][0]);
662 return -EINVAL;
663 }
664 return 0;
665 }
666
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
668 {
669 switch (adev->ip_versions[OSSSYS_HWIP][0]) {
670 case IP_VERSION(4, 0, 0):
671 case IP_VERSION(4, 0, 1):
672 case IP_VERSION(4, 1, 0):
673 case IP_VERSION(4, 1, 1):
674 case IP_VERSION(4, 3, 0):
675 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
676 break;
677 case IP_VERSION(4, 2, 0):
678 case IP_VERSION(4, 2, 1):
679 case IP_VERSION(4, 4, 0):
680 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
681 break;
682 case IP_VERSION(5, 0, 0):
683 case IP_VERSION(5, 0, 1):
684 case IP_VERSION(5, 0, 2):
685 case IP_VERSION(5, 0, 3):
686 case IP_VERSION(5, 2, 0):
687 case IP_VERSION(5, 2, 1):
688 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
689 break;
690 default:
691 dev_err(adev->dev,
692 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
693 adev->ip_versions[OSSSYS_HWIP][0]);
694 return -EINVAL;
695 }
696 return 0;
697 }
698
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
700 {
701 switch (adev->ip_versions[MP0_HWIP][0]) {
702 case IP_VERSION(9, 0, 0):
703 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
704 break;
705 case IP_VERSION(10, 0, 0):
706 case IP_VERSION(10, 0, 1):
707 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
708 break;
709 case IP_VERSION(11, 0, 0):
710 case IP_VERSION(11, 0, 2):
711 case IP_VERSION(11, 0, 4):
712 case IP_VERSION(11, 0, 5):
713 case IP_VERSION(11, 0, 9):
714 case IP_VERSION(11, 0, 7):
715 case IP_VERSION(11, 0, 11):
716 case IP_VERSION(11, 0, 12):
717 case IP_VERSION(11, 0, 13):
718 case IP_VERSION(11, 5, 0):
719 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
720 break;
721 case IP_VERSION(11, 0, 8):
722 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
723 break;
724 case IP_VERSION(11, 0, 3):
725 case IP_VERSION(12, 0, 1):
726 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
727 break;
728 case IP_VERSION(13, 0, 1):
729 case IP_VERSION(13, 0, 2):
730 case IP_VERSION(13, 0, 3):
731 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
732 break;
733 default:
734 dev_err(adev->dev,
735 "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
736 adev->ip_versions[MP0_HWIP][0]);
737 return -EINVAL;
738 }
739 return 0;
740 }
741
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
743 {
744 switch (adev->ip_versions[MP1_HWIP][0]) {
745 case IP_VERSION(9, 0, 0):
746 case IP_VERSION(10, 0, 0):
747 case IP_VERSION(10, 0, 1):
748 case IP_VERSION(11, 0, 2):
749 if (adev->asic_type == CHIP_ARCTURUS)
750 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
751 else
752 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
753 break;
754 case IP_VERSION(11, 0, 0):
755 case IP_VERSION(11, 0, 5):
756 case IP_VERSION(11, 0, 9):
757 case IP_VERSION(11, 0, 7):
758 case IP_VERSION(11, 0, 8):
759 case IP_VERSION(11, 0, 11):
760 case IP_VERSION(11, 0, 12):
761 case IP_VERSION(11, 0, 13):
762 case IP_VERSION(11, 5, 0):
763 amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
764 break;
765 case IP_VERSION(12, 0, 0):
766 case IP_VERSION(12, 0, 1):
767 amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
768 break;
769 case IP_VERSION(13, 0, 1):
770 case IP_VERSION(13, 0, 2):
771 case IP_VERSION(13, 0, 3):
772 amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
773 break;
774 default:
775 dev_err(adev->dev,
776 "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
777 adev->ip_versions[MP1_HWIP][0]);
778 return -EINVAL;
779 }
780 return 0;
781 }
782
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
784 {
785 if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
786 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
787 #if defined(CONFIG_DRM_AMD_DC)
788 } else if (adev->ip_versions[DCE_HWIP][0]) {
789 switch (adev->ip_versions[DCE_HWIP][0]) {
790 case IP_VERSION(1, 0, 0):
791 case IP_VERSION(1, 0, 1):
792 case IP_VERSION(2, 0, 2):
793 case IP_VERSION(2, 0, 0):
794 case IP_VERSION(2, 0, 3):
795 case IP_VERSION(2, 1, 0):
796 case IP_VERSION(3, 0, 0):
797 case IP_VERSION(3, 0, 2):
798 case IP_VERSION(3, 0, 3):
799 case IP_VERSION(3, 0, 1):
800 case IP_VERSION(3, 1, 2):
801 case IP_VERSION(3, 1, 3):
802 amdgpu_device_ip_block_add(adev, &dm_ip_block);
803 break;
804 default:
805 dev_err(adev->dev,
806 "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
807 adev->ip_versions[DCE_HWIP][0]);
808 return -EINVAL;
809 }
810 } else if (adev->ip_versions[DCI_HWIP][0]) {
811 switch (adev->ip_versions[DCI_HWIP][0]) {
812 case IP_VERSION(12, 0, 0):
813 case IP_VERSION(12, 0, 1):
814 case IP_VERSION(12, 1, 0):
815 amdgpu_device_ip_block_add(adev, &dm_ip_block);
816 break;
817 default:
818 dev_err(adev->dev,
819 "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
820 adev->ip_versions[DCI_HWIP][0]);
821 return -EINVAL;
822 }
823 #endif
824 }
825 return 0;
826 }
827
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
829 {
830 switch (adev->ip_versions[GC_HWIP][0]) {
831 case IP_VERSION(9, 0, 1):
832 case IP_VERSION(9, 1, 0):
833 case IP_VERSION(9, 2, 1):
834 case IP_VERSION(9, 2, 2):
835 case IP_VERSION(9, 3, 0):
836 case IP_VERSION(9, 4, 0):
837 case IP_VERSION(9, 4, 1):
838 case IP_VERSION(9, 4, 2):
839 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
840 break;
841 case IP_VERSION(10, 1, 10):
842 case IP_VERSION(10, 1, 2):
843 case IP_VERSION(10, 1, 1):
844 case IP_VERSION(10, 1, 3):
845 case IP_VERSION(10, 3, 0):
846 case IP_VERSION(10, 3, 2):
847 case IP_VERSION(10, 3, 1):
848 case IP_VERSION(10, 3, 4):
849 case IP_VERSION(10, 3, 5):
850 case IP_VERSION(10, 3, 3):
851 amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
852 break;
853 default:
854 dev_err(adev->dev,
855 "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
856 adev->ip_versions[GC_HWIP][0]);
857 return -EINVAL;
858 }
859 return 0;
860 }
861
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
863 {
864 switch (adev->ip_versions[SDMA0_HWIP][0]) {
865 case IP_VERSION(4, 0, 0):
866 case IP_VERSION(4, 0, 1):
867 case IP_VERSION(4, 1, 0):
868 case IP_VERSION(4, 1, 1):
869 case IP_VERSION(4, 1, 2):
870 case IP_VERSION(4, 2, 0):
871 case IP_VERSION(4, 2, 2):
872 case IP_VERSION(4, 4, 0):
873 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
874 break;
875 case IP_VERSION(5, 0, 0):
876 case IP_VERSION(5, 0, 1):
877 case IP_VERSION(5, 0, 2):
878 case IP_VERSION(5, 0, 5):
879 amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
880 break;
881 case IP_VERSION(5, 2, 0):
882 case IP_VERSION(5, 2, 2):
883 case IP_VERSION(5, 2, 4):
884 case IP_VERSION(5, 2, 5):
885 case IP_VERSION(5, 2, 3):
886 case IP_VERSION(5, 2, 1):
887 amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
888 break;
889 default:
890 dev_err(adev->dev,
891 "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
892 adev->ip_versions[SDMA0_HWIP][0]);
893 return -EINVAL;
894 }
895 return 0;
896 }
897
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
899 {
900 if (adev->ip_versions[VCE_HWIP][0]) {
901 switch (adev->ip_versions[UVD_HWIP][0]) {
902 case IP_VERSION(7, 0, 0):
903 case IP_VERSION(7, 2, 0):
904 /* UVD is not supported on vega20 SR-IOV */
905 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
906 amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
907 break;
908 default:
909 dev_err(adev->dev,
910 "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
911 adev->ip_versions[UVD_HWIP][0]);
912 return -EINVAL;
913 }
914 switch (adev->ip_versions[VCE_HWIP][0]) {
915 case IP_VERSION(4, 0, 0):
916 case IP_VERSION(4, 1, 0):
917 /* VCE is not supported on vega20 SR-IOV */
918 if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
919 amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
920 break;
921 default:
922 dev_err(adev->dev,
923 "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
924 adev->ip_versions[VCE_HWIP][0]);
925 return -EINVAL;
926 }
927 } else {
928 switch (adev->ip_versions[UVD_HWIP][0]) {
929 case IP_VERSION(1, 0, 0):
930 case IP_VERSION(1, 0, 1):
931 amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
932 break;
933 case IP_VERSION(2, 0, 0):
934 case IP_VERSION(2, 0, 2):
935 case IP_VERSION(2, 2, 0):
936 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
937 if (!amdgpu_sriov_vf(adev))
938 amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
939 break;
940 case IP_VERSION(2, 0, 3):
941 break;
942 case IP_VERSION(2, 5, 0):
943 amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
944 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
945 break;
946 case IP_VERSION(2, 6, 0):
947 amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
948 amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
949 break;
950 case IP_VERSION(3, 0, 0):
951 case IP_VERSION(3, 0, 16):
952 case IP_VERSION(3, 0, 64):
953 case IP_VERSION(3, 1, 1):
954 case IP_VERSION(3, 0, 2):
955 case IP_VERSION(3, 0, 192):
956 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
957 if (!amdgpu_sriov_vf(adev))
958 amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
959 break;
960 case IP_VERSION(3, 0, 33):
961 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
962 break;
963 default:
964 dev_err(adev->dev,
965 "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
966 adev->ip_versions[UVD_HWIP][0]);
967 return -EINVAL;
968 }
969 }
970 return 0;
971 }
972
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
974 {
975 switch (adev->ip_versions[GC_HWIP][0]) {
976 case IP_VERSION(10, 1, 10):
977 case IP_VERSION(10, 1, 1):
978 case IP_VERSION(10, 1, 2):
979 case IP_VERSION(10, 1, 3):
980 case IP_VERSION(10, 3, 0):
981 case IP_VERSION(10, 3, 1):
982 case IP_VERSION(10, 3, 2):
983 case IP_VERSION(10, 3, 3):
984 case IP_VERSION(10, 3, 4):
985 case IP_VERSION(10, 3, 5):
986 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
987 break;
988 default:
break;
990 }
991 return 0;
992 }
993
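/* Main entry point: for ASICs that predate IP discovery the IP versions are
 * hard-coded here, otherwise they are read from the discovery table; the
 * family, NBIO/HDP/DF/SMUIO callbacks and all IP blocks are then set up from
 * those versions.
 */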
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
995 {
996 int r;
997
998 switch (adev->asic_type) {
999 case CHIP_VEGA10:
1000 vega10_reg_base_init(adev);
1001 adev->sdma.num_instances = 2;
1002 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
1003 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
1004 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
1005 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
1006 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
1007 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
1008 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
1009 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
1010 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
1011 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
1012 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
1013 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
1014 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
1015 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
1016 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
1017 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
1018 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
1019 break;
1020 case CHIP_VEGA12:
1021 vega10_reg_base_init(adev);
1022 adev->sdma.num_instances = 2;
1023 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
1024 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
1025 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
1026 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
1027 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
1028 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
1029 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
1030 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
1031 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
1032 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
1033 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
1034 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
1035 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
1036 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
1037 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
1038 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
1039 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
1040 break;
1041 case CHIP_RAVEN:
1042 vega10_reg_base_init(adev);
1043 adev->sdma.num_instances = 1;
1044 adev->vcn.num_vcn_inst = 1;
1045 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1046 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
1047 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
1048 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
1049 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
1050 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
1051 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
1052 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
1053 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
1054 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
1055 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
1056 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
1057 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
1058 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
1059 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
1060 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
1061 } else {
1062 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
1063 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
1064 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
1065 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
1066 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
1067 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
1068 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
1069 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
1070 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
1071 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
1072 adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
1073 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
1074 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
1075 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
1076 adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
1077 }
1078 break;
1079 case CHIP_VEGA20:
1080 vega20_reg_base_init(adev);
1081 adev->sdma.num_instances = 2;
1082 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
1083 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
1084 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
1085 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
1086 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
1087 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
1088 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
1089 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
1090 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
1091 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
1092 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
1093 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
1094 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
1095 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
1096 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
1097 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
1098 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
1099 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
1100 break;
1101 case CHIP_ARCTURUS:
1102 arct_reg_base_init(adev);
1103 adev->sdma.num_instances = 8;
1104 adev->vcn.num_vcn_inst = 2;
1105 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
1106 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
1107 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
1108 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
1109 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
1110 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
1111 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
1112 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
1113 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
1114 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
1115 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
1116 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
1117 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
1118 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
1119 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
1120 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
1121 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
1122 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
1123 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
1124 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
1125 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
1126 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
1127 break;
1128 case CHIP_ALDEBARAN:
1129 aldebaran_reg_base_init(adev);
1130 adev->sdma.num_instances = 5;
1131 adev->vcn.num_vcn_inst = 2;
1132 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
1133 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
1134 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
1135 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
1136 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
1137 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
1138 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
1139 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
1140 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
1141 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
1142 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
1143 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
1144 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
1145 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
1146 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
1147 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
1148 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
1149 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
1150 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
1151 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
1152 break;
1153 default:
1154 r = amdgpu_discovery_reg_base_init(adev);
1155 if (r)
1156 return -EINVAL;
1157
1158 amdgpu_discovery_harvest_ip(adev);
1159
1160 if (!adev->mman.discovery_bin) {
1161 DRM_ERROR("ip discovery uninitialized\n");
1162 return -EINVAL;
1163 }
1164 break;
1165 }
1166
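/* Derive the chip family from the GC IP version determined above. */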
1167 switch (adev->ip_versions[GC_HWIP][0]) {
1168 case IP_VERSION(9, 0, 1):
1169 case IP_VERSION(9, 2, 1):
1170 case IP_VERSION(9, 4, 0):
1171 case IP_VERSION(9, 4, 1):
1172 case IP_VERSION(9, 4, 2):
1173 adev->family = AMDGPU_FAMILY_AI;
1174 break;
1175 case IP_VERSION(9, 1, 0):
1176 case IP_VERSION(9, 2, 2):
1177 case IP_VERSION(9, 3, 0):
1178 adev->family = AMDGPU_FAMILY_RV;
1179 break;
1180 case IP_VERSION(10, 1, 10):
1181 case IP_VERSION(10, 1, 1):
1182 case IP_VERSION(10, 1, 2):
1183 case IP_VERSION(10, 1, 3):
1184 case IP_VERSION(10, 3, 0):
1185 case IP_VERSION(10, 3, 2):
1186 case IP_VERSION(10, 3, 4):
1187 case IP_VERSION(10, 3, 5):
1188 adev->family = AMDGPU_FAMILY_NV;
1189 break;
1190 case IP_VERSION(10, 3, 1):
1191 adev->family = AMDGPU_FAMILY_VGH;
1192 break;
1193 case IP_VERSION(10, 3, 3):
1194 adev->family = AMDGPU_FAMILY_YC;
1195 break;
1196 default:
1197 return -EINVAL;
1198 }
1199
1200 if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
1201 adev->gmc.xgmi.supported = true;
1202
1203 /* set NBIO version */
1204 switch (adev->ip_versions[NBIO_HWIP][0]) {
1205 case IP_VERSION(6, 1, 0):
1206 case IP_VERSION(6, 2, 0):
1207 adev->nbio.funcs = &nbio_v6_1_funcs;
1208 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
1209 break;
1210 case IP_VERSION(7, 0, 0):
1211 case IP_VERSION(7, 0, 1):
1212 case IP_VERSION(2, 5, 0):
1213 adev->nbio.funcs = &nbio_v7_0_funcs;
1214 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
1215 break;
1216 case IP_VERSION(7, 4, 0):
1217 case IP_VERSION(7, 4, 1):
1218 adev->nbio.funcs = &nbio_v7_4_funcs;
1219 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
1220 break;
1221 case IP_VERSION(7, 4, 4):
1222 adev->nbio.funcs = &nbio_v7_4_funcs;
1223 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
1224 break;
1225 case IP_VERSION(7, 2, 0):
1226 case IP_VERSION(7, 2, 1):
1227 case IP_VERSION(7, 5, 0):
1228 adev->nbio.funcs = &nbio_v7_2_funcs;
1229 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
1230 break;
1231 case IP_VERSION(2, 1, 1):
1232 case IP_VERSION(2, 3, 0):
1233 case IP_VERSION(2, 3, 1):
1234 case IP_VERSION(2, 3, 2):
1235 adev->nbio.funcs = &nbio_v2_3_funcs;
1236 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
1237 break;
1238 case IP_VERSION(3, 3, 0):
1239 case IP_VERSION(3, 3, 1):
1240 case IP_VERSION(3, 3, 2):
1241 case IP_VERSION(3, 3, 3):
1242 adev->nbio.funcs = &nbio_v2_3_funcs;
1243 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
1244 break;
1245 default:
1246 break;
1247 }
1248
1249 switch (adev->ip_versions[HDP_HWIP][0]) {
1250 case IP_VERSION(4, 0, 0):
1251 case IP_VERSION(4, 0, 1):
1252 case IP_VERSION(4, 1, 0):
1253 case IP_VERSION(4, 1, 1):
1254 case IP_VERSION(4, 1, 2):
1255 case IP_VERSION(4, 2, 0):
1256 case IP_VERSION(4, 2, 1):
1257 case IP_VERSION(4, 4, 0):
1258 adev->hdp.funcs = &hdp_v4_0_funcs;
1259 break;
1260 case IP_VERSION(5, 0, 0):
1261 case IP_VERSION(5, 0, 1):
1262 case IP_VERSION(5, 0, 2):
1263 case IP_VERSION(5, 0, 3):
1264 case IP_VERSION(5, 0, 4):
1265 case IP_VERSION(5, 2, 0):
1266 adev->hdp.funcs = &hdp_v5_0_funcs;
1267 break;
1268 default:
1269 break;
1270 }
1271
1272 switch (adev->ip_versions[DF_HWIP][0]) {
1273 case IP_VERSION(3, 6, 0):
1274 case IP_VERSION(3, 6, 1):
1275 case IP_VERSION(3, 6, 2):
1276 adev->df.funcs = &df_v3_6_funcs;
1277 break;
1278 case IP_VERSION(2, 1, 0):
1279 case IP_VERSION(2, 1, 1):
1280 case IP_VERSION(2, 5, 0):
1281 case IP_VERSION(3, 5, 1):
1282 case IP_VERSION(3, 5, 2):
1283 adev->df.funcs = &df_v1_7_funcs;
1284 break;
1285 default:
1286 break;
1287 }
1288
1289 switch (adev->ip_versions[SMUIO_HWIP][0]) {
1290 case IP_VERSION(9, 0, 0):
1291 case IP_VERSION(9, 0, 1):
1292 case IP_VERSION(10, 0, 0):
1293 case IP_VERSION(10, 0, 1):
1294 case IP_VERSION(10, 0, 2):
1295 adev->smuio.funcs = &smuio_v9_0_funcs;
1296 break;
1297 case IP_VERSION(11, 0, 0):
1298 case IP_VERSION(11, 0, 2):
1299 case IP_VERSION(11, 0, 3):
1300 case IP_VERSION(11, 0, 4):
1301 case IP_VERSION(11, 0, 7):
1302 case IP_VERSION(11, 0, 8):
1303 adev->smuio.funcs = &smuio_v11_0_funcs;
1304 break;
1305 case IP_VERSION(11, 0, 6):
1306 case IP_VERSION(11, 0, 10):
1307 case IP_VERSION(11, 0, 11):
1308 case IP_VERSION(11, 5, 0):
1309 case IP_VERSION(13, 0, 1):
1310 adev->smuio.funcs = &smuio_v11_0_6_funcs;
1311 break;
1312 case IP_VERSION(13, 0, 2):
1313 adev->smuio.funcs = &smuio_v13_0_funcs;
1314 break;
1315 default:
1316 break;
1317 }
1318
1319 r = amdgpu_discovery_set_common_ip_blocks(adev);
1320 if (r)
1321 return r;
1322
1323 r = amdgpu_discovery_set_gmc_ip_blocks(adev);
1324 if (r)
1325 return r;
1326
1327 /* For SR-IOV, PSP needs to be initialized before IH */
1328 if (amdgpu_sriov_vf(adev)) {
1329 r = amdgpu_discovery_set_psp_ip_blocks(adev);
1330 if (r)
1331 return r;
1332 r = amdgpu_discovery_set_ih_ip_blocks(adev);
1333 if (r)
1334 return r;
1335 } else {
1336 r = amdgpu_discovery_set_ih_ip_blocks(adev);
1337 if (r)
1338 return r;
1339
1340 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
1341 r = amdgpu_discovery_set_psp_ip_blocks(adev);
1342 if (r)
1343 return r;
1344 }
1345 }
1346
1347 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
1348 r = amdgpu_discovery_set_smu_ip_blocks(adev);
1349 if (r)
1350 return r;
1351 }
1352
1353 r = amdgpu_discovery_set_display_ip_blocks(adev);
1354 if (r)
1355 return r;
1356
1357 r = amdgpu_discovery_set_gc_ip_blocks(adev);
1358 if (r)
1359 return r;
1360
1361 r = amdgpu_discovery_set_sdma_ip_blocks(adev);
1362 if (r)
1363 return r;
1364
1365 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
1366 !amdgpu_sriov_vf(adev)) {
1367 r = amdgpu_discovery_set_smu_ip_blocks(adev);
1368 if (r)
1369 return r;
1370 }
1371
1372 r = amdgpu_discovery_set_mm_ip_blocks(adev);
1373 if (r)
1374 return r;
1375
1376 if (adev->enable_mes) {
1377 r = amdgpu_discovery_set_mes_ip_blocks(adev);
1378 if (r)
1379 return r;
1380 }
1381
1382 return 0;
1383 }
1384
1385