/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_6.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

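/* Register offsets (in dwords) used before the discovery table is parsed:
 * RCC_CONFIG_MEMSIZE reports the VRAM size in MB, and the MM_INDEX/
 * MM_INDEX_HI/MM_DATA pair provides indirect MMIO register access.
 */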
#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
};

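/* Map driver-internal HWIP enums to the HW IDs used in the discovery table. */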
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
};

static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
{
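	/* RCC_CONFIG_MEMSIZE reports the VRAM size in MB; the discovery
	 * binary lives in a reserved region DISCOVERY_TMR_OFFSET bytes
	 * below the top of VRAM.
	 */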
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
				  adev->mman.discovery_tmr_size, false);
	return 0;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

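	/* the destination buffer is discovery_tmr_size bytes; the firmware
	 * file is assumed to fit within it
	 */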
	memcpy(binary, fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

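/* the discovery binary checksum is a simple byte-wise sum truncated to 16 bits */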
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return amdgpu_discovery_calculate_checksum(data, size) == expected;
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
	if (r) {
		dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
		r = -EINVAL;
		goto out;
	}

	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin) || amdgpu_discovery == 2) {
		/* ignore the discovery binary in vram if the amdgpu_discovery
		 * module parameter is set to 2
		 */
		if (amdgpu_discovery == 2)
			dev_info(adev->dev, "force read ip discovery binary from file\n");
		else
			dev_warn(adev->dev, "invalid ip discovery binary signature in vram\n");

		/* retry reading the ip discovery binary from file */
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}
		/* check the ip discovery binary signature */
		if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
			dev_warn(adev->dev, "invalid ip discovery binary signature in file\n");
			r = -EINVAL;
			goto out;
		}
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

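	/* note: the MALL table validation below is currently disabled,
	 * short-circuited by the "0 &&" in the condition
	 */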
	if (0 && offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip *ip)
{
	if (ip->number_instance >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
			  ip->number_instance);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->harvest) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->number_instance == 0)
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
					else
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
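			/* struct ip ends in a flexible base_address[] array;
			 * advance by the full per-IP record size
			 */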
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

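	/* the harvest table holds up to 32 entries; a zero hw_id terminates the list */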
	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			if (harvest_info->list[i].number_instance == 0)
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
			else
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		default:
			break;
		}
	}

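	/* active UMC mask = all node instances minus the harvested ones */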
	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[];
};

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;  /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Each entry emits 11 characters ("0x%08X\n") plus a NUL;
		 * stop before at + size would exceed PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->number_instance;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest = ip->harvest;
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
				ip_hw_instance->base_addr[kk] = ip->base_address[kk];

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
	}

	return 0;
}

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->number_instance,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				/* only record the config when there is room for
				 * another instance; this guards the vcn_config[]
				 * write against running past the array end
				 */
				if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
					adev->sdma.num_instances++;
				else
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->number_instance] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->number_instance] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	amdgpu_discovery_sysfs_init(adev);

	return 0;
}

int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
				    int *major, int *minor, int *revision)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int i, j;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
				if (major)
					*major = ip->major;
				if (minor)
					*minor = ip->minor;
				if (revision)
					*revision = ip->revision;
				return 0;
			}
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return -EINVAL;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
	 */
	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc)
		adev->gmc.num_umc -= umc_harvest_count;
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};

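/* the GC info table is versioned; the v1.x and v2.0 layouts share a common header */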
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
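		/* each UMC contributes mall_size_per_umc by default; the
		 * m_s_present bit doubles and the m_half_use bit halves the
		 * contribution for that UMC instance
		 */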
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* select the SoC-level common IP block based on the GC IP version */
	switch (adev->ip_versions[GC_HWIP][0]) {
1495 case IP_VERSION(9, 0, 1):
1496 case IP_VERSION(9, 1, 0):
1497 case IP_VERSION(9, 2, 1):
1498 case IP_VERSION(9, 2, 2):
1499 case IP_VERSION(9, 3, 0):
1500 case IP_VERSION(9, 4, 0):
1501 case IP_VERSION(9, 4, 1):
1502 case IP_VERSION(9, 4, 2):
1503 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1504 break;
1505 case IP_VERSION(10, 1, 10):
1506 case IP_VERSION(10, 1, 1):
1507 case IP_VERSION(10, 1, 2):
1508 case IP_VERSION(10, 1, 3):
1509 case IP_VERSION(10, 1, 4):
1510 case IP_VERSION(10, 3, 0):
1511 case IP_VERSION(10, 3, 1):
1512 case IP_VERSION(10, 3, 2):
1513 case IP_VERSION(10, 3, 3):
1514 case IP_VERSION(10, 3, 4):
1515 case IP_VERSION(10, 3, 5):
1516 case IP_VERSION(10, 3, 6):
1517 case IP_VERSION(10, 3, 7):
1518 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1519 break;
1520 case IP_VERSION(11, 0, 0):
1521 case IP_VERSION(11, 0, 1):
1522 case IP_VERSION(11, 0, 2):
1523 case IP_VERSION(11, 0, 3):
1524 case IP_VERSION(11, 0, 4):
1525 amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
1526 break;
1527 default:
1528 dev_err(adev->dev,
1529 "Failed to add common ip block(GC_HWIP:0x%x)\n",
1530 adev->ip_versions[GC_HWIP][0]);
1531 return -EINVAL;
1532 }
1533 return 0;
1534 }
1535
amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device * adev)1536 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1537 {
1538 /* use GC or MMHUB IP version */
1539 switch (adev->ip_versions[GC_HWIP][0]) {
1540 case IP_VERSION(9, 0, 1):
1541 case IP_VERSION(9, 1, 0):
1542 case IP_VERSION(9, 2, 1):
1543 case IP_VERSION(9, 2, 2):
1544 case IP_VERSION(9, 3, 0):
1545 case IP_VERSION(9, 4, 0):
1546 case IP_VERSION(9, 4, 1):
1547 case IP_VERSION(9, 4, 2):
1548 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1549 break;
1550 case IP_VERSION(10, 1, 10):
1551 case IP_VERSION(10, 1, 1):
1552 case IP_VERSION(10, 1, 2):
1553 case IP_VERSION(10, 1, 3):
1554 case IP_VERSION(10, 1, 4):
1555 case IP_VERSION(10, 3, 0):
1556 case IP_VERSION(10, 3, 1):
1557 case IP_VERSION(10, 3, 2):
1558 case IP_VERSION(10, 3, 3):
1559 case IP_VERSION(10, 3, 4):
1560 case IP_VERSION(10, 3, 5):
1561 case IP_VERSION(10, 3, 6):
1562 case IP_VERSION(10, 3, 7):
1563 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1564 break;
1565 case IP_VERSION(11, 0, 0):
1566 case IP_VERSION(11, 0, 1):
1567 case IP_VERSION(11, 0, 2):
1568 case IP_VERSION(11, 0, 3):
1569 case IP_VERSION(11, 0, 4):
1570 amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
1571 break;
1572 default:
1573 dev_err(adev->dev,
1574 "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1575 adev->ip_versions[GC_HWIP][0]);
1576 return -EINVAL;
1577 }
1578 return 0;
1579 }
1580
amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device * adev)1581 static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
1582 {
1583 switch (adev->ip_versions[OSSSYS_HWIP][0]) {
1584 case IP_VERSION(4, 0, 0):
1585 case IP_VERSION(4, 0, 1):
1586 case IP_VERSION(4, 1, 0):
1587 case IP_VERSION(4, 1, 1):
1588 case IP_VERSION(4, 3, 0):
1589 amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
1590 break;
1591 case IP_VERSION(4, 2, 0):
1592 case IP_VERSION(4, 2, 1):
1593 case IP_VERSION(4, 4, 0):
1594 amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
1595 break;
1596 case IP_VERSION(5, 0, 0):
1597 case IP_VERSION(5, 0, 1):
1598 case IP_VERSION(5, 0, 2):
1599 case IP_VERSION(5, 0, 3):
1600 case IP_VERSION(5, 2, 0):
1601 case IP_VERSION(5, 2, 1):
1602 amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
1603 break;
1604 case IP_VERSION(6, 0, 0):
1605 case IP_VERSION(6, 0, 1):
1606 case IP_VERSION(6, 0, 2):
1607 amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
1608 break;
1609 default:
1610 dev_err(adev->dev,
1611 "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
1612 adev->ip_versions[OSSSYS_HWIP][0]);
1613 return -EINVAL;
1614 }
1615 return 0;
1616 }
1617
amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device * adev)1618 static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
1619 {
1620 switch (adev->ip_versions[MP0_HWIP][0]) {
1621 case IP_VERSION(9, 0, 0):
1622 amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
1623 break;
1624 case IP_VERSION(10, 0, 0):
1625 case IP_VERSION(10, 0, 1):
1626 amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
1627 break;
1628 case IP_VERSION(11, 0, 0):
1629 case IP_VERSION(11, 0, 2):
1630 case IP_VERSION(11, 0, 4):
1631 case IP_VERSION(11, 0, 5):
1632 case IP_VERSION(11, 0, 9):
1633 case IP_VERSION(11, 0, 7):
1634 case IP_VERSION(11, 0, 11):
1635 case IP_VERSION(11, 0, 12):
1636 case IP_VERSION(11, 0, 13):
1637 case IP_VERSION(11, 5, 0):
1638 amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
1639 break;
1640 case IP_VERSION(11, 0, 8):
1641 amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
1642 break;
1643 case IP_VERSION(11, 0, 3):
1644 case IP_VERSION(12, 0, 1):
1645 amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
1646 break;
1647 case IP_VERSION(13, 0, 0):
1648 case IP_VERSION(13, 0, 1):
1649 case IP_VERSION(13, 0, 2):
1650 case IP_VERSION(13, 0, 3):
1651 case IP_VERSION(13, 0, 5):
1652 case IP_VERSION(13, 0, 7):
1653 case IP_VERSION(13, 0, 8):
1654 case IP_VERSION(13, 0, 10):
1655 case IP_VERSION(13, 0, 11):
1656 amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
1657 break;
1658 case IP_VERSION(13, 0, 4):
1659 amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
1660 break;
1661 default:
1662 dev_err(adev->dev,
1663 "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
1664 adev->ip_versions[MP0_HWIP][0]);
1665 return -EINVAL;
1666 }
1667 return 0;
1668 }
1669
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

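/*
 * Under SR-IOV the host owns the physical display hardware, so route the
 * VF through the virtual KMS display instead.
 */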
#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

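/*
 * Pick the display IP block: virtual display (vkms) when it is explicitly
 * enabled or when running as an SR-IOV VF, otherwise the DC display manager
 * keyed off the DCN (DCE_HWIP) or DCE (DCI_HWIP) version.
 */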
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

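/* Pick the GFX (graphics/compute) IP block from the GC version. */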
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

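/* Pick the SDMA (System DMA) engine IP block from the SDMA0 version. */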
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

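/*
 * Pick the multimedia IP blocks: UVD/VCE on ASICs that still report a VCE
 * version, VCN (plus a separate JPEG block where applicable) otherwise.
 */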
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 192):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

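/*
 * Pick the MES (Micro Engine Scheduler) IP block. MES is opt-in via the
 * amdgpu_mes module parameter on GFX10 and always enabled on GFX11.
 */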
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

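/**
 * amdgpu_discovery_set_ip_blocks - register the IP blocks for an ASIC
 * @adev: amdgpu device pointer
 *
 * For ASICs that predate the IP discovery table, the per-IP versions are
 * hardcoded below; everything newer is read from the discovery binary.
 * The per-IP callbacks and IP blocks are then selected from those versions.
 *
 * Returns 0 on success, negative error code on failure.
 */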
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

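	/* map the GC version to a chip family */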
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

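	/* flag APUs based on the GC version */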
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

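	/* set XGMI support */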
	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

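	/* set HDP version */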
	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

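	/* set DF version */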
	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	default:
		break;
	}

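	/* set SMUIO version */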
	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

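	/* set LSDMA version */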
	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

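	/* now add the IP blocks; they are initialized in the order added */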
	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}