1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
34 #include "amdgpu_dpm_internal.h"
35 #include "amdgpu_display.h"
36
37 static const struct amd_pm_funcs pp_dpm_funcs;
38
amd_powerplay_create(struct amdgpu_device * adev)39 static int amd_powerplay_create(struct amdgpu_device *adev)
40 {
41 struct pp_hwmgr *hwmgr;
42
43 if (adev == NULL)
44 return -EINVAL;
45
46 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
47 if (hwmgr == NULL)
48 return -ENOMEM;
49
50 hwmgr->adev = adev;
51 hwmgr->not_vf = !amdgpu_sriov_vf(adev);
52 hwmgr->device = amdgpu_cgs_create_device(adev);
53 mutex_init(&hwmgr->msg_lock);
54 hwmgr->chip_family = adev->family;
55 hwmgr->chip_id = adev->asic_type;
56 hwmgr->feature_mask = adev->pm.pp_feature;
57 hwmgr->display_config = &adev->pm.pm_display_cfg;
58 adev->powerplay.pp_handle = hwmgr;
59 adev->powerplay.pp_funcs = &pp_dpm_funcs;
60 return 0;
61 }
62
63
amd_powerplay_destroy(struct amdgpu_device * adev)64 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 {
66 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67
68 mutex_destroy(&hwmgr->msg_lock);
69
70 kfree(hwmgr->hardcode_pp_table);
71 hwmgr->hardcode_pp_table = NULL;
72
73 kfree(hwmgr);
74 hwmgr = NULL;
75 }
76
pp_early_init(void * handle)77 static int pp_early_init(void *handle)
78 {
79 int ret;
80 struct amdgpu_device *adev = handle;
81
82 ret = amd_powerplay_create(adev);
83
84 if (ret != 0)
85 return ret;
86
87 ret = hwmgr_early_init(adev->powerplay.pp_handle);
88 if (ret)
89 return -EINVAL;
90
91 return 0;
92 }
93
pp_sw_init(void * handle)94 static int pp_sw_init(void *handle)
95 {
96 struct amdgpu_device *adev = handle;
97 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
98 int ret = 0;
99
100 ret = hwmgr_sw_init(hwmgr);
101
102 pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
103
104 return ret;
105 }
106
pp_sw_fini(void * handle)107 static int pp_sw_fini(void *handle)
108 {
109 struct amdgpu_device *adev = handle;
110 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
111
112 hwmgr_sw_fini(hwmgr);
113
114 amdgpu_ucode_release(&adev->pm.fw);
115
116 return 0;
117 }
118
pp_hw_init(void * handle)119 static int pp_hw_init(void *handle)
120 {
121 int ret = 0;
122 struct amdgpu_device *adev = handle;
123 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
124
125 ret = hwmgr_hw_init(hwmgr);
126
127 if (ret)
128 pr_err("powerplay hw init failed\n");
129
130 return ret;
131 }
132
pp_hw_fini(void * handle)133 static int pp_hw_fini(void *handle)
134 {
135 struct amdgpu_device *adev = handle;
136 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
137
138 hwmgr_hw_fini(hwmgr);
139
140 return 0;
141 }
142
/*
 * Reserve a GTT buffer for the SMU's private use and tell the SMU its
 * CPU and GPU addresses via the notify_cac_buffer_info hwmgr callback.
 *
 * NOTE(review): r starts at -EINVAL, so if the ASIC provides no
 * notify_cac_buffer_info callback the freshly created buffer is freed
 * again and an error is logged — confirm this is the intended policy
 * rather than silently keeping the reservation.
 */
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
						&adev->pm.smu_prv_buffer,
						&gpu_addr,
						&cpu_ptr)) {
		DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
		return;
	}

	/* hand both address halves to the SMU firmware */
	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		/* notification failed (or no callback): release the buffer */
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
	}
}
173
pp_late_init(void * handle)174 static int pp_late_init(void *handle)
175 {
176 struct amdgpu_device *adev = handle;
177 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
178
179 if (hwmgr && hwmgr->pm_en)
180 hwmgr_handle_task(hwmgr,
181 AMD_PP_TASK_COMPLETE_INIT, NULL);
182 if (adev->pm.smu_prv_buffer_size != 0)
183 pp_reserve_vram_for_smu(adev);
184
185 return 0;
186 }
187
pp_late_fini(void * handle)188 static void pp_late_fini(void *handle)
189 {
190 struct amdgpu_device *adev = handle;
191
192 if (adev->pm.smu_prv_buffer)
193 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
194 amd_powerplay_destroy(adev);
195 }
196
197
/* Powerplay exposes no idle state; always report busy. */
static bool pp_is_idle(void *handle)
{
	return false;
}
202
/* Nothing to wait for; required IP-block stub. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
207
/* Soft reset is a no-op for powerplay; required IP-block stub. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
212
/* Powergating state changes are handled elsewhere; IP-block stub. */
static int pp_set_powergating_state(void *handle,
				    enum amd_powergating_state state)
{
	return 0;
}
218
pp_suspend(void * handle)219 static int pp_suspend(void *handle)
220 {
221 struct amdgpu_device *adev = handle;
222 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
223
224 return hwmgr_suspend(hwmgr);
225 }
226
pp_resume(void * handle)227 static int pp_resume(void *handle)
228 {
229 struct amdgpu_device *adev = handle;
230 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
231
232 return hwmgr_resume(hwmgr);
233 }
234
/* Clockgating is driven via pp_set_clockgating_by_smu; IP-block stub. */
static int pp_set_clockgating_state(void *handle,
				    enum amd_clockgating_state state)
{
	return 0;
}
240
/* amdgpu IP-block entry points for the powerplay (SMC) block. */
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.wait_for_idle = pp_wait_for_idle,
	.soft_reset = pp_sw_reset,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};
258
/* Version descriptor used to register powerplay as the SMC IP block. */
const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};
267
/* This interface is only supported on VI parts,
 * because only smu7/8 can help to load gfx/sdma fw,
 * and the SMU must be enabled before loading the other IPs' firmware.
 * So call start_smu here to load the smu7 firmware and then the other IPs' fw.
 */
pp_dpm_load_fw(void * handle)273 static int pp_dpm_load_fw(void *handle)
274 {
275 struct pp_hwmgr *hwmgr = handle;
276
277 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
278 return -EINVAL;
279
280 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
281 pr_err("fw load failed\n");
282 return -EINVAL;
283 }
284
285 return 0;
286 }
287
/* Firmware loading needs no completion step here; required stub. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
292
/* Forward a clockgating message to the ASIC-specific handler. */
static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings)
		return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);

	pr_info_ratelimited("%s was not implemented.\n", __func__);
	return 0;
}
307
pp_dpm_en_umd_pstate(struct pp_hwmgr * hwmgr,enum amd_dpm_forced_level * level)308 static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
309 enum amd_dpm_forced_level *level)
310 {
311 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
312 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
313 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
314 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
315
316 if (!(hwmgr->dpm_level & profile_mode_mask)) {
317 /* enter umd pstate, save current level, disable gfx cg*/
318 if (*level & profile_mode_mask) {
319 hwmgr->saved_dpm_level = hwmgr->dpm_level;
320 hwmgr->en_umd_pstate = true;
321 }
322 } else {
323 /* exit umd pstate, restore level, enable gfx cg*/
324 if (!(*level & profile_mode_mask)) {
325 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
326 *level = hwmgr->saved_dpm_level;
327 hwmgr->en_umd_pstate = false;
328 }
329 }
330 }
331
pp_dpm_force_performance_level(void * handle,enum amd_dpm_forced_level level)332 static int pp_dpm_force_performance_level(void *handle,
333 enum amd_dpm_forced_level level)
334 {
335 struct pp_hwmgr *hwmgr = handle;
336
337 if (!hwmgr || !hwmgr->pm_en)
338 return -EINVAL;
339
340 if (level == hwmgr->dpm_level)
341 return 0;
342
343 pp_dpm_en_umd_pstate(hwmgr, &level);
344 hwmgr->request_dpm_level = level;
345 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
346
347 return 0;
348 }
349
/*
 * Return the current forced DPM level.
 * NOTE(review): on error this returns -EINVAL cast into the enum return
 * type — callers must treat negative values as failure.
 */
static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}
360
pp_dpm_get_sclk(void * handle,bool low)361 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
362 {
363 struct pp_hwmgr *hwmgr = handle;
364
365 if (!hwmgr || !hwmgr->pm_en)
366 return 0;
367
368 if (hwmgr->hwmgr_func->get_sclk == NULL) {
369 pr_info_ratelimited("%s was not implemented.\n", __func__);
370 return 0;
371 }
372 return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
373 }
374
pp_dpm_get_mclk(void * handle,bool low)375 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
376 {
377 struct pp_hwmgr *hwmgr = handle;
378
379 if (!hwmgr || !hwmgr->pm_en)
380 return 0;
381
382 if (hwmgr->hwmgr_func->get_mclk == NULL) {
383 pr_info_ratelimited("%s was not implemented.\n", __func__);
384 return 0;
385 }
386 return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
387 }
388
/* Gate (@gate true) or ungate power for the VCE block, if supported. */
static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce) {
		hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
		return;
	}

	pr_info_ratelimited("%s was not implemented.\n", __func__);
}
402
/* Gate (@gate true) or ungate power for the UVD block, if supported. */
static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd) {
		hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
		return;
	}

	pr_info_ratelimited("%s was not implemented.\n", __func__);
}
416
pp_dpm_dispatch_tasks(void * handle,enum amd_pp_task task_id,enum amd_pm_state_type * user_state)417 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
418 enum amd_pm_state_type *user_state)
419 {
420 struct pp_hwmgr *hwmgr = handle;
421
422 if (!hwmgr || !hwmgr->pm_en)
423 return -EINVAL;
424
425 return hwmgr_handle_task(hwmgr, task_id, user_state);
426 }
427
pp_dpm_get_current_power_state(void * handle)428 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
429 {
430 struct pp_hwmgr *hwmgr = handle;
431 struct pp_power_state *state;
432 enum amd_pm_state_type pm_type;
433
434 if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
435 return -EINVAL;
436
437 state = hwmgr->current_ps;
438
439 switch (state->classification.ui_label) {
440 case PP_StateUILabel_Battery:
441 pm_type = POWER_STATE_TYPE_BATTERY;
442 break;
443 case PP_StateUILabel_Balanced:
444 pm_type = POWER_STATE_TYPE_BALANCED;
445 break;
446 case PP_StateUILabel_Performance:
447 pm_type = POWER_STATE_TYPE_PERFORMANCE;
448 break;
449 default:
450 if (state->classification.flags & PP_StateClassificationFlag_Boot)
451 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
452 else
453 pm_type = POWER_STATE_TYPE_DEFAULT;
454 break;
455 }
456
457 return pm_type;
458 }
459
/* Select the fan control mode; U32_MAX is a reserved/invalid value. */
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}
477
/* Report the current fan control mode through @fan_mode. */
static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	return 0;
}
494
/* Set the fan speed as a PWM duty value; U32_MAX is reserved/invalid. */
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
510
/* Read the current fan speed as a PWM duty value into @speed. */
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
526
/* Read the current fan speed in RPM into @rpm. */
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
542
/* Set the target fan speed in RPM; U32_MAX is reserved/invalid. */
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
558
pp_dpm_get_pp_num_states(void * handle,struct pp_states_info * data)559 static int pp_dpm_get_pp_num_states(void *handle,
560 struct pp_states_info *data)
561 {
562 struct pp_hwmgr *hwmgr = handle;
563 int i;
564
565 memset(data, 0, sizeof(*data));
566
567 if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
568 return -EINVAL;
569
570 data->nums = hwmgr->num_ps;
571
572 for (i = 0; i < hwmgr->num_ps; i++) {
573 struct pp_power_state *state = (struct pp_power_state *)
574 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
575 switch (state->classification.ui_label) {
576 case PP_StateUILabel_Battery:
577 data->states[i] = POWER_STATE_TYPE_BATTERY;
578 break;
579 case PP_StateUILabel_Balanced:
580 data->states[i] = POWER_STATE_TYPE_BALANCED;
581 break;
582 case PP_StateUILabel_Performance:
583 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
584 break;
585 default:
586 if (state->classification.flags & PP_StateClassificationFlag_Boot)
587 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
588 else
589 data->states[i] = POWER_STATE_TYPE_DEFAULT;
590 }
591 }
592 return 0;
593 }
594
pp_dpm_get_pp_table(void * handle,char ** table)595 static int pp_dpm_get_pp_table(void *handle, char **table)
596 {
597 struct pp_hwmgr *hwmgr = handle;
598
599 if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
600 return -EINVAL;
601
602 *table = (char *)hwmgr->soft_pp_table;
603 return hwmgr->soft_pp_table_size;
604 }
605
amd_powerplay_reset(void * handle)606 static int amd_powerplay_reset(void *handle)
607 {
608 struct pp_hwmgr *hwmgr = handle;
609 int ret;
610
611 ret = hwmgr_hw_fini(hwmgr);
612 if (ret)
613 return ret;
614
615 ret = hwmgr_hw_init(hwmgr);
616 if (ret)
617 return ret;
618
619 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
620 }
621
/*
 * Override the soft powerplay table with @size bytes from @buf, then
 * reset the hwmgr so the new table takes effect and disable AVFS.
 *
 * Returns 0 on success, -EINVAL for bad arguments or oversized input,
 * -ENOMEM if the override buffer cannot be allocated.
 */
static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	/*
	 * hardcode_pp_table is allocated with soft_pp_table_size bytes;
	 * reject larger inputs so the memcpy below cannot overflow it.
	 */
	if (size > hwmgr->soft_pp_table_size)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	/* the overridden table replaces fused values, so switch AVFS off */
	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}
651
/*
 * Restrict a clock domain to the DPM levels selected in @mask.
 * Only honored while the forced DPM level is MANUAL.
 */
static int pp_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->force_clock_level) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
672
pp_dpm_emit_clock_levels(void * handle,enum pp_clock_type type,char * buf,int * offset)673 static int pp_dpm_emit_clock_levels(void *handle,
674 enum pp_clock_type type,
675 char *buf,
676 int *offset)
677 {
678 struct pp_hwmgr *hwmgr = handle;
679
680 if (!hwmgr || !hwmgr->pm_en)
681 return -EOPNOTSUPP;
682
683 if (!hwmgr->hwmgr_func->emit_clock_levels)
684 return -ENOENT;
685
686 return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
687 }
688
pp_dpm_print_clock_levels(void * handle,enum pp_clock_type type,char * buf)689 static int pp_dpm_print_clock_levels(void *handle,
690 enum pp_clock_type type, char *buf)
691 {
692 struct pp_hwmgr *hwmgr = handle;
693
694 if (!hwmgr || !hwmgr->pm_en)
695 return -EINVAL;
696
697 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
698 pr_info_ratelimited("%s was not implemented.\n", __func__);
699 return 0;
700 }
701 return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
702 }
703
pp_dpm_get_sclk_od(void * handle)704 static int pp_dpm_get_sclk_od(void *handle)
705 {
706 struct pp_hwmgr *hwmgr = handle;
707
708 if (!hwmgr || !hwmgr->pm_en)
709 return -EINVAL;
710
711 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
712 pr_info_ratelimited("%s was not implemented.\n", __func__);
713 return 0;
714 }
715 return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
716 }
717
/* Set the engine-clock overdrive percentage; no-op if unsupported. */
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od)
		return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);

	pr_info_ratelimited("%s was not implemented.\n", __func__);
	return 0;
}
732
pp_dpm_get_mclk_od(void * handle)733 static int pp_dpm_get_mclk_od(void *handle)
734 {
735 struct pp_hwmgr *hwmgr = handle;
736
737 if (!hwmgr || !hwmgr->pm_en)
738 return -EINVAL;
739
740 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
741 pr_info_ratelimited("%s was not implemented.\n", __func__);
742 return 0;
743 }
744 return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
745 }
746
/* Set the memory-clock overdrive percentage; no-op if unsupported. */
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od)
		return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);

	pr_info_ratelimited("%s was not implemented.\n", __func__);
	return 0;
}
760
/*
 * Read a sensor value identified by @idx into @value (@size tracks the
 * buffer size for the ASIC-specific path). Pstate clocks and fan RPM
 * limits are served from cached hwmgr state; everything else is
 * forwarded to the ASIC read_sensor implementation.
 *
 * NOTE(review): the default path assumes hwmgr_func->read_sensor is
 * always implemented — confirm all hwmgr backends provide it.
 */
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	/* pstate clocks are cached in 10 kHz units; report in kHz (*100) */
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}
792
793 static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void * handle,unsigned idx)794 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
795 {
796 struct pp_hwmgr *hwmgr = handle;
797
798 if (!hwmgr || !hwmgr->pm_en)
799 return NULL;
800
801 if (idx < hwmgr->num_vce_state_tables)
802 return &hwmgr->vce_states[idx];
803 return NULL;
804 }
805
pp_get_power_profile_mode(void * handle,char * buf)806 static int pp_get_power_profile_mode(void *handle, char *buf)
807 {
808 struct pp_hwmgr *hwmgr = handle;
809
810 if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
811 return -EOPNOTSUPP;
812 if (!buf)
813 return -EINVAL;
814
815 return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
816 }
817
/*
 * Apply a power-profile mode with @size parameters from @input.
 * Only honored while the forced DPM level is MANUAL.
 */
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en ||
	    !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
832
/* Forward a fine-grain clock/voltage adjustment; silently OK if absent. */
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_fine_grain_clk_vol)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}
845
/* Forward an overdrive (ODN) DPM-table edit to the ASIC handler. */
static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table)
		return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);

	pr_info_ratelimited("%s was not implemented.\n", __func__);
	return 0;
}
861
pp_dpm_set_mp1_state(void * handle,enum pp_mp1_state mp1_state)862 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
863 {
864 struct pp_hwmgr *hwmgr = handle;
865
866 if (!hwmgr)
867 return -EINVAL;
868
869 if (!hwmgr->pm_en)
870 return 0;
871
872 if (hwmgr->hwmgr_func->set_mp1_state)
873 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
874
875 return 0;
876 }
877
/*
 * Enable (@en) or disable a power profile @type. Profiles are tracked as
 * bits in workload_mask, positioned by their priority; the highest set
 * bit selects the workload setting actually applied.
 *
 * NOTE(review): the "index > 0" guard exists only on the disable path —
 * on the enable path fls() is guaranteed >= 1 because a bit was just set.
 */
static int pp_dpm_switch_power_profile(void *handle,
		enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload;
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	/* CUSTOM and beyond cannot be toggled through this interface */
	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
		hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
		if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
			return -EINVAL;
	}

	/* manual mode owns the profile selection; don't override it */
	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);

	return 0;
}
919
/*
 * Set the sustained power limit in the SMU. A @limit of 0 restores the
 * default; values above the (possibly overdrive-extended) maximum are
 * rejected.
 */
static int pp_set_power_limit(void *handle, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hwmgr_func->set_power_limit) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!limit)
		limit = hwmgr->default_power_limit;

	/* overdrive raises the ceiling by the TDP OD percentage */
	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled)
		max_power_limit = max_power_limit *
			(100 + hwmgr->platform_descriptor.TDPODLimit) / 100;

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}
949
/*
 * Report the current/default/maximum sustained power limit through
 * @limit. Only PP_PWR_TYPE_SUSTAINED is supported.
 */
static int pp_get_power_limit(void *handle, uint32_t *limit,
		enum pp_power_limit_level pp_limit_level,
		enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	if (pp_limit_level == PP_PWR_LIMIT_CURRENT) {
		*limit = hwmgr->power_limit;
	} else if (pp_limit_level == PP_PWR_LIMIT_DEFAULT) {
		*limit = hwmgr->default_power_limit;
	} else if (pp_limit_level == PP_PWR_LIMIT_MAX) {
		*limit = hwmgr->default_power_limit;
		/* overdrive raises the ceiling by the TDP OD percentage */
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
984
pp_display_configuration_change(void * handle,const struct amd_pp_display_configuration * display_config)985 static int pp_display_configuration_change(void *handle,
986 const struct amd_pp_display_configuration *display_config)
987 {
988 struct pp_hwmgr *hwmgr = handle;
989
990 if (!hwmgr || !hwmgr->pm_en)
991 return -EINVAL;
992
993 phm_store_dal_configuration_data(hwmgr, display_config);
994 return 0;
995 }
996
pp_get_display_power_level(void * handle,struct amd_pp_simple_clock_info * output)997 static int pp_get_display_power_level(void *handle,
998 struct amd_pp_simple_clock_info *output)
999 {
1000 struct pp_hwmgr *hwmgr = handle;
1001
1002 if (!hwmgr || !hwmgr->pm_en ||!output)
1003 return -EINVAL;
1004
1005 return phm_get_dal_power_level(hwmgr, output);
1006 }
1007
/*
 * Gather the current clock ranges of the active power state into @clocks
 * for the display driver.
 *
 * The performance-level designation depends on whether power containment
 * is enabled. Shallow-sleep clocks override the *_in_sr fields when
 * available.
 *
 * NOTE(review): hw_clocks is left uninitialized and is only read after a
 * successful phm_get_clock_info() — assumed to fully populate it; confirm.
 */
static int pp_get_current_clocks(void *handle,
				 struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					&hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		pr_debug("Error in phm_get_clock_info \n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	/* level 0 means "unknown"; report the deepest level in that case */
	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}
1055
pp_get_clock_by_type(void * handle,enum amd_pp_clock_type type,struct amd_pp_clocks * clocks)1056 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1057 {
1058 struct pp_hwmgr *hwmgr = handle;
1059
1060 if (!hwmgr || !hwmgr->pm_en)
1061 return -EINVAL;
1062
1063 if (clocks == NULL)
1064 return -EINVAL;
1065
1066 return phm_get_clock_by_type(hwmgr, type, clocks);
1067 }
1068
pp_get_clock_by_type_with_latency(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_latency * clocks)1069 static int pp_get_clock_by_type_with_latency(void *handle,
1070 enum amd_pp_clock_type type,
1071 struct pp_clock_levels_with_latency *clocks)
1072 {
1073 struct pp_hwmgr *hwmgr = handle;
1074
1075 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1076 return -EINVAL;
1077
1078 return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1079 }
1080
pp_get_clock_by_type_with_voltage(void * handle,enum amd_pp_clock_type type,struct pp_clock_levels_with_voltage * clocks)1081 static int pp_get_clock_by_type_with_voltage(void *handle,
1082 enum amd_pp_clock_type type,
1083 struct pp_clock_levels_with_voltage *clocks)
1084 {
1085 struct pp_hwmgr *hwmgr = handle;
1086
1087 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1088 return -EINVAL;
1089
1090 return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1091 }
1092
pp_set_watermarks_for_clocks_ranges(void * handle,void * clock_ranges)1093 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1094 void *clock_ranges)
1095 {
1096 struct pp_hwmgr *hwmgr = handle;
1097
1098 if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1099 return -EINVAL;
1100
1101 return phm_set_watermarks_for_clocks_ranges(hwmgr,
1102 clock_ranges);
1103 }
1104
pp_display_clock_voltage_request(void * handle,struct pp_display_clock_request * clock)1105 static int pp_display_clock_voltage_request(void *handle,
1106 struct pp_display_clock_request *clock)
1107 {
1108 struct pp_hwmgr *hwmgr = handle;
1109
1110 if (!hwmgr || !hwmgr->pm_en ||!clock)
1111 return -EINVAL;
1112
1113 return phm_display_clock_voltage_request(hwmgr, clock);
1114 }
1115
pp_get_display_mode_validation_clocks(void * handle,struct amd_pp_simple_clock_info * clocks)1116 static int pp_get_display_mode_validation_clocks(void *handle,
1117 struct amd_pp_simple_clock_info *clocks)
1118 {
1119 struct pp_hwmgr *hwmgr = handle;
1120 int ret = 0;
1121
1122 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1123 return -EINVAL;
1124
1125 clocks->level = PP_DAL_POWERLEVEL_7;
1126
1127 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1128 ret = phm_get_max_high_clocks(hwmgr, clocks);
1129
1130 return ret;
1131 }
1132
pp_dpm_powergate_mmhub(void * handle)1133 static int pp_dpm_powergate_mmhub(void *handle)
1134 {
1135 struct pp_hwmgr *hwmgr = handle;
1136
1137 if (!hwmgr || !hwmgr->pm_en)
1138 return -EINVAL;
1139
1140 if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1141 pr_info_ratelimited("%s was not implemented.\n", __func__);
1142 return 0;
1143 }
1144
1145 return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1146 }
1147
pp_dpm_powergate_gfx(void * handle,bool gate)1148 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1149 {
1150 struct pp_hwmgr *hwmgr = handle;
1151
1152 if (!hwmgr || !hwmgr->pm_en)
1153 return 0;
1154
1155 if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1156 pr_info_ratelimited("%s was not implemented.\n", __func__);
1157 return 0;
1158 }
1159
1160 return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1161 }
1162
pp_dpm_powergate_acp(void * handle,bool gate)1163 static void pp_dpm_powergate_acp(void *handle, bool gate)
1164 {
1165 struct pp_hwmgr *hwmgr = handle;
1166
1167 if (!hwmgr || !hwmgr->pm_en)
1168 return;
1169
1170 if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1171 pr_info_ratelimited("%s was not implemented.\n", __func__);
1172 return;
1173 }
1174
1175 hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1176 }
1177
pp_dpm_powergate_sdma(void * handle,bool gate)1178 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1179 {
1180 struct pp_hwmgr *hwmgr = handle;
1181
1182 if (!hwmgr)
1183 return;
1184
1185 if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1186 pr_info_ratelimited("%s was not implemented.\n", __func__);
1187 return;
1188 }
1189
1190 hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1191 }
1192
pp_set_powergating_by_smu(void * handle,uint32_t block_type,bool gate)1193 static int pp_set_powergating_by_smu(void *handle,
1194 uint32_t block_type, bool gate)
1195 {
1196 int ret = 0;
1197
1198 switch (block_type) {
1199 case AMD_IP_BLOCK_TYPE_UVD:
1200 case AMD_IP_BLOCK_TYPE_VCN:
1201 pp_dpm_powergate_uvd(handle, gate);
1202 break;
1203 case AMD_IP_BLOCK_TYPE_VCE:
1204 pp_dpm_powergate_vce(handle, gate);
1205 break;
1206 case AMD_IP_BLOCK_TYPE_GMC:
1207 /*
1208 * For now, this is only used on PICASSO.
1209 * And only "gate" operation is supported.
1210 */
1211 if (gate)
1212 pp_dpm_powergate_mmhub(handle);
1213 break;
1214 case AMD_IP_BLOCK_TYPE_GFX:
1215 ret = pp_dpm_powergate_gfx(handle, gate);
1216 break;
1217 case AMD_IP_BLOCK_TYPE_ACP:
1218 pp_dpm_powergate_acp(handle, gate);
1219 break;
1220 case AMD_IP_BLOCK_TYPE_SDMA:
1221 pp_dpm_powergate_sdma(handle, gate);
1222 break;
1223 default:
1224 break;
1225 }
1226 return ret;
1227 }
1228
pp_notify_smu_enable_pwe(void * handle)1229 static int pp_notify_smu_enable_pwe(void *handle)
1230 {
1231 struct pp_hwmgr *hwmgr = handle;
1232
1233 if (!hwmgr || !hwmgr->pm_en)
1234 return -EINVAL;
1235
1236 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1237 pr_info_ratelimited("%s was not implemented.\n", __func__);
1238 return -EINVAL;
1239 }
1240
1241 hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1242
1243 return 0;
1244 }
1245
pp_enable_mgpu_fan_boost(void * handle)1246 static int pp_enable_mgpu_fan_boost(void *handle)
1247 {
1248 struct pp_hwmgr *hwmgr = handle;
1249
1250 if (!hwmgr)
1251 return -EINVAL;
1252
1253 if (!hwmgr->pm_en ||
1254 hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1255 return 0;
1256
1257 hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1258
1259 return 0;
1260 }
1261
pp_set_min_deep_sleep_dcefclk(void * handle,uint32_t clock)1262 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1263 {
1264 struct pp_hwmgr *hwmgr = handle;
1265
1266 if (!hwmgr || !hwmgr->pm_en)
1267 return -EINVAL;
1268
1269 if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1270 pr_debug("%s was not implemented.\n", __func__);
1271 return -EINVAL;
1272 }
1273
1274 hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1275
1276 return 0;
1277 }
1278
pp_set_hard_min_dcefclk_by_freq(void * handle,uint32_t clock)1279 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1280 {
1281 struct pp_hwmgr *hwmgr = handle;
1282
1283 if (!hwmgr || !hwmgr->pm_en)
1284 return -EINVAL;
1285
1286 if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1287 pr_debug("%s was not implemented.\n", __func__);
1288 return -EINVAL;
1289 }
1290
1291 hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1292
1293 return 0;
1294 }
1295
pp_set_hard_min_fclk_by_freq(void * handle,uint32_t clock)1296 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1297 {
1298 struct pp_hwmgr *hwmgr = handle;
1299
1300 if (!hwmgr || !hwmgr->pm_en)
1301 return -EINVAL;
1302
1303 if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1304 pr_debug("%s was not implemented.\n", __func__);
1305 return -EINVAL;
1306 }
1307
1308 hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1309
1310 return 0;
1311 }
1312
pp_set_active_display_count(void * handle,uint32_t count)1313 static int pp_set_active_display_count(void *handle, uint32_t count)
1314 {
1315 struct pp_hwmgr *hwmgr = handle;
1316
1317 if (!hwmgr || !hwmgr->pm_en)
1318 return -EINVAL;
1319
1320 return phm_set_active_display_count(hwmgr, count);
1321 }
1322
pp_get_asic_baco_capability(void * handle,bool * cap)1323 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1324 {
1325 struct pp_hwmgr *hwmgr = handle;
1326
1327 *cap = false;
1328 if (!hwmgr)
1329 return -EINVAL;
1330
1331 if (!(hwmgr->not_vf && amdgpu_dpm) ||
1332 !hwmgr->hwmgr_func->get_asic_baco_capability)
1333 return 0;
1334
1335 hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1336
1337 return 0;
1338 }
1339
pp_get_asic_baco_state(void * handle,int * state)1340 static int pp_get_asic_baco_state(void *handle, int *state)
1341 {
1342 struct pp_hwmgr *hwmgr = handle;
1343
1344 if (!hwmgr)
1345 return -EINVAL;
1346
1347 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1348 return 0;
1349
1350 hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1351
1352 return 0;
1353 }
1354
pp_set_asic_baco_state(void * handle,int state)1355 static int pp_set_asic_baco_state(void *handle, int state)
1356 {
1357 struct pp_hwmgr *hwmgr = handle;
1358
1359 if (!hwmgr)
1360 return -EINVAL;
1361
1362 if (!(hwmgr->not_vf && amdgpu_dpm) ||
1363 !hwmgr->hwmgr_func->set_asic_baco_state)
1364 return 0;
1365
1366 hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1367
1368 return 0;
1369 }
1370
pp_get_ppfeature_status(void * handle,char * buf)1371 static int pp_get_ppfeature_status(void *handle, char *buf)
1372 {
1373 struct pp_hwmgr *hwmgr = handle;
1374
1375 if (!hwmgr || !hwmgr->pm_en || !buf)
1376 return -EINVAL;
1377
1378 if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1379 pr_info_ratelimited("%s was not implemented.\n", __func__);
1380 return -EINVAL;
1381 }
1382
1383 return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1384 }
1385
pp_set_ppfeature_status(void * handle,uint64_t ppfeature_masks)1386 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1387 {
1388 struct pp_hwmgr *hwmgr = handle;
1389
1390 if (!hwmgr || !hwmgr->pm_en)
1391 return -EINVAL;
1392
1393 if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1394 pr_info_ratelimited("%s was not implemented.\n", __func__);
1395 return -EINVAL;
1396 }
1397
1398 return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1399 }
1400
pp_asic_reset_mode_2(void * handle)1401 static int pp_asic_reset_mode_2(void *handle)
1402 {
1403 struct pp_hwmgr *hwmgr = handle;
1404
1405 if (!hwmgr || !hwmgr->pm_en)
1406 return -EINVAL;
1407
1408 if (hwmgr->hwmgr_func->asic_reset == NULL) {
1409 pr_info_ratelimited("%s was not implemented.\n", __func__);
1410 return -EINVAL;
1411 }
1412
1413 return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1414 }
1415
pp_smu_i2c_bus_access(void * handle,bool acquire)1416 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1417 {
1418 struct pp_hwmgr *hwmgr = handle;
1419
1420 if (!hwmgr || !hwmgr->pm_en)
1421 return -EINVAL;
1422
1423 if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1424 pr_info_ratelimited("%s was not implemented.\n", __func__);
1425 return -EINVAL;
1426 }
1427
1428 return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1429 }
1430
pp_set_df_cstate(void * handle,enum pp_df_cstate state)1431 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1432 {
1433 struct pp_hwmgr *hwmgr = handle;
1434
1435 if (!hwmgr)
1436 return -EINVAL;
1437
1438 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1439 return 0;
1440
1441 hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1442
1443 return 0;
1444 }
1445
pp_set_xgmi_pstate(void * handle,uint32_t pstate)1446 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1447 {
1448 struct pp_hwmgr *hwmgr = handle;
1449
1450 if (!hwmgr)
1451 return -EINVAL;
1452
1453 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1454 return 0;
1455
1456 hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1457
1458 return 0;
1459 }
1460
pp_get_gpu_metrics(void * handle,void ** table)1461 static ssize_t pp_get_gpu_metrics(void *handle, void **table)
1462 {
1463 struct pp_hwmgr *hwmgr = handle;
1464
1465 if (!hwmgr)
1466 return -EINVAL;
1467
1468 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
1469 return -EOPNOTSUPP;
1470
1471 return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
1472 }
1473
pp_gfx_state_change_set(void * handle,uint32_t state)1474 static int pp_gfx_state_change_set(void *handle, uint32_t state)
1475 {
1476 struct pp_hwmgr *hwmgr = handle;
1477
1478 if (!hwmgr || !hwmgr->pm_en)
1479 return -EINVAL;
1480
1481 if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
1482 pr_info_ratelimited("%s was not implemented.\n", __func__);
1483 return -EINVAL;
1484 }
1485
1486 hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
1487 return 0;
1488 }
1489
pp_get_prv_buffer_details(void * handle,void ** addr,size_t * size)1490 static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
1491 {
1492 struct pp_hwmgr *hwmgr = handle;
1493 struct amdgpu_device *adev = hwmgr->adev;
1494 int err;
1495
1496 if (!addr || !size)
1497 return -EINVAL;
1498
1499 *addr = NULL;
1500 *size = 0;
1501 if (adev->pm.smu_prv_buffer) {
1502 err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
1503 if (err)
1504 return err;
1505 *size = adev->pm.smu_prv_buffer_size;
1506 }
1507
1508 return 0;
1509 }
1510
pp_pm_compute_clocks(void * handle)1511 static void pp_pm_compute_clocks(void *handle)
1512 {
1513 struct pp_hwmgr *hwmgr = handle;
1514 struct amdgpu_device *adev = hwmgr->adev;
1515
1516 if (!adev->dc_enabled) {
1517 amdgpu_dpm_get_active_displays(adev);
1518 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1519 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1520 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
1521 /* we have issues with mclk switching with
1522 * refresh rates over 120 hz on the non-DC code.
1523 */
1524 if (adev->pm.pm_display_cfg.vrefresh > 120)
1525 adev->pm.pm_display_cfg.min_vblank_time = 0;
1526
1527 pp_display_configuration_change(handle,
1528 &adev->pm.pm_display_cfg);
1529 }
1530
1531 pp_dpm_dispatch_tasks(handle,
1532 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
1533 NULL);
1534 }
1535
/* Powerplay's implementation of the amd_pm_funcs dispatch table consumed by
 * the amdgpu dpm layer; entries below the "export to DC" marker are also
 * called from the display core.
 */
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.print_clock_levels = pp_dpm_print_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
	/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};
1603