// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//          Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//          Rander Wang <rander.wang@intel.com>
//          Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for generic Intel audio DSP HDA IP
 */

#include <linux/module.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include "../sof-audio.h"
#include "../ops.h"
#include "hda.h"
#include "hda-ipc.h"

static bool hda_enable_trace_D0I3_S0;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG)
module_param_named(enable_trace_D0I3_S0, hda_enable_trace_D0I3_S0, bool, 0444);
MODULE_PARM_DESC(enable_trace_D0I3_S0,
		 "SOF HDA enable trace when the DSP is in D0I3 in S0");
#endif

/*
 * DSP Core control.
 */

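/*
 * The helpers below drive the per-core bit fields of the ADSPCS register:
 * CRST places the selected cores in or out of reset, CSTALL stalls or
 * unstalls them, SPA requests a power state change and CPA reflects the
 * power state actually reached, so CPA is polled after SPA is written.
 */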
static int hda_dsp_core_reset_enter(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	u32 reset;
	int ret;

	/* set reset bits for cores */
	reset = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS,
			reset, reset);

	/* poll with timeout to check if operation successful */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS, adspcs,
			((adspcs & reset) == reset),
			HDA_DSP_REG_POLL_INTERVAL_US,
			HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core entered reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CRST_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: reset enter failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

static int hda_dsp_core_reset_leave(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int crst;
	u32 adspcs;
	int ret;

	/* clear reset bits for cores */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS,
			HDA_DSP_ADSPCS_CRST_MASK(core_mask),
			0);

	/* poll with timeout to check if operation successful */
	crst = HDA_DSP_ADSPCS_CRST_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS, adspcs,
			!(adspcs & crst),
			HDA_DSP_REG_POLL_INTERVAL_US,
			HDA_DSP_RESET_TIMEOUT_US);

	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* has core left reset ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) != 0) {
		dev_err(sdev->dev,
			"error: reset leave failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

static int hda_dsp_core_stall_reset(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	/* stall core */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS,
			HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
			HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

	/* set reset state */
	return hda_dsp_core_reset_enter(sdev, core_mask);
}

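/*
 * A core is reported as enabled only when power has been both requested
 * (SPA) and granted (CPA) and the core is neither held in reset nor
 * stalled.
 */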
static bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int val;
	bool is_enable;

	val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);

#define MASK_IS_EQUAL(v, m, field) ({	\
	u32 _m = field(m);		\
	((v) & _m) == _m;		\
})

	is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
		MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
		!(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
		!(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));

#undef MASK_IS_EQUAL

	dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
		is_enable, core_mask);

	return is_enable;
}

int hda_dsp_core_run(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	int ret;

	/* leave reset state */
	ret = hda_dsp_core_reset_leave(sdev, core_mask);
	if (ret < 0)
		return ret;

	/* run core */
	dev_dbg(sdev->dev, "unstall/run core: core_mask = %x\n", core_mask);
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS,
			HDA_DSP_ADSPCS_CSTALL_MASK(core_mask),
			0);

	/* is core now running ? */
	if (!hda_dsp_core_is_enabled(sdev, core_mask)) {
		hda_dsp_core_stall_reset(sdev, core_mask);
		dev_err(sdev->dev, "error: DSP start core failed: core_mask %x\n",
			core_mask);
		ret = -EIO;
	}

	return ret;
}

/*
 * Power Management.
 */

static int hda_dsp_core_power_up(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	unsigned int cpa;
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS,
				HDA_DSP_ADSPCS_SPA_MASK(core_mask),
				HDA_DSP_ADSPCS_SPA_MASK(core_mask));

	/* poll with timeout to check if operation successful */
	cpa = HDA_DSP_ADSPCS_CPA_MASK(core_mask);
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS, adspcs,
			(adspcs & cpa) == cpa,
			HDA_DSP_REG_POLL_INTERVAL_US,
			HDA_DSP_RESET_TIMEOUT_US);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);
		return ret;
	}

	/* did core power up ? */
	adspcs = snd_sof_dsp_read(sdev, HDA_DSP_BAR,
				  HDA_DSP_REG_ADSPCS);
	if ((adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) !=
	    HDA_DSP_ADSPCS_CPA_MASK(core_mask)) {
		dev_err(sdev->dev,
			"error: power up core failed: core_mask %x adspcs 0x%x\n",
			core_mask, adspcs);
		ret = -EIO;
	}

	return ret;
}

static int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	u32 adspcs;
	int ret;

	/* update bits */
	snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS,
			HDA_DSP_ADSPCS_SPA_MASK(core_mask), 0);

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
			HDA_DSP_REG_ADSPCS, adspcs,
			!(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
			HDA_DSP_REG_POLL_INTERVAL_US,
			HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
	if (ret < 0)
		dev_err(sdev->dev,
			"error: %s: timeout on HDA_DSP_REG_ADSPCS read\n",
			__func__);

	return ret;
}

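/*
 * Bring a set of host-managed cores up: power them up first, then take
 * them out of reset and unstall them via hda_dsp_core_run(). Cores that
 * are not host managed, or that are already enabled, are skipped.
 */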
int hda_dsp_enable_core(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid or cores are already enabled */
	if (!core_mask || hda_dsp_core_is_enabled(sdev, core_mask))
		return 0;

	/* power up */
	ret = hda_dsp_core_power_up(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power up failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	return hda_dsp_core_run(sdev, core_mask);
}

int hda_dsp_core_reset_power_down(struct snd_sof_dev *sdev,
				  unsigned int core_mask)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int ret;

	/* restrict core_mask to host managed cores mask */
	core_mask &= chip->host_managed_cores_mask;

	/* return if core_mask is not valid */
	if (!core_mask)
		return 0;

	/* place core in reset prior to power down */
	ret = hda_dsp_core_stall_reset(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core reset failed: core_mask %x\n",
			core_mask);
		return ret;
	}

	/* power down core */
	ret = hda_dsp_core_power_down(sdev, core_mask);
	if (ret < 0) {
		dev_err(sdev->dev, "error: dsp core power down fail mask %x: %d\n",
			core_mask, ret);
		return ret;
	}

	/* make sure we are in OFF state */
	if (hda_dsp_core_is_enabled(sdev, core_mask)) {
		dev_err(sdev->dev, "error: dsp core disable fail mask %x: %d\n",
			core_mask, ret);
		ret = -EIO;
	}

	return ret;
}

void hda_dsp_ipc_int_enable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	/* enable IPC DONE and BUSY interrupts */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY,
			HDA_DSP_REG_HIPCCTL_DONE | HDA_DSP_REG_HIPCCTL_BUSY);

	/* enable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, HDA_DSP_ADSPIC_IPC);
}

void hda_dsp_ipc_int_disable(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;

	/* disable IPC interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPIC,
				HDA_DSP_ADSPIC_IPC, 0);

	/* disable IPC BUSY and DONE interrupt */
	snd_sof_dsp_update_bits(sdev, HDA_DSP_BAR, chip->ipc_ctl,
			HDA_DSP_REG_HIPCCTL_BUSY | HDA_DSP_REG_HIPCCTL_DONE, 0);
}

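/*
 * The vendor-specific D0I3C register must only be written while its
 * Command-In-Progress (CIP) bit is clear, so poll CIP with a bounded
 * retry count before and after every update.
 */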
static int hda_dsp_wait_d0i3c_done(struct snd_sof_dev *sdev)
{
	struct hdac_bus *bus = sof_to_bus(sdev);
	int retry = HDA_DSP_REG_POLL_RETRY_COUNT;

	while (snd_hdac_chip_readb(bus, VS_D0I3C) & SOF_HDA_VS_D0I3C_CIP) {
		if (!retry--)
			return -ETIMEDOUT;
		usleep_range(10, 15);
	}

	return 0;
}

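/*
 * Note: the _no_pm variant of the IPC TX helper is used here because the
 * PM_GATE message is sent from within a power state transition, so sending
 * it should not itself trigger the usual power management handling that
 * surrounds normal IPC traffic.
 */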
static int hda_dsp_send_pm_gate_ipc(struct snd_sof_dev *sdev, u32 flags)
{
	struct sof_ipc_pm_gate pm_gate;
	struct sof_ipc_reply reply;

	memset(&pm_gate, 0, sizeof(pm_gate));

	/* configure pm_gate ipc message */
	pm_gate.hdr.size = sizeof(pm_gate);
	pm_gate.hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_GATE;
	pm_gate.flags = flags;

	/* send pm_gate ipc to dsp */
	return sof_ipc_tx_message_no_pm(sdev->ipc, pm_gate.hdr.cmd,
					&pm_gate, sizeof(pm_gate), &reply,
					sizeof(reply));
}

static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value)
{
	struct hdac_bus *bus = sof_to_bus(sdev);
	int ret;

	/* Write to D0I3C after Command-In-Progress bit is cleared */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(bus->dev, "CIP timeout before D0I3C update!\n");
		return ret;
	}

	/* Update D0I3C register */
	snd_hdac_chip_updateb(bus, VS_D0I3C, SOF_HDA_VS_D0I3C_I3, value);

	/* Wait for cmd in progress to be cleared before exiting the function */
	ret = hda_dsp_wait_d0i3c_done(sdev);
	if (ret < 0) {
		dev_err(bus->dev, "CIP timeout after D0I3C update!\n");
		return ret;
	}

	dev_vdbg(bus->dev, "D0I3C updated, register = 0x%x\n",
		 snd_hdac_chip_readb(bus, VS_D0I3C));

	return 0;
}

static int hda_dsp_set_D0_state(struct snd_sof_dev *sdev,
				const struct sof_dsp_power_state *target_state)
{
	u32 flags = 0;
	int ret;
	u8 value = 0;

	/*
	 * Sanity check for illegal state transitions
	 * The only allowed transitions are:
	 * 1. D3 -> D0I0
	 * 2. D0I0 -> D0I3
	 * 3. D0I3 -> D0I0
	 */
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		/* Follow the sequence below for D0 substate transitions */
		break;
	case SOF_DSP_PM_D3:
		/* Follow regular flow for D3 -> D0 transition */
		return 0;
	default:
		dev_err(sdev->dev, "error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	}

	/* Set flags and register value for D0 target substate */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3) {
		value = SOF_HDA_VS_D0I3C_I3;

		/*
		 * Trace DMA needs to be disabled when the DSP enters
		 * D0I3 for S0Ix suspend, but it can be kept enabled
		 * when the DSP enters D0I3 while the system is in S0
		 * for debug purposes.
		 */
		if (!sdev->dtrace_is_supported ||
		    !hda_enable_trace_D0I3_S0 ||
		    sdev->system_suspend_target != SOF_SUSPEND_NONE)
			flags = HDA_PM_NO_DMA_TRACE;
	} else {
		/* prevent power gating in D0I0 */
		flags = HDA_PM_PPG;
	}

	/* update D0I3C register */
	ret = hda_dsp_update_d0i3c_register(sdev, value);
	if (ret < 0)
		return ret;

	/*
	 * Notify the DSP of the state change.
	 * If this IPC fails, revert the D0I3C register update in order
	 * to prevent partial state change.
	 */
	ret = hda_dsp_send_pm_gate_ipc(sdev, flags);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: PM_GATE ipc error %d\n", ret);
		goto revert;
	}

	return ret;

revert:
	/* fallback to the previous register value */
	value = value ? 0 : SOF_HDA_VS_D0I3C_I3;

	/*
	 * This can fail but return the IPC error to signal that
	 * the state change failed.
	 */
	hda_dsp_update_d0i3c_register(sdev, value);

	return ret;
}

/* helper to log DSP state */
static void hda_dsp_state_log(struct snd_sof_dev *sdev)
{
	switch (sdev->dsp_power_state.state) {
	case SOF_DSP_PM_D0:
		switch (sdev->dsp_power_state.substate) {
		case SOF_HDA_DSP_PM_D0I0:
			dev_dbg(sdev->dev, "Current DSP power state: D0I0\n");
			break;
		case SOF_HDA_DSP_PM_D0I3:
			dev_dbg(sdev->dev, "Current DSP power state: D0I3\n");
			break;
		default:
			dev_dbg(sdev->dev, "Unknown DSP D0 substate: %d\n",
				sdev->dsp_power_state.substate);
			break;
		}
		break;
	case SOF_DSP_PM_D1:
		dev_dbg(sdev->dev, "Current DSP power state: D1\n");
		break;
	case SOF_DSP_PM_D2:
		dev_dbg(sdev->dev, "Current DSP power state: D2\n");
		break;
	case SOF_DSP_PM_D3_HOT:
		dev_dbg(sdev->dev, "Current DSP power state: D3_HOT\n");
		break;
	case SOF_DSP_PM_D3:
		dev_dbg(sdev->dev, "Current DSP power state: D3\n");
		break;
	case SOF_DSP_PM_D3_COLD:
		dev_dbg(sdev->dev, "Current DSP power state: D3_COLD\n");
		break;
	default:
		dev_dbg(sdev->dev, "Unknown DSP power state: %d\n",
			sdev->dsp_power_state.state);
		break;
	}
}

/*
 * All DSP power state transitions are initiated by the driver.
 * If the requested state change fails, the error is simply returned.
 * Further state transitions are attempted only when the set_power_save() op
 * is called again either because of a new IPC sent to the DSP or
 * during system suspend/resume.
 */
int hda_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	/*
	 * When the DSP is already in D0I3 and the target state is D0I3,
	 * it could be the case that the DSP is in D0I3 during S0
	 * and the system is suspending to S0Ix. Therefore,
	 * hda_dsp_set_D0_state() must be called to disable trace DMA
	 * by sending the PM_GATE IPC to the FW.
	 */
	if (target_state->substate == SOF_HDA_DSP_PM_D0I3 &&
	    sdev->system_suspend_target == SOF_SUSPEND_S0IX)
		goto set_state;

	/*
	 * For all other cases, return without doing anything if
	 * the DSP is already in the target state.
	 */
	if (target_state->state == sdev->dsp_power_state.state &&
	    target_state->substate == sdev->dsp_power_state.substate)
		return 0;

set_state:
	switch (target_state->state) {
	case SOF_DSP_PM_D0:
		ret = hda_dsp_set_D0_state(sdev, target_state);
		break;
	case SOF_DSP_PM_D3:
		/* The only allowed transition is: D0I0 -> D3 */
		if (sdev->dsp_power_state.state == SOF_DSP_PM_D0 &&
		    sdev->dsp_power_state.substate == SOF_HDA_DSP_PM_D0I0)
			break;

		dev_err(sdev->dev,
			"error: transition from %d to %d not allowed\n",
			sdev->dsp_power_state.state, target_state->state);
		return -EINVAL;
	default:
		dev_err(sdev->dev, "error: target state unsupported %d\n",
			target_state->state);
		return -EINVAL;
	}
	if (ret < 0) {
		dev_err(sdev->dev,
			"failed to set requested target DSP state %d substate %d\n",
			target_state->state, target_state->substate);
		return ret;
	}

	sdev->dsp_power_state = *target_state;
	hda_dsp_state_log(sdev);
	return ret;
}

/*
 * Audio DSP states may transition as below:
 *
 *                                          Opportunistic D0I3 in S0
 *      Runtime    +---------------------+  Delayed D0i3 work timeout
 *      suspend    |                     +--------------------+
 *    +------------+       D0I0(active)  |                    |
 *    |            |                     <---------------+    |
 *    |    +-------->                    |    New IPC    |    |
 *    |    |Runtime+--^--+---------^--+--+ (via mailbox) |    |
 *    |    |resume    |  |         |  |                  |    |
 *    |    |          |  |         |  |                  |    |
 *    |    |    System|  |         |  |                  |    |
 *    |    |    resume|  | S3/S0IX |  |                  |    |
 *    |    |          |  | suspend |  | S0IX             |    |
 *    |    |          |  |         |  |suspend           |    |
 *    |    |          |  |         |  |                  |    |
 *    |    |          |  |         |  |                  |    |
 *  +-v----+----------+--v-------+ |  |           +------+----v----+
 *  |                            | |  +----------->                |
 *  |       D3 (suspended)       | |              |      D0I3      |
 *  |                            | +--------------+                |
 *  |                            |  System resume |                |
 *  +----------------------------+                +----------------+
 *
 * S0IX suspend: The DSP is in D0I3 if any D0I3-compatible streams
 *               ignored the suspend trigger. Otherwise the DSP
 *               is in D3.
 */

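/*
 * Common suspend path: mask SoundWire and IPC interrupts, power down the
 * host-managed DSP cores, stop the HDA controller and put the link in
 * reset, then drop the i915 display codec power reference.
 */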
static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	struct hdac_bus *bus = sof_to_bus(sdev);
#endif
	int ret;

	hda_sdw_int_enable(sdev, false);

	/* disable IPC interrupts */
	hda_dsp_ipc_int_disable(sdev);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	hda_codec_jack_wake_enable(sdev, runtime_suspend);

	/* power down all hda links */
	snd_hdac_ext_bus_link_power_down_all(bus);
#endif

	/* power down DSP */
	ret = snd_sof_dsp_core_power_down(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to power down core during suspend\n");
		return ret;
	}

	/* disable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	hda_dsp_ctrl_ppcap_int_enable(sdev, false);

	/* disable hda bus irq and streams */
	hda_dsp_ctrl_stop_chip(sdev);

	/* disable LP retention mode */
	snd_sof_pci_update_bits(sdev, PCI_PGCTL,
				PCI_PGCTL_LSRMD_MASK, PCI_PGCTL_LSRMD_MASK);

	/* reset controller */
	ret = hda_dsp_ctrl_link_reset(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to reset controller during suspend\n");
		return ret;
	}

	/* display codec can be powered off after link reset */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}

static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct hdac_ext_link *hlink = NULL;
#endif
	int ret;

	/* display codec must be powered before link reset */
	hda_codec_i915_display_power(sdev, true);

	/*
	 * clear TCSEL to clear playback on some HD Audio
	 * codecs. PCI TCSEL is defined in the Intel manuals.
	 */
	snd_sof_pci_update_bits(sdev, PCI_TCSEL, 0x07, 0);

	/* reset and start hda controller */
	ret = hda_dsp_ctrl_init_chip(sdev, true);
	if (ret < 0) {
		dev_err(sdev->dev,
			"error: failed to start controller after resume\n");
		goto cleanup;
	}

#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	/* check jack status */
	if (runtime_resume) {
		hda_codec_jack_wake_enable(sdev, false);
		if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
			hda_codec_jack_check(sdev);
	}

	/* turn off the links that were off before suspend */
	list_for_each_entry(hlink, &bus->hlink_list, list) {
		if (!hlink->ref_count)
			snd_hdac_ext_bus_link_power_down(hlink);
	}

	/* check dma status and clean up CORB/RIRB buffers */
	if (!bus->cmd_dma_state)
		snd_hdac_bus_stop_cmd_io(bus);
#endif

	/* enable ppcap interrupt */
	hda_dsp_ctrl_ppcap_enable(sdev, true);
	hda_dsp_ctrl_ppcap_int_enable(sdev, true);

cleanup:
	/* display codec can be powered off after controller init */
	hda_codec_i915_display_power(sdev, false);

	return 0;
}

int hda_dsp_resume(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I0,
	};
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct hdac_ext_link *hlink = NULL;
#endif
	int ret;

	/* resume from D0I3 */
	if (sdev->dsp_power_state.state == SOF_DSP_PM_D0) {
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
		/* power up links that were active before suspend */
		list_for_each_entry(hlink, &bus->hlink_list, list) {
			if (hlink->ref_count) {
				ret = snd_hdac_ext_bus_link_power_up(hlink);
				if (ret < 0) {
					dev_dbg(sdev->dev,
						"error %d in %s: failed to power up links",
						ret, __func__);
					return ret;
				}
			}
		}

		/* set up CORB/RIRB buffers if they were on before suspend */
		if (bus->cmd_dma_state)
			snd_hdac_bus_init_cmd_io(bus);
#endif

		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_state.state, target_state.substate);
			return ret;
		}

		/* restore L1SEN bit */
		if (hda->l1_support_changed)
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN, 0);

		/* restore and disable the system wakeup */
		pci_restore_state(pci);
		disable_irq_wake(pci->irq);
		return 0;
	}

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, false);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}

int hda_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
	};
	int ret;

	/* init hda controller. DSP cores will be powered up during fw boot */
	ret = hda_resume(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}

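/* Refuse runtime suspend as long as any codec on the bus is still powered */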
int hda_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	struct hdac_bus *hbus = sof_to_bus(sdev);

	if (hbus->codec_powered) {
		dev_dbg(sdev->dev, "some codecs still powered (%08X), not idle\n",
			(unsigned int)hbus->codec_powered);
		return -EBUSY;
	}

	return 0;
}

int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D3,
	};
	int ret;

	/* cancel any attempt for DSP D0I3 */
	cancel_delayed_work_sync(&hda->d0i3_work);

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, true);
	if (ret < 0)
		return ret;

	return snd_sof_dsp_set_power_state(sdev, &target_state);
}

int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_dsp_power_state target_dsp_state = {
		.state = target_state,
		.substate = target_state == SOF_DSP_PM_D0 ?
			SOF_HDA_DSP_PM_D0I3 : 0,
	};
	int ret;

	/* cancel any attempt for DSP D0I3 */
	cancel_delayed_work_sync(&hda->d0i3_work);

	if (target_state == SOF_DSP_PM_D0) {
		/* Set DSP power state */
		ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
		if (ret < 0) {
			dev_err(sdev->dev, "error: setting dsp state %d substate %d\n",
				target_dsp_state.state,
				target_dsp_state.substate);
			return ret;
		}

		/* enable L1SEN to make sure the system can enter S0Ix */
		hda->l1_support_changed =
			snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
						HDA_VS_INTEL_EM2,
						HDA_VS_INTEL_EM2_L1SEN,
						HDA_VS_INTEL_EM2_L1SEN);

#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
		/* stop the CORB/RIRB DMA if it is On */
		if (bus->cmd_dma_state)
			snd_hdac_bus_stop_cmd_io(bus);

		/* no link can be powered in s0ix state */
		ret = snd_hdac_ext_bus_link_power_down_all(bus);
		if (ret < 0) {
			dev_dbg(sdev->dev,
				"error %d in %s: failed to power down links",
				ret, __func__);
			return ret;
		}
#endif

		/* enable the system waking up via IPC IRQ */
		enable_irq_wake(pci->irq);
		pci_save_state(pci);
		return 0;
	}

	/* stop hda controller and power dsp off */
	ret = hda_suspend(sdev, false);
	if (ret < 0) {
		dev_err(bus->dev, "error: suspending dsp\n");
		return ret;
	}

	return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
}

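/*
 * Shutdown reuses the S3 suspend path so that the DSP cores and the HDA
 * controller are quiesced before the platform powers off.
 */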
int hda_dsp_shutdown(struct snd_sof_dev *sdev)
{
	sdev->system_suspend_target = SOF_SUSPEND_S3;
	return snd_sof_suspend(sdev->dev);
}

int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
	struct hdac_bus *bus = sof_to_bus(sdev);
	struct snd_soc_pcm_runtime *rtd;
	struct hdac_ext_stream *stream;
	struct hdac_ext_link *link;
	struct hdac_stream *s;
	const char *name;
	int stream_tag;

	/* set internal flag for BE */
	list_for_each_entry(s, &bus->stream_list, list) {
		stream = stream_to_hdac_ext_stream(s);

		/*
		 * Clear the stream. This should already be taken care of for
		 * running streams when the SUSPEND trigger is called. But
		 * paused streams do not get suspended, so this needs to be
		 * done explicitly during suspend.
		 */
		if (stream->link_substream) {
			rtd = asoc_substream_to_rtd(stream->link_substream);
			name = asoc_rtd_to_codec(rtd, 0)->component->name;
			link = snd_hdac_ext_bus_get_link(bus, name);
			if (!link)
				return -EINVAL;

			stream->link_prepared = 0;

			if (hdac_stream(stream)->direction ==
			    SNDRV_PCM_STREAM_CAPTURE)
				continue;

			stream_tag = hdac_stream(stream)->stream_tag;
			snd_hdac_ext_link_clear_stream_id(link, stream_tag);
		}
	}
#endif
	return 0;
}

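/*
 * Delayed work used to opportunistically move the DSP from D0I0 to D0I3
 * once only D0I3-compatible streams remain active (see the state diagram
 * above).
 */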
void hda_dsp_d0i3_work(struct work_struct *work)
{
	struct sof_intel_hda_dev *hdev = container_of(work,
			struct sof_intel_hda_dev,
			d0i3_work.work);
	struct hdac_bus *bus = &hdev->hbus.core;
	struct snd_sof_dev *sdev = dev_get_drvdata(bus->dev);
	struct sof_dsp_power_state target_state = {
		.state = SOF_DSP_PM_D0,
		.substate = SOF_HDA_DSP_PM_D0I3,
	};
	int ret;

	/* DSP can enter D0I3 iff only D0I3-compatible streams are active */
	if (!snd_sof_dsp_only_d0i3_compatible_stream_active(sdev))
		/* remain in D0I0 */
		return;

	/* This can fail but error cannot be propagated */
	ret = snd_sof_dsp_set_power_state(sdev, &target_state);
	if (ret < 0)
		dev_err_ratelimited(sdev->dev,
				    "error: failed to set DSP state %d substate %d\n",
				    target_state.state, target_state.substate);
}