1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2019 MediaTek Inc.
4 * Authors:
5 * Stanley Chu <stanley.chu@mediatek.com>
6 * Peter Wang <peter.wang@mediatek.com>
7 */
8
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/phy/phy.h>
18 #include <linux/platform_device.h>
19 #include <linux/pm_qos.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 #include <linux/soc/mediatek/mtk_sip_svc.h>
23
24 #include <ufs/ufshcd.h>
25 #include "ufshcd-pltfrm.h"
26 #include <ufs/ufs_quirks.h>
27 #include <ufs/unipro.h>
28 #include "ufs-mediatek.h"
29
30 #define CREATE_TRACE_POINTS
31 #include "ufs-mediatek-trace.h"
32
33 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
34 { .wmanufacturerid = UFS_ANY_VENDOR,
35 .model = UFS_ANY_MODEL,
36 .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
37 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
38 { .wmanufacturerid = UFS_VENDOR_SKHYNIX,
39 .model = "H9HQ21AFAMZDAR",
40 .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
41 {}
42 };
43
44 static const struct of_device_id ufs_mtk_of_match[] = {
45 { .compatible = "mediatek,mt8183-ufshci" },
46 {},
47 };
48
49 /*
50 * Details of UIC Errors
51 */
52 static const char *const ufs_uic_err_str[] = {
53 "PHY Adapter Layer",
54 "Data Link Layer",
55 "Network Link Layer",
56 "Transport Link Layer",
57 "DME"
58 };
59
60 static const char *const ufs_uic_pa_err_str[] = {
61 "PHY error on Lane 0",
62 "PHY error on Lane 1",
63 "PHY error on Lane 2",
64 "PHY error on Lane 3",
65 "Generic PHY Adapter Error. This should be the LINERESET indication"
66 };
67
68 static const char *const ufs_uic_dl_err_str[] = {
69 "NAC_RECEIVED",
70 "TCx_REPLAY_TIMER_EXPIRED",
71 "AFCx_REQUEST_TIMER_EXPIRED",
72 "FCx_PROTECTION_TIMER_EXPIRED",
73 "CRC_ERROR",
74 "RX_BUFFER_OVERFLOW",
75 "MAX_FRAME_LENGTH_EXCEEDED",
76 "WRONG_SEQUENCE_NUMBER",
77 "AFC_FRAME_SYNTAX_ERROR",
78 "NAC_FRAME_SYNTAX_ERROR",
79 "EOF_SYNTAX_ERROR",
80 "FRAME_SYNTAX_ERROR",
81 "BAD_CTRL_SYMBOL_TYPE",
82 "PA_INIT_ERROR",
83 "PA_ERROR_IND_RECEIVED",
84 "PA_INIT"
85 };
86
87 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
88 {
89 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
90
91 return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
92 }
93
94 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
95 {
96 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
97
98 return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
99 }
100
101 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
102 {
103 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
104
105 return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
106 }
107
108 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
109 {
110 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
111
112 return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
113 }
114
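/*
 * Enable or disable UniPro clock gating by toggling the RX symbol, SYS
 * and TX clock gate bits in VS_SAVEPOWERCONTROL, and by clearing or
 * setting TX_SYMBOL_CLK_REQ_FORCE in VS_DEBUGCLOCKENABLE.
 */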
115 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
116 {
117 u32 tmp;
118
119 if (enable) {
120 ufshcd_dme_get(hba,
121 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
122 tmp = tmp |
123 (1 << RX_SYMBOL_CLK_GATE_EN) |
124 (1 << SYS_CLK_GATE_EN) |
125 (1 << TX_CLK_GATE_EN);
126 ufshcd_dme_set(hba,
127 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
128
129 ufshcd_dme_get(hba,
130 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
131 tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
132 ufshcd_dme_set(hba,
133 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
134 } else {
135 ufshcd_dme_get(hba,
136 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
137 tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
138 (1 << SYS_CLK_GATE_EN) |
139 (1 << TX_CLK_GATE_EN));
140 ufshcd_dme_set(hba,
141 UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
142
143 ufshcd_dme_get(hba,
144 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
145 tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
146 ufshcd_dme_set(hba,
147 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
148 }
149 }
150
151 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
152 {
153 struct arm_smccc_res res;
154
155 ufs_mtk_crypto_ctrl(res, 1);
156 if (res.a0) {
157 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
158 __func__, res.a0);
159 hba->caps &= ~UFSHCD_CAP_CRYPTO;
160 }
161 }
162
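/* Fully reset the host by asserting and then deasserting the HCI, crypto and UniPro resets. */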
163 static void ufs_mtk_host_reset(struct ufs_hba *hba)
164 {
165 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
166
167 reset_control_assert(host->hci_reset);
168 reset_control_assert(host->crypto_reset);
169 reset_control_assert(host->unipro_reset);
170
171 usleep_range(100, 110);
172
173 reset_control_deassert(host->unipro_reset);
174 reset_control_deassert(host->crypto_reset);
175 reset_control_deassert(host->hci_reset);
176 }
177
178 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
179 struct reset_control **rc,
180 char *str)
181 {
182 *rc = devm_reset_control_get(hba->dev, str);
183 if (IS_ERR(*rc)) {
184 dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
185 str, PTR_ERR(*rc));
186 *rc = NULL;
187 }
188 }
189
190 static void ufs_mtk_init_reset(struct ufs_hba *hba)
191 {
192 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
193
194 ufs_mtk_init_reset_control(hba, &host->hci_reset,
195 "hci_rst");
196 ufs_mtk_init_reset_control(hba, &host->unipro_reset,
197 "unipro_rst");
198 ufs_mtk_init_reset_control(hba, &host->crypto_reset,
199 "crypto_rst");
200 }
201
202 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
203 enum ufs_notify_change_status status)
204 {
205 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
206
207 if (status == PRE_CHANGE) {
208 if (host->unipro_lpm) {
209 hba->vps->hba_enable_delay_us = 0;
210 } else {
211 hba->vps->hba_enable_delay_us = 600;
212 ufs_mtk_host_reset(hba);
213 }
214
215 if (hba->caps & UFSHCD_CAP_CRYPTO)
216 ufs_mtk_crypto_enable(hba);
217
218 if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
219 ufshcd_writel(hba, 0,
220 REG_AUTO_HIBERNATE_IDLE_TIMER);
221 hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
222 hba->ahit = 0;
223 }
224
225 /*
226 * Turn on CLK_CG early to bypass an abnormal ERR_CHK signal
227 * and prevent a host hang issue
228 */
229 ufshcd_writel(hba,
230 ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
231 REG_UFS_XOUFS_CTRL);
232 }
233
234 return 0;
235 }
236
237 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
238 {
239 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
240 struct device *dev = hba->dev;
241 struct device_node *np = dev->of_node;
242 int err = 0;
243
244 host->mphy = devm_of_phy_get_by_index(dev, np, 0);
245
246 if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
247 /*
248 * The UFS driver might be probed before the phy driver.
249 * In that case, return -EPROBE_DEFER.
250 */
251 err = -EPROBE_DEFER;
252 dev_info(dev,
253 "%s: required phy hasn't probed yet. err = %d\n",
254 __func__, err);
255 } else if (IS_ERR(host->mphy)) {
256 err = PTR_ERR(host->mphy);
257 if (err != -ENODEV) {
258 dev_info(dev, "%s: PHY get failed %d\n", __func__,
259 err);
260 }
261 }
262
263 if (err)
264 host->mphy = NULL;
265 /*
266 * Allow unbound mphy because not every platform needs specific
267 * mphy control.
268 */
269 if (err == -ENODEV)
270 err = 0;
271
272 return err;
273 }
274
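/*
 * Request or release the device reference clock through
 * REG_UFS_REFCLK_CTRL and poll until the ack bit follows the request
 * bit, honoring the vendor-specific gating/ungating wait times.
 */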
275 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
276 {
277 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
278 struct arm_smccc_res res;
279 ktime_t timeout, time_checked;
280 u32 value;
281
282 if (host->ref_clk_enabled == on)
283 return 0;
284
285 ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
286
287 if (on) {
288 ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
289 } else {
290 ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
291 ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
292 }
293
294 /* Wait for ack */
295 timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
296 do {
297 time_checked = ktime_get();
298 value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
299
300 /* Wait until the ack bit equals the req bit */
301 if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
302 goto out;
303
304 usleep_range(100, 200);
305 } while (ktime_before(time_checked, timeout));
306
307 dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
308
309 ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
310
311 return -ETIMEDOUT;
312
313 out:
314 host->ref_clk_enabled = on;
315 if (on)
316 ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
317
318 ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
319
320 return 0;
321 }
322
323 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
324 u16 gating_us)
325 {
326 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
327
328 if (hba->dev_info.clk_gating_wait_us) {
329 host->ref_clk_gating_wait_us =
330 hba->dev_info.clk_gating_wait_us;
331 } else {
332 host->ref_clk_gating_wait_us = gating_us;
333 }
334
335 host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
336 }
337
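/* Route the debug signals of interest to REG_UFS_PROBE. */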
338 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
339 {
340 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
341
342 if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
343 ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
344 ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
345 ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
346 ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
347 ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
348 } else {
349 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
350 }
351 }
352
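/*
 * Wait for the host state machine to leave the Hibern8 enter/exit
 * states and return to the idle (VS_HCE_BASE) state.
 */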
353 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
354 unsigned long retry_ms)
355 {
356 u64 timeout, time_checked;
357 u32 val, sm;
358 bool wait_idle;
359
360 /* cannot use plain ktime_get() in suspend */
361 timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
362
363 /* wait a specific time after check base */
364 udelay(10);
365 wait_idle = false;
366
367 do {
368 time_checked = ktime_get_mono_fast_ns();
369 ufs_mtk_dbg_sel(hba);
370 val = ufshcd_readl(hba, REG_UFS_PROBE);
371
372 sm = val & 0x1f;
373
374 /*
375 * If the state is H8 enter or H8 enter confirm,
376 * wait until it returns to the idle state.
377 */
378 if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
379 wait_idle = true;
380 udelay(50);
381 continue;
382 } else if (!wait_idle)
383 break;
384
385 if (wait_idle && (sm == VS_HCE_BASE))
386 break;
387 } while (time_checked < timeout);
388
389 if (wait_idle && sm != VS_HCE_BASE)
390 dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
391 }
392
393 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
394 unsigned long max_wait_ms)
395 {
396 ktime_t timeout, time_checked;
397 u32 val;
398
399 timeout = ktime_add_ms(ktime_get(), max_wait_ms);
400 do {
401 time_checked = ktime_get();
402 ufs_mtk_dbg_sel(hba);
403 val = ufshcd_readl(hba, REG_UFS_PROBE);
404 val = val >> 28;
405
406 if (val == state)
407 return 0;
408
409 /* Sleep for max. 200us */
410 usleep_range(100, 200);
411 } while (ktime_before(time_checked, timeout));
412
413 if (val == state)
414 return 0;
415
416 return -ETIMEDOUT;
417 }
418
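/*
 * Power the M-PHY on or off. When VA09 power control is supported,
 * the VA09 regulator is switched together with the PHY.
 */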
419 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
420 {
421 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
422 struct phy *mphy = host->mphy;
423 struct arm_smccc_res res;
424 int ret = 0;
425
426 if (!mphy || !(on ^ host->mphy_powered_on))
427 return 0;
428
429 if (on) {
430 if (ufs_mtk_is_va09_supported(hba)) {
431 ret = regulator_enable(host->reg_va09);
432 if (ret < 0)
433 goto out;
434 /* wait 200 us to stabilize VA09 */
435 usleep_range(200, 210);
436 ufs_mtk_va09_pwr_ctrl(res, 1);
437 }
438 phy_power_on(mphy);
439 } else {
440 phy_power_off(mphy);
441 if (ufs_mtk_is_va09_supported(hba)) {
442 ufs_mtk_va09_pwr_ctrl(res, 0);
443 ret = regulator_disable(host->reg_va09);
444 }
445 }
446 out:
447 if (ret) {
448 dev_info(hba->dev,
449 "failed to %s va09: %d\n",
450 on ? "enable" : "disable",
451 ret);
452 } else {
453 host->mphy_powered_on = on;
454 }
455
456 return ret;
457 }
458
459 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
460 struct clk **clk_out)
461 {
462 struct clk *clk;
463 int err = 0;
464
465 clk = devm_clk_get(dev, name);
466 if (IS_ERR(clk))
467 err = PTR_ERR(clk);
468 else
469 *clk_out = clk;
470
471 return err;
472 }
473
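/*
 * Boost or restore the VCORE voltage and the parent clock of the
 * crypto engine used for inline encryption.
 */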
474 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
475 {
476 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
477 struct ufs_mtk_crypt_cfg *cfg;
478 struct regulator *reg;
479 int volt, ret;
480
481 if (!ufs_mtk_is_boost_crypt_enabled(hba))
482 return;
483
484 cfg = host->crypt;
485 volt = cfg->vcore_volt;
486 reg = cfg->reg_vcore;
487
488 ret = clk_prepare_enable(cfg->clk_crypt_mux);
489 if (ret) {
490 dev_info(hba->dev, "clk_prepare_enable(): %d\n",
491 ret);
492 return;
493 }
494
495 if (boost) {
496 ret = regulator_set_voltage(reg, volt, INT_MAX);
497 if (ret) {
498 dev_info(hba->dev,
499 "failed to set vcore to %d\n", volt);
500 goto out;
501 }
502
503 ret = clk_set_parent(cfg->clk_crypt_mux,
504 cfg->clk_crypt_perf);
505 if (ret) {
506 dev_info(hba->dev,
507 "failed to set clk_crypt_perf\n");
508 regulator_set_voltage(reg, 0, INT_MAX);
509 goto out;
510 }
511 } else {
512 ret = clk_set_parent(cfg->clk_crypt_mux,
513 cfg->clk_crypt_lp);
514 if (ret) {
515 dev_info(hba->dev,
516 "failed to set clk_crypt_lp\n");
517 goto out;
518 }
519
520 ret = regulator_set_voltage(reg, 0, INT_MAX);
521 if (ret) {
522 dev_info(hba->dev,
523 "failed to set vcore to MIN\n");
524 }
525 }
526 out:
527 clk_disable_unprepare(cfg->clk_crypt_mux);
528 }
529
530 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
531 struct clk **clk)
532 {
533 int ret;
534
535 ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
536 if (ret) {
537 dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
538 name, ret);
539 }
540
541 return ret;
542 }
543
544 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
545 {
546 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
547 struct ufs_mtk_crypt_cfg *cfg;
548 struct device *dev = hba->dev;
549 struct regulator *reg;
550 u32 volt;
551
552 host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
553 GFP_KERNEL);
554 if (!host->crypt)
555 goto disable_caps;
556
557 reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
558 if (IS_ERR(reg)) {
559 dev_info(dev, "failed to get dvfsrc-vcore: %ld",
560 PTR_ERR(reg));
561 goto disable_caps;
562 }
563
564 if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
565 &volt)) {
566 dev_info(dev, "failed to get boost-crypt-vcore-min");
567 goto disable_caps;
568 }
569
570 cfg = host->crypt;
571 if (ufs_mtk_init_host_clk(hba, "crypt_mux",
572 &cfg->clk_crypt_mux))
573 goto disable_caps;
574
575 if (ufs_mtk_init_host_clk(hba, "crypt_lp",
576 &cfg->clk_crypt_lp))
577 goto disable_caps;
578
579 if (ufs_mtk_init_host_clk(hba, "crypt_perf",
580 &cfg->clk_crypt_perf))
581 goto disable_caps;
582
583 cfg->reg_vcore = reg;
584 cfg->vcore_volt = volt;
585 host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
586
587 disable_caps:
588 return;
589 }
590
591 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
592 {
593 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
594
595 host->reg_va09 = regulator_get(hba->dev, "va09");
596 if (IS_ERR(host->reg_va09))
597 dev_info(hba->dev, "failed to get va09");
598 else
599 host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
600 }
601
602 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
603 {
604 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
605 struct device_node *np = hba->dev->of_node;
606
607 if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
608 ufs_mtk_init_boost_crypt(hba);
609
610 if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
611 ufs_mtk_init_va09_pwr_ctrl(hba);
612
613 if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
614 host->caps |= UFS_MTK_CAP_DISABLE_AH8;
615
616 if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
617 host->caps |= UFS_MTK_CAP_BROKEN_VCC;
618
619 if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
620 host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
621
622 dev_info(hba->dev, "caps: 0x%x", host->caps);
623 }
624
625 static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
626 {
627 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
628
629 if (!host || !host->pm_qos_init)
630 return;
631
632 cpu_latency_qos_update_request(&host->pm_qos_req,
633 boost ? 0 : PM_QOS_DEFAULT_VALUE);
634 }
635
636 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
637 {
638 ufs_mtk_boost_crypt(hba, scale_up);
639 ufs_mtk_boost_pm_qos(hba, scale_up);
640 }
641
642 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
643 {
644 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
645
646 if (on) {
647 phy_power_on(host->mphy);
648 ufs_mtk_setup_ref_clk(hba, on);
649 if (!ufshcd_is_clkscaling_supported(hba))
650 ufs_mtk_scale_perf(hba, on);
651 } else {
652 if (!ufshcd_is_clkscaling_supported(hba))
653 ufs_mtk_scale_perf(hba, on);
654 ufs_mtk_setup_ref_clk(hba, on);
655 phy_power_off(host->mphy);
656 }
657 }
658
659 /**
660 * ufs_mtk_setup_clocks - enables/disables clocks
661 * @hba: host controller instance
662 * @on: If true, enable clocks else disable them.
663 * @status: PRE_CHANGE or POST_CHANGE notify
664 *
665 * Returns 0 on success, non-zero on failure.
666 */
667 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
668 enum ufs_notify_change_status status)
669 {
670 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
671 bool clk_pwr_off = false;
672 int ret = 0;
673
674 /*
675 * If ufs_mtk_init() is not yet done, simply ignore.
676 * ufs_mtk_setup_clocks() will be called again from
677 * ufs_mtk_init() once initialization is done.
678 */
679 if (!host)
680 return 0;
681
682 if (!on && status == PRE_CHANGE) {
683 if (ufshcd_is_link_off(hba)) {
684 clk_pwr_off = true;
685 } else if (ufshcd_is_link_hibern8(hba) ||
686 (!ufshcd_can_hibern8_during_gating(hba) &&
687 ufshcd_is_auto_hibern8_enabled(hba))) {
688 /*
689 * Gate the ref-clk and power off the mphy if the link state is
690 * OFF, or is in Hibern8 via either Auto-Hibern8 or
691 * ufshcd_link_state_transition().
692 */
693 ret = ufs_mtk_wait_link_state(hba,
694 VS_LINK_HIBERN8,
695 15);
696 if (!ret)
697 clk_pwr_off = true;
698 }
699
700 if (clk_pwr_off)
701 ufs_mtk_pwr_ctrl(hba, false);
702 } else if (on && status == POST_CHANGE) {
703 ufs_mtk_pwr_ctrl(hba, true);
704 }
705
706 return ret;
707 }
708
709 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
710 {
711 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
712 int ret, ver = 0;
713
714 if (host->hw_ver.major)
715 return;
716
717 /* Set default (minimum) version anyway */
718 host->hw_ver.major = 2;
719
720 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
721 if (!ret) {
722 if (ver >= UFS_UNIPRO_VER_1_8) {
723 host->hw_ver.major = 3;
724 /*
725 * Fix HCI version for some platforms with
726 * incorrect version
727 */
728 if (hba->ufs_version < ufshci_version(3, 0))
729 hba->ufs_version = ufshci_version(3, 0);
730 }
731 }
732 }
733
734 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
735 {
736 return hba->ufs_version;
737 }
738
739 /**
740 * ufs_mtk_init_clocks - Init mtk driver private clocks
741 *
742 * @hba: per adapter instance
743 */
744 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
745 {
746 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
747 struct list_head *head = &hba->clk_list_head;
748 struct ufs_mtk_clk *mclk = &host->mclk;
749 struct ufs_clk_info *clki, *clki_tmp;
750
751 /*
752 * Find private clocks and store them in struct ufs_mtk_clk.
753 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to avoid
754 * them being switched on/off during clock gating.
755 */
756 list_for_each_entry_safe(clki, clki_tmp, head, list) {
757 if (!strcmp(clki->name, "ufs_sel")) {
758 host->mclk.ufs_sel_clki = clki;
759 } else if (!strcmp(clki->name, "ufs_sel_max_src")) {
760 host->mclk.ufs_sel_max_clki = clki;
761 clk_disable_unprepare(clki->clk);
762 list_del(&clki->list);
763 } else if (!strcmp(clki->name, "ufs_sel_min_src")) {
764 host->mclk.ufs_sel_min_clki = clki;
765 clk_disable_unprepare(clki->clk);
766 list_del(&clki->list);
767 }
768 }
769
770 if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
771 !mclk->ufs_sel_min_clki) {
772 hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
773 dev_info(hba->dev,
774 "%s: Clk-scaling not ready. Feature disabled.",
775 __func__);
776 }
777 }
778
779 #define MAX_VCC_NAME 30
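/*
 * Look up VCC by an alternative name when the generic "vcc" supply is
 * absent: either by a platform-specific number (queried via SiP call)
 * or by the device's UFS specification version.
 */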
780 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
781 {
782 struct ufs_vreg_info *info = &hba->vreg_info;
783 struct device_node *np = hba->dev->of_node;
784 struct device *dev = hba->dev;
785 char vcc_name[MAX_VCC_NAME];
786 struct arm_smccc_res res;
787 int err, ver;
788
789 if (hba->vreg_info.vcc)
790 return 0;
791
792 if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
793 ufs_mtk_get_vcc_num(res);
794 if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
795 snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
796 else
797 return -ENODEV;
798 } else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
799 ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
800 snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
801 } else {
802 return 0;
803 }
804
805 err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
806 if (err)
807 return err;
808
809 err = ufshcd_get_vreg(dev, info->vcc);
810 if (err)
811 return err;
812
813 err = regulator_enable(info->vcc->reg);
814 if (!err) {
815 info->vcc->enabled = true;
816 dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
817 }
818
819 return err;
820 }
821
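/*
 * Keep only the VCCQ rail required by the device's UFS version always
 * on and release the unused one.
 */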
822 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
823 {
824 struct ufs_vreg_info *info = &hba->vreg_info;
825 struct ufs_vreg **vreg_on, **vreg_off;
826
827 if (hba->dev_info.wspecversion >= 0x0300) {
828 vreg_on = &info->vccq;
829 vreg_off = &info->vccq2;
830 } else {
831 vreg_on = &info->vccq2;
832 vreg_off = &info->vccq;
833 }
834
835 if (*vreg_on)
836 (*vreg_on)->always_on = true;
837
838 if (*vreg_off) {
839 regulator_disable((*vreg_off)->reg);
840 devm_kfree(hba->dev, (*vreg_off)->name);
841 devm_kfree(hba->dev, *vreg_off);
842 *vreg_off = NULL;
843 }
844 }
845
846 /**
847 * ufs_mtk_init - find other essential mmio bases
848 * @hba: host controller instance
849 *
850 * Binds PHY with controller and powers up PHY enabling clocks
851 * and regulators.
852 *
853 * Returns -EPROBE_DEFER if binding fails, returns negative error
854 * on phy power up failure and returns zero on success.
855 */
856 static int ufs_mtk_init(struct ufs_hba *hba)
857 {
858 const struct of_device_id *id;
859 struct device *dev = hba->dev;
860 struct ufs_mtk_host *host;
861 int err = 0;
862
863 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
864 if (!host) {
865 err = -ENOMEM;
866 dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
867 goto out;
868 }
869
870 host->hba = hba;
871 ufshcd_set_variant(hba, host);
872
873 id = of_match_device(ufs_mtk_of_match, dev);
874 if (!id) {
875 err = -EINVAL;
876 goto out;
877 }
878
879 /* Initialize host capability */
880 ufs_mtk_init_host_caps(hba);
881
882 err = ufs_mtk_bind_mphy(hba);
883 if (err)
884 goto out_variant_clear;
885
886 ufs_mtk_init_reset(hba);
887
888 /* Enable runtime autosuspend */
889 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
890
891 /* Enable clock-gating */
892 hba->caps |= UFSHCD_CAP_CLK_GATING;
893
894 /* Enable inline encryption */
895 hba->caps |= UFSHCD_CAP_CRYPTO;
896
897 /* Enable WriteBooster */
898 hba->caps |= UFSHCD_CAP_WB_EN;
899
900 /* Enable clk scaling */
901 hba->caps |= UFSHCD_CAP_CLK_SCALING;
902
903 hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
904 hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
905
906 if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
907 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
908
909 ufs_mtk_init_clocks(hba);
910
911 /*
912 * ufshcd_vops_init() is invoked after
913 * ufshcd_setup_clocks(true) in ufshcd_hba_init(), thus
914 * phy clock setup is skipped.
915 *
916 * Enable phy clocks specifically here.
917 */
918 ufs_mtk_mphy_power_on(hba, true);
919 ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
920
921 host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
922
923 /* Initialize pm-qos request */
924 cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
925 host->pm_qos_init = true;
926
927 goto out;
928
929 out_variant_clear:
930 ufshcd_set_variant(hba, NULL);
931 out:
932 return err;
933 }
934
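/*
 * Decide whether the power mode change should go through an
 * intermediate FASTAUTO step: only when the capability is set, the HS
 * rate is changing, and each direction requests FAST mode or at least
 * HS-G4.
 */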
935 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
936 struct ufs_pa_layer_attr *dev_req_params)
937 {
938 if (!ufs_mtk_is_pmc_via_fastauto(hba))
939 return false;
940
941 if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
942 return false;
943
944 if (dev_req_params->pwr_tx != FAST_MODE &&
945 dev_req_params->gear_tx < UFS_HS_G4)
946 return false;
947
948 if (dev_req_params->pwr_rx != FAST_MODE &&
949 dev_req_params->gear_rx < UFS_HS_G4)
950 return false;
951
952 return true;
953 }
954
955 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
956 struct ufs_pa_layer_attr *dev_max_params,
957 struct ufs_pa_layer_attr *dev_req_params)
958 {
959 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
960 struct ufs_dev_params host_cap;
961 int ret;
962
963 ufshcd_init_pwr_dev_param(&host_cap);
964 host_cap.hs_rx_gear = UFS_HS_G5;
965 host_cap.hs_tx_gear = UFS_HS_G5;
966
967 ret = ufshcd_get_pwr_dev_param(&host_cap,
968 dev_max_params,
969 dev_req_params);
970 if (ret) {
971 pr_info("%s: failed to determine capabilities\n",
972 __func__);
973 }
974
975 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
976 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
977 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
978
979 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
980 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
981
982 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
983 dev_req_params->lane_tx);
984 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
985 dev_req_params->lane_rx);
986 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
987 dev_req_params->hs_rate);
988
989 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
990 PA_NO_ADAPT);
991
992 ret = ufshcd_uic_change_pwr_mode(hba,
993 FASTAUTO_MODE << 4 | FASTAUTO_MODE);
994
995 if (ret) {
996 dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
997 __func__, ret);
998 }
999 }
1000
1001 if (host->hw_ver.major >= 3) {
1002 ret = ufshcd_dme_configure_adapt(hba,
1003 dev_req_params->gear_tx,
1004 PA_INITIAL_ADAPT);
1005 }
1006
1007 return ret;
1008 }
1009
1010 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1011 enum ufs_notify_change_status stage,
1012 struct ufs_pa_layer_attr *dev_max_params,
1013 struct ufs_pa_layer_attr *dev_req_params)
1014 {
1015 int ret = 0;
1016
1017 switch (stage) {
1018 case PRE_CHANGE:
1019 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1020 dev_req_params);
1021 break;
1022 case POST_CHANGE:
1023 break;
1024 default:
1025 ret = -EINVAL;
1026 break;
1027 }
1028
1029 return ret;
1030 }
1031
1032 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1033 {
1034 int ret;
1035 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1036
1037 ret = ufshcd_dme_set(hba,
1038 UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
1039 lpm ? 1 : 0);
1040 if (!ret || !lpm) {
1041 /*
1042 * If the UIC command failed, forcibly stay in non-LPM mode
1043 * so that the default hba_enable_delay_us value is used when
1044 * re-enabling the host.
1045 */
1046 host->unipro_lpm = lpm;
1047 }
1048
1049 return ret;
1050 }
1051
1052 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1053 {
1054 int ret;
1055 u32 tmp;
1056
1057 ufs_mtk_get_controller_version(hba);
1058
1059 ret = ufs_mtk_unipro_set_lpm(hba, false);
1060 if (ret)
1061 return ret;
1062
1063 /*
1064 * Set PA_Local_TX_LCC_Enable to 0 before link startup
1065 * to make sure that both host and device TX LCC are disabled
1066 * once link startup is completed.
1067 */
1068 ret = ufshcd_disable_host_tx_lcc(hba);
1069 if (ret)
1070 return ret;
1071
1072 /* disable deep stall */
1073 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1074 if (ret)
1075 return ret;
1076
1077 tmp &= ~(1 << 6);
1078
1079 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1080
1081 return ret;
1082 }
1083
1084 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1085 {
1086 u32 ah_ms;
1087
1088 if (ufshcd_is_clkgating_allowed(hba)) {
1089 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
1090 ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1091 hba->ahit);
1092 else
1093 ah_ms = 10;
1094 ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
1095 }
1096 }
1097
1098 static void ufs_mtk_post_link(struct ufs_hba *hba)
1099 {
1100 /* enable unipro clock gating feature */
1101 ufs_mtk_cfg_unipro_cg(hba, true);
1102
1103 /* will be configured during hba probe */
1104 if (ufshcd_is_auto_hibern8_supported(hba))
1105 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
1106 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
1107
1108 ufs_mtk_setup_clk_gating(hba);
1109 }
1110
1111 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1112 enum ufs_notify_change_status stage)
1113 {
1114 int ret = 0;
1115
1116 switch (stage) {
1117 case PRE_CHANGE:
1118 ret = ufs_mtk_pre_link(hba);
1119 break;
1120 case POST_CHANGE:
1121 ufs_mtk_post_link(hba);
1122 break;
1123 default:
1124 ret = -EINVAL;
1125 break;
1126 }
1127
1128 return ret;
1129 }
1130
1131 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1132 {
1133 struct arm_smccc_res res;
1134
1135 /* disable hba before device reset */
1136 ufshcd_hba_stop(hba);
1137
1138 ufs_mtk_device_reset_ctrl(0, res);
1139
1140 /*
1141 * The reset signal is active low. UFS devices shall detect a
1142 * positive or negative RST_n pulse width of more than or equal
1143 * to 1us.
1144 *
1145 * To be on the safe side, keep the reset low for at least 10us.
1146 */
1147 usleep_range(10, 15);
1148
1149 ufs_mtk_device_reset_ctrl(1, res);
1150
1151 /* Some devices may need time to respond to rst_n */
1152 usleep_range(10000, 15000);
1153
1154 dev_info(hba->dev, "device reset done\n");
1155
1156 return 0;
1157 }
1158
1159 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1160 {
1161 int err;
1162
1163 err = ufshcd_hba_enable(hba);
1164 if (err)
1165 return err;
1166
1167 err = ufs_mtk_unipro_set_lpm(hba, false);
1168 if (err)
1169 return err;
1170
1171 err = ufshcd_uic_hibern8_exit(hba);
1172 if (!err)
1173 ufshcd_set_link_active(hba);
1174 else
1175 return err;
1176
1177 err = ufshcd_make_hba_operational(hba);
1178 if (err)
1179 return err;
1180
1181 return 0;
1182 }
1183
1184 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1185 {
1186 int err;
1187
1188 /* Disable reset confirm feature by UniPro */
1189 ufshcd_writel(hba,
1190 (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1191 REG_UFS_XOUFS_CTRL);
1192
1193 err = ufs_mtk_unipro_set_lpm(hba, true);
1194 if (err) {
1195 /* Resume UniPro state for following error recovery */
1196 ufs_mtk_unipro_set_lpm(hba, false);
1197 return err;
1198 }
1199
1200 return 0;
1201 }
1202
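/* Put the active VCCQ rail into idle (LPM) or normal regulator mode. */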
1203 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1204 {
1205 struct ufs_vreg *vccqx = NULL;
1206
1207 if (hba->vreg_info.vccq)
1208 vccqx = hba->vreg_info.vccq;
1209 else
1210 vccqx = hba->vreg_info.vccq2;
1211
1212 regulator_set_mode(vccqx->reg,
1213 lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1214 }
1215
1216 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1217 {
1218 struct arm_smccc_res res;
1219
1220 ufs_mtk_device_pwr_ctrl(!lpm,
1221 (unsigned long)hba->dev_info.wspecversion,
1222 res);
1223 }
1224
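/*
 * Switch the device regulators between low-power and normal mode.
 * LPM is skipped while the device is still active or while VCC
 * remains enabled.
 */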
1225 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1226 {
1227 if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1228 return;
1229
1230 /* Skip if VCC is assumed always-on */
1231 if (!hba->vreg_info.vcc)
1232 return;
1233
1234 /* Bypass LPM when device is still active */
1235 if (lpm && ufshcd_is_ufs_dev_active(hba))
1236 return;
1237
1238 /* Bypass LPM if VCC is enabled */
1239 if (lpm && hba->vreg_info.vcc->enabled)
1240 return;
1241
1242 if (lpm) {
1243 ufs_mtk_vccqx_set_lpm(hba, lpm);
1244 ufs_mtk_vsx_set_lpm(hba, lpm);
1245 } else {
1246 ufs_mtk_vsx_set_lpm(hba, lpm);
1247 ufs_mtk_vccqx_set_lpm(hba, lpm);
1248 }
1249 }
1250
1251 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1252 {
1253 int ret;
1254
1255 /* disable auto-hibern8 */
1256 ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1257
1258 /* wait host return to idle state when auto-hibern8 off */
1259 ufs_mtk_wait_idle_state(hba, 5);
1260
1261 ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1262 if (ret)
1263 dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1264 }
1265
1266 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1267 enum ufs_notify_change_status status)
1268 {
1269 int err;
1270 struct arm_smccc_res res;
1271
1272 if (status == PRE_CHANGE) {
1273 if (ufshcd_is_auto_hibern8_supported(hba))
1274 ufs_mtk_auto_hibern8_disable(hba);
1275 return 0;
1276 }
1277
1278 if (ufshcd_is_link_hibern8(hba)) {
1279 err = ufs_mtk_link_set_lpm(hba);
1280 if (err)
1281 goto fail;
1282 }
1283
1284 if (!ufshcd_is_link_active(hba)) {
1285 /*
1286 * Make sure no error will be returned to prevent
1287 * ufshcd_suspend() re-enabling regulators while vreg is still
1288 * in low-power mode.
1289 */
1290 err = ufs_mtk_mphy_power_on(hba, false);
1291 if (err)
1292 goto fail;
1293 }
1294
1295 if (ufshcd_is_link_off(hba))
1296 ufs_mtk_device_reset_ctrl(0, res);
1297
1298 ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
1299
1300 return 0;
1301 fail:
1302 /*
1303 * Forcibly set the link to the off state to trigger
1304 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1305 * for a complete host reset.
1306 */
1307 ufshcd_set_link_off(hba);
1308 return -EAGAIN;
1309 }
1310
1311 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1312 {
1313 int err;
1314 struct arm_smccc_res res;
1315
1316 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1317 ufs_mtk_dev_vreg_set_lpm(hba, false);
1318
1319 ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
1320
1321 err = ufs_mtk_mphy_power_on(hba, true);
1322 if (err)
1323 goto fail;
1324
1325 if (ufshcd_is_link_hibern8(hba)) {
1326 err = ufs_mtk_link_set_hpm(hba);
1327 if (err)
1328 goto fail;
1329 }
1330
1331 return 0;
1332 fail:
1333 return ufshcd_link_recovery(hba);
1334 }
1335
1336 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1337 {
1338 /* Dump ufshci register 0x140 ~ 0x14C */
1339 ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1340 "XOUFS Ctrl (0x140): ");
1341
1342 ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1343
1344 /* Dump ufshci register 0x2200 ~ 0x22AC */
1345 ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1346 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1347 "MPHY Ctrl (0x2200): ");
1348
1349 /* Direct debugging information to REG_MTK_PROBE */
1350 ufs_mtk_dbg_sel(hba);
1351 ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1352 }
1353
1354 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1355 {
1356 struct ufs_dev_info *dev_info = &hba->dev_info;
1357 u16 mid = dev_info->wmanufacturerid;
1358
1359 if (mid == UFS_VENDOR_SAMSUNG) {
1360 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1361 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1362 }
1363
1364 /*
1365 * Decide waiting time before gating reference clock and
1366 * after ungating reference clock according to vendors'
1367 * requirements.
1368 */
1369 if (mid == UFS_VENDOR_SAMSUNG)
1370 ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1371 else if (mid == UFS_VENDOR_SKHYNIX)
1372 ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1373 else if (mid == UFS_VENDOR_TOSHIBA)
1374 ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1375 else
1376 ufs_mtk_setup_ref_clk_wait_us(hba,
1377 REFCLK_DEFAULT_WAIT_US);
1378 return 0;
1379 }
1380
1381 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1382 {
1383 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1384
1385 if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1386 (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1387 hba->vreg_info.vcc->always_on = true;
1388 /*
1389 * VCC will be kept always-on thus we don't
1390 * need any delay during regulator operations
1391 */
1392 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1393 UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1394 }
1395
1396 ufs_mtk_vreg_fix_vcc(hba);
1397 ufs_mtk_vreg_fix_vccqx(hba);
1398 }
1399
1400 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1401 enum ufs_event_type evt, void *data)
1402 {
1403 unsigned int val = *(u32 *)data;
1404 unsigned long reg;
1405 u8 bit;
1406
1407 trace_ufs_mtk_event(evt, val);
1408
1409 /* Print details of UIC Errors */
1410 if (evt <= UFS_EVT_DME_ERR) {
1411 dev_info(hba->dev,
1412 "Host UIC Error Code (%s): %08x\n",
1413 ufs_uic_err_str[evt], val);
1414 reg = val;
1415 }
1416
1417 if (evt == UFS_EVT_PA_ERR) {
1418 for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1419 dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1420 }
1421
1422 if (evt == UFS_EVT_DL_ERR) {
1423 for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
1424 dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1425 }
1426 }
1427
1428 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
1429 struct devfreq_dev_profile *profile,
1430 struct devfreq_simple_ondemand_data *data)
1431 {
1432 /* Customize min gear in clk scaling */
1433 hba->clk_scaling.min_gear = UFS_HS_G4;
1434
1435 hba->vps->devfreq_profile.polling_ms = 200;
1436 hba->vps->ondemand_data.upthreshold = 50;
1437 hba->vps->ondemand_data.downdifferential = 20;
1438 }
1439
1440 /**
1441 * ufs_mtk_clk_scale - Internal clk scaling operation
1442 *
1443 * The MTK platform supports clk scaling by switching the parent of ufs_sel (a mux).
1444 * ufs_sel feeds ufs_ck, which drives the UFS hardware directly.
1445 * The max and min clock rates of ufs_sel defined in the dts should match the
1446 * rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
1447 * This prevents changing the rate of a PLL clock that is shared between modules.
1448 *
1449 * @hba: per adapter instance
1450 * @scale_up: True for scaling up and false for scaling down
1451 */
1452 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1453 {
1454 struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1455 struct ufs_mtk_clk *mclk = &host->mclk;
1456 struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1457 int ret = 0;
1458
1459 ret = clk_prepare_enable(clki->clk);
1460 if (ret) {
1461 dev_info(hba->dev,
1462 "clk_prepare_enable() fail, ret: %d\n", ret);
1463 return;
1464 }
1465
1466 if (scale_up) {
1467 ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
1468 clki->curr_freq = clki->max_freq;
1469 } else {
1470 ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
1471 clki->curr_freq = clki->min_freq;
1472 }
1473
1474 if (ret) {
1475 dev_info(hba->dev,
1476 "Failed to set ufs_sel_clki, ret: %d\n", ret);
1477 }
1478
1479 clk_disable_unprepare(clki->clk);
1480
1481 trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
1482 }
1483
1484 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1485 enum ufs_notify_change_status status)
1486 {
1487 if (!ufshcd_is_clkscaling_supported(hba))
1488 return 0;
1489
1490 if (status == PRE_CHANGE) {
1491 /* Switch parent before clk_set_rate() */
1492 ufs_mtk_clk_scale(hba, scale_up);
1493 } else {
1494 /* Request interrupt latency QoS accordingly */
1495 ufs_mtk_scale_perf(hba, scale_up);
1496 }
1497
1498 return 0;
1499 }
1500
1501 /*
1502 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
1503 *
1504 * The variant operations configure the necessary controller and PHY
1505 * handshake during initialization.
1506 */
1507 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
1508 .name = "mediatek.ufshci",
1509 .init = ufs_mtk_init,
1510 .get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
1511 .setup_clocks = ufs_mtk_setup_clocks,
1512 .hce_enable_notify = ufs_mtk_hce_enable_notify,
1513 .link_startup_notify = ufs_mtk_link_startup_notify,
1514 .pwr_change_notify = ufs_mtk_pwr_change_notify,
1515 .apply_dev_quirks = ufs_mtk_apply_dev_quirks,
1516 .fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
1517 .suspend = ufs_mtk_suspend,
1518 .resume = ufs_mtk_resume,
1519 .dbg_register_dump = ufs_mtk_dbg_register_dump,
1520 .device_reset = ufs_mtk_device_reset,
1521 .event_notify = ufs_mtk_event_notify,
1522 .config_scaling_param = ufs_mtk_config_scaling_param,
1523 .clk_scale_notify = ufs_mtk_clk_scale_notify,
1524 };
1525
1526 /**
1527 * ufs_mtk_probe - probe routine of the driver
1528 * @pdev: pointer to Platform device handle
1529 *
1530 * Return zero for success and non-zero for failure
1531 */
1532 static int ufs_mtk_probe(struct platform_device *pdev)
1533 {
1534 int err;
1535 struct device *dev = &pdev->dev;
1536 struct device_node *reset_node;
1537 struct platform_device *reset_pdev;
1538 struct device_link *link;
1539
1540 reset_node = of_find_compatible_node(NULL, NULL,
1541 "ti,syscon-reset");
1542 if (!reset_node) {
1543 dev_notice(dev, "find ti,syscon-reset fail\n");
1544 goto skip_reset;
1545 }
1546 reset_pdev = of_find_device_by_node(reset_node);
1547 if (!reset_pdev) {
1548 dev_notice(dev, "find reset_pdev fail\n");
1549 goto skip_reset;
1550 }
1551 link = device_link_add(dev, &reset_pdev->dev,
1552 DL_FLAG_AUTOPROBE_CONSUMER);
1553 put_device(&reset_pdev->dev);
1554 if (!link) {
1555 dev_notice(dev, "add reset device_link fail\n");
1556 goto skip_reset;
1557 }
1558 /* supplier is not probed */
1559 if (link->status == DL_STATE_DORMANT) {
1560 err = -EPROBE_DEFER;
1561 goto out;
1562 }
1563
1564 skip_reset:
1565 /* perform generic probe */
1566 err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
1567
1568 out:
1569 if (err)
1570 dev_info(dev, "probe failed %d\n", err);
1571
1572 of_node_put(reset_node);
1573 return err;
1574 }
1575
1576 /**
1577 * ufs_mtk_remove - set driver_data of the device to NULL
1578 * @pdev: pointer to platform device handle
1579 *
1580 * Always return 0
1581 */
1582 static int ufs_mtk_remove(struct platform_device *pdev)
1583 {
1584 struct ufs_hba *hba = platform_get_drvdata(pdev);
1585
1586 pm_runtime_get_sync(&(pdev)->dev);
1587 ufshcd_remove(hba);
1588 return 0;
1589 }
1590
1591 #ifdef CONFIG_PM_SLEEP
1592 static int ufs_mtk_system_suspend(struct device *dev)
1593 {
1594 struct ufs_hba *hba = dev_get_drvdata(dev);
1595 int ret;
1596
1597 ret = ufshcd_system_suspend(dev);
1598 if (ret)
1599 return ret;
1600
1601 ufs_mtk_dev_vreg_set_lpm(hba, true);
1602
1603 return 0;
1604 }
1605
1606 static int ufs_mtk_system_resume(struct device *dev)
1607 {
1608 struct ufs_hba *hba = dev_get_drvdata(dev);
1609
1610 ufs_mtk_dev_vreg_set_lpm(hba, false);
1611
1612 return ufshcd_system_resume(dev);
1613 }
1614 #endif
1615
1616 #ifdef CONFIG_PM
1617 static int ufs_mtk_runtime_suspend(struct device *dev)
1618 {
1619 struct ufs_hba *hba = dev_get_drvdata(dev);
1620 int ret = 0;
1621
1622 ret = ufshcd_runtime_suspend(dev);
1623 if (ret)
1624 return ret;
1625
1626 ufs_mtk_dev_vreg_set_lpm(hba, true);
1627
1628 return 0;
1629 }
1630
1631 static int ufs_mtk_runtime_resume(struct device *dev)
1632 {
1633 struct ufs_hba *hba = dev_get_drvdata(dev);
1634
1635 ufs_mtk_dev_vreg_set_lpm(hba, false);
1636
1637 return ufshcd_runtime_resume(dev);
1638 }
1639 #endif
1640
1641 static const struct dev_pm_ops ufs_mtk_pm_ops = {
1642 SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
1643 ufs_mtk_system_resume)
1644 SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
1645 ufs_mtk_runtime_resume, NULL)
1646 .prepare = ufshcd_suspend_prepare,
1647 .complete = ufshcd_resume_complete,
1648 };
1649
1650 static struct platform_driver ufs_mtk_pltform = {
1651 .probe = ufs_mtk_probe,
1652 .remove = ufs_mtk_remove,
1653 .shutdown = ufshcd_pltfrm_shutdown,
1654 .driver = {
1655 .name = "ufshcd-mtk",
1656 .pm = &ufs_mtk_pm_ops,
1657 .of_match_table = ufs_mtk_of_match,
1658 },
1659 };
1660
1661 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
1662 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
1663 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
1664 MODULE_LICENSE("GPL v2");
1665
1666 module_platform_driver(ufs_mtk_pltform);
1667