1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Universal Flash Storage Host controller PCI glue driver
4 *
5 * This code is based on drivers/scsi/ufs/ufshcd-pci.c
6 * Copyright (C) 2011-2013 Samsung India Software Operations
7 *
8 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
11 */
12
13 #include "ufshcd.h"
14 #include <linux/pci.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_qos.h>
17 #include <linux/debugfs.h>
18 #include <linux/uuid.h>
19 #include <linux/acpi.h>
20 #include <linux/gpio/consumer.h>
21
/*
 * Minimal per-variant state. Intel variants embed this as the first member
 * of struct intel_host, so the pointer returned by ufshcd_get_variant()
 * can be viewed as either type.
 */
struct ufs_host {
	/* Optional hook, run from ufshcd_pci_probe() after ufshcd_init() */
	void (*late_init)(struct ufs_hba *hba);
};
25
/*
 * Intel ACPI _DSM function indices. intel_dsm() only invokes function @fn
 * when bit (1 << fn) is set in the mask returned by INTEL_DSM_FNS.
 */
enum {
	INTEL_DSM_FNS = 0,	/* query bitmask of supported functions */
	INTEL_DSM_RESET = 1,	/* perform a device reset */
};
30
/* Intel-specific host state, installed via ufshcd_set_variant() */
struct intel_host {
	struct ufs_host ufs_host;	/* generic part; must stay first */
	u32	dsm_fns;		/* bitmask of supported _DSM functions */
	u32	active_ltr;		/* cached INTEL_ACTIVELTR value */
	u32	idle_ltr;		/* cached INTEL_IDLELTR value */
	struct dentry	*debugfs_root;	/* dir holding the LTR debugfs files */
	struct gpio_desc *reset_gpio;	/* optional reset line (active low) */
};
39
/* GUID identifying the Intel UFS _DSM interface to acpi_evaluate_dsm() */
static const guid_t intel_dsm_guid =
	GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
		  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
43
/*
 * Evaluate _DSM function @fn (revision 0, no arguments) and copy up to
 * four bytes of the returned buffer into @result.
 *
 * Returns 0 on success, -EOPNOTSUPP if the method is absent, -EINVAL if
 * the returned object is not a non-empty buffer.
 */
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	size_t n;
	int err = 0;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
		err = -EINVAL;
	} else {
		/* At most 4 bytes fit the u32 result; zero-fill the rest */
		n = min_t(size_t, obj->buffer.length, 4);
		*result = 0;
		memcpy(result, obj->buffer.pointer, n);
	}

	ACPI_FREE(obj);

	return err;
}
69
/* Call _DSM function @fn, but only if it was advertised by INTEL_DSM_FNS */
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	bool supported = fn <= 31 && (intel_host->dsm_fns & (1 << fn));

	if (!supported)
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}
78
intel_dsm_init(struct intel_host * intel_host,struct device * dev)79 static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
80 {
81 int err;
82
83 err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
84 dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
85 }
86
ufs_intel_hce_enable_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)87 static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
88 enum ufs_notify_change_status status)
89 {
90 /* Cannot enable ICE until after HC enable */
91 if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
92 u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
93
94 hce |= CRYPTO_GENERAL_ENABLE;
95 ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
96 }
97
98 return 0;
99 }
100
ufs_intel_disable_lcc(struct ufs_hba * hba)101 static int ufs_intel_disable_lcc(struct ufs_hba *hba)
102 {
103 u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
104 u32 lcc_enable = 0;
105
106 ufshcd_dme_get(hba, attr, &lcc_enable);
107 if (lcc_enable)
108 ufshcd_disable_host_tx_lcc(hba);
109
110 return 0;
111 }
112
ufs_intel_link_startup_notify(struct ufs_hba * hba,enum ufs_notify_change_status status)113 static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
114 enum ufs_notify_change_status status)
115 {
116 int err = 0;
117
118 switch (status) {
119 case PRE_CHANGE:
120 err = ufs_intel_disable_lcc(hba);
121 break;
122 case POST_CHANGE:
123 break;
124 default:
125 break;
126 }
127
128 return err;
129 }
130
/* Reconfigure the power mode with @lanes RX/TX lanes, keeping other attrs */
static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
	/* Start from the current power settings and change only the lanes */
	struct ufs_pa_layer_attr attrs = hba->pwr_info;
	int err;

	attrs.lane_rx = lanes;
	attrs.lane_tx = lanes;

	err = ufshcd_config_pwr_mode(hba, &attrs);
	if (err)
		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
			__func__, lanes, err);

	return err;
}
144
ufs_intel_lkf_pwr_change_notify(struct ufs_hba * hba,enum ufs_notify_change_status status,struct ufs_pa_layer_attr * dev_max_params,struct ufs_pa_layer_attr * dev_req_params)145 static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
146 enum ufs_notify_change_status status,
147 struct ufs_pa_layer_attr *dev_max_params,
148 struct ufs_pa_layer_attr *dev_req_params)
149 {
150 int err = 0;
151
152 switch (status) {
153 case PRE_CHANGE:
154 if (ufshcd_is_hs_mode(dev_max_params) &&
155 (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
156 ufs_intel_set_lanes(hba, 2);
157 memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
158 break;
159 case POST_CHANGE:
160 if (ufshcd_is_hs_mode(dev_req_params)) {
161 u32 peer_granularity;
162
163 usleep_range(1000, 1250);
164 err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
165 &peer_granularity);
166 }
167 break;
168 default:
169 break;
170 }
171
172 return err;
173 }
174
ufs_intel_lkf_apply_dev_quirks(struct ufs_hba * hba)175 static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
176 {
177 u32 granularity, peer_granularity;
178 u32 pa_tactivate, peer_pa_tactivate;
179 int ret;
180
181 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
182 if (ret)
183 goto out;
184
185 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
186 if (ret)
187 goto out;
188
189 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
190 if (ret)
191 goto out;
192
193 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
194 if (ret)
195 goto out;
196
197 if (granularity == peer_granularity) {
198 u32 new_peer_pa_tactivate = pa_tactivate + 2;
199
200 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
201 }
202 out:
203 return ret;
204 }
205
/* Intel host MMIO registers holding the active and idle LTR settings */
#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

/* Fields within the LTR registers (see intel_ltr_set()) */
#define INTEL_LTR_REQ		BIT(15)		/* requirement valid; cleared to disable LTR */
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)	/* latency unit select */
#define INTEL_LTR_SCALE_1US	(2 << 10)	/* 1 us units */
#define INTEL_LTR_SCALE_32US	(3 << 10)	/* 32 us units */
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)	/* latency value */
214
/* Re-read both LTR registers into the debugfs-visible cached copies */
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}
222
/*
 * PM QoS set_latency_tolerance callback. Translates the requested
 * latency @val into the LTR register format and writes it to both the
 * active and idle LTR registers, skipping the write when the value is
 * unchanged from the cached active LTR.
 */
static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program latency tolerance (LTR) accordingly what has been asked
	 * by the PM QoS layer or disable it in case we were passed
	 * negative value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~(INTEL_LTR_SCALE_MASK | INTEL_LTR_VALUE_MASK);

		if (val > INTEL_LTR_VALUE_MASK) {
			/* Too large for 1 us units: switch to 32 us units */
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr != host->active_ltr) {
		writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
		writel(ltr, hba->mmio_base + INTEL_IDLELTR);

		/* Cache the values into intel_host structure */
		intel_cache_ltr(hba);
	}

	pm_runtime_put(dev);
}
266
/* Install our LTR writer, then expose the PM QoS latency tolerance knob */
static void intel_ltr_expose(struct device *dev)
{
	/* Handler is set before exposing so QoS updates always have one */
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}
272
/* Reverse of intel_ltr_expose(): hide the knob, then drop the handler */
static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}
278
intel_add_debugfs(struct ufs_hba * hba)279 static void intel_add_debugfs(struct ufs_hba *hba)
280 {
281 struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
282 struct intel_host *host = ufshcd_get_variant(hba);
283
284 intel_cache_ltr(hba);
285
286 host->debugfs_root = dir;
287 debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
288 debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
289 }
290
intel_remove_debugfs(struct ufs_hba * hba)291 static void intel_remove_debugfs(struct ufs_hba *hba)
292 {
293 struct intel_host *host = ufshcd_get_variant(hba);
294
295 debugfs_remove_recursive(host->debugfs_root);
296 }
297
ufs_intel_device_reset(struct ufs_hba * hba)298 static int ufs_intel_device_reset(struct ufs_hba *hba)
299 {
300 struct intel_host *host = ufshcd_get_variant(hba);
301
302 if (host->dsm_fns & INTEL_DSM_RESET) {
303 u32 result = 0;
304 int err;
305
306 err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
307 if (!err && !result)
308 err = -EIO;
309 if (err)
310 dev_err(hba->dev, "%s: DSM error %d result %u\n",
311 __func__, err, result);
312 return err;
313 }
314
315 if (!host->reset_gpio)
316 return -EOPNOTSUPP;
317
318 gpiod_set_value_cansleep(host->reset_gpio, 1);
319 usleep_range(10, 15);
320
321 gpiod_set_value_cansleep(host->reset_gpio, 0);
322 usleep_range(10, 15);
323
324 return 0;
325 }
326
/*
 * Fetch the optional "reset" GPIO. Returns NULL when absent and an
 * ERR_PTR on lookup failure (callers handle both — see common init).
 */
static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
	/* GPIO in _DSD has active low setting */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}
332
ufs_intel_common_init(struct ufs_hba * hba)333 static int ufs_intel_common_init(struct ufs_hba *hba)
334 {
335 struct intel_host *host;
336
337 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
338
339 host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
340 if (!host)
341 return -ENOMEM;
342 ufshcd_set_variant(hba, host);
343 intel_dsm_init(host, hba->dev);
344 if (host->dsm_fns & INTEL_DSM_RESET) {
345 if (hba->vops->device_reset)
346 hba->caps |= UFSHCD_CAP_DEEPSLEEP;
347 } else {
348 if (hba->vops->device_reset)
349 host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
350 if (IS_ERR(host->reset_gpio)) {
351 dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
352 __func__, PTR_ERR(host->reset_gpio));
353 host->reset_gpio = NULL;
354 }
355 if (host->reset_gpio) {
356 gpiod_set_value_cansleep(host->reset_gpio, 0);
357 hba->caps |= UFSHCD_CAP_DEEPSLEEP;
358 }
359 }
360 intel_ltr_expose(hba->dev);
361 intel_add_debugfs(hba);
362 return 0;
363 }
364
/* Tear down what ufs_intel_common_init() set up (debugfs, then LTR) */
static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}
370
ufs_intel_resume(struct ufs_hba * hba,enum ufs_pm_op op)371 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
372 {
373 if (ufshcd_is_link_hibern8(hba)) {
374 int ret = ufshcd_uic_hibern8_exit(hba);
375
376 if (!ret) {
377 ufshcd_set_link_active(hba);
378 } else {
379 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
380 __func__, ret);
381 /*
382 * Force reset and restore. Any other actions can lead
383 * to an unrecoverable state.
384 */
385 ufshcd_set_link_off(hba);
386 }
387 }
388
389 return 0;
390 }
391
/* EHL variant init: flag auto-hibern8 as broken, then run common init */
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}
397
ufs_intel_lkf_late_init(struct ufs_hba * hba)398 static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
399 {
400 /* LKF always needs a full reset, so set PM accordingly */
401 if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
402 hba->spm_lvl = UFS_PM_LVL_6;
403 hba->rpm_lvl = UFS_PM_LVL_6;
404 } else {
405 hba->spm_lvl = UFS_PM_LVL_5;
406 hba->rpm_lvl = UFS_PM_LVL_5;
407 }
408 }
409
ufs_intel_lkf_init(struct ufs_hba * hba)410 static int ufs_intel_lkf_init(struct ufs_hba *hba)
411 {
412 struct ufs_host *ufs_host;
413 int err;
414
415 hba->nop_out_timeout = 200;
416 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
417 hba->caps |= UFSHCD_CAP_CRYPTO;
418 err = ufs_intel_common_init(hba);
419 ufs_host = ufshcd_get_variant(hba);
420 ufs_host->late_init = ufs_intel_lkf_late_init;
421 return err;
422 }
423
ufs_intel_adl_init(struct ufs_hba * hba)424 static int ufs_intel_adl_init(struct ufs_hba *hba)
425 {
426 hba->nop_out_timeout = 200;
427 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
428 return ufs_intel_common_init(hba);
429 }
430
/* Cannon Lake: common init, no device reset mechanism */
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.name = "intel-pci",
	.init = ufs_intel_common_init,
	.exit = ufs_intel_common_exit,
	.link_startup_notify = ufs_intel_link_startup_notify,
	.resume = ufs_intel_resume,
};

/* Elkhart Lake: adds the broken auto-hibern8 quirk via its init */
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.name = "intel-pci",
	.init = ufs_intel_ehl_init,
	.exit = ufs_intel_common_exit,
	.link_startup_notify = ufs_intel_link_startup_notify,
	.resume = ufs_intel_resume,
};

/* Lakefield: crypto, power-change/quirk hooks and device reset */
static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
	.name = "intel-pci",
	.init = ufs_intel_lkf_init,
	.exit = ufs_intel_common_exit,
	.hce_enable_notify = ufs_intel_hce_enable_notify,
	.link_startup_notify = ufs_intel_link_startup_notify,
	.pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
	.apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
	.resume = ufs_intel_resume,
	.device_reset = ufs_intel_device_reset,
};

/* Alder Lake: like EHL plus device reset support */
static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
	.name = "intel-pci",
	.init = ufs_intel_adl_init,
	.exit = ufs_intel_common_exit,
	.link_startup_notify = ufs_intel_link_startup_notify,
	.resume = ufs_intel_resume,
	.device_reset = ufs_intel_device_reset,
};
467
#ifdef CONFIG_PM_SLEEP
/* Hibernation restore: force a full reset and restore, then resume */
static int ufshcd_pci_restore(struct device *dev)
{
	ufshcd_set_link_off(dev_get_drvdata(dev));

	return ufshcd_system_resume(dev);
}
#endif
479
/**
 * ufshcd_pci_shutdown - put the controller into reset state at shutdown
 * @pdev: pointer to PCI device handle
 */
static void ufshcd_pci_shutdown(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	ufshcd_shutdown(hba);
}
488
/**
 * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
 *		data structure memory
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	/* Stop runtime PM activity before tearing the host down */
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}
503
504 /**
505 * ufshcd_pci_probe - probe routine of the driver
506 * @pdev: pointer to PCI device handle
507 * @id: PCI device id
508 *
509 * Returns 0 on success, non-zero value on failure
510 */
511 static int
ufshcd_pci_probe(struct pci_dev * pdev,const struct pci_device_id * id)512 ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
513 {
514 struct ufs_host *ufs_host;
515 struct ufs_hba *hba;
516 void __iomem *mmio_base;
517 int err;
518
519 err = pcim_enable_device(pdev);
520 if (err) {
521 dev_err(&pdev->dev, "pcim_enable_device failed\n");
522 return err;
523 }
524
525 pci_set_master(pdev);
526
527 err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
528 if (err < 0) {
529 dev_err(&pdev->dev, "request and iomap failed\n");
530 return err;
531 }
532
533 mmio_base = pcim_iomap_table(pdev)[0];
534
535 err = ufshcd_alloc_host(&pdev->dev, &hba);
536 if (err) {
537 dev_err(&pdev->dev, "Allocation failed\n");
538 return err;
539 }
540
541 pci_set_drvdata(pdev, hba);
542
543 hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
544
545 err = ufshcd_init(hba, mmio_base, pdev->irq);
546 if (err) {
547 dev_err(&pdev->dev, "Initialization failed\n");
548 ufshcd_dealloc_host(hba);
549 return err;
550 }
551
552 ufs_host = ufshcd_get_variant(hba);
553 if (ufs_host && ufs_host->late_init)
554 ufs_host->late_init(hba);
555
556 pm_runtime_put_noidle(&pdev->dev);
557 pm_runtime_allow(&pdev->dev);
558
559 return 0;
560 }
561
/* Runtime and system PM ops; restore forces a full reset (see above) */
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_system_suspend,
	.resume = ufshcd_system_resume,
	.freeze = ufshcd_system_suspend,
	.thaw = ufshcd_system_resume,
	.poweroff = ufshcd_system_suspend,
	.restore = ufshcd_pci_restore,
	.prepare = ufshcd_suspend_prepare,
	.complete = ufshcd_resume_complete,
#endif
};
575
/* Supported devices; driver_data carries the variant ops (0 = none) */
static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ } /* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
588
/* PCI driver glue tying the probe/remove/shutdown and PM ops together */
static struct pci_driver ufshcd_pci_driver = {
	.name = UFSHCD,
	.id_table = ufshcd_pci_tbl,
	.probe = ufshcd_pci_probe,
	.remove = ufshcd_pci_remove,
	.shutdown = ufshcd_pci_shutdown,
	.driver = {
		.pm = &ufshcd_pci_pm_ops
	},
};
599
module_pci_driver(ufshcd_pci_driver);

/*
 * NOTE(review): "Yaragnavi" differs from "Yaraganavi" in the file header;
 * left as-is since MODULE_AUTHOR text is emitted into modinfo.
 */
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
607