// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2011, Marvell Semiconductor Inc.
 * Lei Wen <leiwen@marvell.com>
 *
 * Back ported to the 8xx platform (from the 8260 platform) by
 * Murray.Jensen@cmst.csiro.au, 27-Jan-01.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <mmc.h>
#include <sdhci.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <phys2bus.h>
#include <power/regulator.h>

static void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	unsigned long timeout;

	/* Wait max 100 ms */
	timeout = 100;
	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (timeout == 0) {
			printf("%s: Reset 0x%x never completed.\n",
			       __func__, (int)mask);
			return;
		}
		timeout--;
		udelay(1000);
	}
}

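/*
 * Copy the command response from the SDHCI response registers into
 * cmd->response[]. For 136-bit (R2) responses the controller strips the
 * CRC, so each 32-bit word is shifted left by one byte and the top byte
 * of the next lower register is OR-ed in to reassemble the 128-bit value.
 */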
static void sdhci_cmd_done(struct sdhci_host *host, struct mmc_cmd *cmd)
{
	int i;
	if (cmd->resp_type & MMC_RSP_136) {
		/* CRC is stripped so we need to do some shifting. */
		for (i = 0; i < 4; i++) {
			cmd->response[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
			if (i != 3)
				cmd->response[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
		}
	} else {
		cmd->response[0] = sdhci_readl(host, SDHCI_RESPONSE);
	}
}

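/*
 * Move one block of data->blocksize bytes between the buffer described by
 * data and the SDHCI buffer data port, 32 bits at a time. The direction is
 * taken from data->flags (MMC_DATA_READ vs. write).
 */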
static void sdhci_transfer_pio(struct sdhci_host *host, struct mmc_data *data)
{
	int i;
	char *offs;
	for (i = 0; i < data->blocksize; i += 4) {
		offs = data->dest + i;
		if (data->flags == MMC_DATA_READ)
			*(u32 *)offs = sdhci_readl(host, SDHCI_BUFFER);
		else
			sdhci_writel(host, *(u32 *)offs, SDHCI_BUFFER);
	}
}

#if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
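/*
 * Set up a DMA transfer: select SDMA or ADMA in the host control register,
 * fall back to the bounce buffer when SDMA address alignment requirements
 * are not met (SDHCI_QUIRK_32BIT_DMA_ADDR or force_align_buffer), map the
 * buffer for DMA, then program either SDHCI_DMA_ADDRESS (SDMA) or the ADMA
 * descriptor table address.
 */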
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{
	dma_addr_t dma_addr;
	unsigned char ctrl;
	void *buf;

	if (data->flags == MMC_DATA_READ)
		buf = data->dest;
	else
		buf = (void *)data->src;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & USE_ADMA64)
		ctrl |= SDHCI_CTRL_ADMA64;
	else if (host->flags & USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	if (host->flags & USE_SDMA &&
	    (host->force_align_buffer ||
	     (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR &&
	      ((unsigned long)buf & 0x7) != 0x0))) {
		*is_aligned = 0;
		if (data->flags != MMC_DATA_READ)
			memcpy(host->align_buffer, buf, trans_bytes);
		buf = host->align_buffer;
	}

	host->start_addr = dma_map_single(buf, trans_bytes,
					  mmc_get_dma_dir(data));

	if (host->flags & USE_SDMA) {
		dma_addr = dev_phys_to_bus(mmc_to_dev(host->mmc), host->start_addr);
		sdhci_writel(host, dma_addr, SDHCI_DMA_ADDRESS);
	}
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
	else if (host->flags & (USE_ADMA | USE_ADMA64)) {
		sdhci_prepare_adma_table(host->adma_desc_table, data,
					 host->start_addr);

		sdhci_writel(host, lower_32_bits(host->adma_addr),
			     SDHCI_ADMA_ADDRESS);
		if (host->flags & USE_ADMA64)
			sdhci_writel(host, upper_32_bits(host->adma_addr),
				     SDHCI_ADMA_ADDRESS_HI);
	}
#endif
}
#else
static void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data,
			      int *is_aligned, int trans_bytes)
{}
#endif
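/*
 * Poll the interrupt status register until SDHCI_INT_DATA_END: service
 * buffer-ready interrupts with PIO transfers, and on SDHCI_INT_DMA_END
 * advance the SDMA address to the next boundary so the controller can
 * continue. Returns -EIO on an error status and -ETIMEDOUT after roughly
 * ten seconds of polling (1000000 polls of 10 us each).
 */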
static int sdhci_transfer_data(struct sdhci_host *host, struct mmc_data *data)
{
	dma_addr_t start_addr = host->start_addr;
	unsigned int stat, rdy, mask, timeout, block = 0;
	bool transfer_done = false;

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			pr_debug("%s: Error detected in status(0x%X)!\n",
				 __func__, stat);
			return -EIO;
		}
		if (!transfer_done && (stat & rdy)) {
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask))
				continue;
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				/* Keep looping until the SDHCI_INT_DATA_END is
				 * cleared, even if we finished sending all the
				 * blocks.
				 */
				transfer_done = true;
				continue;
			}
		}
		if ((host->flags & USE_DMA) && !transfer_done &&
		    (stat & SDHCI_INT_DMA_END)) {
			sdhci_writel(host, SDHCI_INT_DMA_END, SDHCI_INT_STATUS);
			if (host->flags & USE_SDMA) {
				start_addr &=
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				start_addr = dev_phys_to_bus(mmc_to_dev(host->mmc),
							     start_addr);
				sdhci_writel(host, start_addr, SDHCI_DMA_ADDRESS);
			}
		}
		if (timeout-- > 0)
			udelay(10);
		else {
			printf("%s: Transfer data timeout\n", __func__);
			return -ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

#if (defined(CONFIG_MMC_SDHCI_SDMA) || CONFIG_IS_ENABLED(MMC_SDHCI_ADMA))
	dma_unmap_single(host->start_addr, data->blocks * data->blocksize,
			 mmc_get_dma_dir(data));
#endif

	return 0;
}

/*
 * The driver does not send a command while the card is busy, so it must
 * wait for the card to become ready first.
 * Whenever the card is still busy when the timeout expires, the (last)
 * timeout value is doubled, as long as it does not exceed the global
 * maximum defined below.
 * Each subsequent call reuses the last timeout value.
 */
#define SDHCI_CMD_MAX_TIMEOUT			3200
#define SDHCI_CMD_DEFAULT_TIMEOUT		100
#define SDHCI_READ_STATUS_TIMEOUT		1000
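/*
 * Example of the doubling described above (assuming the card keeps
 * reporting busy): the first call waits up to SDHCI_CMD_DEFAULT_TIMEOUT
 * (100 ms), later calls up to 200, 400, 800, 1600 and finally 3200 ms;
 * once doubling would exceed SDHCI_CMD_MAX_TIMEOUT the command fails with
 * -ECOMM instead of growing the timeout further.
 */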

#ifdef CONFIG_DM_MMC
static int sdhci_send_command(struct udevice *dev, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

#else
static int sdhci_send_command(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
#endif
	struct sdhci_host *host = mmc->priv;
	unsigned int stat = 0;
	int ret = 0;
	int trans_bytes = 0, is_aligned = 1;
	u32 mask, flags, mode;
	unsigned int time = 0;
	int mmc_dev = mmc_get_blk_desc(mmc)->devnum;
	ulong start = get_timer(0);

	host->start_addr = 0;
	/* Timeout unit - ms */
	static unsigned int cmd_timeout = SDHCI_CMD_DEFAULT_TIMEOUT;

	mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION ||
	    ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	      cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (time >= cmd_timeout) {
			printf("%s: MMC: %d busy ", __func__, mmc_dev);
			if (2 * cmd_timeout <= SDHCI_CMD_MAX_TIMEOUT) {
				cmd_timeout += cmd_timeout;
				printf("timeout increasing to: %u ms.\n",
				       cmd_timeout);
			} else {
				puts("timeout.\n");
				return -ECOMM;
			}
		}
		time++;
		udelay(1000);
	}

	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);

	mask = SDHCI_INT_RESPONSE;
	if ((cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	     cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200) && !data)
		mask = SDHCI_INT_DATA_AVAIL;

	if (!(cmd->resp_type & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->resp_type & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->resp_type & MMC_RSP_BUSY) {
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
		mask |= SDHCI_INT_DATA_END;
	} else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->resp_type & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;
	if (data || cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK ||
	    cmd->cmdidx == MMC_CMD_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	/* Set the transfer mode according to the data flag */
	if (data) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
		mode = SDHCI_TRNS_BLK_CNT_EN;
		trans_bytes = data->blocks * data->blocksize;
		if (data->blocks > 1)
			mode |= SDHCI_TRNS_MULTI;

		if (data->flags == MMC_DATA_READ)
			mode |= SDHCI_TRNS_READ;

		if (host->flags & USE_DMA) {
			mode |= SDHCI_TRNS_DMA;
			sdhci_prepare_dma(host, data, &is_aligned, trans_bytes);
		}

		sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
				data->blocksize),
				SDHCI_BLOCK_SIZE);
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
	} else if (cmd->resp_type & MMC_RSP_BUSY) {
		sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
	}

	sdhci_writel(host, cmd->cmdarg, SDHCI_ARGUMENT);
	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->cmdidx, flags), SDHCI_COMMAND);
	start = get_timer(0);
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR)
			break;

		if (get_timer(start) >= SDHCI_READ_STATUS_TIMEOUT) {
			if (host->quirks & SDHCI_QUIRK_BROKEN_R1B) {
				return 0;
			} else {
				printf("%s: Timeout for status update!\n",
				       __func__);
				return -ETIMEDOUT;
			}
		}
	} while ((stat & mask) != mask);

	if ((stat & (SDHCI_INT_ERROR | mask)) == mask) {
		sdhci_cmd_done(host, cmd);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);
	} else
		ret = -1;

	if (!ret && data)
		ret = sdhci_transfer_data(host, data);

	if (host->quirks & SDHCI_QUIRK_WAIT_SEND_CMD)
		udelay(1000);

	stat = sdhci_readl(host, SDHCI_INT_STATUS);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_STATUS);
	if (!ret) {
		if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) &&
				!is_aligned && (data->flags == MMC_DATA_READ))
			memcpy(data->dest, host->align_buffer, trans_bytes);
		return 0;
	}

	sdhci_reset(host, SDHCI_RESET_CMD);
	sdhci_reset(host, SDHCI_RESET_DATA);
	if (stat & SDHCI_INT_TIMEOUT)
		return -ETIMEDOUT;
	else
		return -ECOMM;
}

#if defined(CONFIG_DM_MMC) && defined(MMC_SUPPORTS_TUNING)
static int sdhci_execute_tuning(struct udevice *dev, uint opcode)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	debug("%s\n", __func__);

	if (host->ops && host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(mmc, opcode);
		if (err)
			return err;
		return 0;
	}
	return 0;
}
#endif
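
/*
 * Program the SD clock divider for the requested frequency and wait for
 * the internal clock to become stable before enabling the card clock.
 *
 * For host controllers at spec 3.00 or later without programmable clock
 * mode, and when max_clk exceeds the requested clock, the smallest even
 * divisor 'div' with max_clk / div <= clock is chosen and div / 2 is
 * written to the register, giving SDCLK = max_clk / div. For example, with
 * a 200 MHz base clock and a 50 MHz request, div = 4 is selected and the
 * register value 2 yields 200 MHz / (2 * 2) = 50 MHz.
 */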
int sdhci_set_clock(struct mmc *mmc, unsigned int clock)
{
	struct sdhci_host *host = mmc->priv;
	unsigned int div, clk = 0, timeout;
	int ret;

	/* Wait max 20 ms */
	timeout = 200;
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) &
			   (SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT)) {
		if (timeout == 0) {
			printf("%s: Timeout waiting for cmd & data inhibit\n",
			       __func__);
			return -EBUSY;
		}

		timeout--;
		udelay(100);
	}

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return 0;

	if (host->ops && host->ops->set_delay) {
		ret = host->ops->set_delay(host);
		if (ret) {
			printf("%s: Error while setting tap delay\n", __func__);
			return ret;
		}
	}

	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk / div) <= clock)
					break;
			}

			/*
			 * Set Programmable Clock Mode in the Clock
			 * Control register.
			 */
			clk = SDHCI_PROG_CLOCK_MODE;
			div--;
		} else {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock) {
				div = 1;
			} else {
				for (div = 2;
				     div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			div >>= 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		div >>= 1;
	}

	if (host->ops && host->ops->set_clock)
		host->ops->set_clock(host, div);

	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			printf("%s: Internal clock never stabilised.\n",
			       __func__);
			return -EBUSY;
		}
		timeout--;
		udelay(1000);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
	return 0;
}

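/*
 * Convert the voltage bit index passed in 'power' (as used in
 * mmc->cfg->voltages) into an SDHCI_POWER_* setting and enable bus power;
 * a value of (unsigned short)-1 or an unsupported voltage turns bus power
 * off instead.
 */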
static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
	u8 pwr = 0;

	if (power != (unsigned short)-1) {
		switch (1 << power) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		}
	}

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		return;
	}

	pwr |= SDHCI_POWER_ON;

	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
}

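/*
 * Select the UHS timing in the UHS Mode Select field of
 * SDHCI_HOST_CONTROL2 based on mmc->selected_mode; unknown modes fall back
 * to SDR12.
 */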
void sdhci_set_uhs_timing(struct sdhci_host *host)
{
	struct mmc *mmc = host->mmc;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	reg &= ~SDHCI_CTRL_UHS_MASK;

	switch (mmc->selected_mode) {
	case UHS_SDR50:
	case MMC_HS_52:
		reg |= SDHCI_CTRL_UHS_SDR50;
		break;
	case UHS_DDR50:
	case MMC_DDR_52:
		reg |= SDHCI_CTRL_UHS_DDR50;
		break;
	case UHS_SDR104:
	case MMC_HS_200:
		reg |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_HS_400:
		reg |= SDHCI_CTRL_HS400;
		break;
	default:
		reg |= SDHCI_CTRL_UHS_SDR12;
	}

	sdhci_writew(host, reg, SDHCI_HOST_CONTROL2);
}

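/*
 * Switch the I/O signalling voltage to match mmc->signal_voltage: when a
 * vqmmc regulator is available (DM_REGULATOR) it is reprogrammed to 3.3 V
 * or 1.8 V, and for SD cards the 1.8 V signalling enable bit in
 * SDHCI_HOST_CONTROL2 is updated, followed by a 5 ms wait for the rail to
 * stabilise.
 */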
static void sdhci_set_voltage(struct sdhci_host *host)
{
	if (IS_ENABLED(CONFIG_MMC_IO_VOLTAGE)) {
		struct mmc *mmc = (struct mmc *)host->mmc;
		u32 ctrl;

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		switch (mmc->signal_voltage) {
		case MMC_SIGNAL_VOLTAGE_330:
#if CONFIG_IS_ENABLED(DM_REGULATOR)
			if (mmc->vqmmc_supply) {
				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
					pr_err("failed to disable vqmmc-supply\n");
					return;
				}

				if (regulator_set_value(mmc->vqmmc_supply, 3300000)) {
					pr_err("failed to set vqmmc-voltage to 3.3V\n");
					return;
				}

				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
					pr_err("failed to enable vqmmc-supply\n");
					return;
				}
			}
#endif
			if (IS_SD(mmc)) {
				ctrl &= ~SDHCI_CTRL_VDD_180;
				sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
			}

			/* Wait for 5 ms */
			mdelay(5);

			/* 3.3V regulator output should be stable within 5 ms */
			if (IS_SD(mmc)) {
				/* Re-read so we check the controller, not the stale local value */
				ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
				if (ctrl & SDHCI_CTRL_VDD_180) {
					pr_err("3.3V regulator output did not become stable\n");
					return;
				}
			}

			break;
		case MMC_SIGNAL_VOLTAGE_180:
#if CONFIG_IS_ENABLED(DM_REGULATOR)
			if (mmc->vqmmc_supply) {
				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, false)) {
					pr_err("failed to disable vqmmc-supply\n");
					return;
				}

				if (regulator_set_value(mmc->vqmmc_supply, 1800000)) {
					pr_err("failed to set vqmmc-voltage to 1.8V\n");
					return;
				}

				if (regulator_set_enable_if_allowed(mmc->vqmmc_supply, true)) {
					pr_err("failed to enable vqmmc-supply\n");
					return;
				}
			}
#endif
			if (IS_SD(mmc)) {
				ctrl |= SDHCI_CTRL_VDD_180;
				sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
			}

			/* Wait for 5 ms */
			mdelay(5);

			/* 1.8V regulator output has to be stable within 5 ms */
			if (IS_SD(mmc)) {
				/* Re-read so we check the controller, not the stale local value */
				ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
				if (!(ctrl & SDHCI_CTRL_VDD_180)) {
					pr_err("1.8V regulator output did not become stable\n");
					return;
				}
			}

			break;
		default:
			/* No signal voltage switch required */
			return;
		}
	}
}

void sdhci_set_control_reg(struct sdhci_host *host)
{
	sdhci_set_voltage(host);
	sdhci_set_uhs_timing(host);
}

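/*
 * Apply the current ios settings from the mmc struct to the controller:
 * clock frequency, bus width and the high-speed enable bit, plus any
 * driver specific set_control_reg()/set_ios_post() hooks.
 */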
#ifdef CONFIG_DM_MMC
static int sdhci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int sdhci_set_ios(struct mmc *mmc)
{
#endif
	u32 ctrl;
	struct sdhci_host *host = mmc->priv;
	bool no_hispd_bit = false;

	if (host->ops && host->ops->set_control_reg)
		host->ops->set_control_reg(host);

	if (mmc->clock != host->clock)
		sdhci_set_clock(mmc, mmc->clock);

	if (mmc->clk_disable)
		sdhci_set_clock(mmc, 0);

	/* Set bus width */
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (mmc->bus_width == 8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
				(host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if ((SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) ||
				(host->quirks & SDHCI_QUIRK_USE_WIDE8))
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (mmc->bus_width == 4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}

	if ((host->quirks & SDHCI_QUIRK_NO_HISPD_BIT) ||
	    (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE)) {
		ctrl &= ~SDHCI_CTRL_HISPD;
		no_hispd_bit = true;
	}

	if (!no_hispd_bit) {
		if (mmc->selected_mode == MMC_HS ||
		    mmc->selected_mode == SD_HS ||
		    mmc->selected_mode == MMC_DDR_52 ||
		    mmc->selected_mode == MMC_HS_200 ||
		    mmc->selected_mode == MMC_HS_400 ||
		    mmc->selected_mode == UHS_SDR25 ||
		    mmc->selected_mode == UHS_SDR50 ||
		    mmc->selected_mode == UHS_SDR104 ||
		    mmc->selected_mode == UHS_DDR50)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/* If available, call the driver specific "post" set_ios() function */
	if (host->ops && host->ops->set_ios_post)
		return host->ops->set_ios_post(host);

	return 0;
}

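/*
 * One-time controller initialisation: request the card-detect GPIO (driver
 * model only), reset the controller, set up the bounce buffer used for
 * unaligned SDMA transfers, enable bus power and configure which interrupt
 * status bits are recorded.
 */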
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_GPIO)
	struct udevice *dev = mmc->dev;

	gpio_request_by_name(dev, "cd-gpios", 0,
			     &host->cd_gpio, GPIOD_IS_IN);
#endif

	sdhci_reset(host, SDHCI_RESET_ALL);

#if defined(CONFIG_FIXED_SDHCI_ALIGNED_BUFFER)
	host->align_buffer = (void *)CONFIG_FIXED_SDHCI_ALIGNED_BUFFER;
	/*
	 * Always use this bounce-buffer when CONFIG_FIXED_SDHCI_ALIGNED_BUFFER
	 * is defined.
	 */
	host->force_align_buffer = true;
#else
	if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) {
		host->align_buffer = memalign(8, 512 * 1024);
		if (!host->align_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -ENOMEM;
		}
	}
#endif

	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->ops && host->ops->get_cd)
		host->ops->get_cd(host);

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}

#ifdef CONFIG_DM_MMC
int sdhci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return sdhci_init(mmc);
}

static int sdhci_deferred_probe(struct udevice *dev)
{
	int err;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;

	if (host->ops && host->ops->deferred_probe) {
		err = host->ops->deferred_probe(host);
		if (err)
			return err;
	}
	return 0;
}

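/*
 * Report card presence: non-removable and polled cards are always treated
 * as present; otherwise the cd-gpios GPIO is used when valid, falling back
 * to the SDHCI_CARD_PRESENT bit, with the result inverted when
 * MMC_CAP_CD_ACTIVE_HIGH is set.
 */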
static int sdhci_get_cd(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	int value;

	/* If nonremovable, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NONREMOVABLE)
		return 1;
	/* If polling, assume that the card is always present. */
	if (mmc->cfg->host_caps & MMC_CAP_NEEDS_POLL)
		return 1;

#if CONFIG_IS_ENABLED(DM_GPIO)
	value = dm_gpio_get_value(&host->cd_gpio);
	if (value >= 0) {
		if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
			return !value;
		else
			return value;
	}
#endif
	value = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) &
		   SDHCI_CARD_PRESENT);
	if (mmc->cfg->host_caps & MMC_CAP_CD_ACTIVE_HIGH)
		return !value;
	else
		return value;
}

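/*
 * Poll the DAT[0] level in SDHCI_PRESENT_STATE until it matches 'state' or
 * timeout_us expires; a timeout_us of 0 means wait indefinitely.
 */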
static int sdhci_wait_dat0(struct udevice *dev, int state,
			   int timeout_us)
{
	int tmp;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct sdhci_host *host = mmc->priv;
	unsigned long timeout = timer_get_us() + timeout_us;

	/*
	 * readx_poll_timeout is unsuitable because sdhci_readl accepts
	 * two arguments.
	 */
	do {
		tmp = sdhci_readl(host, SDHCI_PRESENT_STATE);
		if (!!(tmp & SDHCI_DATA_0_LVL_MASK) == !!state)
			return 0;
	} while (!timeout_us || !time_after(timer_get_us(), timeout));

	return -ETIMEDOUT;
}

const struct dm_mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.deferred_probe	= sdhci_deferred_probe,
#ifdef MMC_SUPPORTS_TUNING
	.execute_tuning	= sdhci_execute_tuning,
#endif
	.wait_dat0	= sdhci_wait_dat0,
};
#else
static const struct mmc_ops sdhci_ops = {
	.send_cmd	= sdhci_send_command,
	.set_ios	= sdhci_set_ios,
	.init		= sdhci_init,
};
#endif

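/*
 * Fill in the mmc_config from the controller's capability registers,
 * optionally masked and extended by the "sdhci-caps-mask"/"sdhci-caps"
 * device tree properties: select SDMA/ADMA, read the host version, derive
 * f_max/f_min from the base clock and divider limits, and set the
 * supported voltages, bus widths and speed modes.
 */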
int sdhci_setup_cfg(struct mmc_config *cfg, struct sdhci_host *host,
		u32 f_max, u32 f_min)
{
	u32 caps, caps_1 = 0;
#if CONFIG_IS_ENABLED(DM_MMC)
	u64 dt_caps, dt_caps_mask;

	dt_caps_mask = dev_read_u64_default(host->mmc->dev,
					    "sdhci-caps-mask", 0);
	dt_caps = dev_read_u64_default(host->mmc->dev,
				       "sdhci-caps", 0);
	caps = ~lower_32_bits(dt_caps_mask) &
	       sdhci_readl(host, SDHCI_CAPABILITIES);
	caps |= lower_32_bits(dt_caps);
#else
	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
#endif
	debug("%s, caps: 0x%x\n", __func__, caps);

#ifdef CONFIG_MMC_SDHCI_SDMA
	if ((caps & SDHCI_CAN_DO_SDMA)) {
		host->flags |= USE_SDMA;
	} else {
		debug("%s: Your controller doesn't support SDMA!!\n",
		      __func__);
	}
#endif
#if CONFIG_IS_ENABLED(MMC_SDHCI_ADMA)
	if (!(caps & SDHCI_CAN_DO_ADMA2)) {
		printf("%s: Your controller doesn't support ADMA2!!\n",
		       __func__);
		return -EINVAL;
	}
	host->adma_desc_table = sdhci_adma_init();
	host->adma_addr = (dma_addr_t)host->adma_desc_table;

#ifdef CONFIG_DMA_ADDR_T_64BIT
	host->flags |= USE_ADMA64;
#else
	host->flags |= USE_ADMA;
#endif
#endif
	if (host->quirks & SDHCI_QUIRK_REG32_RW)
		host->version =
			sdhci_readl(host, SDHCI_HOST_VERSION - 2) >> 16;
	else
		host->version = sdhci_readw(host, SDHCI_HOST_VERSION);

	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &sdhci_ops;
#endif

	/* Check whether the clock multiplier is supported or not */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
#if CONFIG_IS_ENABLED(DM_MMC)
		caps_1 = ~upper_32_bits(dt_caps_mask) &
			 sdhci_readl(host, SDHCI_CAPABILITIES_1);
		caps_1 |= upper_32_bits(dt_caps);
#else
		caps_1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
#endif
		debug("%s, caps_1: 0x%x\n", __func__, caps_1);
		host->clk_mul = (caps_1 & SDHCI_CLOCK_MUL_MASK) >>
				SDHCI_CLOCK_MUL_SHIFT;
	}

	if (host->max_clk == 0) {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		else
			host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) >>
				SDHCI_CLOCK_BASE_SHIFT;
		host->max_clk *= 1000000;
		if (host->clk_mul)
			host->max_clk *= host->clk_mul;
	}
	if (host->max_clk == 0) {
		printf("%s: Hardware doesn't specify base clock frequency\n",
		       __func__);
		return -EINVAL;
	}
	if (f_max && (f_max < host->max_clk))
		cfg->f_max = f_max;
	else
		cfg->f_max = host->max_clk;
	if (f_min)
		cfg->f_min = f_min;
	else {
		if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300)
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_300;
		else
			cfg->f_min = cfg->f_max / SDHCI_MAX_DIV_SPEC_200;
	}
	cfg->voltages = 0;
	if (caps & SDHCI_CAN_VDD_330)
		cfg->voltages |= MMC_VDD_32_33 | MMC_VDD_33_34;
	if (caps & SDHCI_CAN_VDD_300)
		cfg->voltages |= MMC_VDD_29_30 | MMC_VDD_30_31;
	if (caps & SDHCI_CAN_VDD_180)
		cfg->voltages |= MMC_VDD_165_195;

	if (host->quirks & SDHCI_QUIRK_BROKEN_VOLTAGE)
		cfg->voltages |= host->voltages;

	if (caps & SDHCI_CAN_DO_HISPD)
		cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->host_caps |= MMC_MODE_4BIT;

	/* Since Host Controller Version 3.00 */
	if (SDHCI_GET_VERSION(host) >= SDHCI_SPEC_300) {
		if (!(caps & SDHCI_CAN_DO_8BIT))
			cfg->host_caps &= ~MMC_MODE_8BIT;
	}

	if (host->quirks & SDHCI_QUIRK_BROKEN_HISPD_MODE) {
		cfg->host_caps &= ~MMC_MODE_HS;
		cfg->host_caps &= ~MMC_MODE_HS_52MHz;
	}

	if (!(cfg->voltages & MMC_VDD_165_195) ||
	    (host->quirks & SDHCI_QUIRK_NO_1_8_V))
		caps_1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			    SDHCI_SUPPORT_DDR50);

	if (caps_1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
		      SDHCI_SUPPORT_DDR50))
		cfg->host_caps |= MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25);

	if (caps_1 & SDHCI_SUPPORT_SDR104) {
		cfg->host_caps |= MMC_CAP(UHS_SDR104) | MMC_CAP(UHS_SDR50);
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		cfg->host_caps |= MMC_CAP(MMC_HS_200);
	} else if (caps_1 & SDHCI_SUPPORT_SDR50) {
		cfg->host_caps |= MMC_CAP(UHS_SDR50);
	}

	if (caps_1 & SDHCI_SUPPORT_DDR50)
		cfg->host_caps |= MMC_CAP(UHS_DDR50);

	if (host->host_caps)
		cfg->host_caps |= host->host_caps;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	return 0;
}

#ifdef CONFIG_BLK
int sdhci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_sdhci(struct sdhci_host *host, u32 f_max, u32 f_min)
{
	int ret;

	ret = sdhci_setup_cfg(&host->cfg, host, f_max, f_min);
	if (ret)
		return ret;

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL) {
		printf("%s: mmc create fail!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
#endif