// SPDX-License-Identifier: GPL-2.0
/*
 * DMA support for Internal DMAC with SDHI SD/SDIO controller
 *
 * Copyright (C) 2016-19 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Horms Solutions, Simon Horman
 * Copyright (C) 2018-19 Sang Engineering, Wolfram Sang
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sys_soc.h>

#include "renesas_sdhi.h"
#include "tmio_mmc.h"

#define DM_CM_DTRAN_MODE	0x820
#define DM_CM_DTRAN_CTRL	0x828
#define DM_CM_RST		0x830
#define DM_CM_INFO1		0x840
#define DM_CM_INFO1_MASK	0x848
#define DM_CM_INFO2		0x850
#define DM_CM_INFO2_MASK	0x858
#define DM_DTRAN_ADDR		0x880
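/*
 * Note: the DM_* DMAC registers above sit at 8-byte strides in the SDHI
 * register block; this driver accesses them with 32-bit readl()/writel().
 */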

/* DM_CM_DTRAN_MODE */
#define DTRAN_MODE_CH_NUM_CH0	0	/* "downstream" = for write commands */
#define DTRAN_MODE_CH_NUM_CH1	BIT(16)	/* "upstream" = for read commands */
#define DTRAN_MODE_BUS_WIDTH	(BIT(5) | BIT(4))
#define DTRAN_MODE_ADDR_MODE	BIT(0)	/* 1 = Increment address, 0 = Fixed */

/* DM_CM_DTRAN_CTRL */
#define DTRAN_CTRL_DM_START	BIT(0)

/* DM_CM_RST */
#define RST_DTRANRST1		BIT(9)
#define RST_DTRANRST0		BIT(8)
#define RST_RESERVED_BITS	GENMASK_ULL(31, 0)

/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
#define INFO1_MASK_CLEAR	GENMASK_ULL(31, 0)
#define INFO1_DTRANEND1		BIT(20)
#define INFO1_DTRANEND1_OLD	BIT(17)
#define INFO1_DTRANEND0		BIT(16)

/* DM_CM_INFO2 and DM_CM_INFO2_MASK */
#define INFO2_MASK_CLEAR	GENMASK_ULL(31, 0)
#define INFO2_DTRANERR1		BIT(17)
#define INFO2_DTRANERR0		BIT(16)

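/*
 * dma_map_sg() state of an mmc_data, kept in data->host_cookie:
 * COOKIE_UNMAPPED   - no mapping exists (or it has been torn down)
 * COOKIE_PRE_MAPPED - mapped ahead of time by .pre_req(), released by .post_req()
 * COOKIE_MAPPED     - mapped on demand in start_dma(), released when the
 *                     transfer completes
 */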
enum renesas_sdhi_dma_cookie {
        COOKIE_UNMAPPED,
        COOKIE_PRE_MAPPED,
        COOKIE_MAPPED,
};

/*
 * Specification of this driver:
 * - host->chan_{rx,tx} are used as flags for enabling/disabling DMA
 * - Since this SDHI DMAC register set is wider than the 16- and 32-bit
 *   tmio accessors, we need a custom accessor.
 */

static unsigned long global_flags;
/*
 * Workaround to avoid using the RX DMAC from multiple channels at once.
 * On R-Car H3 ES1.* and M3-W ES1.0, when multiple SDHI channels use the
 * RX DMAC simultaneously, hundreds of bytes of data are sometimes not
 * stored into system memory even though the DMAC interrupt fired.
 * So this driver allows only one channel to use the RX DMAC at a time.
 */
#define SDHI_INTERNAL_DMAC_RX_IN_USE	0

/* Definitions for sampling clocks */
static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = {
        {
                .clk_rate = 0,
                .tap = 0x00000300,
                .tap_hs400_4tap = 0x00000100,
        },
};

static const struct renesas_sdhi_of_data of_data_rza2 = {
        .tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
                          TMIO_MMC_HAVE_CBSY,
        .tmio_ocr_mask	= MMC_VDD_32_33,
        .capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
        .bus_shift	= 2,
        .scc_offset	= 0 - 0x1000,
        .taps		= rcar_gen3_scc_taps,
        .taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
        /* DMAC can handle 32bit blk count but only 1 segment */
        .max_blk_count	= UINT_MAX / TMIO_MAX_BLK_SIZE,
        .max_segs	= 1,
};

static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
        .tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
                          TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
        .capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
        .capabilities2	= MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
        .bus_shift	= 2,
        .scc_offset	= 0x1000,
        .taps		= rcar_gen3_scc_taps,
        .taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
        /* DMAC can handle 32bit blk count but only 1 segment */
        .max_blk_count	= UINT_MAX / TMIO_MAX_BLK_SIZE,
        .max_segs	= 1,
        .sdhi_flags	= SDHI_FLAG_NEED_CLKH_FALLBACK,
};

static const struct renesas_sdhi_of_data of_data_rcar_gen3_no_sdh_fallback = {
        .tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
                          TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
        .capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
                          MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
        .capabilities2	= MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
        .bus_shift	= 2,
        .scc_offset	= 0x1000,
        .taps		= rcar_gen3_scc_taps,
        .taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
        /* DMAC can handle 32bit blk count but only 1 segment */
        .max_blk_count	= UINT_MAX / TMIO_MAX_BLK_SIZE,
        .max_segs	= 1,
};

static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
        { 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 15,
          16, 16, 16, 16, 16, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25 },
        { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 11,
          12, 17, 18, 18, 18, 18, 18, 18, 18, 19, 20, 21, 22, 23, 25, 25 }
};

static const u8 r8a77965_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
        { 1, 2, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 15, 16,
          17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 26, 27, 28, 29, 30, 31 },
        { 2, 3, 4, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17,
          17, 17, 20, 21, 22, 23, 24, 25, 27, 28, 29, 30, 31, 31, 31, 31 }
};

static const u8 r8a77990_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, 0, 0, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 8, 9, 10,
          11, 12, 13, 15, 16, 17, 17, 18, 18, 19, 20, 22, 24, 25, 26, 26 }
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400 = {
        .hs400_disabled = true,
        .hs400_4taps = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap_nohs400_one_rx = {
        .hs400_disabled = true,
        .hs400_4taps = true,
        .dma_one_rx_only = true,
        .old_info1_layout = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_4tap = {
        .hs400_4taps = true,
        .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
        .manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = {
        .hs400_disabled = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_fixed_addr = {
        .fixed_addr_mode = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps1357 = {
        .hs400_bad_taps = BIT(1) | BIT(3) | BIT(5) | BIT(7),
        .manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_bad_taps2367 = {
        .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
        .manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r8a7796_es13 = {
        .hs400_4taps = true,
        .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
        .hs400_calib_table = r8a7796_es13_calib_table,
        .manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r8a77965 = {
        .hs400_bad_taps = BIT(2) | BIT(3) | BIT(6) | BIT(7),
        .hs400_calib_table = r8a77965_calib_table,
        .manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r8a77990 = {
        .hs400_calib_table = r8a77990_calib_table,
        .manual_tap_correction = true,
};

static const struct renesas_sdhi_quirks sdhi_quirks_r9a09g011 = {
        .fixed_addr_mode = true,
        .hs400_disabled = true,
};

/*
 * Note for r8a7796 / r8a774a1: we currently cannot distinguish ES1.1 from
 * ES1.2, so we treat them equally. The match patterns below also cover
 * ES1.2 so that this stays enforced if a way to distinguish ES1.2 ever
 * becomes available.
 */
static const struct soc_device_attribute sdhi_quirks_match[] = {
        { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400_one_rx },
        { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap },
        { .soc_id = "r8a7796", .revision = "ES1.0", .data = &sdhi_quirks_4tap_nohs400_one_rx },
        { .soc_id = "r8a7796", .revision = "ES1.[12]", .data = &sdhi_quirks_4tap_nohs400 },
        { .soc_id = "r8a7796", .revision = "ES1.*", .data = &sdhi_quirks_r8a7796_es13 },
        { .soc_id = "r8a77980", .revision = "ES1.*", .data = &sdhi_quirks_nohs400 },
        { /* Sentinel. */ }
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a7795_compatible = {
        .of_data = &of_data_rcar_gen3,
        .quirks = &sdhi_quirks_bad_taps2367,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77961_compatible = {
        .of_data = &of_data_rcar_gen3,
        .quirks = &sdhi_quirks_bad_taps1357,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
        .of_data = &of_data_rcar_gen3,
        .quirks = &sdhi_quirks_r8a77965,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77970_compatible = {
        .of_data = &of_data_rcar_gen3_no_sdh_fallback,
        .quirks = &sdhi_quirks_nohs400,
};

static const struct renesas_sdhi_of_data_with_quirks of_r8a77990_compatible = {
        .of_data = &of_data_rcar_gen3,
        .quirks = &sdhi_quirks_r8a77990,
};

static const struct renesas_sdhi_of_data_with_quirks of_r9a09g011_compatible = {
        .of_data = &of_data_rcar_gen3,
        .quirks = &sdhi_quirks_r9a09g011,
};

static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_compatible = {
        .of_data = &of_data_rcar_gen3,
};

static const struct renesas_sdhi_of_data_with_quirks of_rcar_gen3_nohs400_compatible = {
        .of_data = &of_data_rcar_gen3,
        .quirks = &sdhi_quirks_nohs400,
};

static const struct renesas_sdhi_of_data_with_quirks of_rza2_compatible = {
        .of_data = &of_data_rza2,
        .quirks = &sdhi_quirks_fixed_addr,
};

static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
        { .compatible = "renesas,sdhi-r7s9210", .data = &of_rza2_compatible, },
        { .compatible = "renesas,sdhi-mmc-r8a77470", .data = &of_rcar_gen3_compatible, },
        { .compatible = "renesas,sdhi-r8a7795", .data = &of_r8a7795_compatible, },
        { .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
        { .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
        { .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
        { .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
        { .compatible = "renesas,sdhi-r8a77995", .data = &of_rcar_gen3_nohs400_compatible, },
        { .compatible = "renesas,sdhi-r9a09g011", .data = &of_r9a09g011_compatible, },
        { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
        { .compatible = "renesas,rcar-gen4-sdhi", .data = &of_rcar_gen3_compatible, },
        {},
};
MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match);

static void
renesas_sdhi_internal_dmac_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        struct renesas_sdhi *priv = host_to_priv(host);
        u32 dma_irqs = INFO1_DTRANEND0 |
                       (sdhi_has_quirk(priv, old_info1_layout) ?
                        INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);

        if (!host->chan_tx || !host->chan_rx)
                return;

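        /*
         * A set bit in DM_CM_INFO1_MASK masks that interrupt source: unmask
         * only the DTRANEND bits when enabling DMA, mask everything when
         * disabling it.
         */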
        writel(enable ? ~dma_irqs : INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);

        if (priv->dma_priv.enable)
                priv->dma_priv.enable(host, enable);
}

static void
renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host)
{
        u64 val = RST_DTRANRST1 | RST_DTRANRST0;

        renesas_sdhi_internal_dmac_enable_dma(host, false);

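        /*
         * Pulse the DMAC soft reset: the DTRANRSTn bits appear to be
         * active-low, so clear them to put both channels into reset, then
         * set them again to release the reset.
         */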
        writel(RST_RESERVED_BITS & ~val, host->ctl + DM_CM_RST);
        writel(RST_RESERVED_BITS | val, host->ctl + DM_CM_RST);

        clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);

        renesas_sdhi_internal_dmac_enable_dma(host, true);
}

static bool renesas_sdhi_internal_dmac_dma_irq(struct tmio_mmc_host *host)
{
        struct renesas_sdhi *priv = host_to_priv(host);
        struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;

        u32 dma_irqs = INFO1_DTRANEND0 |
                       (sdhi_has_quirk(priv, old_info1_layout) ?
                        INFO1_DTRANEND1_OLD : INFO1_DTRANEND1);
        u32 status = readl(host->ctl + DM_CM_INFO1);

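        /*
         * INFO1 status bits are cleared by writing 0, so status ^ dma_irqs
         * acks only the DTRANEND bits that were actually set while leaving
         * every other status bit untouched.
         */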
        if (status & dma_irqs) {
                writel(status ^ dma_irqs, host->ctl + DM_CM_INFO1);
                set_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags);
                if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags))
                        tasklet_schedule(&dma_priv->dma_complete);
        }

        return status & dma_irqs;
}

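/*
 * Completion needs both halves of the handshake: the DMAC DTRANEND interrupt
 * (SDHI_DMA_END_FLAG_DMA, set in the dma_irq handler above) and the SD access
 * end (SDHI_DMA_END_FLAG_ACCESS, set here). Whichever side arrives second
 * schedules the completion tasklet.
 */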
static void
renesas_sdhi_internal_dmac_dataend_dma(struct tmio_mmc_host *host)
{
        struct renesas_sdhi *priv = host_to_priv(host);
        struct renesas_sdhi_dma *dma_priv = &priv->dma_priv;

        set_bit(SDHI_DMA_END_FLAG_ACCESS, &dma_priv->end_flags);
        if (test_bit(SDHI_DMA_END_FLAG_DMA, &dma_priv->end_flags) ||
            host->data->error)
                tasklet_schedule(&dma_priv->dma_complete);
}

/*
 * .pre_req() may call renesas_sdhi_internal_dmac_map() with two different sg
 * pointers from two mmc_data, but the tmio host holds only a single sg_ptr.
 * So renesas_sdhi_internal_dmac_{un}map() must use the sg pointer from the
 * mmc_data instead of host->sg_ptr.
 */
static void
renesas_sdhi_internal_dmac_unmap(struct tmio_mmc_host *host,
                                 struct mmc_data *data,
                                 enum renesas_sdhi_dma_cookie cookie)
{
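        /*
         * Called with COOKIE_UNMAPPED (from .post_req() or a PIO fallback),
         * this tears down whatever mapping exists. Called with a specific
         * cookie (from the completion path), it only unmaps if that exact
         * mapping was made, so a .pre_req() mapping survives until
         * .post_req().
         */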
        bool unmap = cookie == COOKIE_UNMAPPED ? (data->host_cookie != cookie) :
                                                 (data->host_cookie == cookie);

        if (unmap) {
                dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
                             mmc_get_dma_dir(data));
                data->host_cookie = COOKIE_UNMAPPED;
        }
}

static bool
renesas_sdhi_internal_dmac_map(struct tmio_mmc_host *host,
                               struct mmc_data *data,
                               enum renesas_sdhi_dma_cookie cookie)
{
        if (data->host_cookie == COOKIE_PRE_MAPPED)
                return true;

        if (!dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
                        mmc_get_dma_dir(data)))
                return false;

        data->host_cookie = cookie;

        /* This DMAC needs buffers to be 128-byte aligned */
        if (!IS_ALIGNED(sg_dma_address(data->sg), 128)) {
                renesas_sdhi_internal_dmac_unmap(host, data, cookie);
                return false;
        }

        return true;
}

static void
renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
                                     struct mmc_data *data)
{
        struct renesas_sdhi *priv = host_to_priv(host);
        struct scatterlist *sg = host->sg_ptr;
        u32 dtran_mode = DTRAN_MODE_BUS_WIDTH;

        if (!sdhi_has_quirk(priv, fixed_addr_mode))
                dtran_mode |= DTRAN_MODE_ADDR_MODE;

        if (!renesas_sdhi_internal_dmac_map(host, data, COOKIE_MAPPED))
                goto force_pio;

        if (data->flags & MMC_DATA_READ) {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
                if (sdhi_has_quirk(priv, dma_one_rx_only) &&
                    test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
                        goto force_pio_with_unmap;
        } else {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
        }

        priv->dma_priv.end_flags = 0;
        renesas_sdhi_internal_dmac_enable_dma(host, true);

        /* set dma parameters */
        writel(dtran_mode, host->ctl + DM_CM_DTRAN_MODE);
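        /*
         * max_segs is 1 for this DMAC, so the (single) sg entry describes
         * the whole transfer and one start address is enough.
         */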
        writel(sg_dma_address(sg), host->ctl + DM_DTRAN_ADDR);

        host->dma_on = true;

        return;

force_pio_with_unmap:
        renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);

force_pio:
        renesas_sdhi_internal_dmac_enable_dma(host, false);
}

static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
        struct renesas_sdhi *priv = host_to_priv(host);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (!host->cmd->error) {
                /* start the DMAC */
                writel(DTRAN_CTRL_DM_START, host->ctl + DM_CM_DTRAN_CTRL);
        } else {
                /* on CMD errors, simulate DMA end immediately */
                set_bit(SDHI_DMA_END_FLAG_DMA, &priv->dma_priv.end_flags);
                if (test_bit(SDHI_DMA_END_FLAG_ACCESS, &priv->dma_priv.end_flags))
                        tasklet_schedule(&priv->dma_priv.dma_complete);
        }
}

static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
{
        enum dma_data_direction dir;

        if (!host->dma_on)
                return false;

        if (!host->data)
                return false;

        if (host->data->flags & MMC_DATA_READ)
                dir = DMA_FROM_DEVICE;
        else
                dir = DMA_TO_DEVICE;

        renesas_sdhi_internal_dmac_enable_dma(host, false);
        renesas_sdhi_internal_dmac_unmap(host, host->data, COOKIE_MAPPED);

        if (dir == DMA_FROM_DEVICE)
                clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);

        host->dma_on = false;

        return true;
}

static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);
        if (!renesas_sdhi_internal_dmac_complete(host))
                goto out;

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}

static void renesas_sdhi_internal_dmac_end_dma(struct tmio_mmc_host *host)
{
        if (host->data)
                renesas_sdhi_internal_dmac_complete(host);
}

static void renesas_sdhi_internal_dmac_post_req(struct mmc_host *mmc,
                                                struct mmc_request *mrq,
                                                int err)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!data)
                return;

        renesas_sdhi_internal_dmac_unmap(host, data, COOKIE_UNMAPPED);
}

static void renesas_sdhi_internal_dmac_pre_req(struct mmc_host *mmc,
                                               struct mmc_request *mrq)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct mmc_data *data = mrq->data;

        if (!data)
                return;

        data->host_cookie = COOKIE_UNMAPPED;
        renesas_sdhi_internal_dmac_map(host, data, COOKIE_PRE_MAPPED);
}

static void
renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
                                       struct tmio_mmc_data *pdata)
{
        struct renesas_sdhi *priv = host_to_priv(host);

        /* Disable DMAC interrupts initially */
        writel(INFO1_MASK_CLEAR, host->ctl + DM_CM_INFO1_MASK);
        writel(INFO2_MASK_CLEAR, host->ctl + DM_CM_INFO2_MASK);
        writel(0, host->ctl + DM_CM_INFO1);
        writel(0, host->ctl + DM_CM_INFO2);

        /* Set to a non-zero dummy value as a flag meaning "DMA enabled" */
        host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;

        tasklet_init(&priv->dma_priv.dma_complete,
                     renesas_sdhi_internal_dmac_complete_tasklet_fn,
                     (unsigned long)host);
        tasklet_init(&host->dma_issue,
                     renesas_sdhi_internal_dmac_issue_tasklet_fn,
                     (unsigned long)host);

        /* Add pre_req and post_req */
        host->ops.pre_req = renesas_sdhi_internal_dmac_pre_req;
        host->ops.post_req = renesas_sdhi_internal_dmac_post_req;
}

static void
renesas_sdhi_internal_dmac_release_dma(struct tmio_mmc_host *host)
{
        /* Set back to NULL as a flag meaning "DMA disabled" */
        host->chan_rx = host->chan_tx = NULL;
}

static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = {
        .start = renesas_sdhi_internal_dmac_start_dma,
        .enable = renesas_sdhi_internal_dmac_enable_dma,
        .request = renesas_sdhi_internal_dmac_request_dma,
        .release = renesas_sdhi_internal_dmac_release_dma,
        .abort = renesas_sdhi_internal_dmac_abort_dma,
        .dataend = renesas_sdhi_internal_dmac_dataend_dma,
        .end = renesas_sdhi_internal_dmac_end_dma,
        .dma_irq = renesas_sdhi_internal_dmac_dma_irq,
};

static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev)
{
        const struct soc_device_attribute *attr;
        const struct renesas_sdhi_of_data_with_quirks *of_data_quirks;
        const struct renesas_sdhi_quirks *quirks;
        struct device *dev = &pdev->dev;

        of_data_quirks = of_device_get_match_data(&pdev->dev);
        quirks = of_data_quirks->quirks;

        attr = soc_device_match(sdhi_quirks_match);
        if (attr)
                quirks = attr->data;

        /* value is max of SD_SECCNT. Confirmed by HW engineers */
        dma_set_max_seg_size(dev, 0xffffffff);

        return renesas_sdhi_probe(pdev, &renesas_sdhi_internal_dmac_dma_ops,
                                  of_data_quirks->of_data, quirks);
}

static const struct dev_pm_ops renesas_sdhi_internal_dmac_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
                           tmio_mmc_host_runtime_resume,
                           NULL)
};

static struct platform_driver renesas_internal_dmac_sdhi_driver = {
        .driver = {
                .name = "renesas_sdhi_internal_dmac",
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
                .pm = &renesas_sdhi_internal_dmac_dev_pm_ops,
                .of_match_table = renesas_sdhi_internal_dmac_of_match,
        },
        .probe = renesas_sdhi_internal_dmac_probe,
        .remove = renesas_sdhi_remove,
};

module_platform_driver(renesas_internal_dmac_sdhi_driver);

MODULE_DESCRIPTION("Renesas SDHI driver for internal DMAC");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_LICENSE("GPL v2");