// SPDX-License-Identifier: GPL-2.0+
/*
 * Direct Memory Access U-Class driver
 *
 * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com>
 * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com>
 * Written by Mugunthan V N <mugunthanvnm@ti.com>
 *
 * Author: Mugunthan V N <mugunthanvnm@ti.com>
 */

#define LOG_CATEGORY UCLASS_DMA

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/read.h>
#include <dma-uclass.h>
#include <dt-structs.h>
#include <errno.h>

#ifdef CONFIG_DMA_CHANNELS
static inline struct dma_ops *dma_dev_ops(struct udevice *dev)
{
	return (struct dma_ops *)dev->driver->ops;
}

# if CONFIG_IS_ENABLED(OF_CONTROL)
static int dma_of_xlate_default(struct dma *dma,
				struct ofnode_phandle_args *args)
{
	debug("%s(dma=%p)\n", __func__, dma);

	if (args->args_count > 1) {
		pr_err("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		dma->id = args->args[0];
	else
		dma->id = 0;

	return 0;
}
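
/*
 * A minimal sketch of a device tree fragment that the default xlate
 * above can parse: a single #dma-cells cell, consumed as the channel
 * id. The node names, compatible string and cell values below are
 * hypothetical and only illustrate the shape of the binding:
 *
 *	dmac: dma-controller@4e000000 {
 *		compatible = "vendor,example-dma";
 *		#dma-cells = <1>;
 *	};
 *
 *	ethernet@46000000 {
 *		dmas = <&dmac 2>, <&dmac 3>;
 *		dma-names = "tx", "rx";
 *	};
 */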

int dma_get_by_index(struct udevice *dev, int index, struct dma *dma)
{
	int ret;
	struct ofnode_phandle_args args;
	struct udevice *dev_dma;
	const struct dma_ops *ops;

	debug("%s(dev=%p, index=%d, dma=%p)\n", __func__, dev, index, dma);

	assert(dma);
	dma->dev = NULL;

	ret = dev_read_phandle_with_args(dev, "dmas", "#dma-cells", 0, index,
					 &args);
	if (ret) {
		pr_err("%s: dev_read_phandle_with_args failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_DMA, args.node, &dev_dma);
	if (ret) {
		pr_err("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		       __func__, ret);
		return ret;
	}

	dma->dev = dev_dma;

	ops = dma_dev_ops(dev_dma);

	if (ops->of_xlate)
		ret = ops->of_xlate(dma, &args);
	else
		ret = dma_of_xlate_default(dma, &args);
	if (ret) {
		pr_err("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return dma_request(dev_dma, dma);
}

int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma)
{
	int index;

	debug("%s(dev=%p, name=%s, dma=%p)\n", __func__, dev, name, dma);
	dma->dev = NULL;

	index = dev_read_stringlist_search(dev, "dma-names", name);
	if (index < 0) {
		pr_err("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return dma_get_by_index(dev, index, dma);
}
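
/*
 * Usage sketch (hypothetical client driver, not part of this file):
 * look a channel up by the name it carries in "dma-names", falling
 * back to index 0 when the property is absent. The "rx" name and the
 * fallback policy are illustrative assumptions.
 *
 *	struct dma dma_rx;
 *	int ret;
 *
 *	ret = dma_get_by_name(dev, "rx", &dma_rx);
 *	if (ret)
 *		ret = dma_get_by_index(dev, 0, &dma_rx);
 *	if (ret)
 *		return ret;
 */
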
# endif /* OF_CONTROL */

int dma_request(struct udevice *dev, struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dev);

	debug("%s(dev=%p, dma=%p)\n", __func__, dev, dma);

	dma->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(dma);
}

int dma_free(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->rfree)
		return 0;

	return ops->rfree(dma);
}
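
/*
 * Lifecycle sketch: a channel obtained via dma_get_by_index()/name()
 * is already requested; a manually filled struct dma must be passed
 * through dma_request() before use and released with dma_free()
 * afterwards. The dma_dev pointer and channel id are hypothetical.
 *
 *	struct dma dma = { .id = 0 };
 *
 *	ret = dma_request(dma_dev, &dma);
 *	if (ret)
 *		return ret;
 *	... use the channel ...
 *	dma_free(&dma);
 */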

int dma_enable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->enable)
		return -ENOSYS;

	return ops->enable(dma);
}

int dma_disable(struct dma *dma)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->disable)
		return -ENOSYS;

	return ops->disable(dma);
}

int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->prepare_rcv_buf)
		return -ENOSYS;

	return ops->prepare_rcv_buf(dma, dst, size);
}

int dma_receive(struct dma *dma, void **dst, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->receive)
		return -ENOSYS;

	return ops->receive(dma, dst, metadata);
}

int dma_send(struct dma *dma, void *src, size_t len, void *metadata)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->send)
		return -ENOSYS;

	return ops->send(dma, src, len, metadata);
}
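
/*
 * Transfer sketch (hypothetical client, illustrative only): queue a
 * receive buffer, enable the channel, send a frame and poll for the
 * answer. The buffer size, packet pointer, NULL metadata and the
 * busy-wait loop are assumptions, not a prescribed pattern.
 *
 *	u8 rx_buf[1024];
 *	void *packet;
 *	int len;
 *
 *	dma_prepare_rcv_buf(&dma_rx, rx_buf, sizeof(rx_buf));
 *	dma_enable(&dma_rx);
 *	dma_send(&dma_tx, tx_buf, tx_len, NULL);
 *	do {
 *		len = dma_receive(&dma_rx, &packet, NULL);
 *	} while (len == 0);
 *	dma_disable(&dma_rx);
 */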

int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data)
{
	struct dma_ops *ops = dma_dev_ops(dma->dev);

	debug("%s(dma=%p)\n", __func__, dma);

	if (!ops->get_cfg)
		return -ENOSYS;

	return ops->get_cfg(dma, cfg_id, cfg_data);
}
#endif /* CONFIG_DMA_CHANNELS */

int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	for (ret = uclass_first_device(UCLASS_DMA, &dev); dev && !ret;
	     ret = uclass_next_device(&dev)) {
		struct dma_dev_priv *uc_priv;

		uc_priv = dev_get_uclass_priv(dev);
		if (uc_priv->supported & transfer_type)
			break;
	}

	if (!dev) {
		pr_debug("No DMA device found that supports %x type\n",
			 transfer_type);
		return -EPROTONOSUPPORT;
	}

	*devp = dev;

	return ret;
}

int dma_memcpy(void *dst, void *src, size_t len)
{
	struct udevice *dev;
	const struct dma_ops *ops;
	int ret;

	ret = dma_get_device(DMA_SUPPORTS_MEM_TO_MEM, &dev);
	if (ret < 0)
		return ret;

	ops = device_get_ops(dev);
	if (!ops->transfer)
		return -ENOSYS;

	/* Invalidate the area so that no writeback into RAM races with the DMA */
	invalidate_dcache_range((unsigned long)dst, (unsigned long)dst +
				roundup(len, ARCH_DMA_MINALIGN));

	return ops->transfer(dev, DMA_MEM_TO_MEM, dst, src, len);
}
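
/*
 * Usage sketch: dma_memcpy() needs no channel setup; it picks the
 * first DMA controller advertising DMA_SUPPORTS_MEM_TO_MEM. The
 * buffers are hypothetical. Note the destination cache range is
 * invalidated above, so callers must not rely on prior CPU writes
 * to dst surviving the copy.
 *
 *	ret = dma_memcpy(dst_buf, src_buf, 4096);
 *	if (ret < 0)
 *		printf("HW memcpy failed: %d\n", ret);
 */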

UCLASS_DRIVER(dma) = {
	.id		= UCLASS_DMA,
	.name		= "dma",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.per_device_auto	= sizeof(struct dma_dev_priv),
};