1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
4 * Written by Jean-Jacques Hiblot <jjhiblot@ti.com>
5 */
6
7 #define LOG_CATEGORY UCLASS_PHY
8
9 #include <common.h>
10 #include <dm.h>
11 #include <dm/device_compat.h>
12 #include <dm/devres.h>
13 #include <generic-phy.h>
14
phy_dev_ops(struct udevice * dev)15 static inline struct phy_ops *phy_dev_ops(struct udevice *dev)
16 {
17 return (struct phy_ops *)dev->driver->ops;
18 }
19
generic_phy_xlate_offs_flags(struct phy * phy,struct ofnode_phandle_args * args)20 static int generic_phy_xlate_offs_flags(struct phy *phy,
21 struct ofnode_phandle_args *args)
22 {
23 debug("%s(phy=%p)\n", __func__, phy);
24
25 if (args->args_count > 1) {
26 debug("Invaild args_count: %d\n", args->args_count);
27 return -EINVAL;
28 }
29
30 if (args->args_count)
31 phy->id = args->args[0];
32 else
33 phy->id = 0;
34
35 return 0;
36 }
37
/**
 * generic_phy_get_by_index_nodev() - Resolve the index'th PHY of a node
 * @node:  device tree node whose "phys" property is parsed
 * @index: index into the "phys" phandle list
 * @phy:   descriptor filled in on success (phy->dev, phy->id)
 *
 * If the phandle target is not itself a PHY provider, its parent is tried
 * as a multi-PHY provider and the child's "reg" value is prepended to the
 * argument list as the PHY index.
 *
 * Return: 0 on success, negative errno on parse/lookup/xlate failure.
 * On failure phy->dev is NULL so generic_phy_valid(phy) reports false.
 */
int generic_phy_get_by_index_nodev(ofnode node, int index, struct phy *phy)
{
	struct ofnode_phandle_args args;
	struct phy_ops *ops;
	struct udevice *phydev;
	int i, ret;

	debug("%s(node=%s, index=%d, phy=%p)\n",
	      __func__, ofnode_get_name(node), index, phy);

	assert(phy);
	phy->dev = NULL;
	ret = ofnode_parse_phandle_with_args(node, "phys", "#phy-cells", 0,
					     index, &args);
	if (ret) {
		/* Message now names the function actually called */
		debug("%s: ofnode_parse_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	ret = uclass_get_device_by_ofnode(UCLASS_PHY, args.node, &phydev);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);

		/* Check if args.node's parent is a PHY provider */
		ret = uclass_get_device_by_ofnode(UCLASS_PHY,
						  ofnode_get_parent(args.node),
						  &phydev);
		if (ret)
			return ret;

		/* insert phy idx at first position into args array */
		for (i = args.args_count; i >= 1; i--)
			args.args[i] = args.args[i - 1];

		args.args_count++;
		args.args[0] = ofnode_read_u32_default(args.node, "reg", -1);
	}

	phy->dev = phydev;

	ops = phy_dev_ops(phydev);

	if (ops->of_xlate)
		ret = ops->of_xlate(phy, &args);
	else
		ret = generic_phy_xlate_offs_flags(phy, &args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		/*
		 * Don't leave a half-initialized descriptor behind:
		 * clear phy->dev so generic_phy_valid() is false.
		 */
		phy->dev = NULL;
		return ret;
	}

	return 0;
}
96
generic_phy_get_by_index(struct udevice * dev,int index,struct phy * phy)97 int generic_phy_get_by_index(struct udevice *dev, int index,
98 struct phy *phy)
99 {
100 return generic_phy_get_by_index_nodev(dev_ofnode(dev), index, phy);
101 }
102
/* Look up a PHY of @dev by its "phy-names" entry instead of by index */
int generic_phy_get_by_name(struct udevice *dev, const char *phy_name,
			    struct phy *phy)
{
	int idx;

	debug("%s(dev=%p, name=%s, phy=%p)\n", __func__, dev, phy_name, phy);

	idx = dev_read_stringlist_search(dev, "phy-names", phy_name);
	if (idx >= 0)
		return generic_phy_get_by_index(dev, idx, phy);

	debug("dev_read_stringlist_search() failed: %d\n", idx);

	return idx;
}
118
generic_phy_init(struct phy * phy)119 int generic_phy_init(struct phy *phy)
120 {
121 struct phy_ops const *ops;
122 int ret;
123
124 if (!generic_phy_valid(phy))
125 return 0;
126 ops = phy_dev_ops(phy->dev);
127 if (!ops->init)
128 return 0;
129 ret = ops->init(phy);
130 if (ret)
131 dev_err(phy->dev, "PHY: Failed to init %s: %d.\n",
132 phy->dev->name, ret);
133
134 return ret;
135 }
136
generic_phy_reset(struct phy * phy)137 int generic_phy_reset(struct phy *phy)
138 {
139 struct phy_ops const *ops;
140 int ret;
141
142 if (!generic_phy_valid(phy))
143 return 0;
144 ops = phy_dev_ops(phy->dev);
145 if (!ops->reset)
146 return 0;
147 ret = ops->reset(phy);
148 if (ret)
149 dev_err(phy->dev, "PHY: Failed to reset %s: %d.\n",
150 phy->dev->name, ret);
151
152 return ret;
153 }
154
generic_phy_exit(struct phy * phy)155 int generic_phy_exit(struct phy *phy)
156 {
157 struct phy_ops const *ops;
158 int ret;
159
160 if (!generic_phy_valid(phy))
161 return 0;
162 ops = phy_dev_ops(phy->dev);
163 if (!ops->exit)
164 return 0;
165 ret = ops->exit(phy);
166 if (ret)
167 dev_err(phy->dev, "PHY: Failed to exit %s: %d.\n",
168 phy->dev->name, ret);
169
170 return ret;
171 }
172
generic_phy_power_on(struct phy * phy)173 int generic_phy_power_on(struct phy *phy)
174 {
175 struct phy_ops const *ops;
176 int ret;
177
178 if (!generic_phy_valid(phy))
179 return 0;
180 ops = phy_dev_ops(phy->dev);
181 if (!ops->power_on)
182 return 0;
183 ret = ops->power_on(phy);
184 if (ret)
185 dev_err(phy->dev, "PHY: Failed to power on %s: %d.\n",
186 phy->dev->name, ret);
187
188 return ret;
189 }
190
generic_phy_power_off(struct phy * phy)191 int generic_phy_power_off(struct phy *phy)
192 {
193 struct phy_ops const *ops;
194 int ret;
195
196 if (!generic_phy_valid(phy))
197 return 0;
198 ops = phy_dev_ops(phy->dev);
199 if (!ops->power_off)
200 return 0;
201 ret = ops->power_off(phy);
202 if (ret)
203 dev_err(phy->dev, "PHY: Failed to power off %s: %d.\n",
204 phy->dev->name, ret);
205
206 return ret;
207 }
208
generic_phy_configure(struct phy * phy,void * params)209 int generic_phy_configure(struct phy *phy, void *params)
210 {
211 struct phy_ops const *ops;
212
213 if (!generic_phy_valid(phy))
214 return 0;
215 ops = phy_dev_ops(phy->dev);
216
217 return ops->configure ? ops->configure(phy, params) : 0;
218 }
219
generic_phy_get_bulk(struct udevice * dev,struct phy_bulk * bulk)220 int generic_phy_get_bulk(struct udevice *dev, struct phy_bulk *bulk)
221 {
222 int i, ret, count;
223
224 bulk->count = 0;
225
226 /* Return if no phy declared */
227 if (!dev_read_prop(dev, "phys", NULL))
228 return 0;
229
230 count = dev_count_phandle_with_args(dev, "phys", "#phy-cells", 0);
231 if (count < 1)
232 return count;
233
234 bulk->phys = devm_kcalloc(dev, count, sizeof(struct phy), GFP_KERNEL);
235 if (!bulk->phys)
236 return -ENOMEM;
237
238 for (i = 0; i < count; i++) {
239 ret = generic_phy_get_by_index(dev, i, &bulk->phys[i]);
240 if (ret) {
241 pr_err("Failed to get PHY%d for %s\n", i, dev->name);
242 return ret;
243 }
244 bulk->count++;
245 }
246
247 return 0;
248 }
249
generic_phy_init_bulk(struct phy_bulk * bulk)250 int generic_phy_init_bulk(struct phy_bulk *bulk)
251 {
252 struct phy *phys = bulk->phys;
253 int i, ret;
254
255 for (i = 0; i < bulk->count; i++) {
256 ret = generic_phy_init(&phys[i]);
257 if (ret) {
258 pr_err("Can't init PHY%d\n", i);
259 goto phys_init_err;
260 }
261 }
262
263 return 0;
264
265 phys_init_err:
266 for (; i > 0; i--)
267 generic_phy_exit(&phys[i - 1]);
268
269 return ret;
270 }
271
generic_phy_exit_bulk(struct phy_bulk * bulk)272 int generic_phy_exit_bulk(struct phy_bulk *bulk)
273 {
274 struct phy *phys = bulk->phys;
275 int i, ret = 0;
276
277 for (i = 0; i < bulk->count; i++)
278 ret |= generic_phy_exit(&phys[i]);
279
280 return ret;
281 }
282
generic_phy_power_on_bulk(struct phy_bulk * bulk)283 int generic_phy_power_on_bulk(struct phy_bulk *bulk)
284 {
285 struct phy *phys = bulk->phys;
286 int i, ret;
287
288 for (i = 0; i < bulk->count; i++) {
289 ret = generic_phy_power_on(&phys[i]);
290 if (ret) {
291 pr_err("Can't power on PHY%d\n", i);
292 goto phys_poweron_err;
293 }
294 }
295
296 return 0;
297
298 phys_poweron_err:
299 for (; i > 0; i--)
300 generic_phy_power_off(&phys[i - 1]);
301
302 return ret;
303 }
304
generic_phy_power_off_bulk(struct phy_bulk * bulk)305 int generic_phy_power_off_bulk(struct phy_bulk *bulk)
306 {
307 struct phy *phys = bulk->phys;
308 int i, ret = 0;
309
310 for (i = 0; i < bulk->count; i++)
311 ret |= generic_phy_power_off(&phys[i]);
312
313 return ret;
314 }
315
/* Register the PHY uclass with driver model; providers bind under UCLASS_PHY */
UCLASS_DRIVER(phy) = {
	.id		= UCLASS_PHY,
	.name		= "phy",
};
320