// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * phy-core.c -- Generic Phy framework.
4 *
5 * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Author: Kishon Vijay Abraham I <kishon@ti.com>
8 */
9
10#include <linux/kernel.h>
11#include <linux/export.h>
12#include <linux/module.h>
13#include <linux/err.h>
14#include <linux/device.h>
15#include <linux/slab.h>
16#include <linux/of.h>
17#include <linux/phy/phy.h>
18#include <linux/idr.h>
19#include <linux/pm_runtime.h>
20#include <linux/regulator/consumer.h>
21
22static struct class *phy_class;
23static DEFINE_MUTEX(phy_provider_mutex);
24static LIST_HEAD(phy_provider_list);
25static LIST_HEAD(phys);
26static DEFINE_IDA(phy_ida);
27
/* devres release callback: drop the reference taken by devm_phy_get(). */
static void devm_phy_release(struct device *dev, void *res)
{
	phy_put(*(struct phy **)res);
}
34
/* devres release callback: tear down a devm-registered phy provider. */
static void devm_phy_provider_release(struct device *dev, void *res)
{
	of_phy_provider_unregister(*(struct phy_provider **)res);
}
41
/* devres release callback: destroy a phy created with devm_phy_create(). */
static void devm_phy_consume(struct device *dev, void *res)
{
	phy_destroy(*(struct phy **)res);
}
48
/* devres match callback: true when the devres entry holds @match_data. */
static int devm_phy_match(struct device *dev, void *res, void *match_data)
{
	return *(struct phy **)res == match_data;
}
55
/**
 * phy_create_lookup() - allocate and register PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Creates and registers phy_lookup entry.
 *
 * Note: @con_id and @dev_id are stored by reference, not copied, so both
 * strings must stay valid until phy_remove_lookup() is called.
 */
int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return -EINVAL;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	pl->dev_id = dev_id;
	pl->con_id = con_id;
	pl->phy = phy;

	/* The lookup list shares its lock with the provider list. */
	mutex_lock(&phy_provider_mutex);
	list_add_tail(&pl->node, &phys);
	mutex_unlock(&phy_provider_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_create_lookup);
86
/**
 * phy_remove_lookup() - find and remove PHY/device association
 * @phy: the phy of the association
 * @con_id: connection ID string on device
 * @dev_id: the device of the association
 *
 * Finds and unregisters phy_lookup entry that was created with
 * phy_create_lookup(). Removing an association that was never registered
 * is silently ignored.
 */
void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id)
{
	struct phy_lookup *pl;

	if (!phy || !dev_id || !con_id)
		return;

	mutex_lock(&phy_provider_mutex);
	list_for_each_entry(pl, &phys, node)
		if (pl->phy == phy && !strcmp(pl->dev_id, dev_id) &&
		    !strcmp(pl->con_id, con_id)) {
			/* Safe: iteration stops right after the removal. */
			list_del(&pl->node);
			kfree(pl);
			break;
		}
	mutex_unlock(&phy_provider_mutex);
}
EXPORT_SYMBOL_GPL(phy_remove_lookup);
114
115static struct phy *phy_find(struct device *dev, const char *con_id)
116{
117 const char *dev_id = dev_name(dev);
118 struct phy_lookup *p, *pl = NULL;
119
120 mutex_lock(&phy_provider_mutex);
121 list_for_each_entry(p, &phys, node)
122 if (!strcmp(p->dev_id, dev_id) && !strcmp(p->con_id, con_id)) {
123 pl = p;
124 break;
125 }
126 mutex_unlock(&phy_provider_mutex);
127
128 return pl ? pl->phy : ERR_PTR(-ENODEV);
129}
130
/*
 * of_phy_provider_lookup() - return the phy provider matching a device node
 * @node: device node of the phy provider, or of one of its direct children
 *
 * Returns ERR_PTR(-EPROBE_DEFER) when no matching provider is registered
 * (yet), so consumers retry once the provider's driver has probed.
 *
 * NOTE(review): the visible caller (_of_phy_get()) holds phy_provider_mutex
 * around this walk; new callers should do the same.
 */
static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
{
	struct phy_provider *phy_provider;
	struct device_node *child;

	list_for_each_entry(phy_provider, &phy_provider_list, list) {
		if (phy_provider->dev->of_node == node)
			return phy_provider;

		for_each_child_of_node(phy_provider->children, child)
			if (child == node) {
				/* Drop the iterator's reference on @child. */
				of_node_put(child);
				return phy_provider;
			}
	}

	return ERR_PTR(-EPROBE_DEFER);
}
149
150int phy_pm_runtime_get(struct phy *phy)
151{
152 int ret;
153
154 if (!phy)
155 return 0;
156
157 if (!pm_runtime_enabled(&phy->dev))
158 return -ENOTSUPP;
159
160 ret = pm_runtime_get(&phy->dev);
161 if (ret < 0 && ret != -EINPROGRESS)
162 pm_runtime_put_noidle(&phy->dev);
163
164 return ret;
165}
166EXPORT_SYMBOL_GPL(phy_pm_runtime_get);
167
168int phy_pm_runtime_get_sync(struct phy *phy)
169{
170 int ret;
171
172 if (!phy)
173 return 0;
174
175 if (!pm_runtime_enabled(&phy->dev))
176 return -ENOTSUPP;
177
178 ret = pm_runtime_get_sync(&phy->dev);
179 if (ret < 0)
180 pm_runtime_put_sync(&phy->dev);
181
182 return ret;
183}
184EXPORT_SYMBOL_GPL(phy_pm_runtime_get_sync);
185
186int phy_pm_runtime_put(struct phy *phy)
187{
188 if (!phy)
189 return 0;
190
191 if (!pm_runtime_enabled(&phy->dev))
192 return -ENOTSUPP;
193
194 return pm_runtime_put(&phy->dev);
195}
196EXPORT_SYMBOL_GPL(phy_pm_runtime_put);
197
198int phy_pm_runtime_put_sync(struct phy *phy)
199{
200 if (!phy)
201 return 0;
202
203 if (!pm_runtime_enabled(&phy->dev))
204 return -ENOTSUPP;
205
206 return pm_runtime_put_sync(&phy->dev);
207}
208EXPORT_SYMBOL_GPL(phy_pm_runtime_put_sync);
209
210void phy_pm_runtime_allow(struct phy *phy)
211{
212 if (!phy)
213 return;
214
215 if (!pm_runtime_enabled(&phy->dev))
216 return;
217
218 pm_runtime_allow(&phy->dev);
219}
220EXPORT_SYMBOL_GPL(phy_pm_runtime_allow);
221
222void phy_pm_runtime_forbid(struct phy *phy)
223{
224 if (!phy)
225 return;
226
227 if (!pm_runtime_enabled(&phy->dev))
228 return;
229
230 pm_runtime_forbid(&phy->dev);
231}
232EXPORT_SYMBOL_GPL(phy_pm_runtime_forbid);
233
/**
 * phy_init() - initialize the phy, counting nested calls
 * @phy: the phy to initialize; NULL is a no-op returning 0
 *
 * Calls the driver's ->init() only on the first initialization; later
 * calls merely increment init_count. Each successful call must be
 * balanced by a phy_exit().
 *
 * Returns: 0 if successful, a negative error code otherwise
 */
int phy_init(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	/* Keep the phy resumed while the driver callback runs. */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->init_count == 0 && phy->ops->init) {
		ret = phy->ops->init(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy init failed --> %d\n", ret);
			/* Failed init leaves init_count unchanged. */
			goto out;
		}
	}
	++phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_init);
262
/**
 * phy_exit() - de-initialize the phy, counting nested calls
 * @phy: the phy to de-initialize; NULL is a no-op returning 0
 *
 * Balances one phy_init(); the driver's ->exit() runs only when the last
 * outstanding init is dropped (init_count == 1).
 *
 * Returns: 0 if successful, a negative error code otherwise
 */
int phy_exit(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	/* Keep the phy resumed while the driver callback runs. */
	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;
	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->init_count == 1 && phy->ops->exit) {
		ret = phy->ops->exit(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy exit failed --> %d\n", ret);
			/* Failed exit leaves init_count unchanged. */
			goto out;
		}
	}
	--phy->init_count;

out:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_exit);
291
/**
 * phy_power_on() - power on the phy, counting nested calls
 * @phy: the phy to power on; NULL is a no-op returning 0
 *
 * Enables the optional "phy" supply, takes a synchronous runtime PM
 * reference, and calls the driver's ->power_on() on the first power-on
 * only; later calls merely increment power_count. On failure all steps
 * already taken are unwound in reverse order.
 *
 * Returns: 0 if successful, a negative error code otherwise
 */
int phy_power_on(struct phy *phy)
{
	int ret = 0;

	if (!phy)
		goto out;

	if (phy->pwr) {
		ret = regulator_enable(phy->pwr);
		if (ret)
			goto out;
	}

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		goto err_pm_sync;

	ret = 0; /* Override possible ret == -ENOTSUPP */

	mutex_lock(&phy->mutex);
	if (phy->power_count == 0 && phy->ops->power_on) {
		ret = phy->ops->power_on(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
			goto err_pwr_on;
		}
	}
	++phy->power_count;
	mutex_unlock(&phy->mutex);
	return 0;

err_pwr_on:
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put_sync(phy);
err_pm_sync:
	if (phy->pwr)
		regulator_disable(phy->pwr);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
333
/**
 * phy_power_off() - power off the phy, counting nested calls
 * @phy: the phy to power off; NULL is a no-op returning 0
 *
 * Balances one phy_power_on(): decrements power_count, drops the runtime
 * PM reference and disables the optional "phy" supply. The driver's
 * ->power_off() runs only when the last outstanding power-on is dropped
 * (power_count == 1); if it fails, the count and references are left
 * untouched.
 *
 * Returns: 0 if successful, a negative error code otherwise
 */
int phy_power_off(struct phy *phy)
{
	int ret;

	if (!phy)
		return 0;

	mutex_lock(&phy->mutex);
	if (phy->power_count == 1 && phy->ops->power_off) {
		ret = phy->ops->power_off(phy);
		if (ret < 0) {
			dev_err(&phy->dev, "phy poweroff failed --> %d\n", ret);
			mutex_unlock(&phy->mutex);
			return ret;
		}
	}
	--phy->power_count;
	mutex_unlock(&phy->mutex);
	phy_pm_runtime_put(phy);

	if (phy->pwr)
		regulator_disable(phy->pwr);

	return 0;
}
EXPORT_SYMBOL_GPL(phy_power_off);
360
/**
 * phy_set_mode_ext() - set the phy operating mode and submode
 * @phy: the phy; NULL, or a phy without ->set_mode, is a no-op returning 0
 * @mode: new phy mode
 * @submode: mode-specific submode, passed through to the driver
 *
 * On success the new mode is cached in phy->attrs.mode.
 *
 * Returns: 0 if successful, a negative error code otherwise
 */
int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode)
{
	int ret;

	if (!phy || !phy->ops->set_mode)
		return 0;

	mutex_lock(&phy->mutex);
	ret = phy->ops->set_mode(phy, mode, submode);
	if (!ret)
		phy->attrs.mode = mode;
	mutex_unlock(&phy->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_set_mode_ext);
377
/**
 * phy_reset() - reset the phy
 * @phy: the phy; NULL, or a phy without ->reset, is a no-op returning 0
 *
 * Holds a runtime PM reference and the phy mutex around the driver's
 * ->reset() callback.
 *
 * Returns: 0 if successful, a negative error code otherwise
 */
int phy_reset(struct phy *phy)
{
	int ret;

	if (!phy || !phy->ops->reset)
		return 0;

	ret = phy_pm_runtime_get_sync(phy);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	mutex_lock(&phy->mutex);
	ret = phy->ops->reset(phy);
	mutex_unlock(&phy->mutex);

	phy_pm_runtime_put(phy);

	return ret;
}
EXPORT_SYMBOL_GPL(phy_reset);
398
399/**
400 * phy_calibrate() - Tunes the phy hw parameters for current configuration
401 * @phy: the phy returned by phy_get()
402 *
403 * Used to calibrate phy hardware, typically by adjusting some parameters in
404 * runtime, which are otherwise lost after host controller reset and cannot
405 * be applied in phy_init() or phy_power_on().
406 *
407 * Returns: 0 if successful, an negative error code otherwise
408 */
409int phy_calibrate(struct phy *phy)
410{
411 int ret;
412
413 if (!phy || !phy->ops->calibrate)
414 return 0;
415
416 mutex_lock(&phy->mutex);
417 ret = phy->ops->calibrate(phy);
418 mutex_unlock(&phy->mutex);
419
420 return ret;
421}
422EXPORT_SYMBOL_GPL(phy_calibrate);
423
424/**
425 * phy_configure() - Changes the phy parameters
426 * @phy: the phy returned by phy_get()
427 * @opts: New configuration to apply
428 *
429 * Used to change the PHY parameters. phy_init() must have been called
430 * on the phy. The configuration will be applied on the current phy
431 * mode, that can be changed using phy_set_mode().
432 *
433 * Returns: 0 if successful, an negative error code otherwise
434 */
435int phy_configure(struct phy *phy, union phy_configure_opts *opts)
436{
437 int ret;
438
439 if (!phy)
440 return -EINVAL;
441
442 if (!phy->ops->configure)
443 return -EOPNOTSUPP;
444
445 mutex_lock(&phy->mutex);
446 ret = phy->ops->configure(phy, opts);
447 mutex_unlock(&phy->mutex);
448
449 return ret;
450}
451EXPORT_SYMBOL_GPL(phy_configure);
452
453/**
454 * phy_validate() - Checks the phy parameters
455 * @phy: the phy returned by phy_get()
456 * @mode: phy_mode the configuration is applicable to.
457 * @submode: PHY submode the configuration is applicable to.
458 * @opts: Configuration to check
459 *
460 * Used to check that the current set of parameters can be handled by
461 * the phy. Implementations are free to tune the parameters passed as
462 * arguments if needed by some implementation detail or
463 * constraints. It will not change any actual configuration of the
464 * PHY, so calling it as many times as deemed fit will have no side
465 * effect.
466 *
467 * Returns: 0 if successful, an negative error code otherwise
468 */
469int phy_validate(struct phy *phy, enum phy_mode mode, int submode,
470 union phy_configure_opts *opts)
471{
472 int ret;
473
474 if (!phy)
475 return -EINVAL;
476
477 if (!phy->ops->validate)
478 return -EOPNOTSUPP;
479
480 mutex_lock(&phy->mutex);
481 ret = phy->ops->validate(phy, mode, submode, opts);
482 mutex_unlock(&phy->mutex);
483
484 return ret;
485}
486EXPORT_SYMBOL_GPL(phy_validate);
487
/**
 * _of_phy_get() - lookup and obtain a reference to a phy by phandle
 * @np: device_node for which to get the phy
 * @index: the index of the phy
 *
 * Returns the phy associated with the given phandle value,
 * after getting a refcount to it or -ENODEV if there is no such phy or
 * -EPROBE_DEFER if there is a phandle to the phy, but the device is
 * not yet loaded. This function uses of_xlate call back function provided
 * while registering the phy_provider to find the phy instance.
 */
static struct phy *_of_phy_get(struct device_node *np, int index)
{
	int ret;
	struct phy_provider *phy_provider;
	struct phy *phy = NULL;
	struct of_phandle_args args;

	ret = of_parse_phandle_with_args(np, "phys", "#phy-cells",
		index, &args);
	if (ret)
		return ERR_PTR(-ENODEV);

	/* This phy type handled by the usb-phy subsystem for now */
	if (of_device_is_compatible(args.np, "usb-nop-xceiv")) {
		phy = ERR_PTR(-ENODEV);
		goto out_put_node;
	}

	/* The provider list must not change while we resolve and xlate. */
	mutex_lock(&phy_provider_mutex);
	phy_provider = of_phy_provider_lookup(args.np);
	if (IS_ERR(phy_provider) || !try_module_get(phy_provider->owner)) {
		phy = ERR_PTR(-EPROBE_DEFER);
		goto out_unlock;
	}

	if (!of_device_is_available(args.np)) {
		dev_warn(phy_provider->dev, "Requested PHY is disabled\n");
		phy = ERR_PTR(-ENODEV);
		goto out_put_module;
	}

	phy = phy_provider->of_xlate(phy_provider->dev, &args);

out_put_module:
	/* The module reference is held only for the duration of the call. */
	module_put(phy_provider->owner);

out_unlock:
	mutex_unlock(&phy_provider_mutex);
out_put_node:
	of_node_put(args.np);

	return phy;
}
542
/**
 * of_phy_get() - lookup and obtain a reference to a phy using a device_node.
 * @np: device_node for which to get the phy
 * @con_id: name of the phy from device's point of view
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *of_phy_get(struct device_node *np, const char *con_id)
{
	struct phy *phy = NULL;
	int index = 0;

	if (con_id)
		index = of_property_match_string(np, "phy-names", con_id);

	/* A failed name lookup yields a negative index, which makes
	 * _of_phy_get() fail with -ENODEV below. */
	phy = _of_phy_get(np, index);
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	/* Pin the phy device itself; released by phy_put(). */
	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(of_phy_get);
572
/**
 * phy_put() - release the PHY
 * @phy: the phy returned by phy_get()
 *
 * Releases a refcount the caller received from phy_get(). NULL and
 * ERR_PTR() values are tolerated as no-ops.
 */
void phy_put(struct phy *phy)
{
	if (!phy || IS_ERR(phy))
		return;

	/* The optional ->release() callback runs under the phy mutex. */
	mutex_lock(&phy->mutex);
	if (phy->ops->release)
		phy->ops->release(phy);
	mutex_unlock(&phy->mutex);

	/* Drop the module and device references taken in phy_get(). */
	module_put(phy->ops->owner);
	put_device(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_put);
593
/**
 * devm_phy_put() - release the PHY
 * @dev: device that wants to release this phy
 * @phy: the phy returned by devm_phy_get(); NULL is a no-op
 *
 * destroys the devres associated with this phy and invokes phy_put
 * to release the phy.
 */
void devm_phy_put(struct device *dev, struct phy *phy)
{
	int r;

	if (!phy)
		return;

	/* devres_release() invokes devm_phy_release() -> phy_put(). */
	r = devres_release(dev, devm_phy_release, devm_phy_match, phy);
	dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
EXPORT_SYMBOL_GPL(devm_phy_put);
613
614/**
615 * of_phy_simple_xlate() - returns the phy instance from phy provider
616 * @dev: the PHY provider device
617 * @args: of_phandle_args (not used here)
618 *
619 * Intended to be used by phy provider for the common case where #phy-cells is
620 * 0. For other cases where #phy-cells is greater than '0', the phy provider
621 * should provide a custom of_xlate function that reads the *args* and returns
622 * the appropriate phy.
623 */
624struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args
625 *args)
626{
627 struct phy *phy;
628 struct class_dev_iter iter;
629
630 class_dev_iter_init(&iter, phy_class, NULL, NULL);
631 while ((dev = class_dev_iter_next(&iter))) {
632 phy = to_phy(dev);
633 if (args->np != phy->dev.of_node)
634 continue;
635
636 class_dev_iter_exit(&iter);
637 return phy;
638 }
639
640 class_dev_iter_exit(&iter);
641 return ERR_PTR(-ENODEV);
642}
643EXPORT_SYMBOL_GPL(of_phy_simple_xlate);
644
/**
 * phy_get() - lookup and obtain a reference to a phy.
 * @dev: device that requests this phy
 * @string: the phy name as given in the dt data or the name of the controller
 * port for non-dt case
 *
 * Returns the phy driver, after getting a refcount to it; or
 * -ENODEV if there is no such phy. The caller is responsible for
 * calling phy_put() to release that count.
 */
struct phy *phy_get(struct device *dev, const char *string)
{
	int index = 0;
	struct phy *phy;

	if (string == NULL) {
		dev_WARN(dev, "missing string\n");
		return ERR_PTR(-EINVAL);
	}

	if (dev->of_node) {
		/* DT case: resolve the name to an index in "phys". */
		index = of_property_match_string(dev->of_node, "phy-names",
			string);
		phy = _of_phy_get(dev->of_node, index);
	} else {
		/* Non-DT case: consult the phy_create_lookup() table. */
		phy = phy_find(dev, string);
	}
	if (IS_ERR(phy))
		return phy;

	if (!try_module_get(phy->ops->owner))
		return ERR_PTR(-EPROBE_DEFER);

	/* Pin the phy device itself; released by phy_put(). */
	get_device(&phy->dev);

	return phy;
}
EXPORT_SYMBOL_GPL(phy_get);
683
684/**
685 * phy_optional_get() - lookup and obtain a reference to an optional phy.
686 * @dev: device that requests this phy
687 * @string: the phy name as given in the dt data or the name of the controller
688 * port for non-dt case
689 *
690 * Returns the phy driver, after getting a refcount to it; or
691 * NULL if there is no such phy. The caller is responsible for
692 * calling phy_put() to release that count.
693 */
694struct phy *phy_optional_get(struct device *dev, const char *string)
695{
696 struct phy *phy = phy_get(dev, string);
697
698 if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
699 phy = NULL;
700
701 return phy;
702}
703EXPORT_SYMBOL_GPL(phy_optional_get);
704
705/**
706 * devm_phy_get() - lookup and obtain a reference to a phy.
707 * @dev: device that requests this phy
708 * @string: the phy name as given in the dt data or phy device name
709 * for non-dt case
710 *
711 * Gets the phy using phy_get(), and associates a device with it using
712 * devres. On driver detach, release function is invoked on the devres data,
713 * then, devres data is freed.
714 */
715struct phy *devm_phy_get(struct device *dev, const char *string)
716{
717 struct phy **ptr, *phy;
718
719 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
720 if (!ptr)
721 return ERR_PTR(-ENOMEM);
722
723 phy = phy_get(dev, string);
724 if (!IS_ERR(phy)) {
725 *ptr = phy;
726 devres_add(dev, ptr);
727 } else {
728 devres_free(ptr);
729 }
730
731 return phy;
732}
733EXPORT_SYMBOL_GPL(devm_phy_get);
734
735/**
736 * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
737 * @dev: device that requests this phy
738 * @string: the phy name as given in the dt data or phy device name
739 * for non-dt case
740 *
741 * Gets the phy using phy_get(), and associates a device with it using
742 * devres. On driver detach, release function is invoked on the devres
743 * data, then, devres data is freed. This differs to devm_phy_get() in
744 * that if the phy does not exist, it is not considered an error and
745 * -ENODEV will not be returned. Instead the NULL phy is returned,
746 * which can be passed to all other phy consumer calls.
747 */
748struct phy *devm_phy_optional_get(struct device *dev, const char *string)
749{
750 struct phy *phy = devm_phy_get(dev, string);
751
752 if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
753 phy = NULL;
754
755 return phy;
756}
757EXPORT_SYMBOL_GPL(devm_phy_optional_get);
758
759/**
760 * devm_of_phy_get() - lookup and obtain a reference to a phy.
761 * @dev: device that requests this phy
762 * @np: node containing the phy
763 * @con_id: name of the phy from device's point of view
764 *
765 * Gets the phy using of_phy_get(), and associates a device with it using
766 * devres. On driver detach, release function is invoked on the devres data,
767 * then, devres data is freed.
768 */
769struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
770 const char *con_id)
771{
772 struct phy **ptr, *phy;
773
774 ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
775 if (!ptr)
776 return ERR_PTR(-ENOMEM);
777
778 phy = of_phy_get(np, con_id);
779 if (!IS_ERR(phy)) {
780 *ptr = phy;
781 devres_add(dev, ptr);
782 } else {
783 devres_free(ptr);
784 }
785
786 return phy;
787}
788EXPORT_SYMBOL_GPL(devm_of_phy_get);
789
/**
 * devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
 * @dev: device that requests this phy
 * @np: node containing the phy
 * @index: index of the phy
 *
 * Gets the phy using _of_phy_get(), then gets a refcount to it,
 * and associates a device with it using devres. On driver detach,
 * release function is invoked on the devres data,
 * then, devres data is freed.
 *
 */
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
	int index)
{
	struct phy **ptr, *phy;

	ptr = devres_alloc(devm_phy_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy = _of_phy_get(np, index);
	if (IS_ERR(phy)) {
		devres_free(ptr);
		return phy;
	}

	/* Take the same references phy_get()/of_phy_get() would take. */
	if (!try_module_get(phy->ops->owner)) {
		devres_free(ptr);
		return ERR_PTR(-EPROBE_DEFER);
	}

	get_device(&phy->dev);

	*ptr = phy;
	devres_add(dev, ptr);

	return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
830
/**
 * phy_create() - create a new phy
 * @dev: device that is creating the new phy
 * @node: device node of the phy (falls back to dev->of_node when NULL)
 * @ops: function pointers for performing phy operations
 *
 * Called to create a phy using phy framework. The new device is named
 * "phy-<parent>.<id>" with an IDA-allocated id, optionally gets a "phy"
 * supply regulator, and has runtime PM enabled when the parent's is.
 * On failure an ERR_PTR() is returned and everything acquired so far is
 * released.
 */
struct phy *phy_create(struct device *dev, struct device_node *node,
		       const struct phy_ops *ops)
{
	int ret;
	int id;
	struct phy *phy;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	phy = kzalloc(sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(dev, "unable to get id\n");
		ret = id;
		goto free_phy;
	}

	/* After device_initialize(), cleanup goes through put_device(). */
	device_initialize(&phy->dev);
	mutex_init(&phy->mutex);

	phy->dev.class = phy_class;
	phy->dev.parent = dev;
	phy->dev.of_node = node ?: dev->of_node;
	phy->id = id;
	phy->ops = ops;

	ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
	if (ret)
		goto put_dev;

	/* phy-supply */
	phy->pwr = regulator_get_optional(&phy->dev, "phy");
	if (IS_ERR(phy->pwr)) {
		ret = PTR_ERR(phy->pwr);
		if (ret == -EPROBE_DEFER)
			goto put_dev;

		/* Any other error means "no supply", which is fine. */
		phy->pwr = NULL;
	}

	ret = device_add(&phy->dev);
	if (ret)
		goto put_dev;

	if (pm_runtime_enabled(dev)) {
		pm_runtime_enable(&phy->dev);
		pm_runtime_no_callbacks(&phy->dev);
	}

	return phy;

put_dev:
	put_device(&phy->dev);  /* calls phy_release() which frees resources */
	return ERR_PTR(ret);

free_phy:
	kfree(phy);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
903
904/**
905 * devm_phy_create() - create a new phy
906 * @dev: device that is creating the new phy
907 * @node: device node of the phy
908 * @ops: function pointers for performing phy operations
909 *
910 * Creates a new PHY device adding it to the PHY class.
911 * While at that, it also associates the device with the phy using devres.
912 * On driver detach, release function is invoked on the devres data,
913 * then, devres data is freed.
914 */
915struct phy *devm_phy_create(struct device *dev, struct device_node *node,
916 const struct phy_ops *ops)
917{
918 struct phy **ptr, *phy;
919
920 ptr = devres_alloc(devm_phy_consume, sizeof(*ptr), GFP_KERNEL);
921 if (!ptr)
922 return ERR_PTR(-ENOMEM);
923
924 phy = phy_create(dev, node, ops);
925 if (!IS_ERR(phy)) {
926 *ptr = phy;
927 devres_add(dev, ptr);
928 } else {
929 devres_free(ptr);
930 }
931
932 return phy;
933}
934EXPORT_SYMBOL_GPL(devm_phy_create);
935
/**
 * phy_destroy() - destroy the phy
 * @phy: the phy to be destroyed
 *
 * Called to destroy the phy. Disables runtime PM, then unregisters the
 * device; the final reference drop invokes phy_release(), which frees
 * the phy and its id.
 */
void phy_destroy(struct phy *phy)
{
	pm_runtime_disable(&phy->dev);
	device_unregister(&phy->dev);
}
EXPORT_SYMBOL_GPL(phy_destroy);
948
949/**
950 * devm_phy_destroy() - destroy the PHY
951 * @dev: device that wants to release this phy
952 * @phy: the phy returned by devm_phy_get()
953 *
954 * destroys the devres associated with this phy and invokes phy_destroy
955 * to destroy the phy.
956 */
957void devm_phy_destroy(struct device *dev, struct phy *phy)
958{
959 int r;
960
961 r = devres_release(dev, devm_phy_consume, devm_phy_match, phy);
962 dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
963}
964EXPORT_SYMBOL_GPL(devm_phy_destroy);
965
/**
 * __of_phy_provider_register() - create/register phy provider with the framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider.
 *
 * If the PHY provider doesn't nest children directly but uses a separate
 * child node to contain the individual children, the @children parameter
 * can be used to override the default. If NULL, the default (dev->of_node)
 * will be used. If non-NULL, the device node must be a child (or further
 * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
 * error code is returned.
 */
struct phy_provider *__of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider *phy_provider;

	/*
	 * If specified, the device node containing the children must itself
	 * be the provider's device node or a child (or further descendant)
	 * thereof.
	 */
	if (children) {
		struct device_node *parent = of_node_get(children), *next;

		/* Walk up the tree until we hit dev->of_node or the root. */
		while (parent) {
			if (parent == dev->of_node)
				break;

			next = of_get_parent(parent);
			of_node_put(parent);
			parent = next;
		}

		if (!parent)
			return ERR_PTR(-EINVAL);

		of_node_put(parent);
	} else {
		children = dev->of_node;
	}

	phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
	if (!phy_provider)
		return ERR_PTR(-ENOMEM);

	phy_provider->dev = dev;
	/* Held until of_phy_provider_unregister() drops it. */
	phy_provider->children = of_node_get(children);
	phy_provider->owner = owner;
	phy_provider->of_xlate = of_xlate;

	mutex_lock(&phy_provider_mutex);
	list_add_tail(&phy_provider->list, &phy_provider_list);
	mutex_unlock(&phy_provider_mutex);

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__of_phy_provider_register);
1032
/**
 * __devm_of_phy_provider_register() - create/register phy provider with the
 * framework
 * @dev: struct device of the phy provider
 * @children: device node containing children (if different from dev->of_node)
 * @owner: the module owner containing of_xlate
 * @of_xlate: function pointer to obtain phy instance from phy provider
 *
 * Creates struct phy_provider from dev and of_xlate function pointer.
 * This is used in the case of dt boot for finding the phy instance from
 * phy provider. While at that, it also associates the device with the
 * phy provider using devres. On driver detach, release function is invoked
 * on the devres data, then, devres data is freed.
 */
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
	struct device_node *children, struct module *owner,
	struct phy * (*of_xlate)(struct device *dev,
				 struct of_phandle_args *args))
{
	struct phy_provider **ptr, *phy_provider;

	ptr = devres_alloc(devm_phy_provider_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	phy_provider = __of_phy_provider_register(dev, children, owner,
		of_xlate);
	if (!IS_ERR(phy_provider)) {
		*ptr = phy_provider;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return phy_provider;
}
EXPORT_SYMBOL_GPL(__devm_of_phy_provider_register);
1069
1070/**
1071 * of_phy_provider_unregister() - unregister phy provider from the framework
1072 * @phy_provider: phy provider returned by of_phy_provider_register()
1073 *
1074 * Removes the phy_provider created using of_phy_provider_register().
1075 */
1076void of_phy_provider_unregister(struct phy_provider *phy_provider)
1077{
1078 if (IS_ERR(phy_provider))
1079 return;
1080
1081 mutex_lock(&phy_provider_mutex);
1082 list_del(&phy_provider->list);
1083 of_node_put(phy_provider->children);
1084 kfree(phy_provider);
1085 mutex_unlock(&phy_provider_mutex);
1086}
1087EXPORT_SYMBOL_GPL(of_phy_provider_unregister);
1088
/**
 * devm_of_phy_provider_unregister() - remove phy provider from the framework
 * @dev: struct device of the phy provider
 * @phy_provider: phy provider returned by of_phy_provider_register()
 *
 * destroys the devres associated with this phy provider and invokes
 * of_phy_provider_unregister to unregister the phy provider.
 */
void devm_of_phy_provider_unregister(struct device *dev,
	struct phy_provider *phy_provider)
{
	int r;

	r = devres_release(dev, devm_phy_provider_release, devm_phy_match,
		phy_provider);
	dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n");
}
EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister);
1106
1107/**
1108 * phy_release() - release the phy
1109 * @dev: the dev member within phy
1110 *
1111 * When the last reference to the device is removed, it is called
1112 * from the embedded kobject as release method.
1113 */
1114static void phy_release(struct device *dev)
1115{
1116 struct phy *phy;
1117
1118 phy = to_phy(dev);
1119 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
1120 regulator_put(phy->pwr);
1121 ida_simple_remove(&phy_ida, phy->id);
1122 kfree(phy);
1123}
1124
/* Framework initialization: create the "phy" class all phys belong to. */
static int __init phy_core_init(void)
{
	phy_class = class_create(THIS_MODULE, "phy");
	if (IS_ERR(phy_class)) {
		pr_err("failed to create phy class --> %ld\n",
			PTR_ERR(phy_class));
		return PTR_ERR(phy_class);
	}

	/* phy_release() frees each phy when its last reference drops. */
	phy_class->dev_release = phy_release;

	return 0;
}
device_initcall(phy_core_init);