Source: GitHub repository torvalds/linux — drivers/clk/clk-bulk.c (blob/master).
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 *
 * Dong Aisheng <[email protected]>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

/*
 * of_clk_bulk_get - look up every clock referenced by a device node
 * @np:       device node holding the "clocks" / "clock-names" properties
 * @num_clks: number of entries to fill in @clks
 * @clks:     table to populate with names and clk handles
 *
 * On failure every handle acquired so far is released and a negative
 * errno is returned; on success returns 0 with all entries populated.
 */
static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
					struct clk_bulk_data *clks)
{
	int idx;
	int err;

	/* Start from a clean slate so the unwind path is always safe. */
	for (idx = 0; idx < num_clks; idx++) {
		clks[idx].id = NULL;
		clks[idx].clk = NULL;
	}

	for (idx = 0; idx < num_clks; idx++) {
		/* A missing "clock-names" entry simply leaves id == NULL. */
		of_property_read_string_index(np, "clock-names", idx,
					      &clks[idx].id);
		clks[idx].clk = of_clk_get(np, idx);
		if (IS_ERR(clks[idx].clk)) {
			err = PTR_ERR(clks[idx].clk);
			pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
			       np, idx, err);
			clks[idx].clk = NULL;
			goto err_put;
		}
	}

	return 0;

err_put:
	/* Release only the clocks acquired before the failure. */
	clk_bulk_put(idx, clks);

	return err;
}

/*
 * of_clk_bulk_get_all - allocate and fill a table with all clocks of a node
 * @np:   device node to parse
 * @clks: out parameter receiving the newly allocated table
 *
 * Returns the number of clocks found (0 if none, leaving *@clks untouched),
 * -ENOMEM on allocation failure, or the error from of_clk_bulk_get().
 * On success the caller owns the table and frees it via clk_bulk_put_all().
 */
static int __must_check of_clk_bulk_get_all(struct device_node *np,
					    struct clk_bulk_data **clks)
{
	struct clk_bulk_data *table;
	int count;
	int err;

	count = of_clk_get_parent_count(np);
	if (!count)
		return 0;

	table = kmalloc_array(count, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	err = of_clk_bulk_get(np, count, table);
	if (err) {
		kfree(table);
		return err;
	}

	*clks = table;

	return count;
}

void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
73
{
74
while (--num_clks >= 0) {
75
clk_put(clks[num_clks].clk);
76
clks[num_clks].clk = NULL;
77
}
78
}
79
EXPORT_SYMBOL_GPL(clk_bulk_put);
80
81
/*
 * __clk_bulk_get - common worker for clk_bulk_get()/clk_bulk_get_optional()
 * @dev:      device the clocks belong to
 * @num_clks: number of entries in @clks
 * @clks:     table with .id fields set by the caller; .clk filled in here
 * @optional: when true, a -ENOENT lookup leaves that .clk NULL and continues
 *
 * Returns 0 on success; otherwise releases everything acquired so far and
 * returns the negative errno of the failed lookup.
 */
static int __clk_bulk_get(struct device *dev, int num_clks,
			  struct clk_bulk_data *clks, bool optional)
{
	int idx;
	int err;

	/* NULL out all handles so the unwind path never sees garbage. */
	for (idx = 0; idx < num_clks; idx++)
		clks[idx].clk = NULL;

	for (idx = 0; idx < num_clks; idx++) {
		clks[idx].clk = clk_get(dev, clks[idx].id);
		if (!IS_ERR(clks[idx].clk))
			continue;

		err = PTR_ERR(clks[idx].clk);
		clks[idx].clk = NULL;

		/* Absent-but-optional clocks are simply skipped. */
		if (err == -ENOENT && optional)
			continue;

		dev_err_probe(dev, err,
			      "Failed to get clk '%s'\n",
			      clks[idx].id);
		goto err_put;
	}

	return 0;

err_put:
	clk_bulk_put(idx, clks);

	return err;
}

/*
 * clk_bulk_get - strict bulk lookup: every clock named in @clks must exist.
 * @num_clks: number of entries in @clks
 * @clks: table with .id set by the caller; .clk filled on success
 *
 * Thin wrapper passing optional=false to __clk_bulk_get(), so even a
 * -ENOENT lookup is treated as a hard error. Returns 0 on success or a
 * negative errno with all acquired handles released.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL(clk_bulk_get);

/*
 * clk_bulk_get_optional - bulk lookup tolerating absent clocks.
 * @num_clks: number of entries in @clks
 * @clks: table with .id set by the caller; .clk filled on success
 *
 * Thin wrapper passing optional=true to __clk_bulk_get(): a clock that
 * does not exist (-ENOENT) leaves its .clk handle NULL and lookup
 * continues. Any other error still unwinds and is returned.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(clk_bulk_get_optional);

/*
 * clk_bulk_put_all - release and free a table obtained via clk_bulk_get_all()
 * @num_clks: number of entries in @clks
 * @clks:     table to release; ERR_PTR or NULL values are tolerated
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
	/* Tolerate a NULL/ERR_PTR cookie so callers need no guard. */
	if (!IS_ERR_OR_NULL(clks)) {
		clk_bulk_put(num_clks, clks);
		kfree(clks);
	}
}
EXPORT_SYMBOL(clk_bulk_put_all);

/*
 * clk_bulk_get_all - get every clock the device's DT node references
 * @dev:  device to look up (must have an OF node to find any clocks)
 * @clks: out parameter receiving a newly allocated table on success
 *
 * Returns the number of clocks found, 0 when the device has no DT node
 * or no clocks, or a negative errno.
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks)
{
	struct device_node *node = dev_of_node(dev);

	/* Without a DT node there is nothing to look up. */
	return node ? of_clk_bulk_get_all(node, clks) : 0;
}
EXPORT_SYMBOL(clk_bulk_get_all);

#ifdef CONFIG_HAVE_CLK_PREPARE

/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
/*
 * NOTE(review): the previous kernel-doc claimed "Returns 0 on success,
 * -EERROR otherwise", but this function is void — stale line removed.
 * Clocks are unprepared in reverse order of the table.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
	while (--num_clks >= 0)
		clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);

/**
169
* clk_bulk_prepare - prepare a set of clocks
170
* @num_clks: the number of clk_bulk_data
171
* @clks: the clk_bulk_data table being prepared
172
*
173
* clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
174
* Returns 0 on success, -EERROR otherwise.
175
*/
176
int __must_check clk_bulk_prepare(int num_clks,
177
const struct clk_bulk_data *clks)
178
{
179
int ret;
180
int i;
181
182
for (i = 0; i < num_clks; i++) {
183
ret = clk_prepare(clks[i].clk);
184
if (ret) {
185
pr_err("Failed to prepare clk '%s': %d\n",
186
clks[i].id, ret);
187
goto err;
188
}
189
}
190
191
return 0;
192
193
err:
194
clk_bulk_unprepare(i, clks);
195
196
return ret;
197
}
198
EXPORT_SYMBOL_GPL(clk_bulk_prepare);
199
200
#endif /* CONFIG_HAVE_CLK_PREPARE */
201
202
/**
203
* clk_bulk_disable - gate a set of clocks
204
* @num_clks: the number of clk_bulk_data
205
* @clks: the clk_bulk_data table being gated
206
*
207
* clk_bulk_disable must not sleep, which differentiates it from
208
* clk_bulk_unprepare. clk_bulk_disable must be called before
209
* clk_bulk_unprepare.
210
*/
211
void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
212
{
213
214
while (--num_clks >= 0)
215
clk_disable(clks[num_clks].clk);
216
}
217
EXPORT_SYMBOL_GPL(clk_bulk_disable);
218
219
/**
220
* clk_bulk_enable - ungate a set of clocks
221
* @num_clks: the number of clk_bulk_data
222
* @clks: the clk_bulk_data table being ungated
223
*
224
* clk_bulk_enable must not sleep
225
* Returns 0 on success, -EERROR otherwise.
226
*/
227
int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
228
{
229
int ret;
230
int i;
231
232
for (i = 0; i < num_clks; i++) {
233
ret = clk_enable(clks[i].clk);
234
if (ret) {
235
pr_err("Failed to enable clk '%s': %d\n",
236
clks[i].id, ret);
237
goto err;
238
}
239
}
240
241
return 0;
242
243
err:
244
clk_bulk_disable(i, clks);
245
246
return ret;
247
}
248
EXPORT_SYMBOL_GPL(clk_bulk_enable);
249
250