Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/clk/baikal-t1/ccu-div.c
48856 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
4
*
5
* Authors:
6
* Serge Semin <[email protected]>
7
* Dmitry Dunaev <[email protected]>
8
*
9
* Baikal-T1 CCU Dividers interface driver
10
*/
11
12
#define pr_fmt(fmt) "bt1-ccu-div: " fmt
13
14
#include <linux/kernel.h>
15
#include <linux/printk.h>
16
#include <linux/bits.h>
17
#include <linux/bitfield.h>
18
#include <linux/slab.h>
19
#include <linux/clk-provider.h>
20
#include <linux/of.h>
21
#include <linux/spinlock.h>
22
#include <linux/regmap.h>
23
#include <linux/delay.h>
24
#include <linux/time64.h>
25
#include <linux/debugfs.h>
26
27
#include "ccu-div.h"
28
29
#define CCU_DIV_CTL 0x00
30
#define CCU_DIV_CTL_EN BIT(0)
31
#define CCU_DIV_CTL_RST BIT(1)
32
#define CCU_DIV_CTL_SET_CLKDIV BIT(2)
33
#define CCU_DIV_CTL_CLKDIV_FLD 4
34
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
35
GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
36
#define CCU_DIV_CTL_LOCK_SHIFTED BIT(27)
37
#define CCU_DIV_CTL_GATE_REF_BUF BIT(28)
38
#define CCU_DIV_CTL_LOCK_NORMAL BIT(31)
39
40
#define CCU_DIV_LOCK_CHECK_RETRIES 50
41
42
#define CCU_DIV_CLKDIV_MIN 0
43
#define CCU_DIV_CLKDIV_MAX(_mask) \
44
((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
45
46
/*
47
* Use the next two methods until there are generic field setter and
48
* getter available with non-constant mask support.
49
*/
50
/* Extract the CLKDIV field value from a CCU_DIV_CTL register snapshot. */
static inline u32 ccu_div_get(u32 mask, u32 val)
{
	u32 fld = val & mask;

	return fld >> CCU_DIV_CTL_CLKDIV_FLD;
}
54
55
/* Place a divider value into the CLKDIV field position, bounded by @mask. */
static inline u32 ccu_div_prep(u32 mask, u32 val)
{
	u32 fld = val << CCU_DIV_CTL_CLKDIV_FLD;

	return fld & mask;
}
59
60
static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
61
unsigned long div)
62
{
63
u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
64
65
do_div(ns, ref_clk);
66
67
return ns;
68
}
69
70
/*
 * Output frequency of the divider block: reference clock divided by the
 * divider value, with a zero divider meaning "pass-through" (divide by 1).
 */
static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	unsigned long d = div ? div : 1;

	return ref_clk / d;
}
75
76
/*
 * Command the hardware to latch a new CLKDIV value and wait for the divider
 * output to stabilize.
 *
 * Setting CCU_DIV_CTL_SET_CLKDIV triggers the update; completion is signalled
 * by a lock bit whose position depends on the CCU_DIV_LOCK_SHIFTED feature
 * flag (bit 27 vs bit 31). The caller is expected to hold div->lock.
 *
 * Returns 0 once the lock bit is observed, -ETIMEDOUT after
 * CCU_DIV_LOCK_CHECK_RETRIES unsuccessful polls.
 */
static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{
	unsigned long nd;
	u32 val = 0;
	u32 lock;
	int count;

	/* Poll interval: four periods of the divided clock. */
	nd = ccu_div_lock_delay_ns(parent_rate, divider);

	if (div->features & CCU_DIV_LOCK_SHIFTED)
		lock = CCU_DIV_CTL_LOCK_SHIFTED;
	else
		lock = CCU_DIV_CTL_LOCK_NORMAL;

	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);

	/*
	 * Until there is nsec-version of readl_poll_timeout() is available
	 * we have to implement the next polling loop.
	 */
	count = CCU_DIV_LOCK_CHECK_RETRIES;
	do {
		ndelay(nd);
		regmap_read(div->sys_regs, div->reg_ctl, &val);
		if (val & lock)
			return 0;
	} while (--count);

	return -ETIMEDOUT;
}
109
110
static int ccu_div_var_enable(struct clk_hw *hw)
111
{
112
struct clk_hw *parent_hw = clk_hw_get_parent(hw);
113
struct ccu_div *div = to_ccu_div(hw);
114
unsigned long flags;
115
u32 val = 0;
116
int ret;
117
118
if (!parent_hw) {
119
pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
120
return -EINVAL;
121
}
122
123
regmap_read(div->sys_regs, div->reg_ctl, &val);
124
if (val & CCU_DIV_CTL_EN)
125
return 0;
126
127
spin_lock_irqsave(&div->lock, flags);
128
ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
129
ccu_div_get(div->mask, val));
130
if (!ret)
131
regmap_update_bits(div->sys_regs, div->reg_ctl,
132
CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
133
spin_unlock_irqrestore(&div->lock, flags);
134
if (ret)
135
pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
136
137
return ret;
138
}
139
140
static int ccu_div_gate_enable(struct clk_hw *hw)
141
{
142
struct ccu_div *div = to_ccu_div(hw);
143
unsigned long flags;
144
145
spin_lock_irqsave(&div->lock, flags);
146
regmap_update_bits(div->sys_regs, div->reg_ctl,
147
CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
148
spin_unlock_irqrestore(&div->lock, flags);
149
150
return 0;
151
}
152
153
static void ccu_div_gate_disable(struct clk_hw *hw)
154
{
155
struct ccu_div *div = to_ccu_div(hw);
156
unsigned long flags;
157
158
spin_lock_irqsave(&div->lock, flags);
159
regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
160
spin_unlock_irqrestore(&div->lock, flags);
161
}
162
163
static int ccu_div_gate_is_enabled(struct clk_hw *hw)
164
{
165
struct ccu_div *div = to_ccu_div(hw);
166
u32 val = 0;
167
168
regmap_read(div->sys_regs, div->reg_ctl, &val);
169
170
return !!(val & CCU_DIV_CTL_EN);
171
}
172
173
static int ccu_div_buf_enable(struct clk_hw *hw)
174
{
175
struct ccu_div *div = to_ccu_div(hw);
176
unsigned long flags;
177
178
spin_lock_irqsave(&div->lock, flags);
179
regmap_update_bits(div->sys_regs, div->reg_ctl,
180
CCU_DIV_CTL_GATE_REF_BUF, 0);
181
spin_unlock_irqrestore(&div->lock, flags);
182
183
return 0;
184
}
185
186
static void ccu_div_buf_disable(struct clk_hw *hw)
187
{
188
struct ccu_div *div = to_ccu_div(hw);
189
unsigned long flags;
190
191
spin_lock_irqsave(&div->lock, flags);
192
regmap_update_bits(div->sys_regs, div->reg_ctl,
193
CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
194
spin_unlock_irqrestore(&div->lock, flags);
195
}
196
197
static int ccu_div_buf_is_enabled(struct clk_hw *hw)
198
{
199
struct ccu_div *div = to_ccu_div(hw);
200
u32 val = 0;
201
202
regmap_read(div->sys_regs, div->reg_ctl, &val);
203
204
return !(val & CCU_DIV_CTL_GATE_REF_BUF);
205
}
206
207
static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
208
unsigned long parent_rate)
209
{
210
struct ccu_div *div = to_ccu_div(hw);
211
unsigned long divider;
212
u32 val = 0;
213
214
regmap_read(div->sys_regs, div->reg_ctl, &val);
215
divider = ccu_div_get(div->mask, val);
216
217
return ccu_div_calc_freq(parent_rate, divider);
218
}
219
220
static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
221
unsigned long parent_rate,
222
unsigned int mask)
223
{
224
unsigned long divider;
225
226
divider = parent_rate / rate;
227
return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
228
CCU_DIV_CLKDIV_MAX(mask));
229
}
230
231
static int ccu_div_var_determine_rate(struct clk_hw *hw,
232
struct clk_rate_request *req)
233
{
234
struct ccu_div *div = to_ccu_div(hw);
235
unsigned long divider;
236
237
divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate,
238
div->mask);
239
240
req->rate = ccu_div_calc_freq(req->best_parent_rate, divider);
241
242
return 0;
243
}
244
245
/*
 * This method is used for the clock divider blocks, which support the
 * on-the-fly rate change. So due to lacking the EN bit functionality
 * they can't be gated before the rate adjustment.
 *
 * Returns 0 on success or -ETIMEDOUT if the divider failed to lock after
 * the CLKDIV update.
 */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;
	int ret;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	/*
	 * Per-block divider quirks: some blocks treat small divider values
	 * specially, so the raw value is remapped before being programmed.
	 * 0 means "divide by one" (see ccu_div_calc_freq).
	 */
	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
		divider = 0;
	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
		/* Dividers 1..3 are unusable: 1-2 become bypass, 3 rounds to 4. */
		if (divider == 1 || divider == 2)
			divider = 0;
		else if (divider == 3)
			divider = 4;
	}

	val = ccu_div_prep(div->mask, divider);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
	/* Latch the new divider and wait for the output to stabilize. */
	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}
279
280
/*
281
* This method is used for the clock divider blocks, which don't support
282
* the on-the-fly rate change.
283
*/
284
static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
285
unsigned long parent_rate)
286
{
287
struct ccu_div *div = to_ccu_div(hw);
288
unsigned long flags, divider;
289
u32 val;
290
291
divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
292
val = ccu_div_prep(div->mask, divider);
293
294
/*
295
* Also disable the clock divider block if it was enabled by default
296
* or by the bootloader.
297
*/
298
spin_lock_irqsave(&div->lock, flags);
299
regmap_update_bits(div->sys_regs, div->reg_ctl,
300
div->mask | CCU_DIV_CTL_EN, val);
301
spin_unlock_irqrestore(&div->lock, flags);
302
303
return 0;
304
}
305
306
static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
307
unsigned long parent_rate)
308
{
309
struct ccu_div *div = to_ccu_div(hw);
310
311
return ccu_div_calc_freq(parent_rate, div->divider);
312
}
313
314
static int ccu_div_fixed_determine_rate(struct clk_hw *hw,
315
struct clk_rate_request *req)
316
{
317
struct ccu_div *div = to_ccu_div(hw);
318
319
req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider);
320
321
return 0;
322
}
323
324
/*
 * Fixed dividers can't change rate; accept the request as a no-op so the
 * clk framework's set_rate path still succeeds.
 */
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}
329
330
#ifdef CONFIG_DEBUG_FS
331
332
/* Binding of a single CCU_DIV_CTL flag to one debugfs file. */
struct ccu_div_dbgfs_bit {
	struct ccu_div *div;	/* owning divider, filled in at file creation */
	const char *name;	/* debugfs file name */
	u32 mask;		/* register bit exposed through the file */
};

/* Static initializer for a bit descriptor; .div is set at runtime. */
#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask) {	\
	.name = _name,				\
	.mask = _mask				\
}

/*
 * Template descriptors for every control bit a divider may expose; the
 * *_debug_init() callbacks copy and filter these per clock type.
 */
static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)
352
353
/*
354
* It can be dangerous to change the Divider settings behind clock framework
355
* back, therefore we don't provide any kernel config based compile time option
356
* for this feature to enable.
357
*/
358
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
359
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS
360
361
/* Debugfs write handler: set or clear the bit described by @priv. */
static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *divider = bit->div;
	unsigned long irqflags;
	u32 newval = val ? bit->mask : 0;

	spin_lock_irqsave(&divider->lock, irqflags);
	regmap_update_bits(divider->sys_regs, divider->reg_ctl,
			   bit->mask, newval);
	spin_unlock_irqrestore(&divider->lock, irqflags);

	return 0;
}
374
375
/* Debugfs write handler: program a clamped divider value into CLKDIV. */
static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *divider = priv;
	unsigned long irqflags;
	u64 clamped;
	u32 fld;

	clamped = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
			  CCU_DIV_CLKDIV_MAX(divider->mask));
	fld = ccu_div_prep(divider->mask, clamped);

	spin_lock_irqsave(&divider->lock, irqflags);
	regmap_update_bits(divider->sys_regs, divider->reg_ctl,
			   divider->mask, fld);
	spin_unlock_irqrestore(&divider->lock, irqflags);

	return 0;
}
391
392
/* Files are user-writable only when write support is compiled in above. */
#define ccu_div_dbgfs_mode 0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

/* Read-only build: no setters, 0444 file mode. */
#define ccu_div_dbgfs_bit_set NULL
#define ccu_div_dbgfs_var_clkdiv_set NULL
#define ccu_div_dbgfs_mode 0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
401
402
static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
403
{
404
const struct ccu_div_dbgfs_bit *bit = priv;
405
struct ccu_div *div = bit->div;
406
u32 data = 0;
407
408
regmap_read(div->sys_regs, div->reg_ctl, &data);
409
*val = !!(data & bit->mask);
410
411
return 0;
412
}
413
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
414
ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
415
416
static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
417
{
418
struct ccu_div *div = priv;
419
u32 data = 0;
420
421
regmap_read(div->sys_regs, div->reg_ctl, &data);
422
*val = ccu_div_get(div->mask, data);
423
424
return 0;
425
}
426
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
427
ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
428
429
static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
430
{
431
struct ccu_div *div = priv;
432
433
*val = div->divider;
434
435
return 0;
436
}
437
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
438
ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
439
440
/*
 * Populate debugfs for a variable divider: one file per applicable control
 * bit plus a "div_clkdiv" file for the divider field.
 *
 * "div_bypass" and "div_lock" are always created; "div_en" only for gated
 * clocks (CLK_SET_RATE_GATE), "div_rst" only for reset-domain dividers,
 * and "div_buf" never (it belongs to buffer-type clocks).
 *
 * NOTE(review): the bits array is never freed — presumably its lifetime is
 * tied to the clock/debugfs entries, which live until driver teardown.
 */
static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bits;
	int didx, bidx, num = 2;
	const char *name;

	/* Two files always; div_en/div_rst only when applicable. */
	num += !!(div->flags & CLK_SET_RATE_GATE) +
		!!(div->features & CCU_DIV_RESET_DOMAIN);

	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
		name = ccu_div_bits[bidx].name;
		if (!(div->flags & CLK_SET_RATE_GATE) &&
		    !strcmp("div_en", name)) {
			continue;
		}

		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
		    !strcmp("div_rst", name)) {
			continue;
		}

		if (!strcmp("div_buf", name))
			continue;

		bits[didx] = ccu_div_bits[bidx];
		bits[didx].div = div;

		/* Some dividers report lock status at bit 27, not bit 31. */
		if (div->features & CCU_DIV_LOCK_SHIFTED &&
		    !strcmp("div_lock", name)) {
			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
		}

		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
					   dentry, &bits[didx],
					   &ccu_div_dbgfs_bit_fops);
		++didx;
	}

	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
				   div, &ccu_div_dbgfs_var_clkdiv_fops);
}
486
487
static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
488
{
489
struct ccu_div *div = to_ccu_div(hw);
490
struct ccu_div_dbgfs_bit *bit;
491
492
bit = kmalloc(sizeof(*bit), GFP_KERNEL);
493
if (!bit)
494
return;
495
496
*bit = ccu_div_bits[0];
497
bit->div = div;
498
debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
499
&ccu_div_dbgfs_bit_fops);
500
501
debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
502
&ccu_div_dbgfs_fixed_clkdiv_fops);
503
}
504
505
static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
506
{
507
struct ccu_div *div = to_ccu_div(hw);
508
struct ccu_div_dbgfs_bit *bit;
509
510
bit = kmalloc(sizeof(*bit), GFP_KERNEL);
511
if (!bit)
512
return;
513
514
*bit = ccu_div_bits[3];
515
bit->div = div;
516
debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
517
&ccu_div_dbgfs_bit_fops);
518
}
519
520
/* Populate debugfs for a fixed divider: read-only "div_clkdiv" file only. */
static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}
527
528
#else /* !CONFIG_DEBUG_FS */
529
530
#define ccu_div_var_debug_init NULL
531
#define ccu_div_gate_debug_init NULL
532
#define ccu_div_buf_debug_init NULL
533
#define ccu_div_fixed_debug_init NULL
534
535
#endif /* !CONFIG_DEBUG_FS */
536
537
/*
 * Variable divider with CLK_SET_RATE_GATE: the clock must be gated around
 * rate changes, so set_rate can use the "fast" path and enable re-latches
 * the divider.
 */
static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.determine_rate = ccu_div_var_determine_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};
546
547
/*
 * Variable divider without gate control: rate changes happen on-the-fly via
 * the "slow" set_rate path that waits for the divider lock.
 */
static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.determine_rate = ccu_div_var_determine_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};
553
554
/* Gate with a constant divider: only the EN bit is controllable. */
static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.determine_rate = ccu_div_fixed_determine_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};
563
564
/* Reference buffer: gate-only clock with inverted-polarity control bit. */
static const struct clk_ops ccu_div_buf_ops = {
	.enable = ccu_div_buf_enable,
	.disable = ccu_div_buf_disable,
	.is_enabled = ccu_div_buf_is_enabled,
	.debug_init = ccu_div_buf_debug_init
};
570
571
/* Fixed divider: no gating, rate fully determined by the constant divider. */
static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.determine_rate = ccu_div_fixed_determine_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};
577
578
/*
 * ccu_div_hw_register() - allocate and register one CCU divider clock.
 * @div_init: divider description (type, register base, parent, flags).
 *
 * Selects the clk_ops table matching the divider type (variable, gate,
 * buffer or fixed) and registers the clock with the common clk framework.
 *
 * Return: pointer to the new ccu_div on success, ERR_PTR(-EINVAL) on a bad
 * description, ERR_PTR(-ENOMEM) on allocation failure, or the
 * of_clk_hw_register() error.
 */
struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_div *div;
	int ret;

	if (!div_init)
		return ERR_PTR(-EINVAL);

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note since Baikal-T1 System Controller registers are MMIO-backed
	 * we won't check the regmap IO operations return status, because it
	 * must be zero anyway.
	 */
	div->hw.init = &hw_init;
	div->id = div_init->id;
	div->reg_ctl = div_init->base + CCU_DIV_CTL;
	div->sys_regs = div_init->sys_regs;
	div->flags = div_init->flags;
	div->features = div_init->features;
	spin_lock_init(&div->lock);

	hw_init.name = div_init->name;
	hw_init.flags = div_init->flags;

	if (div_init->type == CCU_DIV_VAR) {
		/* Gated variable dividers can use the fast set_rate path. */
		if (hw_init.flags & CLK_SET_RATE_GATE)
			hw_init.ops = &ccu_div_var_gate_to_set_ops;
		else
			hw_init.ops = &ccu_div_var_nogate_ops;
		div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
	} else if (div_init->type == CCU_DIV_GATE) {
		hw_init.ops = &ccu_div_gate_ops;
		div->divider = div_init->divider;
	} else if (div_init->type == CCU_DIV_BUF) {
		hw_init.ops = &ccu_div_buf_ops;
	} else if (div_init->type == CCU_DIV_FIXED) {
		hw_init.ops = &ccu_div_fixed_ops;
		div->divider = div_init->divider;
	} else {
		ret = -EINVAL;
		goto err_free_div;
	}

	if (!div_init->parent_name) {
		ret = -EINVAL;
		goto err_free_div;
	}
	/* Match the parent either by DT fw_name or by legacy global name. */
	parent_data.fw_name = div_init->parent_name;
	parent_data.name = div_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(div_init->np, &div->hw);
	if (ret)
		goto err_free_div;

	return div;

err_free_div:
	kfree(div);

	return ERR_PTR(ret);
}
647
648
/*
 * ccu_div_hw_unregister() - unregister a divider clock and free its memory.
 * @div: divider previously returned by ccu_div_hw_register().
 */
void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}
654
655