Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/clk/bcm/clk-iproc-pll.c
26282 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
// Copyright (C) 2014 Broadcom Corporation
3
4
#include <linux/kernel.h>
5
#include <linux/err.h>
6
#include <linux/clk-provider.h>
7
#include <linux/io.h>
8
#include <linux/of.h>
9
#include <linux/clkdev.h>
10
#include <linux/of_address.h>
11
#include <linux/delay.h>
12
13
#include "clk-iproc.h"
14
15
/* bit positions of the VCO band-select bits in the low VCO control register */
#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT 30

/*
 * PLL MACRO_SELECT modes 0 to 5 choose pre-calculated PLL output frequencies
 * from a look-up table. Mode 7 allows user to manipulate PLL clock dividers
 */
#define PLL_USER_MODE 7

/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

/* number of Kp loop-filter bands, selected by target VCO frequency */
#define NUM_KP_BANDS 3
enum kp_band {
	KP_BAND_MID = 0,
	KP_BAND_HIGH,
	KP_BAND_HIGH_HIGH
};

/* Kp loop-filter gain, indexed by [kp_band][reference-frequency band] */
static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
	{ 5, 6, 6, 7, 7, 8, 9, 10 },
	{ 4, 4, 5, 5, 6, 7, 8, 9 },
	{ 4, 5, 5, 6, 7, 8, 9, 10 },
};

/*
 * Reference-frequency bands in Hz:
 * { inclusive lower bound, exclusive upper bound } per band.
 */
static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
	{ 10000000, 12500000 },
	{ 12500000, 15000000 },
	{ 15000000, 20000000 },
	{ 20000000, 25000000 },
	{ 25000000, 50000000 },
	{ 50000000, 75000000 },
	{ 75000000, 100000000 },
	{ 100000000, 125000000 },
};

/* VCO frequency range boundaries in Hz (enumerators are range edges) */
enum vco_freq_range {
	VCO_LOW = 700000000U,
	VCO_MID = 1200000000U,
	VCO_HIGH = 2200000000U,
	VCO_HIGH_HIGH = 3100000000U,
	VCO_MAX = 4000000000U,
};

/* per-PLL state: mapped register blocks plus configuration tables */
struct iproc_pll {
	void __iomem *status_base;	/* PLL status registers */
	void __iomem *control_base;	/* PLL control registers */
	void __iomem *pwr_base;		/* optional power-control registers */
	void __iomem *asiu_base;	/* optional ASIU top-level gate registers */

	const struct iproc_pll_ctrl *ctrl;		/* register layout description */
	const struct iproc_pll_vco_param *vco_param;	/* optional fixed-rate table */
	unsigned int num_vco_entries;			/* entries in vco_param */
};

/* one registered clock: either the PLL itself or a leaf channel */
struct iproc_clk {
	struct clk_hw hw;
	struct iproc_pll *pll;
	const struct iproc_clk_ctrl *ctrl;	/* NULL for the PLL clock itself */
};

#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)
80
81
static int pll_calc_param(unsigned long target_rate,
82
unsigned long parent_rate,
83
struct iproc_pll_vco_param *vco_out)
84
{
85
u64 ndiv_int, ndiv_frac, residual;
86
87
ndiv_int = target_rate / parent_rate;
88
89
if (!ndiv_int || (ndiv_int > 255))
90
return -EINVAL;
91
92
residual = target_rate - (ndiv_int * parent_rate);
93
residual <<= 20;
94
95
/*
96
* Add half of the divisor so the result will be rounded to closest
97
* instead of rounded down.
98
*/
99
residual += (parent_rate / 2);
100
ndiv_frac = div64_u64((u64)residual, (u64)parent_rate);
101
102
vco_out->ndiv_int = ndiv_int;
103
vco_out->ndiv_frac = ndiv_frac;
104
vco_out->pdiv = 1;
105
106
vco_out->rate = vco_out->ndiv_int * parent_rate;
107
residual = (u64)vco_out->ndiv_frac * (u64)parent_rate;
108
residual >>= 20;
109
vco_out->rate += residual;
110
111
return 0;
112
}
113
114
/*
115
* Based on the target frequency, find a match from the VCO frequency parameter
116
* table and return its index
117
*/
118
static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
119
{
120
int i;
121
122
for (i = 0; i < pll->num_vco_entries; i++)
123
if (target_rate == pll->vco_param[i].rate)
124
break;
125
126
if (i >= pll->num_vco_entries)
127
return -EINVAL;
128
129
return i;
130
}
131
132
static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
133
{
134
int i;
135
136
if (ref_freq < ref_freq_table[0][0])
137
return -EINVAL;
138
139
for (i = 0; i < NUM_FREQ_BANDS; i++) {
140
if (ref_freq >= ref_freq_table[i][0] &&
141
ref_freq < ref_freq_table[i][1])
142
return kp_table[kp_index][i];
143
}
144
return -EINVAL;
145
}
146
147
static int pll_wait_for_lock(struct iproc_pll *pll)
148
{
149
int i;
150
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
151
152
for (i = 0; i < LOCK_DELAY; i++) {
153
u32 val = readl(pll->status_base + ctrl->status.offset);
154
155
if (val & (1 << ctrl->status.shift))
156
return 0;
157
udelay(10);
158
}
159
160
return -EIO;
161
}
162
163
/*
 * Write @val to a PLL register.  When the IPROC_CLK_NEEDS_READ_BACK quirk
 * is set and the target is the status or control register space, read the
 * register back so the write is posted to the hardware before execution
 * continues; the read result itself is discarded.
 */
static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
			const u32 offset, u32 val)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	writel(val, base + offset);

	if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
		     (base == pll->status_base || base == pll->control_base)))
		val = readl(base + offset);
}
174
175
/*
 * Power down / gate the PLL.  In order: gate the output at the ASIU top
 * level (when the PLL sits behind the ASIU), assert the embedded
 * power-down bits in the control space (when present), then isolate and
 * power down the core through the separate power register block (when
 * mapped).
 */
static void __pll_disable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		/* gate the PLL output at the ASIU top level */
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val &= ~(1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		/* power-down bits live inside the PLL control space */
		val = readl(pll->control_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* latch input value so core power can be shut down */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= 1 << ctrl->aon.iso_shift;
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);

		/* power down the core */
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}
}
203
204
/*
 * Power up / ungate the PLL: clear the embedded power-down bits in the
 * control space (when present), power up and de-isolate the core through
 * the separate power register block (when mapped), and finally ungate the
 * output at the ASIU top level for PLLs that require it.
 *
 * Always returns 0.
 */
static int __pll_enable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		/* clear the power-down bits in the PLL control space */
		val = readl(pll->control_base + ctrl->aon.offset);
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* power up the PLL and make sure it's not latched */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		val &= ~(1 << ctrl->aon.iso_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}

	/* certain PLLs also need to be ungated from the ASIU top level */
	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val |= (1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	return 0;
}
232
233
/*
 * Assert both the PLL reset and the post-divider reset bits.  The active
 * polarity of the bits depends on IPROC_CLK_PLL_RESET_ACTIVE_LOW.
 */
static void __pll_put_in_reset(struct iproc_pll *pll)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;

	val = readl(pll->control_base + reset->offset);
	if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
		val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
	else
		val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
	iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
246
247
/*
 * Program the digital loop-filter gains (Ki/Kp/Ka) and then release the
 * PLL and post-divider resets.  The gains must be written before reset is
 * deasserted; reset polarity depends on IPROC_CLK_PLL_RESET_ACTIVE_LOW.
 */
static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
				  unsigned int ka, unsigned int ki)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
	const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;

	/* replace all three gain fields in one read-modify-write */
	val = readl(pll->control_base + dig_filter->offset);
	val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
		 bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
		 bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
	val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
	       ka << dig_filter->ka_shift;
	iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);

	/* deassert both reset bits */
	val = readl(pll->control_base + reset->offset);
	if (ctrl->flags & IPROC_CLK_PLL_RESET_ACTIVE_LOW)
		val &= ~(BIT(reset->reset_shift) | BIT(reset->p_reset_shift));
	else
		val |= BIT(reset->reset_shift) | BIT(reset->p_reset_shift);
	iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
270
271
/*
272
* Determines if the change to be applied to the PLL is minor (just an update
273
* or the fractional divider). If so, then we can avoid going through a
274
* disruptive reset and lock sequence.
275
*/
276
static bool pll_fractional_change_only(struct iproc_pll *pll,
277
struct iproc_pll_vco_param *vco)
278
{
279
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
280
u32 val;
281
u32 ndiv_int;
282
unsigned int pdiv;
283
284
/* PLL needs to be locked */
285
val = readl(pll->status_base + ctrl->status.offset);
286
if ((val & (1 << ctrl->status.shift)) == 0)
287
return false;
288
289
val = readl(pll->control_base + ctrl->ndiv_int.offset);
290
ndiv_int = (val >> ctrl->ndiv_int.shift) &
291
bit_mask(ctrl->ndiv_int.width);
292
293
if (ndiv_int != vco->ndiv_int)
294
return false;
295
296
val = readl(pll->control_base + ctrl->pdiv.offset);
297
pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
298
299
if (pdiv != vco->pdiv)
300
return false;
301
302
return true;
303
}
304
305
/*
 * Program the PLL to the divider settings in @vco.  Selects loop-filter
 * gains from the target VCO frequency and the reference frequency, powers
 * the PLL up, and — unless only the fractional divider changed — runs the
 * full reset / reprogram / release / wait-for-lock sequence.
 *
 * Returns 0 on success, -EINVAL for an out-of-range rate or reference
 * frequency, or the error from the enable/lock steps.
 */
static int pll_set_rate(struct iproc_clk *clk, struct iproc_pll_vco_param *vco,
			unsigned long parent_rate)
{
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	int ka = 0, ki, kp, ret;
	unsigned long rate = vco->rate;
	u32 val;
	enum kp_band kp_index;
	unsigned long ref_freq;
	const char *clk_name = clk_hw_get_name(&clk->hw);

	/*
	 * reference frequency = parent frequency / PDIV
	 * If PDIV = 0, then it becomes a multiplier (x2)
	 */
	if (vco->pdiv == 0)
		ref_freq = parent_rate * 2;
	else
		ref_freq = parent_rate / vco->pdiv;

	/* determine Ki and Kp index based on target VCO frequency */
	if (rate >= VCO_LOW && rate < VCO_HIGH) {
		ki = 4;
		kp_index = KP_BAND_MID;
	} else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
		ki = 3;
		kp_index = KP_BAND_HIGH;
	} else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
		ki = 3;
		kp_index = KP_BAND_HIGH_HIGH;
	} else {
		pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
		       clk_name, rate);
		return -EINVAL;
	}

	kp = get_kp(ref_freq, kp_index);
	if (kp < 0) {
		pr_err("%s: pll: %s has invalid kp\n", __func__, clk_name);
		return kp;
	}

	ret = __pll_enable(pll);
	if (ret) {
		pr_err("%s: pll: %s fails to enable\n", __func__, clk_name);
		return ret;
	}

	/*
	 * When only the fractional divider differs from the current hardware
	 * state, update it in place and skip the reset/relock sequence.
	 */
	if (pll_fractional_change_only(clk->pll, vco)) {
		/* program fractional part of NDIV */
		if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
			val = readl(pll->control_base + ctrl->ndiv_frac.offset);
			val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
				 ctrl->ndiv_frac.shift);
			val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
			iproc_pll_write(pll, pll->control_base,
					ctrl->ndiv_frac.offset, val);
			return 0;
		}
	}

	/* put PLL in reset */
	__pll_put_in_reset(pll);

	/* set PLL in user mode before modifying PLL controls */
	if (ctrl->flags & IPROC_CLK_PLL_USER_MODE_ON) {
		val = readl(pll->control_base + ctrl->macro_mode.offset);
		val &= ~(bit_mask(ctrl->macro_mode.width) <<
			 ctrl->macro_mode.shift);
		val |= PLL_USER_MODE << ctrl->macro_mode.shift;
		iproc_pll_write(pll, pll->control_base,
				ctrl->macro_mode.offset, val);
	}

	/* clear the upper VCO control register before band selection */
	iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);

	val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);

	/* select the VCO band bits matching the target rate */
	if (rate >= VCO_LOW && rate < VCO_MID)
		val |= (1 << PLL_VCO_LOW_SHIFT);

	if (rate < VCO_HIGH)
		val &= ~(1 << PLL_VCO_HIGH_SHIFT);
	else
		val |= (1 << PLL_VCO_HIGH_SHIFT);

	iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);

	/* program integer part of NDIV */
	val = readl(pll->control_base + ctrl->ndiv_int.offset);
	val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
	val |= vco->ndiv_int << ctrl->ndiv_int.shift;
	iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);

	/* program fractional part of NDIV */
	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->control_base + ctrl->ndiv_frac.offset);
		val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
			 ctrl->ndiv_frac.shift);
		val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
		iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
				val);
	}

	/* program PDIV */
	val = readl(pll->control_base + ctrl->pdiv.offset);
	val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
	val |= vco->pdiv << ctrl->pdiv.shift;
	iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);

	/* release reset with the computed loop-filter gains, then wait */
	__pll_bring_out_reset(pll, kp, ka, ki);

	ret = pll_wait_for_lock(pll);
	if (ret < 0) {
		pr_err("%s: pll: %s failed to lock\n", __func__, clk_name);
		return ret;
	}

	return 0;
}
426
427
static int iproc_pll_enable(struct clk_hw *hw)
428
{
429
struct iproc_clk *clk = to_iproc_clk(hw);
430
struct iproc_pll *pll = clk->pll;
431
432
return __pll_enable(pll);
433
}
434
435
static void iproc_pll_disable(struct clk_hw *hw)
436
{
437
struct iproc_clk *clk = to_iproc_clk(hw);
438
struct iproc_pll *pll = clk->pll;
439
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
440
441
if (ctrl->flags & IPROC_CLK_AON)
442
return;
443
444
__pll_disable(pll);
445
}
446
447
/*
 * clk_ops .recalc_rate hook for the PLL.  Reads the divider registers and
 * computes:
 *
 *   rate = ((ndiv_int + ndiv_frac / 2^20) * parent_rate) / pdiv
 *
 * with PDIV == 0 acting as a x2 multiplier.  Returns 0 when the parent
 * rate is unknown or the PLL is not locked.
 */
static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;
	u64 ndiv, ndiv_int, ndiv_frac;
	unsigned int pdiv;
	unsigned long rate;

	if (parent_rate == 0)
		return 0;

	/* PLL needs to be locked */
	val = readl(pll->status_base + ctrl->status.offset);
	if ((val & (1 << ctrl->status.shift)) == 0)
		return 0;

	/*
	 * PLL output frequency =
	 *
	 * ((ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
	 */
	val = readl(pll->control_base + ctrl->ndiv_int.offset);
	ndiv_int = (val >> ctrl->ndiv_int.shift) &
		bit_mask(ctrl->ndiv_int.width);
	/* accumulate NDIV as a 20-bit fixed-point value */
	ndiv = ndiv_int << 20;

	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->control_base + ctrl->ndiv_frac.offset);
		ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
			bit_mask(ctrl->ndiv_frac.width);
		ndiv += ndiv_frac;
	}

	val = readl(pll->control_base + ctrl->pdiv.offset);
	pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

	/* shift the fixed-point fraction back out after multiplying */
	rate = (ndiv * parent_rate) >> 20;

	/* PDIV of 0 doubles the reference instead of dividing it */
	if (pdiv == 0)
		rate *= 2;
	else
		rate /= pdiv;

	return rate;
}
495
496
/*
 * clk_ops .determine_rate hook for the PLL.  PLLs with
 * IPROC_CLK_PLL_CALC_PARAM support arbitrary rates, so the closest
 * achievable rate is computed from divider math; otherwise the request is
 * snapped to the nearest entry of the fixed VCO parameter table.
 */
static int iproc_pll_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned int i;
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	unsigned long diff, best_diff;
	unsigned int best_idx = 0;
	int ret;

	if (req->rate == 0 || req->best_parent_rate == 0)
		return -EINVAL;

	if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
		struct iproc_pll_vco_param vco_param;

		ret = pll_calc_param(req->rate, req->best_parent_rate,
				     &vco_param);
		if (ret)
			return ret;

		/* report the rate the dividers can actually produce */
		req->rate = vco_param.rate;
		return 0;
	}

	/* a table-based PLL with no table cannot round anything */
	if (!pll->vco_param)
		return -EINVAL;

	best_diff = ULONG_MAX;
	for (i = 0; i < pll->num_vco_entries; i++) {
		/*
		 * NOTE(review): both operands are unsigned long, so the
		 * subtraction wraps before abs() sees it whenever req->rate
		 * is below the table entry — confirm abs() yields the true
		 * distance here for the kernel's abs() macro.
		 */
		diff = abs(req->rate - pll->vco_param[i].rate);
		if (diff <= best_diff) {
			best_diff = diff;
			best_idx = i;
		}
		/* break now if perfect match */
		if (diff == 0)
			break;
	}

	req->rate = pll->vco_param[best_idx].rate;

	return 0;
}
541
542
static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
543
unsigned long parent_rate)
544
{
545
struct iproc_clk *clk = to_iproc_clk(hw);
546
struct iproc_pll *pll = clk->pll;
547
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
548
struct iproc_pll_vco_param vco_param;
549
int rate_index, ret;
550
551
if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
552
ret = pll_calc_param(rate, parent_rate, &vco_param);
553
if (ret)
554
return ret;
555
} else {
556
rate_index = pll_get_rate_index(pll, rate);
557
if (rate_index < 0)
558
return rate_index;
559
560
vco_param = pll->vco_param[rate_index];
561
}
562
563
ret = pll_set_rate(clk, &vco_param, parent_rate);
564
return ret;
565
}
566
567
/* clock framework operations for the PLL clock itself */
static const struct clk_ops iproc_pll_ops = {
	.enable = iproc_pll_enable,
	.disable = iproc_pll_disable,
	.recalc_rate = iproc_pll_recalc_rate,
	.determine_rate = iproc_pll_determine_rate,
	.set_rate = iproc_pll_set_rate,
};
574
575
/*
 * clk_ops .enable hook for a leaf clock channel: ungate the channel
 * (enable bit is active low) and release its hold bit.  Always returns 0.
 */
static int iproc_clk_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	/* channel enable is active low */
	val = readl(pll->control_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.enable_shift);
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

	/* also make sure channel is not held */
	val = readl(pll->control_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.hold_shift);
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

	return 0;
}
594
595
static void iproc_clk_disable(struct clk_hw *hw)
596
{
597
struct iproc_clk *clk = to_iproc_clk(hw);
598
const struct iproc_clk_ctrl *ctrl = clk->ctrl;
599
struct iproc_pll *pll = clk->pll;
600
u32 val;
601
602
if (ctrl->flags & IPROC_CLK_AON)
603
return;
604
605
val = readl(pll->control_base + ctrl->enable.offset);
606
val |= 1 << ctrl->enable.enable_shift;
607
iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
608
}
609
610
static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
611
unsigned long parent_rate)
612
{
613
struct iproc_clk *clk = to_iproc_clk(hw);
614
const struct iproc_clk_ctrl *ctrl = clk->ctrl;
615
struct iproc_pll *pll = clk->pll;
616
u32 val;
617
unsigned int mdiv;
618
unsigned long rate;
619
620
if (parent_rate == 0)
621
return 0;
622
623
val = readl(pll->control_base + ctrl->mdiv.offset);
624
mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
625
if (mdiv == 0)
626
mdiv = 256;
627
628
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
629
rate = parent_rate / (mdiv * 2);
630
else
631
rate = parent_rate / mdiv;
632
633
return rate;
634
}
635
636
static int iproc_clk_determine_rate(struct clk_hw *hw,
637
struct clk_rate_request *req)
638
{
639
unsigned int bestdiv;
640
641
if (req->rate == 0)
642
return -EINVAL;
643
if (req->rate == req->best_parent_rate)
644
return 0;
645
646
bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
647
if (bestdiv < 2)
648
req->rate = req->best_parent_rate;
649
650
if (bestdiv > 256)
651
bestdiv = 256;
652
653
req->rate = req->best_parent_rate / bestdiv;
654
655
return 0;
656
}
657
658
static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
659
unsigned long parent_rate)
660
{
661
struct iproc_clk *clk = to_iproc_clk(hw);
662
const struct iproc_clk_ctrl *ctrl = clk->ctrl;
663
struct iproc_pll *pll = clk->pll;
664
u32 val;
665
unsigned int div;
666
667
if (rate == 0 || parent_rate == 0)
668
return -EINVAL;
669
670
div = DIV_ROUND_CLOSEST(parent_rate, rate);
671
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
672
div /= 2;
673
674
if (div > 256)
675
return -EINVAL;
676
677
val = readl(pll->control_base + ctrl->mdiv.offset);
678
if (div == 256) {
679
val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
680
} else {
681
val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
682
val |= div << ctrl->mdiv.shift;
683
}
684
iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
685
686
return 0;
687
}
688
689
/* clock framework operations for the leaf clock channels */
static const struct clk_ops iproc_clk_ops = {
	.enable = iproc_clk_enable,
	.disable = iproc_clk_disable,
	.recalc_rate = iproc_clk_recalc_rate,
	.determine_rate = iproc_clk_determine_rate,
	.set_rate = iproc_clk_set_rate,
};
696
697
/*
698
* Some PLLs require the PLL SW override bit to be set before changes can be
699
* applied to the PLL
700
*/
701
static void iproc_pll_sw_cfg(struct iproc_pll *pll)
702
{
703
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
704
705
if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
706
u32 val;
707
708
val = readl(pll->control_base + ctrl->sw_ctrl.offset);
709
val |= BIT(ctrl->sw_ctrl.shift);
710
iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
711
val);
712
}
713
}
714
715
/*
 * Map a PLL's register blocks from device tree, then register the PLL and
 * all of its leaf clock channels with the common clock framework.
 *
 * Register index 0 is the PLL control block (required); index 1 is the
 * optional power-control block; index 2 is either the ASIU gate block or
 * the split status block, depending on the PLL flags.  Clock names come
 * from the "clock-output-names" property, entry 0 naming the PLL itself,
 * which becomes the parent of every leaf clock.
 *
 * Failures are reported via WARN_ON and unwind any partial setup through
 * the goto chain below; the function returns nothing either way.
 */
void iproc_pll_clk_setup(struct device_node *node,
			 const struct iproc_pll_ctrl *pll_ctrl,
			 const struct iproc_pll_vco_param *vco,
			 unsigned int num_vco_entries,
			 const struct iproc_clk_ctrl *clk_ctrl,
			 unsigned int num_clks)
{
	int i, ret;
	struct iproc_pll *pll;
	struct iproc_clk *iclk;
	struct clk_init_data init;
	const char *parent_name;
	struct iproc_clk *iclk_array;
	struct clk_hw_onecell_data *clk_data;
	const char *clk_name;

	if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
		return;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (WARN_ON(!pll))
		return;

	clk_data = kzalloc(struct_size(clk_data, hws, num_clks), GFP_KERNEL);
	if (WARN_ON(!clk_data))
		goto err_clk_data;
	clk_data->num = num_clks;

	iclk_array = kcalloc(num_clks, sizeof(struct iproc_clk), GFP_KERNEL);
	if (WARN_ON(!iclk_array))
		goto err_clks;

	pll->control_base = of_iomap(node, 0);
	if (WARN_ON(!pll->control_base))
		goto err_pll_iomap;

	/* Some SoCs do not require the pwr_base, thus failing is not fatal */
	pll->pwr_base = of_iomap(node, 1);

	/* some PLLs require gating control at the top ASIU level */
	if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
		pll->asiu_base = of_iomap(node, 2);
		if (WARN_ON(!pll->asiu_base))
			goto err_asiu_iomap;
	}

	if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
		/* Some SoCs have a split status/control. If this does not
		 * exist, assume they are unified.
		 */
		pll->status_base = of_iomap(node, 2);
		if (!pll->status_base)
			goto err_status_iomap;
	} else
		pll->status_base = pll->control_base;

	/* initialize and register the PLL itself */
	pll->ctrl = pll_ctrl;

	/* entry 0 of the clock array is the PLL clock */
	iclk = &iclk_array[0];
	iclk->pll = pll;

	ret = of_property_read_string_index(node, "clock-output-names",
					    0, &clk_name);
	if (WARN_ON(ret))
		goto err_pll_register;

	init.name = clk_name;
	init.ops = &iproc_pll_ops;
	init.flags = 0;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);
	iclk->hw.init = &init;

	if (vco) {
		pll->num_vco_entries = num_vco_entries;
		pll->vco_param = vco;
	}

	/* apply the SW-override quirk before touching other PLL controls */
	iproc_pll_sw_cfg(pll);

	ret = clk_hw_register(NULL, &iclk->hw);
	if (WARN_ON(ret))
		goto err_pll_register;

	clk_data->hws[0] = &iclk->hw;
	/* the PLL clock becomes the parent of every leaf clock below */
	parent_name = clk_name;

	/* now initialize and register all leaf clocks */
	for (i = 1; i < num_clks; i++) {
		memset(&init, 0, sizeof(init));

		ret = of_property_read_string_index(node, "clock-output-names",
						    i, &clk_name);
		if (WARN_ON(ret))
			goto err_clk_register;

		iclk = &iclk_array[i];
		iclk->pll = pll;
		iclk->ctrl = &clk_ctrl[i];

		init.name = clk_name;
		init.ops = &iproc_clk_ops;
		init.flags = 0;
		init.parent_names = (parent_name ? &parent_name : NULL);
		init.num_parents = (parent_name ? 1 : 0);
		iclk->hw.init = &init;

		ret = clk_hw_register(NULL, &iclk->hw);
		if (WARN_ON(ret))
			goto err_clk_register;

		clk_data->hws[i] = &iclk->hw;
	}

	ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
	if (WARN_ON(ret))
		goto err_clk_register;

	return;

err_clk_register:
	/* unregister every clock registered so far, including the PLL */
	while (--i >= 0)
		clk_hw_unregister(clk_data->hws[i]);

err_pll_register:
	/* status_base is a distinct mapping only in the split case */
	if (pll->status_base != pll->control_base)
		iounmap(pll->status_base);

err_status_iomap:
	if (pll->asiu_base)
		iounmap(pll->asiu_base);

err_asiu_iomap:
	if (pll->pwr_base)
		iounmap(pll->pwr_base);

	iounmap(pll->control_base);

err_pll_iomap:
	kfree(iclk_array);

err_clks:
	kfree(clk_data);

err_clk_data:
	kfree(pll);
}
864
865