GitHub Repository: torvalds/linux
Path: blob/master/drivers/devfreq/event/rockchip-dfi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 * Author: Lin Huang <[email protected]>
 */

#include <linux/clk.h>
#include <linux/devfreq-event.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/seqlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/bitfield.h>
#include <linux/hw_bitfield.h>
#include <linux/bits.h>
#include <linux/perf_event.h>

#include <soc/rockchip/rockchip_grf.h>
#include <soc/rockchip/rk3399_grf.h>
#include <soc/rockchip/rk3568_grf.h>
#include <soc/rockchip/rk3588_grf.h>

#define DMC_MAX_CHANNELS 4

/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)

#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
#define DDRMON_CH0_COUNT_NUM 0x28
#define DDRMON_CH0_DFI_ACCESS_NUM 0x2c
#define DDRMON_CH1_COUNT_NUM 0x3c
#define DDRMON_CH1_DFI_ACCESS_NUM 0x40

#define PERF_EVENT_CYCLES 0x0
#define PERF_EVENT_READ_BYTES 0x1
#define PERF_EVENT_WRITE_BYTES 0x2
#define PERF_EVENT_READ_BYTES0 0x3
#define PERF_EVENT_WRITE_BYTES0 0x4
#define PERF_EVENT_READ_BYTES1 0x5
#define PERF_EVENT_WRITE_BYTES1 0x6
#define PERF_EVENT_READ_BYTES2 0x7
#define PERF_EVENT_WRITE_BYTES2 0x8
#define PERF_EVENT_READ_BYTES3 0x9
#define PERF_EVENT_WRITE_BYTES3 0xa
#define PERF_EVENT_BYTES 0xb
#define PERF_ACCESS_TYPE_MAX 0xc

/**
 * struct dmc_count_channel - structure to hold counter values from the DDR controller
 * @access: Number of read and write accesses
 * @clock_cycles: DDR clock cycles
 * @read_access: number of read accesses
 * @write_access: number of write accesses
 */
struct dmc_count_channel {
        u64 access;
        u64 clock_cycles;
        u64 read_access;
        u64 write_access;
};

struct dmc_count {
        struct dmc_count_channel c[DMC_MAX_CHANNELS];
};

/*
 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * for the operating points. Whenever the usage leaves these bounds an event is
 * generated to indicate the DDR frequency should be changed.
 */
struct rockchip_dfi {
        struct devfreq_event_dev *edev;
        struct devfreq_event_desc desc;
        struct dmc_count last_event_count;

        struct dmc_count last_perf_count;
        struct dmc_count total_count;
        seqlock_t count_seqlock; /* protects last_perf_count and total_count */

        struct device *dev;
        void __iomem *regs;
        struct regmap *regmap_pmu;
        struct clk *clk;
        int usecount;
        struct mutex mutex;
        u32 ddr_type;
        unsigned int channel_mask;
        unsigned int max_channels;
        enum cpuhp_state cpuhp_state;
        struct hlist_node node;
        struct pmu pmu;
        struct hrtimer timer;
        unsigned int cpu;
        int active_events;
        int burst_len;
        int buswidth[DMC_MAX_CHANNELS];
        int ddrmon_stride;
        bool ddrmon_ctrl_single;
        u32 lp5_bank_mode;
        bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
        unsigned int count_multiplier; /* number of data clocks per count */
};

static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl)
{
        u32 ddrmon_ver;

        switch (dfi->ddr_type) {
        case ROCKCHIP_DDRTYPE_LPDDR2:
        case ROCKCHIP_DDRTYPE_LPDDR3:
                *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 1) |
                        FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
                        FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
                break;
        case ROCKCHIP_DDRTYPE_LPDDR4:
        case ROCKCHIP_DDRTYPE_LPDDR4X:
                *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
                        FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 1) |
                        FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
                break;
        case ROCKCHIP_DDRTYPE_LPDDR5:
                ddrmon_ver = readl_relaxed(dfi->regs);
                if (ddrmon_ver < 0x40) {
                        *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
                                FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
                                FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 1) |
                                FIELD_PREP_WM16(DDRMON_CTRL_LP5_BANK_MODE_MASK,
                                                dfi->lp5_bank_mode);
                        break;
                }

                /*
                 * As it is unknown whether the unpleasant special case
                 * behaviour used by the vendor kernel is needed for any
                 * shipping hardware, ask users to report if they have
                 * some of that hardware.
                 */
                dev_err(&dfi->edev->dev,
                        "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
                        ddrmon_ver);
                return -EOPNOTSUPP;
        default:
                dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
                        dfi->ddr_type);
                return -EOPNOTSUPP;
        }

        return 0;
}

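/*
 * DDRMON_CTRL is a write-masked register in the style used throughout
 * Rockchip SoCs: the upper 16 bits of a write select which of the lower
 * 16 control bits are actually updated, so single bits can be changed
 * without a read-modify-write. FIELD_PREP_WM16() from
 * <linux/hw_bitfield.h> prepares both halves at once; conceptually it
 * behaves like this illustrative (non-kernel) macro:
 *
 *      #define EXAMPLE_FIELD_PREP_WM16(mask, val) \
 *              (FIELD_PREP((mask), (val)) | ((u32)(mask) << 16))
 *
 * e.g. FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) clears BIT(1) while
 * leaving all other DDRMON_CTRL bits untouched.
 */
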
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
        void __iomem *dfi_regs = dfi->regs;
        int i, ret = 0;
        u32 ctrl;

        mutex_lock(&dfi->mutex);

        dfi->usecount++;
        if (dfi->usecount > 1)
                goto out;

        ret = clk_prepare_enable(dfi->clk);
        if (ret) {
                dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);
                goto out;
        }

        ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl);
        if (ret)
                goto out;

        for (i = 0; i < dfi->max_channels; i++) {

                if (!(dfi->channel_mask & BIT(i)))
                        continue;

                /* clear DDRMON_CTRL setting */
                writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_TIMER_CNT_EN, 0) |
                               FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) |
                               FIELD_PREP_WM16(DDRMON_CTRL_HARDWARE_EN, 0),
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

                writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride +
                               DDRMON_CTRL);

                /* enable count, use software mode */
                writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1),
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

                if (dfi->ddrmon_ctrl_single)
                        break;
        }
out:
        mutex_unlock(&dfi->mutex);

        return ret;
}

static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
{
        void __iomem *dfi_regs = dfi->regs;
        int i;

        mutex_lock(&dfi->mutex);

        dfi->usecount--;

        WARN_ON_ONCE(dfi->usecount < 0);

        if (dfi->usecount > 0)
                goto out;

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                        continue;

                writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0),
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);

                if (dfi->ddrmon_ctrl_single)
                        break;
        }

        clk_disable_unprepare(dfi->clk);
out:
        mutex_unlock(&dfi->mutex);
}

static void rockchip_dfi_read_counters(struct rockchip_dfi *dfi, struct dmc_count *res)
{
        u32 i;
        void __iomem *dfi_regs = dfi->regs;

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                        continue;
                res->c[i].read_access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
                res->c[i].write_access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
                res->c[i].access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
                res->c[i].clock_cycles = readl_relaxed(dfi_regs +
                                DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
        }
}

static int rockchip_dfi_event_disable(struct devfreq_event_dev *edev)
{
        struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);

        rockchip_dfi_disable(dfi);

        return 0;
}

static int rockchip_dfi_event_enable(struct devfreq_event_dev *edev)
{
        struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);

        return rockchip_dfi_enable(dfi);
}

static int rockchip_dfi_set_event(struct devfreq_event_dev *edev)
{
        return 0;
}

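/*
 * The devfreq-event interface reports a single load/total pair, so the
 * busiest channel is selected below. load_count scales each counted DFI
 * access by four, presumably so that one access is expressed in the DDR
 * clock cycles it occupies and load_count shares units with total_count.
 * As a worked example: 1000 accesses against 10000 clock cycles in one
 * sample window reads as 4000/10000, i.e. 40% load.
 */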
static int rockchip_dfi_get_event(struct devfreq_event_dev *edev,
                                  struct devfreq_event_data *edata)
{
        struct rockchip_dfi *dfi = devfreq_event_get_drvdata(edev);
        struct dmc_count count;
        struct dmc_count *last = &dfi->last_event_count;
        u32 access = 0, clock_cycles = 0;
        int i;

        rockchip_dfi_read_counters(dfi, &count);

        /* We can only report one channel, so find the busiest one */
        for (i = 0; i < dfi->max_channels; i++) {
                u32 a, c;

                if (!(dfi->channel_mask & BIT(i)))
                        continue;

                a = count.c[i].access - last->c[i].access;
                c = count.c[i].clock_cycles - last->c[i].clock_cycles;

                if (a > access) {
                        access = a;
                        clock_cycles = c;
                }
        }

        edata->load_count = access * 4;
        edata->total_count = clock_cycles;

        dfi->last_event_count = count;

        return 0;
}

static const struct devfreq_event_ops rockchip_dfi_ops = {
        .disable = rockchip_dfi_event_disable,
        .enable = rockchip_dfi_event_enable,
        .get_event = rockchip_dfi_get_event,
        .set_event = rockchip_dfi_set_event,
};

#ifdef CONFIG_PERF_EVENTS

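/*
 * The DDRMON counters are read as 32-bit values, while the totals kept
 * here are 64 bit. Truncating each per-sample delta to u32 keeps it
 * correct across a single counter wrap thanks to modular arithmetic,
 * e.g. (u32)(0x00000005 - 0xfffffffe) == 7. The hrtimer further below
 * folds the deltas into total_count once per second, comfortably more
 * often than a 32-bit counter can wrap at DDR clock rates.
 */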
static void rockchip_ddr_perf_counters_add(struct rockchip_dfi *dfi,
                                           const struct dmc_count *now,
                                           struct dmc_count *res)
{
        const struct dmc_count *last = &dfi->last_perf_count;
        int i;

        for (i = 0; i < dfi->max_channels; i++) {
                res->c[i].read_access = dfi->total_count.c[i].read_access +
                        (u32)(now->c[i].read_access - last->c[i].read_access);
                res->c[i].write_access = dfi->total_count.c[i].write_access +
                        (u32)(now->c[i].write_access - last->c[i].write_access);
                res->c[i].access = dfi->total_count.c[i].access +
                        (u32)(now->c[i].access - last->c[i].access);
                res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
                        (u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
        }
}

static ssize_t ddr_perf_cpumask_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct pmu *pmu = dev_get_drvdata(dev);
        struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
        __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
        &ddr_perf_cpumask_attr.attr,
        NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
        .attrs = ddr_perf_cpumask_attrs,
};

PMU_EVENT_ATTR_STRING(cycles, ddr_pmu_cycles, "event="__stringify(PERF_EVENT_CYCLES))

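/*
 * Each byte-counting event below carries a perf scale of
 * 9.536743164e-07, i.e. 1/2^20, so the raw byte count is presented by
 * the perf tool in units of 2^20 bytes, matching the "MB" unit string.
 */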
#define DFI_PMU_EVENT_ATTR(_name, _var, _str) \
        PMU_EVENT_ATTR_STRING(_name, _var, _str); \
        PMU_EVENT_ATTR_STRING(_name.unit, _var##_unit, "MB"); \
        PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")

DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));

DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));

DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));

DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));

DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));

DFI_PMU_EVENT_ATTR(bytes, ddr_pmu_bytes, "event="__stringify(PERF_EVENT_BYTES));

#define DFI_ATTR_MB(_name) \
        &_name.attr.attr, \
        &_name##_unit.attr.attr, \
        &_name##_scale.attr.attr

static struct attribute *ddr_perf_events_attrs[] = {
        &ddr_pmu_cycles.attr.attr,
        DFI_ATTR_MB(ddr_pmu_read_bytes),
        DFI_ATTR_MB(ddr_pmu_write_bytes),
        DFI_ATTR_MB(ddr_pmu_read_bytes0),
        DFI_ATTR_MB(ddr_pmu_write_bytes0),
        DFI_ATTR_MB(ddr_pmu_read_bytes1),
        DFI_ATTR_MB(ddr_pmu_write_bytes1),
        DFI_ATTR_MB(ddr_pmu_read_bytes2),
        DFI_ATTR_MB(ddr_pmu_write_bytes2),
        DFI_ATTR_MB(ddr_pmu_read_bytes3),
        DFI_ATTR_MB(ddr_pmu_write_bytes3),
        DFI_ATTR_MB(ddr_pmu_bytes),
        NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
        .name = "events",
        .attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *ddr_perf_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
        .name = "format",
        .attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &ddr_perf_events_attr_group,
        &ddr_perf_cpumask_attr_group,
        &ddr_perf_format_attr_group,
        NULL,
};

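/*
 * The PMU half of this driver exposes the DDRMON counters through the
 * perf interface. With the PMU registered as "rockchip_ddr" (see
 * rockchip_ddr_perf_init() below), a sampling session could look like
 * the following sketch; interval and event selection are only examples:
 *
 *      # perf stat -a -I 1000 -e rockchip_ddr/cycles/ \
 *              -e rockchip_ddr/read-bytes/ -e rockchip_ddr/write-bytes/
 *
 * perf applies the unit/scale attributes above, so the byte counts are
 * printed as MB.
 */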
static int rockchip_ddr_perf_event_init(struct perf_event *event)
{
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->cpu < 0) {
                dev_warn(dfi->dev, "Can't provide per-task data!\n");
                return -EINVAL;
        }

        return 0;
}

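/*
 * Readers race with the hrtimer that updates total_count and
 * last_perf_count, so the totals are combined under a seqlock retry
 * loop below: if the timer wrote between read_seqbegin() and
 * read_seqretry(), the addition is simply redone with the new snapshot.
 */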
static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
{
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
        int blen = dfi->burst_len;
        struct dmc_count total, now;
        unsigned int seq;
        u64 count = 0;
        int i;

        rockchip_dfi_read_counters(dfi, &now);

        do {
                seq = read_seqbegin(&dfi->count_seqlock);
                rockchip_ddr_perf_counters_add(dfi, &now, &total);
        } while (read_seqretry(&dfi->count_seqlock, seq));

        switch (event->attr.config) {
        case PERF_EVENT_CYCLES:
                count = total.c[0].clock_cycles * dfi->count_multiplier;
                break;
        case PERF_EVENT_READ_BYTES:
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].read_access * blen * dfi->buswidth[i];
                break;
        case PERF_EVENT_WRITE_BYTES:
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].write_access * blen * dfi->buswidth[i];
                break;
        case PERF_EVENT_READ_BYTES0:
                count = total.c[0].read_access * blen * dfi->buswidth[0];
                break;
        case PERF_EVENT_WRITE_BYTES0:
                count = total.c[0].write_access * blen * dfi->buswidth[0];
                break;
        case PERF_EVENT_READ_BYTES1:
                count = total.c[1].read_access * blen * dfi->buswidth[1];
                break;
        case PERF_EVENT_WRITE_BYTES1:
                count = total.c[1].write_access * blen * dfi->buswidth[1];
                break;
        case PERF_EVENT_READ_BYTES2:
                count = total.c[2].read_access * blen * dfi->buswidth[2];
                break;
        case PERF_EVENT_WRITE_BYTES2:
                count = total.c[2].write_access * blen * dfi->buswidth[2];
                break;
        case PERF_EVENT_READ_BYTES3:
                count = total.c[3].read_access * blen * dfi->buswidth[3];
                break;
        case PERF_EVENT_WRITE_BYTES3:
                count = total.c[3].write_access * blen * dfi->buswidth[3];
                break;
        case PERF_EVENT_BYTES:
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].access * blen * dfi->buswidth[i];
                break;
        }

        return count;
}

static void rockchip_ddr_perf_event_update(struct perf_event *event)
{
        u64 now;
        s64 prev;

        if (event->attr.config >= PERF_ACCESS_TYPE_MAX)
                return;

        now = rockchip_ddr_perf_event_get_count(event);
        prev = local64_xchg(&event->hw.prev_count, now);
        local64_add(now - prev, &event->count);
}

static void rockchip_ddr_perf_event_start(struct perf_event *event, int flags)
{
        u64 now = rockchip_ddr_perf_event_get_count(event);

        local64_set(&event->hw.prev_count, now);
}

static int rockchip_ddr_perf_event_add(struct perf_event *event, int flags)
{
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        dfi->active_events++;

        if (dfi->active_events == 1) {
                dfi->total_count = (struct dmc_count){};
                rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
                hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);
        }

        if (flags & PERF_EF_START)
                rockchip_ddr_perf_event_start(event, flags);

        return 0;
}

static void rockchip_ddr_perf_event_stop(struct perf_event *event, int flags)
{
        rockchip_ddr_perf_event_update(event);
}

static void rockchip_ddr_perf_event_del(struct perf_event *event, int flags)
{
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        rockchip_ddr_perf_event_stop(event, PERF_EF_UPDATE);

        dfi->active_events--;

        if (dfi->active_events == 0)
                hrtimer_cancel(&dfi->timer);
}

static enum hrtimer_restart rockchip_dfi_timer(struct hrtimer *timer)
{
        struct rockchip_dfi *dfi = container_of(timer, struct rockchip_dfi, timer);
        struct dmc_count now, total;

        rockchip_dfi_read_counters(dfi, &now);

        write_seqlock(&dfi->count_seqlock);

        rockchip_ddr_perf_counters_add(dfi, &now, &total);
        dfi->total_count = total;
        dfi->last_perf_count = now;

        write_sequnlock(&dfi->count_seqlock);

        hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));

        return HRTIMER_RESTART;
};

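/*
 * Uncore PMUs like this one are not tied to a task context, so all
 * events are counted on one designated CPU. When that CPU goes offline,
 * the perf context is migrated to any remaining online CPU and
 * dfi->cpu is updated to match, which is also what the cpumask sysfs
 * attribute above reports to userspace.
 */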
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct rockchip_dfi *dfi = hlist_entry_safe(node, struct rockchip_dfi, node);
        int target;

        if (cpu != dfi->cpu)
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&dfi->pmu, cpu, target);
        dfi->cpu = target;

        return 0;
}

static void rockchip_ddr_cpuhp_remove_state(void *data)
{
        struct rockchip_dfi *dfi = data;

        cpuhp_remove_multi_state(dfi->cpuhp_state);

        rockchip_dfi_disable(dfi);
}

static void rockchip_ddr_cpuhp_remove_instance(void *data)
{
        struct rockchip_dfi *dfi = data;

        cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);
}

static void rockchip_ddr_perf_remove(void *data)
{
        struct rockchip_dfi *dfi = data;

        perf_pmu_unregister(&dfi->pmu);
}

static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
        struct pmu *pmu = &dfi->pmu;
        int ret;

        seqlock_init(&dfi->count_seqlock);

        pmu->module = THIS_MODULE;
        pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
        pmu->task_ctx_nr = perf_invalid_context;
        pmu->attr_groups = attr_groups;
        pmu->event_init = rockchip_ddr_perf_event_init;
        pmu->add = rockchip_ddr_perf_event_add;
        pmu->del = rockchip_ddr_perf_event_del;
        pmu->start = rockchip_ddr_perf_event_start;
        pmu->stop = rockchip_ddr_perf_event_stop;
        pmu->read = rockchip_ddr_perf_event_update;

        dfi->cpu = raw_smp_processor_id();

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "rockchip_ddr_perf_pmu",
                                      NULL,
                                      ddr_perf_offline_cpu);

        if (ret < 0) {
                dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);
                return ret;
        }

        dfi->cpuhp_state = ret;

        rockchip_dfi_enable(dfi);

        ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);
        if (ret)
                return ret;

        ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
        if (ret) {
                dev_err(dfi->dev, "Error %d registering hotplug\n", ret);
                return ret;
        }

        ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);
        if (ret)
                return ret;

        hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

        switch (dfi->ddr_type) {
        case ROCKCHIP_DDRTYPE_LPDDR2:
        case ROCKCHIP_DDRTYPE_LPDDR3:
                dfi->burst_len = 8;
                break;
        case ROCKCHIP_DDRTYPE_LPDDR4:
        case ROCKCHIP_DDRTYPE_LPDDR4X:
        case ROCKCHIP_DDRTYPE_LPDDR5:
                dfi->burst_len = 16;
                break;
        }

        if (!dfi->count_multiplier)
                dfi->count_multiplier = 1;

        ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
        if (ret)
                return ret;

        return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
}
#else
static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
{
        return 0;
}
#endif

static int rk3399_dfi_init(struct rockchip_dfi *dfi)
{
        struct regmap *regmap_pmu = dfi->regmap_pmu;
        u32 val;

        dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
        if (IS_ERR(dfi->clk))
                return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),
                                     "Cannot get the clk pclk_ddr_mon\n");

        /* get ddr type */
        regmap_read(regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
        dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);

        dfi->channel_mask = GENMASK(1, 0);
        dfi->max_channels = 2;

        dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
        dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;

        dfi->ddrmon_stride = 0x14;
        dfi->ddrmon_ctrl_single = true;

        return 0;
};

static int rk3568_dfi_init(struct rockchip_dfi *dfi)
{
        struct regmap *regmap_pmu = dfi->regmap_pmu;
        u32 reg2, reg3;

        regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG2, &reg2);
        regmap_read(regmap_pmu, RK3568_PMUGRF_OS_REG3, &reg3);

        /* lower 3 bits of the DDR type */
        dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

        /*
         * For version three and higher the upper two bits of the DDR type are
         * in RK3568_PMUGRF_OS_REG3
         */
        if (FIELD_GET(RK3568_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
                dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

        dfi->channel_mask = BIT(0);
        dfi->max_channels = 1;

        dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;

        dfi->ddrmon_stride = 0x0; /* not relevant, we only have a single channel on this SoC */
        dfi->ddrmon_ctrl_single = true;

        return 0;
};

static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
        struct regmap *regmap_pmu = dfi->regmap_pmu;
        u32 reg2, reg3, reg4, reg6;

        regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
        regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
        regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG4, &reg4);

        /* lower 3 bits of the DDR type */
        dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

        /*
         * For version three and higher the upper two bits of the DDR type are
         * in RK3588_PMUGRF_OS_REG3
         */
        if (FIELD_GET(RK3588_PMUGRF_OS_REG3_SYSREG_VERSION, reg3) >= 0x3)
                dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

        dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
        dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
        dfi->buswidth[2] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
        dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
        dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
                            FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg4) << 2;
        dfi->max_channels = 4;

        dfi->ddrmon_stride = 0x4000;
        dfi->count_multiplier = 2;

        if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
                regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
                dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6);
                dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
                if (dfi->lp5_ckr)
                        dfi->count_multiplier *= 2;
        }

        return 0;
};

static const struct of_device_id rockchip_dfi_id_match[] = {
        { .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
        { .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
        { .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
        { },
};

MODULE_DEVICE_TABLE(of, rockchip_dfi_id_match);

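/*
 * The probe path below expects a DFI node that points at the PMU GRF
 * syscon through the "rockchip,pmu" phandle. An illustrative device
 * tree sketch (addresses and clock names follow the rk3399 case; the
 * SoC's DT binding is authoritative):
 *
 *      dfi: dfi@ff630000 {
 *              compatible = "rockchip,rk3399-dfi";
 *              reg = <0x00 0xff630000 0x00 0x4000>;
 *              rockchip,pmu = <&pmugrf>;
 *              clocks = <&cru PCLK_DDR_MON>;
 *              clock-names = "pclk_ddr_mon";
 *      };
 */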
static int rockchip_dfi_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rockchip_dfi *dfi;
        struct devfreq_event_desc *desc;
        struct device_node *np = pdev->dev.of_node, *node;
        int (*soc_init)(struct rockchip_dfi *dfi);
        int ret;

        soc_init = of_device_get_match_data(&pdev->dev);
        if (!soc_init)
                return -EINVAL;

        dfi = devm_kzalloc(dev, sizeof(*dfi), GFP_KERNEL);
        if (!dfi)
                return -ENOMEM;

        dfi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dfi->regs))
                return PTR_ERR(dfi->regs);

        node = of_parse_phandle(np, "rockchip,pmu", 0);
        if (!node)
                return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");

        dfi->regmap_pmu = syscon_node_to_regmap(node);
        of_node_put(node);
        if (IS_ERR(dfi->regmap_pmu))
                return PTR_ERR(dfi->regmap_pmu);

        dfi->dev = dev;
        mutex_init(&dfi->mutex);

        desc = &dfi->desc;
        desc->ops = &rockchip_dfi_ops;
        desc->driver_data = dfi;
        desc->name = np->name;

        ret = soc_init(dfi);
        if (ret)
                return ret;

        dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
        if (IS_ERR(dfi->edev)) {
                dev_err(&pdev->dev,
                        "failed to add devfreq-event device\n");
                return PTR_ERR(dfi->edev);
        }

        ret = rockchip_ddr_perf_init(dfi);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, dfi);

        return 0;
}

static struct platform_driver rockchip_dfi_driver = {
        .probe = rockchip_dfi_probe,
        .driver = {
                .name = "rockchip-dfi",
                .of_match_table = rockchip_dfi_id_match,
                .suppress_bind_attrs = true,
        },
};
module_platform_driver(rockchip_dfi_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Lin Huang <[email protected]>");
MODULE_DESCRIPTION("Rockchip DFI driver");