// SPDX-License-Identifier: GPL-2.0
/*
 * Timer present on EcoNet EN75xx MIPS based SoCs.
 *
 * Copyright (C) 2025 by Caleb James DeLisle <[email protected]>
 */

#include <linux/io.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuhotplug.h>
#include <linux/clk.h>

#define ECONET_BITS		32
#define ECONET_MIN_DELTA	0x00001000
#define ECONET_MAX_DELTA	GENMASK(ECONET_BITS - 2, 0)
/* 34Kc hardware has 1 block and 1004Kc has 2. */
#define ECONET_NUM_BLOCKS	DIV_ROUND_UP(NR_CPUS, 2)

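/*
 * Note on the delta limits (inferred from cevt_set_next_event() below,
 * not from vendor documentation): ECONET_MAX_DELTA is 2^31 - 1 rather
 * than the full 32-bit range because the programming check computes a
 * signed 32-bit difference, so a delta must stay in the positive half
 * of the s32 space to be distinguishable from "already expired".
 */
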
static struct {
	void __iomem *membase[ECONET_NUM_BLOCKS];
	u32 freq_hz;
} econet_timer __ro_after_init;

static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);

/* Each memory block has 2 timers, the order of registers is:
 *   CTL, CMR0, CNT0, CMR1, CNT1
 */
static inline void __iomem *reg_ctl(u32 timer_n)
{
	return econet_timer.membase[timer_n >> 1];
}

static inline void __iomem *reg_compare(u32 timer_n)
{
	return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
}

static inline void __iomem *reg_count(u32 timer_n)
{
	return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
}

static inline u32 ctl_bit_enabled(u32 timer_n)
{
	return 1U << (timer_n & 1);
}

static inline u32 ctl_bit_pending(u32 timer_n)
{
	return 1U << ((timer_n & 1) + 16);
}

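/*
 * Worked example of the mapping above (derived from the accessors, not
 * from a datasheet): timer 3 lives in block 1 (3 >> 1) and is the odd
 * timer of that block (3 & 1), so its compare register CMR1 sits at
 * block base + 0x0c, its count register CNT1 at + 0x10, and its enable
 * and pending bits are CTL bits 1 and 17 respectively.
 */
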
static bool cevt_is_pending(int cpu_id)
{
	return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
}

static irqreturn_t cevt_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
	int cpu = cpumask_first(dev->cpumask);

	/* Each VPE has its own events, so this will only happen on a
	 * spurious interrupt.
	 */
	if (!cevt_is_pending(cpu))
		return IRQ_NONE;

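	/*
	 * Writing the current count back into the compare register both
	 * pushes the next match far into the future and, judging by the
	 * pending-bit protocol above, acks the interrupt.
	 */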
	iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
	dev->event_handler(dev);
	return IRQ_HANDLED;
}

static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
{
	u32 next;
	int cpu;

	cpu = cpumask_first(dev->cpumask);
	next = ioread32(reg_count(cpu)) + delta;
	iowrite32(next, reg_compare(cpu));

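	/*
	 * If the counter has already run past (or nearly caught up with)
	 * the compare value we just programmed, the match may have been
	 * missed; report -ETIME so the clockevents core can recover.
	 */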
	if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
		return -ETIME;

	return 0;
}

static int cevt_init_cpu(uint cpu)
{
	struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
	u32 reg;

	pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);

	reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
	iowrite32(reg, reg_ctl(cpu));

	enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);

	/* Do this last because it synchronously configures the timer */
	clockevents_config_and_register(cd, econet_timer.freq_hz,
					ECONET_MIN_DELTA, ECONET_MAX_DELTA);

	return 0;
}

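/*
 * sched_clock must look monotonic system-wide, so unlike the per-CPU
 * clockevents above, every CPU samples the same counter here.
 */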
static u64 notrace sched_clock_read(void)
{
	/* Always read from clock zero no matter the CPU */
	return (u64)ioread32(reg_count(0));
}

/* Init */

static void __init cevt_dev_init(uint cpu)
{
	iowrite32(0, reg_count(cpu));
	iowrite32(U32_MAX, reg_compare(cpu));
}

static int __init cevt_init(struct device_node *np)
{
	int i, irq, ret;

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
		return -EINVAL;
	}

	ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);
	if (ret < 0) {
		pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
		goto err_unmap_irq;
	}

	for_each_possible_cpu(i) {
		struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);

		cd->rating = 310;
		cd->features = CLOCK_EVT_FEAT_ONESHOT |
			       CLOCK_EVT_FEAT_C3STOP |
			       CLOCK_EVT_FEAT_PERCPU;
		cd->set_next_event = cevt_set_next_event;
		cd->irq = irq;
		cd->cpumask = cpumask_of(i);
		cd->name = np->name;

		cevt_dev_init(i);
	}

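	/*
	 * Registers cevt_init_cpu() to run on each CPU as it comes online
	 * (and on CPUs already online at registration time); its return
	 * value is not treated as fatal here.
	 */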
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
			  "clockevents/econet/timer:starting",
			  cevt_init_cpu, NULL);
	return 0;

err_unmap_irq:
	irq_dispose_mapping(irq);
	return ret;
}

static int __init timer_init(struct device_node *np)
{
	int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("%pOFn: Failed to get CPU clock from DT: %ld\n", np, PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	econet_timer.freq_hz = clk_get_rate(clk);

	for (int i = 0; i < num_blocks; i++) {
		econet_timer.membase[i] = of_iomap(np, i);
		if (!econet_timer.membase[i]) {
			pr_err("%pOFn: failed to map register [%d]\n", np, i);
			return -ENXIO;
		}
	}

	/* For clocksource purposes always read clock zero, whatever the CPU */
	ret = clocksource_mmio_init(reg_count(0), np->name,
				    econet_timer.freq_hz, 301, ECONET_BITS,
				    clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%pOFn: clocksource_mmio_init failed: %d\n", np, ret);
		return ret;
	}

	ret = cevt_init(np);
	if (ret < 0)
		return ret;

	sched_clock_register(sched_clock_read, ECONET_BITS,
			     econet_timer.freq_hz);

	pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
		econet_timer.freq_hz / 1000000,
		(econet_timer.freq_hz / 1000) % 1000);

	return 0;
}

TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
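
/*
 * For reference, a device tree node matching this driver might look
 * roughly like the sketch below. Every value is an illustrative
 * placeholder (not taken from a real board or from the binding
 * document); on a two-block (1004Kc) part, "reg" would carry one
 * range per block, mapped in order by timer_init() above:
 *
 *	timer@1fbf0400 {
 *		compatible = "econet,en751221-timer";
 *		reg = <0x1fbf0400 0x100>;
 *		interrupt-parent = <&intc>;
 *		interrupts = <30>;
 *		clocks = <&hpt_clk>;
 *	};
 */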