Path: drivers/clocksource/timer-econet-en751221.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Timer present on EcoNet EN75xx MIPS based SoCs.
 *
 * Copyright (C) 2025 by Caleb James DeLisle <[email protected]>
 */

#include <linux/io.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuhotplug.h>
#include <linux/clk.h>

#define ECONET_BITS			32
#define ECONET_MIN_DELTA		0x00001000
#define ECONET_MAX_DELTA		GENMASK(ECONET_BITS - 2, 0)
/* 34Kc hardware has 1 block and 1004Kc has 2. */
#define ECONET_NUM_BLOCKS		DIV_ROUND_UP(NR_CPUS, 2)

static struct {
	void __iomem *membase[ECONET_NUM_BLOCKS];
	u32 freq_hz;
} econet_timer __ro_after_init;

static DEFINE_PER_CPU(struct clock_event_device, econet_timer_pcpu);

/* Each memory block has 2 timers, the order of registers is:
 * CTL, CMR0, CNT0, CMR1, CNT1
 */
static inline void __iomem *reg_ctl(u32 timer_n)
{
	return econet_timer.membase[timer_n >> 1];
}

static inline void __iomem *reg_compare(u32 timer_n)
{
	return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x04;
}

static inline void __iomem *reg_count(u32 timer_n)
{
	return econet_timer.membase[timer_n >> 1] + (timer_n & 1) * 0x08 + 0x08;
}

static inline u32 ctl_bit_enabled(u32 timer_n)
{
	return 1U << (timer_n & 1);
}

static inline u32 ctl_bit_pending(u32 timer_n)
{
	return 1U << ((timer_n & 1) + 16);
}

static bool cevt_is_pending(int cpu_id)
{
	return ioread32(reg_ctl(cpu_id)) & ctl_bit_pending(cpu_id);
}

static irqreturn_t cevt_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *dev = this_cpu_ptr(&econet_timer_pcpu);
	int cpu = cpumask_first(dev->cpumask);

	/* Each VPE has its own events,
	 * so this will only happen on spurious interrupt.
	 */
	if (!cevt_is_pending(cpu))
		return IRQ_NONE;

	iowrite32(ioread32(reg_count(cpu)), reg_compare(cpu));
	dev->event_handler(dev);
	return IRQ_HANDLED;
}

static int cevt_set_next_event(ulong delta, struct clock_event_device *dev)
{
	u32 next;
	int cpu;

	cpu = cpumask_first(dev->cpumask);
	next = ioread32(reg_count(cpu)) + delta;
	iowrite32(next, reg_compare(cpu));

	if ((s32)(next - ioread32(reg_count(cpu))) < ECONET_MIN_DELTA / 2)
		return -ETIME;

	return 0;
}

static int cevt_init_cpu(uint cpu)
{
	struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, cpu);
	u32 reg;

	pr_debug("%s: Setting up clockevent for CPU %d\n", cd->name, cpu);

	reg = ioread32(reg_ctl(cpu)) | ctl_bit_enabled(cpu);
	iowrite32(reg, reg_ctl(cpu));

	enable_percpu_irq(cd->irq, IRQ_TYPE_NONE);

	/* Do this last because it synchronously configures the timer */
	clockevents_config_and_register(cd, econet_timer.freq_hz,
					ECONET_MIN_DELTA, ECONET_MAX_DELTA);

	return 0;
}

static u64 notrace sched_clock_read(void)
{
	/* Always read from clock zero no matter the CPU */
	return (u64)ioread32(reg_count(0));
}

/* Init */

static void __init cevt_dev_init(uint cpu)
{
	iowrite32(0, reg_count(cpu));
	iowrite32(U32_MAX, reg_compare(cpu));
}

static int __init cevt_init(struct device_node *np)
{
	int i, irq, ret;

	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%pOFn: irq_of_parse_and_map failed\n", np);
		return -EINVAL;
	}

	ret = request_percpu_irq(irq, cevt_interrupt, np->name, &econet_timer_pcpu);

	if (ret < 0) {
		pr_err("%pOFn: IRQ %d setup failed (%d)\n", np, irq, ret);
		goto err_unmap_irq;
	}

	for_each_possible_cpu(i) {
		struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);

		cd->rating = 310;
		cd->features = CLOCK_EVT_FEAT_ONESHOT |
			       CLOCK_EVT_FEAT_C3STOP |
			       CLOCK_EVT_FEAT_PERCPU;
		cd->set_next_event = cevt_set_next_event;
		cd->irq = irq;
		cd->cpumask = cpumask_of(i);
		cd->name = np->name;

		cevt_dev_init(i);
	}

	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
			  "clockevents/econet/timer:starting",
			  cevt_init_cpu, NULL);
	return 0;

err_unmap_irq:
	irq_dispose_mapping(irq);
	return ret;
}

static int __init timer_init(struct device_node *np)
{
	int num_blocks = DIV_ROUND_UP(num_possible_cpus(), 2);
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("%pOFn: Failed to get CPU clock from DT %ld\n", np, PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	econet_timer.freq_hz = clk_get_rate(clk);

	for (int i = 0; i < num_blocks; i++) {
		econet_timer.membase[i] = of_iomap(np, i);
		if (!econet_timer.membase[i]) {
			pr_err("%pOFn: failed to map register [%d]\n", np, i);
			return -ENXIO;
		}
	}

	/* For clocksource purposes always read clock zero, whatever the CPU */
	ret = clocksource_mmio_init(reg_count(0), np->name,
				    econet_timer.freq_hz, 301, ECONET_BITS,
				    clocksource_mmio_readl_up);
	if (ret) {
		pr_err("%pOFn: clocksource_mmio_init failed: %d\n", np, ret);
		return ret;
	}

	ret = cevt_init(np);
	if (ret < 0)
		return ret;

	sched_clock_register(sched_clock_read, ECONET_BITS,
			     econet_timer.freq_hz);

	pr_info("%pOFn: using %u.%03u MHz high precision timer\n", np,
		econet_timer.freq_hz / 1000000,
		(econet_timer.freq_hz / 1000) % 1000);

	return 0;
}

TIMER_OF_DECLARE(econet_timer_hpt, "econet,en751221-timer", timer_init);
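
/*
 * Illustration only: a minimal devicetree node sketch, inferred purely from
 * what this driver reads at probe time (one register block per pair of CPUs
 * via of_iomap(), one per-CPU interrupt via irq_of_parse_and_map(), and one
 * clock via of_clk_get()) and from the "econet,en751221-timer" compatible
 * registered above. The unit address, register size, interrupt number and
 * clock phandle are hypothetical placeholders, not values taken from the
 * binding document or any real EN751221 board.
 *
 *	timer@1fbf0400 {
 *		compatible = "econet,en751221-timer";
 *		reg = <0x1fbf0400 0x100>;
 *		interrupts = <30>;
 *		clocks = <&hpt_clk>;
 *	};
 */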