/* arch/mips/kernel/mips-cpc.c */
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Copyright (C) 2013 Imagination Technologies
4
* Author: Paul Burton <[email protected]>
5
*/
6
7
#include <linux/bitfield.h>
8
#include <linux/errno.h>
9
#include <linux/percpu.h>
10
#include <linux/of.h>
11
#include <linux/of_address.h>
12
#include <linux/spinlock.h>
13
14
#include <asm/mips-cps.h>
15
16
void __iomem *mips_cpc_base;
17
18
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
19
20
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
21
22
/*
 * Look up the CPC register base address from the device tree.
 * Declared __weak so platform code may override it with a fixed address.
 * Returns 0 when no usable "mti,mips-cpc" node is present.
 */
phys_addr_t __weak mips_cpc_default_phys_base(void)
{
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
	if (!node)
		return 0;

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);

	return ret ? 0 : r.start;
}
38
39
/**
40
* mips_cpc_phys_base - retrieve the physical base address of the CPC
41
*
42
* This function returns the physical base address of the Cluster Power
43
* Controller memory mapped registers, or 0 if no Cluster Power Controller
44
* is present.
45
*/
46
static phys_addr_t mips_cpc_phys_base(void)
47
{
48
unsigned long cpc_base;
49
50
if (!mips_cm_present())
51
return 0;
52
53
if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
54
return 0;
55
56
/* If the CPC is already enabled, leave it so */
57
cpc_base = read_gcr_cpc_base();
58
if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
59
return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
60
61
/* Otherwise, use the default address */
62
cpc_base = mips_cpc_default_phys_base();
63
if (!cpc_base)
64
return cpc_base;
65
66
/* Enable the CPC, mapped at the default address */
67
write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
68
return cpc_base;
69
}
70
71
int mips_cpc_probe(void)
72
{
73
phys_addr_t addr;
74
unsigned int cpu;
75
76
for_each_possible_cpu(cpu)
77
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
78
79
addr = mips_cpc_phys_base();
80
if (!addr)
81
return -ENODEV;
82
83
mips_cpc_base = ioremap(addr, 0x8000);
84
if (!mips_cpc_base)
85
return -ENXIO;
86
87
return 0;
88
}
89
90
void mips_cpc_lock_other(unsigned int core)
91
{
92
unsigned int curr_core;
93
94
if (mips_cm_revision() >= CM_REV_CM3)
95
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
96
return;
97
98
preempt_disable();
99
curr_core = cpu_core(&current_cpu_data);
100
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
101
per_cpu(cpc_core_lock_flags, curr_core));
102
write_cpc_cl_other(FIELD_PREP(CPC_Cx_OTHER_CORENUM, core));
103
104
/*
105
* Ensure the core-other region reflects the appropriate core &
106
* VP before any accesses to it occur.
107
*/
108
mb();
109
}
110
111
void mips_cpc_unlock_other(void)
112
{
113
unsigned int curr_core;
114
115
if (mips_cm_revision() >= CM_REV_CM3)
116
/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
117
return;
118
119
curr_core = cpu_core(&current_cpu_data);
120
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
121
per_cpu(cpc_core_lock_flags, curr_core));
122
preempt_enable();
123
}
124
125