Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/sysdev/xics/icp-opal.c
26493 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Copyright 2016 IBM Corporation.
4
*/
5
#include <linux/types.h>
6
#include <linux/kernel.h>
7
#include <linux/irq.h>
8
#include <linux/smp.h>
9
#include <linux/interrupt.h>
10
#include <linux/irqdomain.h>
11
#include <linux/cpu.h>
12
#include <linux/of.h>
13
14
#include <asm/smp.h>
15
#include <asm/irq.h>
16
#include <asm/errno.h>
17
#include <asm/xics.h>
18
#include <asm/io.h>
19
#include <asm/opal.h>
20
#include <asm/kvm_ppc.h>
21
22
/*
 * Quiesce this CPU's interrupt presentation before it is taken down:
 * write the "no IPI" priority (0xff) to its MFRR so no IPI is left
 * pending in the hardware.
 */
static void icp_opal_teardown_cpu(void)
{
	/* 0xff = least favoured priority, i.e. clear any pending IPI */
	opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
}
29
30
static void icp_opal_flush_ipi(void)
31
{
32
/*
33
* We take the ipi irq but and never return so we need to EOI the IPI,
34
* but want to leave our priority 0.
35
*
36
* Should we check all the other interrupts too?
37
* Should we be flagging idle loop instead?
38
* Or creating some task to be scheduled?
39
*/
40
if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
41
force_external_irq_replay();
42
}
43
44
static unsigned int icp_opal_get_xirr(void)
45
{
46
unsigned int kvm_xirr;
47
__be32 hw_xirr;
48
int64_t rc;
49
50
/* Handle an interrupt latched by KVM first */
51
kvm_xirr = kvmppc_get_xics_latch();
52
if (kvm_xirr)
53
return kvm_xirr;
54
55
/* Then ask OPAL */
56
rc = opal_int_get_xirr(&hw_xirr, false);
57
if (rc < 0)
58
return 0;
59
return be32_to_cpu(hw_xirr);
60
}
61
62
static unsigned int icp_opal_get_irq(void)
63
{
64
unsigned int xirr;
65
unsigned int vec;
66
unsigned int irq;
67
68
xirr = icp_opal_get_xirr();
69
vec = xirr & 0x00ffffff;
70
if (vec == XICS_IRQ_SPURIOUS)
71
return 0;
72
73
irq = irq_find_mapping(xics_host, vec);
74
if (likely(irq)) {
75
xics_push_cppr(vec);
76
return irq;
77
}
78
79
/* We don't have a linux mapping, so have rtas mask it. */
80
xics_mask_unknown_vec(vec);
81
82
/* We might learn about it later, so EOI it */
83
if (opal_int_eoi(xirr) > 0)
84
force_external_irq_replay();
85
86
return 0;
87
}
88
89
static void icp_opal_set_cpu_priority(unsigned char cppr)
90
{
91
/*
92
* Here be dragons. The caller has asked to allow only IPI's and not
93
* external interrupts. But OPAL XIVE doesn't support that. So instead
94
* of allowing no interrupts allow all. That's still not right, but
95
* currently the only caller who does this is xics_migrate_irqs_away()
96
* and it works in that case.
97
*/
98
if (cppr >= DEFAULT_PRIORITY)
99
cppr = LOWEST_PRIORITY;
100
101
xics_set_base_cppr(cppr);
102
opal_int_set_cppr(cppr);
103
iosync();
104
}
105
106
static void icp_opal_eoi(struct irq_data *d)
107
{
108
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
109
int64_t rc;
110
111
iosync();
112
rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);
113
114
/*
115
* EOI tells us whether there are more interrupts to fetch.
116
*
117
* Some HW implementations might not be able to send us another
118
* external interrupt in that case, so we force a replay.
119
*/
120
if (rc > 0)
121
force_external_irq_replay();
122
}
123
124
#ifdef CONFIG_SMP
125
126
static void icp_opal_cause_ipi(int cpu)
127
{
128
int hw_cpu = get_hard_smp_processor_id(cpu);
129
130
kvmppc_set_host_ipi(cpu);
131
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
132
}
133
134
/* IPI interrupt handler: acknowledge the IPI and demultiplex it. */
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	int hw_cpu = get_hard_smp_processor_id(this_cpu);

	/* Clear KVM's host-IPI flag and reset our MFRR to "no IPI" */
	kvmppc_clear_host_ipi(this_cpu);
	opal_int_set_mfrr(hw_cpu, 0xff);

	return smp_ipi_demux();
}
143
144
/*
145
* Called when an interrupt is received on an off-line CPU to
146
* clear the interrupt, so that the CPU can go back to nap mode.
147
*/
148
void icp_opal_flush_interrupt(void)
149
{
150
unsigned int xirr;
151
unsigned int vec;
152
153
do {
154
xirr = icp_opal_get_xirr();
155
vec = xirr & 0x00ffffff;
156
if (vec == XICS_IRQ_SPURIOUS)
157
break;
158
if (vec == XICS_IPI) {
159
/* Clear pending IPI */
160
int cpu = smp_processor_id();
161
kvmppc_clear_host_ipi(cpu);
162
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
163
} else {
164
pr_err("XICS: hw interrupt 0x%x to offline cpu, "
165
"disabling\n", vec);
166
xics_mask_unknown_vec(vec);
167
}
168
169
/* EOI the interrupt */
170
} while (opal_int_eoi(xirr) > 0);
171
}
172
173
#endif /* CONFIG_SMP */
174
175
/* ICP backend operations implemented on top of OPAL calls. */
static const struct icp_ops icp_opal_ops = {
	.get_irq = icp_opal_get_irq,
	.eoi = icp_opal_eoi,
	.set_priority = icp_opal_set_cpu_priority,
	.teardown_cpu = icp_opal_teardown_cpu,
	.flush_ipi = icp_opal_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_opal_ipi_action,
	.cause_ipi = icp_opal_cause_ipi,
#endif
};
186
187
/*
 * Probe for the OPAL interrupt controller node and, if present,
 * install the OPAL-based ICP backend.
 *
 * Returns 0 on success, -ENODEV if the "ibm,opal-intc" device-tree
 * node is absent.
 */
int __init icp_opal_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
	if (!np)
		return -ENODEV;

	icp_ops = &icp_opal_ops;

	/* pr_info rather than bare printk: carries an explicit log level */
	pr_info("XICS: Using OPAL ICP fallbacks\n");

	/* Drop the reference taken by of_find_compatible_node() */
	of_node_put(np);
	return 0;
}
202
203
204