GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/pci/msi-octeon.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2009, 2010 Cavium Networks
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-sli-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/pci-octeon.h>

/*
 * Each bit in msi_free_irq_bitmask represents an MSI interrupt that is
 * in use.
 */
static u64 msi_free_irq_bitmask[4];

/*
 * Each bit in msi_multiple_irq_bitmask indicates that the device using
 * this bit in msi_free_irq_bitmask is also using the next bit. This
 * is used so we can disable all of the MSI interrupts when a device
 * uses multiple.
 */
static u64 msi_multiple_irq_bitmask[4];

/*
 * This lock controls updates to msi_free_irq_bitmask and
 * msi_multiple_irq_bitmask.
 */
static DEFINE_SPINLOCK(msi_free_irq_bitmask_lock);

/*
 * Number of MSI IRQs used. This variable is set up at
 * module init time.
 */
static int msi_irq_size;

/**
 * arch_setup_msi_irq() - set up MSI IRQs for a device
 * @dev: Device requesting MSI interrupts
 * @desc: MSI descriptor
 *
 * Called when a driver requests MSI interrupts instead of the
 * legacy INT A-D. This routine will allocate multiple interrupts
 * for MSI devices that support them. A device can override this by
 * programming the MSI control bits [6:4] before calling
 * pci_enable_msi().
 *
 * Return: %0 on success, non-%0 on error.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	u16 control;
	int configured_private_bits;
	int request_private_bits;
	int irq = 0;
	int irq_step;
	u64 search_mask;
	int index;

	if (desc->pci.msi_attrib.is_msix)
		return -EINVAL;

	/*
	 * Read the MSI config to figure out how many IRQs this device
	 * wants. Most devices only want 1, which leaves both
	 * configured_private_bits and request_private_bits at 0.
	 */
	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
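
	/*
	 * In the MSI Message Control word just read, PCI_MSI_FLAGS_QMASK
	 * (bits 3:1, Multiple Message Capable) and PCI_MSI_FLAGS_QSIZE
	 * (bits 6:4, Multiple Message Enable) each encode log2 of a
	 * vector count; e.g. a field value of 2 means four vectors.
	 */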

	/*
	 * If the number of private bits has been configured then use
	 * that value instead of the requested number. This gives the
	 * driver the chance to override the number of interrupts
	 * before calling pci_enable_msi().
	 */
	configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4;
	if (configured_private_bits == 0) {
		/* Nothing is configured, so use the hardware requested size */
		request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
	} else {
		/*
		 * Use the number of configured bits, assuming the
		 * driver wanted to override the hardware request
		 * value.
		 */
		request_private_bits = configured_private_bits;
	}

	/*
	 * The PCI 2.3 spec mandates that there are at most 32
	 * interrupts. If this device asks for more, only give it one.
	 */
	if (request_private_bits > 5)
		request_private_bits = 0;

try_only_one:
	/*
	 * The IRQs have to be aligned on a power of two based on the
	 * number being requested.
	 */
	irq_step = 1 << request_private_bits;

	/* Mask with one bit for each IRQ */
	search_mask = (1ull << irq_step) - 1;
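
	/*
	 * For example, a request for four interrupts
	 * (request_private_bits == 2) gives irq_step == 4 and
	 * search_mask == 0xf, so the scan below looks for a free
	 * group of four MSI numbers aligned to a multiple of four.
	 */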

	/*
	 * We're going to search msi_free_irq_bitmask for zero
	 * bits. This represents an MSI interrupt number that isn't in
	 * use.
	 */
	spin_lock(&msi_free_irq_bitmask_lock);
	for (index = 0; index < msi_irq_size/64; index++) {
		for (irq = 0; irq < 64; irq += irq_step) {
			if ((msi_free_irq_bitmask[index] & (search_mask << irq)) == 0) {
				msi_free_irq_bitmask[index] |= search_mask << irq;
				msi_multiple_irq_bitmask[index] |= (search_mask >> 1) << irq;
				goto msi_irq_allocated;
			}
		}
	}
msi_irq_allocated:
	spin_unlock(&msi_free_irq_bitmask_lock);
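
	/*
	 * On success the goto above leaves 'index' at the bitmask word
	 * and 'irq' at the bit offset of the allocated block; if the
	 * scan falls through, 'irq' ends up at 64 and the retry/panic
	 * path below runs.
	 */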

	/* Make sure the search for available interrupts didn't fail */
	if (irq >= 64) {
		if (request_private_bits) {
			pr_err("arch_setup_msi_irq: Unable to find %d free interrupts, trying just one\n",
			       1 << request_private_bits);
			request_private_bits = 0;
			goto try_only_one;
		} else
			panic("arch_setup_msi_irq: Unable to find a free MSI interrupt");
	}

	/* MSI interrupts start at logical IRQ OCTEON_IRQ_MSI_BIT0 */
	irq += index*64;
	irq += OCTEON_IRQ_MSI_BIT0;

	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_SMALL:
		/* When not using big bar, Bar 0 is based at 128MB */
		msg.address_lo =
			((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
		msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		/* When using big bar, Bar 0 is based at 0 */
		msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
		msg.address_hi = (0 + CVMX_PCI_MSI_RCV) >> 32;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE:
		/* When using PCIe, Bar 0 is based at 0 */
		/* FIXME CVMX_NPEI_MSI_RCV* other than 0? */
		msg.address_lo = (0 + CVMX_NPEI_PCIE_MSI_RCV) & 0xffffffff;
		msg.address_hi = (0 + CVMX_NPEI_PCIE_MSI_RCV) >> 32;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		/* When using PCIe2, Bar 0 is based at 0 */
		msg.address_lo = (0 + CVMX_SLI_PCIE_MSI_RCV) & 0xffffffff;
		msg.address_hi = (0 + CVMX_SLI_PCIE_MSI_RCV) >> 32;
		break;
	default:
		panic("arch_setup_msi_irq: Invalid octeon_dma_bar_type");
	}
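	/*
	 * The MSI data programmed below is the MSI number relative to
	 * OCTEON_IRQ_MSI_BIT0; the receive registers and handlers
	 * further down use it to recover this Linux IRQ.
	 */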
	msg.data = irq - OCTEON_IRQ_MSI_BIT0;

	/* Update the number of IRQs the device has available to it */
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= request_private_bits << 4;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);

	irq_set_msi_desc(irq, desc);
	pci_write_msi_msg(irq, &msg);
	return 0;
}

/**
 * arch_teardown_msi_irq() - release MSI IRQs for a device
 * @irq: The device's first irq number. There may be multiple in sequence.
 *
 * Called when a device no longer needs its MSI interrupts. All
 * MSI interrupts for the device are freed.
 */
void arch_teardown_msi_irq(unsigned int irq)
{
	int number_irqs;
	u64 bitmask;
	int index = 0;
	int irq0;

	if ((irq < OCTEON_IRQ_MSI_BIT0)
	    || (irq >= msi_irq_size + OCTEON_IRQ_MSI_BIT0))
		panic("arch_teardown_msi_irq: Attempted to teardown illegal MSI interrupt (%d)",
		      irq);

	irq -= OCTEON_IRQ_MSI_BIT0;
	index = irq / 64;
	irq0 = irq % 64;

	/*
	 * Count the number of IRQs we need to free by looking at the
	 * msi_multiple_irq_bitmask. Each bit set means that the next
	 * IRQ is also owned by this device.
	 */
	number_irqs = 0;
	while ((irq0 + number_irqs < 64) &&
	       (msi_multiple_irq_bitmask[index]
		& (1ull << (irq0 + number_irqs))))
		number_irqs++;
	number_irqs++;
	/* Mask with one bit for each IRQ */
	bitmask = (1ull << number_irqs) - 1;
	/* Shift the mask to the correct bit location */
	bitmask <<= irq0;
	if ((msi_free_irq_bitmask[index] & bitmask) != bitmask)
		panic("arch_teardown_msi_irq: Attempted to teardown MSI interrupt (%d) not in use",
		      irq);

	/* Checks are done, update the in use bitmask */
	spin_lock(&msi_free_irq_bitmask_lock);
	msi_free_irq_bitmask[index] &= ~bitmask;
	msi_multiple_irq_bitmask[index] &= ~bitmask;
	spin_unlock(&msi_free_irq_bitmask_lock);
}

static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);

static u64 msi_rcv_reg[4];
static u64 mis_ena_reg[4];
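/*
 * The arrays above hold, per group of 64 MSI numbers, the CSR addresses
 * of the MSI receive (pending) register and of its enable register;
 * they are filled in by octeon_msi_initialize() below.
 */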

static void octeon_irq_msi_enable_pcie(struct irq_data *data)
{
	u64 en;
	unsigned long flags;
	int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
	int irq_index = msi_number >> 6;
	int irq_bit = msi_number & 0x3f;

	raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
	en = cvmx_read_csr(mis_ena_reg[irq_index]);
	en |= 1ull << irq_bit;
	cvmx_write_csr(mis_ena_reg[irq_index], en);
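	/* Read back so the enable write reaches the hardware before the lock is dropped. */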
	cvmx_read_csr(mis_ena_reg[irq_index]);
	raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}

static void octeon_irq_msi_disable_pcie(struct irq_data *data)
{
	u64 en;
	unsigned long flags;
	int msi_number = data->irq - OCTEON_IRQ_MSI_BIT0;
	int irq_index = msi_number >> 6;
	int irq_bit = msi_number & 0x3f;

	raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
	en = cvmx_read_csr(mis_ena_reg[irq_index]);
	en &= ~(1ull << irq_bit);
	cvmx_write_csr(mis_ena_reg[irq_index], en);
	cvmx_read_csr(mis_ena_reg[irq_index]);
	raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}

static struct irq_chip octeon_irq_chip_msi_pcie = {
	.name = "MSI",
	.irq_enable = octeon_irq_msi_enable_pcie,
	.irq_disable = octeon_irq_msi_disable_pcie,
};

static void octeon_irq_msi_enable_pci(struct irq_data *data)
{
	/*
	 * Octeon PCI doesn't have the ability to mask/unmask MSI
	 * interrupts individually. Instead of masking/unmasking them
	 * in groups of 16, we simply assume MSI devices are well
	 * behaved. MSI interrupts are always enabled and the ACK is
	 * assumed to be enough.
	 */
}

static void octeon_irq_msi_disable_pci(struct irq_data *data)
{
	/* See comment in enable */
}

static struct irq_chip octeon_irq_chip_msi_pci = {
	.name = "MSI",
	.irq_enable = octeon_irq_msi_enable_pci,
	.irq_disable = octeon_irq_msi_disable_pci,
};

/*
 * Called by the interrupt handling code when an MSI interrupt
 * occurs.
 */
static irqreturn_t __octeon_msi_do_interrupt(int index, u64 msi_bits)
{
	int irq;
	int bit;
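
	/*
	 * fls64() returns the 1-based position of the highest set bit
	 * (0 if msi_bits is zero), so one pending MSI is acknowledged
	 * and dispatched per call.
	 */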
	bit = fls64(msi_bits);
	if (bit) {
		bit--;
		/* Acknowledge it first. */
		cvmx_write_csr(msi_rcv_reg[index], 1ull << bit);

		irq = bit + OCTEON_IRQ_MSI_BIT0 + 64 * index;
		do_IRQ(irq);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

#define OCTEON_MSI_INT_HANDLER_X(x) \
static irqreturn_t octeon_msi_interrupt##x(int cpl, void *dev_id) \
{ \
	u64 msi_bits = cvmx_read_csr(msi_rcv_reg[(x)]); \
	return __octeon_msi_do_interrupt((x), msi_bits); \
}

/*
 * Create the octeon_msi_interrupt{0-3} function bodies.
 */
OCTEON_MSI_INT_HANDLER_X(0);
OCTEON_MSI_INT_HANDLER_X(1);
OCTEON_MSI_INT_HANDLER_X(2);
OCTEON_MSI_INT_HANDLER_X(3);

/*
 * Initializes the MSI interrupt handling code
 */
int __init octeon_msi_initialize(void)
{
	int irq;
	struct irq_chip *msi;

	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
		return 0;
	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
		msi_rcv_reg[3] = CVMX_PEXP_NPEI_MSI_RCV3;
		mis_ena_reg[0] = CVMX_PEXP_NPEI_MSI_ENB0;
		mis_ena_reg[1] = CVMX_PEXP_NPEI_MSI_ENB1;
		mis_ena_reg[2] = CVMX_PEXP_NPEI_MSI_ENB2;
		mis_ena_reg[3] = CVMX_PEXP_NPEI_MSI_ENB3;
		msi = &octeon_irq_chip_msi_pcie;
	} else {
		msi_rcv_reg[0] = CVMX_NPI_NPI_MSI_RCV;
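		/*
		 * Only one MSI receive register exists here; point the
		 * remaining slots at an address chosen to raise an
		 * address error exception (ADE) if they are ever used.
		 */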
#define INVALID_GENERATE_ADE 0x8700000000000000ULL
		msi_rcv_reg[1] = INVALID_GENERATE_ADE;
		msi_rcv_reg[2] = INVALID_GENERATE_ADE;
		msi_rcv_reg[3] = INVALID_GENERATE_ADE;
		mis_ena_reg[0] = INVALID_GENERATE_ADE;
		mis_ena_reg[1] = INVALID_GENERATE_ADE;
		mis_ena_reg[2] = INVALID_GENERATE_ADE;
		mis_ena_reg[3] = INVALID_GENERATE_ADE;
		msi = &octeon_irq_chip_msi_pci;
	}

	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_LAST; irq++)
		irq_set_chip_and_handler(irq, msi, handle_simple_irq);
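
	/*
	 * Each OCTEON_IRQ_PCI_MSIn line below fans out one group of MSI
	 * numbers: 64 per line on PCIe hosts, 16 per line on PCI hosts
	 * (where every line is serviced from the single receive
	 * register by octeon_msi_interrupt0).
	 */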

	if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
				0, "MSI[0:63]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt1,
				0, "MSI[64:127]", octeon_msi_interrupt1))
			panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt2,
				0, "MSI[128:191]", octeon_msi_interrupt2))
			panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt3,
				0, "MSI[192:255]", octeon_msi_interrupt3))
			panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");

		msi_irq_size = 256;
	} else if (octeon_is_pci_host()) {
		if (request_irq(OCTEON_IRQ_PCI_MSI0, octeon_msi_interrupt0,
				0, "MSI[0:15]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI0) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI1, octeon_msi_interrupt0,
				0, "MSI[16:31]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI1) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI2, octeon_msi_interrupt0,
				0, "MSI[32:47]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI2) failed");

		if (request_irq(OCTEON_IRQ_PCI_MSI3, octeon_msi_interrupt0,
				0, "MSI[48:63]", octeon_msi_interrupt0))
			panic("request_irq(OCTEON_IRQ_PCI_MSI3) failed");
		msi_irq_size = 64;
	}
	return 0;
}

subsys_initcall(octeon_msi_initialize);