GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mips/kernel/genex.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>
#include <asm/thread_info.h>

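/*
 * Emit a position-independent call to panic() with a fixed message:
 * the address of the string (at local label 8, emitted by TEXT) is
 * loaded into a0 and the jump goes through AT; the branch-to-self at
 * label 9 is a safety net should panic() ever return.
 */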
#define PANIC_PIC(msg)				\
		.set	push;			\
		.set	reorder;		\
		PTR_LA	a0, 8f;			\
		.set	noat;			\
		PTR_LA	AT, panic;		\
		jr	AT;			\
9:		b	9b;			\
		.set	pop;			\
		TEXT(msg)

	__INIT

NESTED(except_vec0_generic, 0, sp)
	PANIC_PIC("Exception vector 0 called")
	END(except_vec0_generic)

NESTED(except_vec1_generic, 0, sp)
	PANIC_PIC("Exception vector 1 called")
	END(except_vec1_generic)

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this; it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX
#endif
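	/*
	 * Cause.ExcCode occupies bits 6..2, so masking CP0_CAUSE with
	 * 0x7c leaves ExcCode * 4: a ready-made byte offset into a table
	 * of 32-bit handler pointers.  On 64-bit kernels the dsll below
	 * doubles it to index 8-byte pointers instead.
	 */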
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this; it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
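	/*
	 * Compare Cause.ExcCode (already shifted left by 2 via the 0x7c
	 * mask) against 31 << 2 (VCED, the data virtual coherency
	 * exception) and 14 << 2 (VCEI, the instruction variant); any
	 * other code falls through to the normal table dispatch.
	 */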
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Ouch: we may now have two dirty primary cache lines for the
	 * same physical address.  We can safely invalidate the line
	 * pointed to by c0_badvaddr because after return from this
	 * exception handler the load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

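/*
 * Idle loop built around "wait", with a rollback region: if an
 * interrupt lands anywhere in the 32 bytes below, its prologue
 * (BUILD_ROLLBACK_PROLOGUE) rewinds EPC to the start of the region,
 * so the TIF_NEED_RESCHED test is redone before the CPU goes back to
 * sleep.  This closes the race between the flag check and the wait.
 */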
	.align	5	/* 32 byte rollback region */
LEAF(r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
	.set	mips3
	wait
	/* end of rollback region (the region size must be a power of two) */
	.set	pop
1:
	jr	ra
	END(r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, r4k_wait
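	/*
	 * The ori/xori pair clears the low five bits of EPC, rounding it
	 * down to the start of its 32-byte block.  If that block is
	 * r4k_wait's rollback region, EPC is rewound to the start of the
	 * region before we return.
	 */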
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, 9f
	MTC0	k0, CP0_EPC
9:
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is
	 * taken after interrupts are disabled but before the state is
	 * updated, it will appear to restore_all that it is incorrectly
	 * returning with interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

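	/*
	 * In kernel mode $28 (gp) holds the current thread_info pointer.
	 * Remember the previous pt_regs pointer, record this frame in
	 * its place, and tail-call plat_irq_dispatch with ra pre-loaded
	 * so the dispatcher returns through ret_from_irq.
	 */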
	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	j	plat_irq_dispatch
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be
 * replaced at initialization time.
 *
 * Be careful when changing this; it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by the SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
	FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
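	/*
	 * The lui/ori pair below is rewritten at boot so that v0 ends up
	 * holding the address of the per-vector handler, which
	 * except_vec_vi_handler then jumps to.
	 */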
	FEXPORT(except_vec_vi_lui)
	lui	v0, 0			/* Patched */
	j	except_vec_vi_handler
	FEXPORT(except_vec_vi_ori)
	ori	v0, 0			/* Patched */
	.set	pop
	END(except_vec_vi)
	EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code.
 * Complete the register saves and invoke the handler, which is passed
 * in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are
	 * level-triggered, and the CLI macro will clear EXL, potentially
	 * causing a duplicate interrupt service invocation.  So we need
	 * to clear the associated IM bit of Status prior to doing CLI,
	 * and restore it after the service routine has been invoked -
	 * we must assume that the service routine will have cleared the
	 * state, and any active level represents a new or otherwise
	 * unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT
	or	t2, t0, t2
	mtc0	t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0
	mtc0	t1, CP0_STATUS
	_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	jr	v0
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

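	/*
	 * Move Debug.DBp (bit 1, set when the exception was raised by an
	 * sdbbp instruction) up into the sign bit; if it is clear this
	 * was not an sdbbp, so return straight away.
	 */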
	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon). */
	.set	mips1
	cfc1	a1, fcr31
	li	a2, ~(0x3f << 12)
	and	a2, a1
	ctc1	a2, fcr31
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
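/*
 * Variant of the rdhwr fast path for CPUs with virtually tagged
 * i-caches: probe the TLB first so that an EPC with no valid mapping
 * takes the generic slow path (handle_ri) instead of faulting on the
 * instruction fetch inside this handler.
 */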
LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if the TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, PAGE_SHIFT + 1
	PTR_SLL	k0, PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)

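/*
 * Fast path for emulating "rdhwr v1, $29" (read the UserLocal/TLS
 * register) on CPUs that trap it as a reserved instruction: fetch the
 * faulting word, and if it matches, hand back the thread pointer from
 * thread_info and step EPC past the instruction.
 */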
LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* 0x7c03e83b: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
	.set	reorder
	bne	k0, k1, handle_ri	/* if not ours */
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif