/*
 * arch/mips/kernel/genex.S — MIPS low-level exception vectors and handlers
 * (source: torvalds/linux, blob/master/arch/mips/kernel/genex.S)
 */
1
/*
2
* This file is subject to the terms and conditions of the GNU General Public
3
* License. See the file "COPYING" in the main directory of this archive
4
* for more details.
5
*
6
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8
* Copyright (C) 2002, 2007 Maciej W. Rozycki
9
* Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
10
*/
11
#include <linux/init.h>
12
13
#include <asm/asm.h>
14
#include <asm/asmmacro.h>
15
#include <asm/cacheops.h>
16
#include <asm/irqflags.h>
17
#include <asm/regdef.h>
18
#include <asm/fpregdef.h>
19
#include <asm/mipsregs.h>
20
#include <asm/stackframe.h>
21
#include <asm/sync.h>
22
#include <asm/thread_info.h>
23
24
__INIT
25
26
/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c			# isolate ExcCode field (already a *4 index)
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1			# scale *4 index up to *8 for 64-bit pointers
#endif
	PTR_L	k0, exception_handlers(k1)	# fetch per-ExcCode handler
	jr	k0
	.set	pop
END(except_vec3_generic)
44
45
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2			# ExcCode 31: VCED (data)
	andi	k1, k1, 0x7c			# isolate ExcCode field
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2			# (delay slot) ExcCode 14: VCEI (insn)
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1			# scale index for 64-bit pointers
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)	# anything else: generic dispatch
	jr	k0

/*
 * Big shit, we now may have two dirty primary cache lines for the same
 * physical address. We can safely invalidate the line pointed to by
 * c0_badvaddr because after return from this exception handler the
 * load / store will be re-executed.
 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)		# invalidate the primary D-line
	cache	Hit_Writeback_Inv_SD, (k0)	# write back & invalidate secondary
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count			# bump /proc statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count			# bump /proc statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
END(except_vec3_r4000)
104
105
__FINIT
106
107
	.section .cpuidle.text,"ax"
	/* Align to 32 bytes for the maximum idle interrupt region size. */
	.align	5
LEAF(r4k_wait)
	/* Keep the ISA bit clear for calculations on local labels here. */
0:	.fill	0
	/* Start of idle interrupt region. */
	local_irq_enable
	/*
	 * If an interrupt lands here, before going idle on the next
	 * instruction, we must *NOT* go idle since the interrupt could
	 * have set TIF_NEED_RESCHED or caused a timer to need resched.
	 * Fall through -- see skipover_handler below -- and have the
	 * idle loop take care of things.
	 */
1:	.fill	0
	/* The R2 EI/EHB sequence takes 8 bytes, otherwise pad up. */
	.if	1b - 0b > 32
	.error	"overlong idle interrupt region"
	.elseif	1b - 0b > 8
	.align	4
	.endif
2:	.fill	0
	.equ	r4k_wait_idle_size, 2b - 0b
	/* End of idle interrupt region; size has to be a power of 2. */
	.set	MIPS_ISA_ARCH_LEVEL_RAW
r4k_wait_insn:
	wait				# go idle until the next interrupt
r4k_wait_exit:
	.set	mips0
	local_irq_disable
	jr	ra
	END(r4k_wait)
	.previous
141
142
/*
 * Interrupt-entry prologue: if EPC points inside the r4k_wait idle
 * interrupt region, redirect EPC past the WAIT instruction so we do not
 * go idle with freshly-arrived work pending, then fall into \handler.
 */
	.macro	BUILD_SKIPOVER_PROLOGUE handler
FEXPORT(skipover_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	/* Subtract/add 2 to let the ISA bit propagate through the mask. */
	PTR_LA	k1, r4k_wait_insn - 2
	ori	k0, r4k_wait_idle_size - 2	# round EPC up within the region
	.set	noreorder
	bne	k0, k1, \handler		# EPC not in the idle region: normal path
	PTR_ADDIU	k0, r4k_wait_exit - r4k_wait_insn + 2	# (delay slot)
	.set	reorder
	MTC0	k0, CP0_EPC			# resume after the WAIT
	.set	pop
	.endm
157
158
.align	5
	BUILD_SKIPOVER_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP		# R3000: check previous IE bit
	bnez	k0, 1f

	mfc0	k0, CP0_EPC		# IRQs just disabled: return immediately
	.set	noreorder
	j	k0
	rfe				# (delay slot) restore pre-exception mode
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret				# IRQs just disabled: return immediately
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	# stash previous, publish current pt_regs
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp			# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)	# build 64-bit address of irq_stack piecewise
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0		# k1 = &irq_stack[cpu]
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp		# t1 = base of current stack
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
END(handle_int)
243
244
__INIT
245
246
/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)
258
259
/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)
271
272
__FINIT
273
274
/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
	BUILD_SKIPOVER_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
	/* Delay slot below is patched per copy with this vector's offset. */
FEXPORT(except_vec_vi_ori)
	ori	v0, zero, 0		/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
292
293
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# preserve v0 across TRACE_IRQS_OFF
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)	# stash previous, publish current pt_regs
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp			# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)	# build 64-bit address of irq_stack piecewise
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0		# k1 = &irq_stack[cpu]
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp		# t1 = base of current stack
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)	# dispatch to the registered handler
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)
352
353
/*
 * EJTAG debug exception handler.
 * k0 is parked in the CP0 DESAVE scratch register; k1 is parked in
 * ejtag_debug_buffer (guarded by an ll/sc lock and copied to a per-cpu
 * slot on SMP), then restored before DERET.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# park k0 in the debug scratch register
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return	# not SDBBP: just return from debug mode

#ifdef CONFIG_SMP
	/* Take the ll/sc lock guarding the shared k1 save buffer. */
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b			# spin while the lock word is non-zero
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)		# store non-zero (the address) to lock
	beqz	k0, 1b			# sc failed: retry from the top
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)		# save k1 via the shared buffer ...

	/* ... then move it into this CPU's private per-cpu slot. */
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)		# release the lock
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp			# a0 = struct pt_regs *
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	/* Recover k1 from this CPU's per-cpu slot. */
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE		# recover the original k0
	.set	mips32
	deret				# return from debug mode
	.set	pop
	END(ejtag_debug_handler)
421
422
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler (one word for k1, plus on SMP the lock word and a
 * per-CPU save slot array).
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous
436
437
__INIT
438
439
/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)
451
452
__FINIT
453
454
NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL		# set EXL: remain at exception level
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb				# clear the CP0 write hazard
	SAVE_ALL
	move	a0, sp			# a0 = struct pt_regs *
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)
474
475
/* No pre-handler fixup required for this exception type. */
	.macro	__build_clear_none
	.endm

/* Enable interrupts (and trace the transition) before the C handler. */
	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

/* Keep interrupts disabled for the C handler. */
	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

/* FP exception: pass FCSR (fcr31) to the C handler in a1. */
	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

/* MSA FP exception: pass the MSA control/status register in a1. */
	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

/* Address error: record the faulting address in pt_regs->cp0_badvaddr. */
	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

/* Loongson GSExc: pass CP0.Diag1 (GSCause) in a1, then enable IRQs. */
	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm
523
524
/* "silent" variant: emit no diagnostic print. */
	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code. So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)		# a1 = faulting EPC for the printout
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm
540
541
/*
 * Bump the per-exception-type counter exception_count_<exception>.
 * Fix: the .comm previously declared "exception_count\exception"
 * (missing underscore), which never matched the symbol actually
 * loaded and stored above; declare the underscored name instead.
 */
	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm
547
548
/*
 * Emit the low-level entry stub handle_<exception>: save all registers,
 * run the per-type __build_clear_<clear> fixup, optionally print a
 * diagnostic, then call the C handler do_<handler>(regs).
 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp			# a0 = struct pt_regs *
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	/* Public wrapper: all handlers get the "_int" FEXPORT suffix. */
	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
567
568
/* Instantiate the handlers; /* #n */ is the CP0 Cause ExcCode. */
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */
598
599
.align	5
LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX	# keep current ASID
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1	# round EPC down to its VPN2 page pair
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp				# probe for a matching TLB entry
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX		# negative Index means no match
	.set	pop
	bltz	k1, handle_ri		/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)
620
621
LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1		# ISA bit set -> microMIPS encoding
	beqz	k0, 1f
	xor	k1, k0			# (delay slot) strip the ISA bit
	lhu	k0, (k1)		# fetch the 32-bit insn as two halfwords
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d		# expected microMIPS rdhwr v1,$29
	b	docheck
	ori	k0, 0x6b3c		# (delay slot)
1:
	lui	k0, 0x7c03		# expected MIPS32 rdhwr v1,$29
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri		# microMIPS EPC without support: punt
	lui	k0, 0x7c03		# expected MIPS32 rdhwr v1,$29
	lw	k1, (k1)		# (delay slot starts the fetch)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = thread's TLS pointer value
	LONG_ADDIU	k0, 4		# skip the emulated instruction
	jr	k0
	rfe				# (delay slot) restore pre-exception mode
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC		# skip the emulated instruction
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = thread's TLS pointer value
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)
685
686
#ifdef CONFIG_CPU_R4X00_BUGS64
	/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif
693
694