GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/kernel/entry_32.S
/*
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all regs on the stack.
 * if the order here is changed, it needs to be
 * updated in fork.c:copy_process, signal.c:do_signal,
 * ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
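/*
 * Editor's note: the offsets listed above appear to mirror struct pt_regs;
 * the PT_EBX, PT_EIP, PT_OLDESP etc. constants used throughout this file are
 * assumed to be the asm-offsets equivalents of that same layout.
 */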

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit syscall_trace_entry
#define sysexit_audit syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig check_userspace
#else
#define resume_userspace_sig resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

/* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

/* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
  .if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */
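/*
 * Editor's note: with CONFIG_X86_32_LAZY_GS the macros above merely keep a
 * 4-byte placeholder for %gs in the saved frame; without it, %gs is really
 * pushed/popped, and the 98:/99: labels plus the __ex_table entries let a
 * faulting %gs load be fixed up by storing a zero selector instead.
 */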

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm
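/*
 * Editor's note: SAVE_ALL pushes the segment and general-purpose registers
 * in the reverse order of the stack layout shown in the header comment, so
 * once it completes, 0(%esp) holds the saved %ebx and the PT_* offsets used
 * below index into this frame.
 */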

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm
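/*
 * Editor's note: the 1:/2:/3: labels above, together with the __ex_table
 * entries, pair each segment-register pop with a fixup (4:/5:/6:) that
 * zeroes the saved selector in memory and redoes the pop, so a stale user
 * %ds/%es/%fs cannot fault the return path.
 */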

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
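/*
 * Editor's note: RING0_INT_FRAME describes the 3-word iret frame
 * (eip/cs/eflags), RING0_EC_FRAME assumes an additional hardware error code
 * (hence 4*4 instead of 3*4), and RING0_PTREGS_FRAME describes a full saved
 * pt_regs frame, for the benefit of the DWARF unwind annotations only.
 */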

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * there is enough kernel state for TRACE_IRQS_OFF to be called -
	 * and we immediately enable interrupts at that point anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
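/*
 * Editor's note: the 1: label and the __ex_table entry above make a fault on
 * the user-mode (%ebp) dereference resume at syscall_fault instead of
 * oopsing; the earlier cmpl against __PAGE_OFFSET-3 rejects kernel-space
 * pointers before the access is attempted.
 */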

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
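/*
 * Editor's note, worked example (values are hypothetical): if the kernel
 * %esp is 0xc1fff123 and the saved user %esp is 0x1000a456, the code below
 * builds %eax = 0x1000f123 and writes (0xc1ff - 0x1000) into bits 16..31 of
 * the ESPFIX segment base, so base + %eax still addresses the kernel stack
 * while the high word of %esp already holds the user's value when iret
 * truncates ESP to 16 bits.
 */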
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use. */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL1(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL2(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL3(name) \
	ALIGN; \
ptregs_##name: \
	CFI_STARTPROC; \
	leal 4(%esp),%eax; \
	pushl_cfi %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	CFI_ADJUST_CFA_OFFSET -4; \
	ret; \
	CFI_ENDPROC; \
ENDPROC(ptregs_##name)
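/*
 * Editor's note: these stubs are reached via "call *sys_call_table(...)", so
 * leal 4(%esp) skips that call's return address and yields the pt_regs
 * pointer; the PTREGSCALLn variants then reload the first n syscall
 * arguments into %eax/%edx/%ecx, assuming the regparm(3) calling convention
 * the 32-bit kernel is normally built with.
 */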

PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)

/* Clone is an oddball. The 4th arg is in %edi */
	ALIGN;
ptregs_clone:
	CFI_STARTPROC
	leal 4(%esp),%eax
	pushl_cfi %eax
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(ptregs_clone)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
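/*
 * Editor's note: each stub above pushes (~vector + 0x80), which fits in a
 * signed byte so the short push imm8 encoding can be used; common_interrupt
 * below then adds -0x80, leaving -(vector + 1) in orig_eax, a value in
 * [-256, -1] that cannot be mistaken for a syscall number.
 */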

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	.balign 4
	.long 661b
	.long 663f
	.word X86_FEATURE_XMM
	.byte 662b-661b
	.byte 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2				# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $0
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events. This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)
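/*
 * Editor's note: with dynamic ftrace the compiler-inserted mcount call sites
 * are patched to nops, and the "call ftrace_stub" at the global ftrace_call
 * label above is assumed to be the single site that gets rewritten at run
 * time to call the active tracer.
 */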

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm
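/*
 * Editor's note: the \offset argument accounts for how many words the
 * nested exception has already pushed onto the sysenter stack: 12 (eflags,
 * cs, eip) when a debug trap or NMI hits the sysenter instruction itself,
 * and 24 when an NMI additionally lands on the debug handler's own 3-word
 * frame (see the FIX_STACK 24 use in the NMI path below).
 */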

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection