GitHub repository torvalds/linux, path: arch/x86/um/shared/sysdep/stub_32.h (branch: master)
/*
 * Copyright (C) 2004 Jeff Dike ([email protected])
 * Licensed under the GPL
 */

#ifndef __SYSDEP_STUB_H
#define __SYSDEP_STUB_H

#include <stddef.h>
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>

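/*
 * mmap2 takes its file offset in page-sized units rather than bytes;
 * MMAP_OFFSET() performs that conversion.  For example (illustrative
 * arguments only), a mapping could be set up with the stub_syscall6()
 * helper below:
 *
 *	stub_syscall6(STUB_MMAP_NR, addr, len, prot, flags, fd,
 *		      MMAP_OFFSET(offset));
 */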
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> UM_KERN_PAGE_SHIFT)

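/*
 * The stub_syscallN() helpers below enter the kernel with "int $0x80",
 * using the 32-bit x86 convention: the syscall number in %eax and up to
 * five arguments in %ebx, %ecx, %edx, %esi and %edi, with the result
 * returned in %eax.  The "0" constraint places the syscall number in the
 * same register as the output operand, and the "memory" clobber stops the
 * compiler from caching memory contents across the call.
 */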
static __always_inline long stub_syscall0(long syscall)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall)
			: "memory");

	return ret;
}

static __always_inline long stub_syscall1(long syscall, long arg1)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1)
			: "memory");

	return ret;
}

static __always_inline long stub_syscall2(long syscall, long arg1, long arg2)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2)
			: "memory");

	return ret;
}

static __always_inline long stub_syscall3(long syscall, long arg1, long arg2,
					  long arg3)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2), "d" (arg3)
			: "memory");

	return ret;
}

static __always_inline long stub_syscall4(long syscall, long arg1, long arg2,
					  long arg3, long arg4)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2), "d" (arg3), "S" (arg4)
			: "memory");

	return ret;
}

static __always_inline long stub_syscall5(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5)
{
	long ret;

	__asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
			"c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
			: "memory");

	return ret;
}

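/*
 * A sixth argument would go in %ebp, which may be in use as the frame
 * pointer and cannot simply be claimed as an asm operand.  Instead, arg1
 * and arg6 are packed into a small on-stack struct whose address is passed
 * in %ebx; the asm saves %ebp, loads %ebp and %ebx from that struct,
 * issues the syscall and restores %ebp afterwards.
 */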
static __always_inline long stub_syscall6(long syscall, long arg1, long arg2,
					  long arg3, long arg4, long arg5,
					  long arg6)
{
	struct syscall_args {
		int ebx, ebp;
	} args = { arg1, arg6 };
	long ret;

	__asm__ volatile ("pushl %%ebp;"
			"movl 0x4(%%ebx),%%ebp;"
			"movl (%%ebx),%%ebx;"
			"int $0x80;"
			"popl %%ebp"
			: "=a" (ret)
			: "0" (syscall), "b" (&args),
			  "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
			: "memory");

	return ret;
}

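/*
 * int3 raises a breakpoint trap (SIGTRAP), handing control back to the
 * process that is monitoring the stub.
 */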
static __always_inline void trap_myself(void)
{
	__asm("int3");
}

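/*
 * Locate the stub's data area without any relocations: the call/popl pair
 * reads the current instruction pointer, which is rounded down to the
 * start of the stub code page; the data pages begin one page above that.
 */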
static __always_inline void *get_stub_data(void)
{
	unsigned long ret;

	asm volatile (
		"call _here_%=;"
		"_here_%=:"
		"popl %0;"
		"andl %1, %0 ;"
		"addl %2, %0 ;"
		: "=a" (ret)
		: "g" (~(UM_KERN_PAGE_SIZE - 1)),
		  "g" (UM_KERN_PAGE_SIZE));

	return (void *)ret;
}

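/*
 * stub_start() moves %esp down by the combined size of the stub code page
 * and the STUB_DATA_PAGES data pages before calling fn through %eax.
 */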
#define stub_start(fn) \
	asm volatile ( \
		"subl %0,%%esp ;" \
		"movl %1, %%eax ; " \
		"call *%%eax ;" \
		:: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE), \
		   "i" (&fn))

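/*
 * Reinstall every TLS descriptor flagged in arch->sync by calling
 * set_thread_area() on the corresponding arch->tls entry, then clear the
 * pending mask.
 */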
static __always_inline void
stub_seccomp_restore_state(struct stub_data_arch *arch)
{
	for (int i = 0; i < sizeof(arch->tls) / sizeof(arch->tls[0]); i++) {
		if (arch->sync & (1 << i))
			stub_syscall1(__NR_set_thread_area,
				      (unsigned long) &arch->tls[i]);
	}

	arch->sync = 0;
}

#endif