#include <sys/ptrace.h>
#include <sys/prctl.h>
#include <sys/fcntl.h>
#include <asm/unistd.h>
#include <sysdep/stub.h>
#include <stub-data.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <generated/asm-offsets.h>
void _start(void);
/*
 * One-shot initialisation of the UML userspace stub process.
 *
 * Protocol, as implemented below:
 *  - read a struct stub_init_data from fd 0 (the pipe from the parent),
 *  - map the stub code page (read+exec) and the stub data pages
 *    (read+write) at the address the parent chose (init_data.stub_start),
 *  - install the signal handler/restorer delivered in init_data and,
 *    depending on init_data.seccomp, either a seccomp filter or
 *    PTRACE_TRACEME + SIGSTOP,
 *  - every failure path exits with a distinct code (10..21, 30) so the
 *    parent can tell where setup failed.
 *
 * Runs with no libc: all kernel interaction goes through the raw
 * stub_syscallN() wrappers from sysdep/stub.h, and kernel ABI structs
 * are mirrored locally instead of including uapi/libc signal headers.
 */
noinline static void real_init(void)
{
struct stub_init_data init_data;
unsigned long res;
/* Local mirror of the kernel's stack_t layout (for sigaltstack). */
struct {
void *ss_sp;
int ss_flags;
size_t ss_size;
} stack = {
.ss_size = STUB_DATA_PAGES * UM_KERN_PAGE_SIZE,
};
/*
 * Local mirror of the kernel's struct sigaction.  0x04000000 is
 * SA_RESTORER (not exported by all header sets); sa_restorer itself is
 * filled in from init_data below.
 */
struct {
void *sa_handler_;
unsigned long sa_flags;
void *sa_restorer;
unsigned long long sa_mask;
} sa = {
.sa_flags = SA_ONSTACK | SA_NODEFER | SA_SIGINFO | 0x04000000,
};
/* Name the process and make sure we die when the parent does. */
stub_syscall2(__NR_prctl, PR_SET_NAME, (unsigned long)"uml-userspace");
stub_syscall2(__NR_prctl, PR_SET_PDEATHSIG, SIGKILL);
/* no_new_privs is required to install a seccomp filter unprivileged. */
stub_syscall5(__NR_prctl, PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
/* Receive the configuration blob from the parent; exact size expected. */
res = stub_syscall3(__NR_read, 0,
(unsigned long)&init_data, sizeof(init_data));
if (res != sizeof(init_data))
stub_syscall1(__NR_exit, 10);
/*
 * ptrace mode is done with fd 0; seccomp mode keeps it (recvmsg on it
 * is on the filter's allow-list below) but makes it non-blocking.
 */
if (!init_data.seccomp)
stub_syscall1(__NR_close, 0);
else
stub_syscall3(__NR_fcntl, 0, F_SETFL, O_NONBLOCK);
/* Map the stub code page read+exec at the agreed fixed address. */
res = stub_syscall6(STUB_MMAP_NR,
init_data.stub_start, UM_KERN_PAGE_SIZE,
PROT_READ | PROT_EXEC, MAP_FIXED | MAP_SHARED,
init_data.stub_code_fd, init_data.stub_code_offset);
if (res != init_data.stub_start)
stub_syscall1(__NR_exit, 11);
/* Map the shared stub data pages read+write just after the code page. */
res = stub_syscall6(STUB_MMAP_NR,
init_data.stub_start + UM_KERN_PAGE_SIZE,
STUB_DATA_PAGES * UM_KERN_PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED,
init_data.stub_data_fd, init_data.stub_data_offset);
if (res != init_data.stub_start + UM_KERN_PAGE_SIZE)
stub_syscall1(__NR_exit, 12);
/*
 * seccomp mode: close every fd except fd 0, so the filtered process
 * cannot reach anything but the parent channel and the mapped pages.
 */
if (init_data.seccomp) {
res = stub_syscall3(__NR_close_range, 1, ~0U, 0);
if (res != 0)
stub_syscall1(__NR_exit, 13);
}
/* Signals are handled on the stub data pages mapped above. */
stack.ss_sp = (void *)init_data.stub_start + UM_KERN_PAGE_SIZE;
stub_syscall2(__NR_sigaltstack, (unsigned long)&stack, 0);
sa.sa_handler_ = (void *) init_data.signal_handler;
sa.sa_restorer = (void *) init_data.signal_restorer;
if (!init_data.seccomp) {
/* ptrace mode only intercepts SIGSEGV; block nothing in the handler. */
sa.sa_mask = 0;
res = stub_syscall4(__NR_rt_sigaction, SIGSEGV,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 14);
} else {
/*
 * seccomp mode: block all signals while a handler runs, and catch
 * every fault/trap signal plus SIGSYS (raised by the filter's
 * SECCOMP_RET_TRAP below) and SIGALRM.
 */
sa.sa_mask = ~0ULL;
res = stub_syscall4(__NR_rt_sigaction, SIGSEGV,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 15);
res = stub_syscall4(__NR_rt_sigaction, SIGSYS,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 16);
res = stub_syscall4(__NR_rt_sigaction, SIGALRM,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 17);
res = stub_syscall4(__NR_rt_sigaction, SIGTRAP,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 18);
res = stub_syscall4(__NR_rt_sigaction, SIGILL,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 19);
res = stub_syscall4(__NR_rt_sigaction, SIGFPE,
(unsigned long)&sa, 0, sizeof(sa.sa_mask));
if (res != 0)
stub_syscall1(__NR_exit, 20);
}
if (init_data.seccomp) {
/*
 * Classic-BPF seccomp filter.  The jt/jf constants are instruction
 * counts relative to the exact statement order — do NOT reorder.
 *
 *  1. If the syscall was NOT issued from the stub code page
 *     (instruction_pointer, high word and page-masked low word,
 *     compared against stub_start), return SECCOMP_RET_TRAP:
 *     SIGSYS is delivered to the handler installed above.  That is
 *     how the traced process's own syscalls are intercepted.
 *  2. A foreign architecture/ABI kills the process outright.
 *  3. Syscalls from the stub page itself are allowed only from a
 *     short list (each JEQ jumps straight to RET ALLOW); anything
 *     else falls through to SECCOMP_RET_KILL_PROCESS.
 */
struct sock_filter filter[] = {
#if __BITS_PER_LONG > 32
/* 64-bit: check the high word of the instruction pointer first. */
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
(offsetof(struct seccomp_data, instruction_pointer) + 4)),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (init_data.stub_start) >> 32, 0, 3),
#endif
/* Low word of the IP, masked to its page, must be the stub page. */
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
(offsetof(struct seccomp_data, instruction_pointer))),
BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xfffff000),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, (init_data.stub_start) & 0xfffff000, 1, 0),
BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRAP),
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
offsetof(struct seccomp_data, arch)),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K,
UM_SECCOMP_ARCH_NATIVE, 1, 0),
BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
/* Allow-list for syscalls issued by the stub code itself. */
BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_futex,
7, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K,__NR_recvmsg,
6, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K,__NR_close,
5, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, STUB_MMAP_NR,
4, 0),
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_munmap,
3, 0),
#ifdef __i386__
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_set_thread_area,
2, 0),
#else
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_arch_prctl,
2, 0),
#endif
BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_rt_sigreturn,
1, 0),
BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = sizeof(filter) / sizeof(filter[0]),
.filter = filter,
};
/* TSYNC applies the filter to all threads of the process. */
if (stub_syscall3(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
SECCOMP_FILTER_FLAG_TSYNC,
(unsigned long)&prog) != 0)
stub_syscall1(__NR_exit, 21);
} else {
/* ptrace mode: become a tracee and stop so the parent can attach. */
stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
stub_syscall2(__NR_kill, stub_syscall0(__NR_getpid), SIGSTOP);
}
/*
 * NOTE(review): presumably only reached if the parent never takes
 * control (seccomp trap / ptrace stop); exit code 30 marks that case.
 */
stub_syscall1(__NR_exit, 30);
__builtin_unreachable();
}
/*
 * Process entry point.  Declared naked because there is no C runtime
 * at this point; stub_start() (arch-provided, sysdep/stub.h) transfers
 * control to real_init().
 * NOTE(review): stack setup, if any, lives inside stub_start() — not
 * visible in this file; confirm against sysdep/stub.h.
 */
__attribute__((naked)) void _start(void)
{
stub_start(real_init);
}