GitHub Repository: torvalds/linux
Path: blob/master/arch/xtensa/kernel/syscall.c
/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <[email protected], [email protected]>
 * Marc Gauthier <[email protected], [email protected]>
 * Chris Zankel <[email protected]>
 * Kevin Chea
 *
 */
#include <linux/uaccess.h>
#include <asm/syscall.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>

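/*
 * The syscall table is generated: including <asm/syscall_table.h> with
 * __SYSCALL() defined expands to one function-pointer initializer per
 * system call, so the table mirrors the architecture's syscall list.
 */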
syscall_t sys_call_table[] /* FIXME __cacheline_aligned */ = {
#define __SYSCALL(nr, entry)	(syscall_t)entry,
#include <asm/syscall_table.h>
};

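/*
 * Round addr up to an SHMLBA boundary and add the cache colour implied by
 * pgoff, so that shared mappings of the same file offset get the same
 * colour and cannot alias in the virtually-indexed cache.
 */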
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

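/*
 * shmat() wrapper: do_shmat() hands back the attach address through a
 * pointer, so return either the negative error code or that address.
 */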
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	return (long)ret;
}

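/*
 * fadvise64_64() wrapper. The argument order differs from the generic
 * syscall (advice comes second), most likely so that the 64-bit offset
 * and len arguments land in aligned register pairs of the xtensa calling
 * convention; the wrapper restores the generic order before calling in.
 */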
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
		unsigned long long offset, unsigned long long len)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}

#ifdef CONFIG_MMU
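/*
 * Pick an unmapped region for mmap(). MAP_FIXED requests are only checked
 * against the aliasing constraint; otherwise search the VMA list from the
 * hint (or TASK_UNMAPPED_BASE) for a large enough gap, keeping shared
 * mappings colour-aligned.
 */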
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags)
{
	struct vm_area_struct *vmm;
	struct vma_iterator vmi;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	vma_iter_init(&vmi, current->mm, addr);
	for_each_vma(vmi, vmm) {
		/* At this point: (addr < vmm->vm_end). */
		if (addr + len <= vm_start_gap(vmm))
			break;

		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}

	if (TASK_SIZE - len < addr)
		return -ENOMEM;

	return addr;
}
#endif