GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/mm/mmap.c
// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 *  Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 *  All Rights Reserved.
 *
 *  Started by Ingo Molnar <[email protected]>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <asm/elf.h>

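/*
 * Maximum number of bytes by which the stack top may be randomized.
 * Returns 0 if address space randomization is disabled for this task.
 */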
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

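/*
 * Use the legacy bottom-up mmap layout if the ADDR_COMPAT_LAYOUT
 * personality is set, if the stack rlimit is unlimited (the stack
 * could grow arbitrarily far down), or if the legacy_va_layout
 * sysctl requests it.
 */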
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

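/* Page-aligned random offset applied to the mmap base. */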
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

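/* Legacy layout: the mmap area grows up from TASK_UNMAPPED_BASE + rnd. */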
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

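/*
 * Top-down layout: place the mmap base below the stack, reserving
 * room for the stack rlimit, the maximum stack randomization and the
 * guard gap. The reserved gap is clamped between 128 MB and 5/6 of
 * the address space, and the overflow check below skips the pad when
 * rlim_cur is close to RLIM_INFINITY (gap + pad would wrap around).
 */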
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap = clamp(gap, SZ_128M, (STACK_TOP / 6) * 5);

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

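/*
 * Alignment mask for the free-area search: hugetlb mappings must be
 * aligned to the huge page size; file-backed and shared mappings get
 * the extra MMAP_ALIGN_MASK alignment, but only when randomization
 * is enabled.
 */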
static int get_align_mask(struct file *filp, unsigned long flags)
{
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (filp || (flags & MAP_SHARED))
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

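/*
 * Bottom-up search for a free area between mm->mmap_base and
 * TASK_SIZE. MAP_FIXED requests and usable address hints bypass the
 * search; every result is finally validated by check_asce_limit(),
 * which handles addresses beyond the current ASCE (address space
 * control element) limit.
 */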
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

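/*
 * Top-down counterpart of arch_get_unmapped_area(): search from
 * mm->mmap_base down to PAGE_SIZE, with a bottom-up fallback if the
 * top-down search fails.
 */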
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}

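/*
 * protection_map translates each combination of the VM_READ, VM_WRITE,
 * VM_EXEC and VM_SHARED flags into the page protection for a mapping.
 * Private writable mappings start out read-only so that the first
 * write faults and can trigger copy-on-write.
 */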
static pgprot_t protection_map[16] __ro_after_init;

void __init setup_protection_map(void)
{
	pgprot_t *pm = protection_map;

	pm[VM_NONE] = PAGE_NONE;
	pm[VM_READ] = PAGE_RO;
	pm[VM_WRITE] = PAGE_RO;
	pm[VM_WRITE | VM_READ] = PAGE_RO;
	pm[VM_EXEC] = PAGE_RX;
	pm[VM_EXEC | VM_READ] = PAGE_RX;
	pm[VM_EXEC | VM_WRITE] = PAGE_RX;
	pm[VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX;
	pm[VM_SHARED] = PAGE_NONE;
	pm[VM_SHARED | VM_READ] = PAGE_RO;
	pm[VM_SHARED | VM_WRITE] = PAGE_RW;
	pm[VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW;
	pm[VM_SHARED | VM_EXEC] = PAGE_RX;
	pm[VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX;
	pm[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX;
	pm[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX;
}

DECLARE_VM_GET_PAGE_PROT