GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/mm/mmap.c
// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 *  Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 *  All Rights Reserved.
 *
 *  Started by Ingo Molnar <[email protected]>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <asm/elf.h>

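/*
 * Maximum number of bytes by which the stack start may be shifted by
 * randomization; zero if randomization is disabled for this task.
 */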
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

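/*
 * Use the legacy bottom-up mmap layout if the ADDR_COMPAT_LAYOUT
 * personality bit is set, if the stack may grow without limit, or if
 * the legacy layout was requested via sysctl.
 */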
static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

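/* Page-aligned random offset applied to the mmap base address. */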
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

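/*
 * Bottom-up layout: the mmap area starts at TASK_UNMAPPED_BASE plus
 * the random offset.
 */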
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

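/*
 * Top-down layout: place the mmap base below the stack, leaving room
 * for stack growth (rlim_cur), stack randomization and the guard gap.
 * The gap is clamped to [128 MB, 5/6 of STACK_TOP], so even a small
 * stack limit keeps a ~128 MB hole below the stack.
 */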
static inline unsigned long mmap_base(unsigned long rnd,
				      const struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap = clamp(gap, SZ_128M, (STACK_TOP / 6) * 5);

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

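/*
 * Extra alignment for the unmapped area search: hugetlb mappings are
 * aligned to the huge page size; with randomization enabled, file
 * backed and shared mappings are aligned to MMAP_ALIGN_MASK pages;
 * anonymous private mappings need no extra alignment.
 */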
static int get_align_mask(struct file *filp, unsigned long flags)
{
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (filp || (flags & MAP_SHARED))
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

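/*
 * Bottom-up search for a free area between mm->mmap_base and
 * TASK_SIZE. A hint address is honoured if the requested range is
 * free; the chosen address is finally validated by check_asce_limit().
 */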
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

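/*
 * Top-down search below mm->mmap_base; falls back to a bottom-up
 * search if that area is exhausted (see the comment at the fallback
 * below). The result is again validated by check_asce_limit().
 */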
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

167
/*
168
* This function, called very early during the creation of a new
169
* process VM image, sets up which VM layout function to use:
170
*/
171
void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
172
{
173
unsigned long random_factor = 0UL;
174
175
if (current->flags & PF_RANDOMIZE)
176
random_factor = arch_mmap_rnd();
177
178
/*
179
* Fall back to the standard layout if the personality
180
* bit is set, or if the expected stack growth is unlimited:
181
*/
182
if (mmap_is_legacy(rlim_stack)) {
183
mm->mmap_base = mmap_base_legacy(random_factor);
184
mm_flags_clear(MMF_TOPDOWN, mm);
185
} else {
186
mm->mmap_base = mmap_base(random_factor, rlim_stack);
187
mm_flags_set(MMF_TOPDOWN, mm);
188
}
189
}
190
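/*
 * Mapping from VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations to
 * s390 page protections. Private writable mappings start out
 * read-only; write access is established later via copy-on-write.
 */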
static pgprot_t protection_map[16] __ro_after_init;

void __init setup_protection_map(void)
{
	pgprot_t *pm = protection_map;

	pm[VM_NONE] = PAGE_NONE;
	pm[VM_READ] = PAGE_RO;
	pm[VM_WRITE] = PAGE_RO;
	pm[VM_WRITE | VM_READ] = PAGE_RO;
	pm[VM_EXEC] = PAGE_RX;
	pm[VM_EXEC | VM_READ] = PAGE_RX;
	pm[VM_EXEC | VM_WRITE] = PAGE_RX;
	pm[VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX;
	pm[VM_SHARED] = PAGE_NONE;
	pm[VM_SHARED | VM_READ] = PAGE_RO;
	pm[VM_SHARED | VM_WRITE] = PAGE_RW;
	pm[VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW;
	pm[VM_SHARED | VM_EXEC] = PAGE_RX;
	pm[VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX;
	pm[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX;
	pm[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX;
}

DECLARE_VM_GET_PAGE_PROT