GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/s390/mm/vmem.c
/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <[email protected]>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

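/*
 * Bookkeeping for memory ranges in the 1:1 mapping: each range is
 * tracked as a memory_segment on the mem_segs list, which is protected
 * by vmem_mutex.
 */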
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

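/*
 * Allocate 2^order pages for page table use: from the page allocator
 * once the slab allocator is up, from bootmem during early boot.
 */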
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

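/*
 * Allocate a pud table (four pages) and clear all entries to
 * _REGION3_ENTRY_EMPTY. Without CONFIG_64BIT the upper table levels
 * are folded, so nothing is allocated and NULL is returned.
 */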
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

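/*
 * Allocate a pmd (segment) table (four pages) and clear all entries to
 * _SEGMENT_ENTRY_EMPTY. Without CONFIG_64BIT the pmd level is folded,
 * so nothing is allocated and NULL is returned.
 */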
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

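/*
 * Allocate a page table for the kernel address space and invalidate
 * all of its entries (_PAGE_TYPE_EMPTY).
 */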
static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
			pmd_clear(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

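/*
 * Unlink a segment from the segment list and invalidate its entries in
 * the 1:1 mapping. Called with vmem_mutex held.
 */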
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

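/*
 * Remove a previously added memory segment: look up the exact
 * (start, size) match on the segment list, tear down its 1:1 mapping
 * and free the segment. Returns -ENOENT if no matching segment exists.
 */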
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

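/*
 * Register a new memory segment and add it to the 1:1 mapping.
 * On failure the partially set up segment is removed and freed again.
 */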
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);