// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <[email protected]>
 */

#define pr_fmt(fmt) "hugetlb: " fmt

#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))

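/*
 * Example: move_set_bit(x, 0x04, 0x10) evaluates to 0x10 when bit 0x04
 * is set in x, and to 0 otherwise.
 */
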
static inline unsigned long __pte_to_rste(pte_t pte)
{
	swp_entry_t arch_entry;
	unsigned long rste;

	/*
	 * Convert encoding		pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= _SEGMENT_ENTRY_PRESENT;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else if (!pte_none(pte)) {
		/* swap pte */
		arch_entry = __pte_to_swp_entry(pte);
		rste = mk_swap_rste(__swp_type(arch_entry), __swp_offset(arch_entry));
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

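/*
 * Inverse of __pte_to_rste(): reconstruct a pte from a region third or
 * segment table entry, including the swap and empty cases.
 */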
static inline pte_t __rste_to_pte(unsigned long rste)
{
	swp_entry_t arch_entry;
	unsigned long pteval;
	int present, none;
	pte_t pte;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		present = pud_present(__pud(rste));
		none = pud_none(__pud(rste));
	} else {
		present = pmd_present(__pmd(rste));
		none = pmd_none(__pmd(rste));
	}

	/*
	 * Convert encoding		pmd / pud bits	pte bits
	 *				dy..R...I...wr	lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pteval |= _PAGE_LARGE | _PAGE_PRESENT;
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
	} else if (!none) {
		/* swap rste */
		arch_entry = __rste_to_swp_entry(rste);
		pte = mk_swap_pte(__swp_type_rste(arch_entry), __swp_offset_rste(arch_entry));
		pteval = pte_val(pte);
	} else
		pteval = _PAGE_INVALID;
	return __pte(pteval);
}

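/*
 * Store a hugetlb pte: convert it to region third / segment table entry
 * format and set the table-type and large-page bits that match the
 * level of *ptep.
 */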
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	set_pte(ptep, __pte(rste));
}

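/*
 * hugetlb interface: the page size is implied by the table type of
 * *ptep, so sz is not needed here.
 */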
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, pte);
}

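/*
 * Read a hugetlb pte: *ptep really points to a pmd or pud, so convert
 * the region/segment table entry back to pte format.
 */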
pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

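/*
 * Clear a hugetlb entry by exchanging it with an empty region third or
 * segment entry, and return the previous contents as a pte.
 */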
pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(mm, addr, ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

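/*
 * Walk and, if necessary, allocate the page table down to the level
 * that holds the hugetlb entry: the pud level for PUD_SIZE (2G) pages,
 * the pmd level for PMD_SIZE (1M) pages. Returns NULL on allocation
 * failure or for an unsupported size.
 */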
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

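/*
 * Look up the table entry for a hugetlb mapping without allocating:
 * returns the pud pointer for PUD_SIZE pages, otherwise the pmd
 * pointer, or NULL if an intermediate level is not present.
 */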
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (sz == PUD_SIZE)
				return (pte_t *)pudp;
			if (pud_present(*pudp))
				pmdp = pmd_offset(pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

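/*
 * 1M (PMD_SIZE) hugepages require the EDAT1 facility, 2G (PUD_SIZE)
 * hugepages require EDAT2.
 */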
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (cpu_has_edat1() && size == PMD_SIZE)
		return true;
	else if (cpu_has_edat2() && size == PUD_SIZE)
		return true;
	else
		return false;
}

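/*
 * Page order used for hugetlb CMA reservations: the 2G (PUD_SIZE)
 * order when EDAT2 is available, 0 otherwise.
 */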
unsigned int __init arch_hugetlb_cma_order(void)
{
	if (cpu_has_edat2())
		return PUD_SHIFT - PAGE_SHIFT;

	return 0;
}