GitHub Repository: torvalds/linux
Path: blob/master/mm/hugetlb_cma.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/mm.h>
#include <linux/cma.h>
#include <linux/compiler.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/setup.h>

#include <linux/hugetlb.h>
#include "internal.h"
#include "hugetlb_cma.h"


static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_only;
static unsigned long hugetlb_cma_size __initdata;

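/*
 * Return a hugetlb folio to the CMA area of the node it resides on.
 * Warns once if the folio was not actually allocated from that area.
 */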
void hugetlb_cma_free_folio(struct folio *folio)
{
        int nid = folio_nid(folio);

        WARN_ON_ONCE(!cma_free_folio(hugetlb_cma[nid], folio));
}


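/*
 * Allocate a huge page folio from CMA. Try the preferred node @nid first;
 * if that fails and __GFP_THISNODE is not set, fall back to the other
 * nodes in @nodemask. A successfully allocated folio is flagged as
 * CMA-backed so it is returned to the right area when freed.
 */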
struct folio *hugetlb_cma_alloc_folio(struct hstate *h, gfp_t gfp_mask,
                                      int nid, nodemask_t *nodemask)
{
        int node;
        int order = huge_page_order(h);
        struct folio *folio = NULL;

        if (hugetlb_cma[nid])
                folio = cma_alloc_folio(hugetlb_cma[nid], order, gfp_mask);

        if (!folio && !(gfp_mask & __GFP_THISNODE)) {
                for_each_node_mask(node, *nodemask) {
                        if (node == nid || !hugetlb_cma[node])
                                continue;

                        folio = cma_alloc_folio(hugetlb_cma[node], order, gfp_mask);
                        if (folio)
                                break;
                }
        }

        if (folio)
                folio_set_hugetlb_cma(folio);

        return folio;
}

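/*
 * Reserve memory for a boot-time huge page from CMA. Try the CMA area of
 * *@nid first; when @node_exact is false, fall back to the other boot
 * memory nodes, updating *@nid to the node that satisfied the request.
 */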
struct huge_bootmem_page * __init
hugetlb_cma_alloc_bootmem(struct hstate *h, int *nid, bool node_exact)
{
        struct cma *cma;
        struct huge_bootmem_page *m;
        int node = *nid;

        cma = hugetlb_cma[*nid];
        m = cma_reserve_early(cma, huge_page_size(h));
        if (!m) {
                if (node_exact)
                        return NULL;

                for_each_node_mask(node, hugetlb_bootmem_nodes) {
                        cma = hugetlb_cma[node];
                        if (!cma || node == *nid)
                                continue;
                        m = cma_reserve_early(cma, huge_page_size(h));
                        if (m) {
                                *nid = node;
                                break;
                        }
                }
        }

        if (m) {
                m->flags = HUGE_BOOTMEM_CMA;
                m->cma = cma;
        }

        return m;
}


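/* Set once hugetlb_cma_reserve() has run; checked by hugetlb_cma_check(). */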
static bool cma_reserve_called __initdata;

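/*
 * Parse the "hugetlb_cma=" kernel parameter. Either a single global size
 * (e.g. "hugetlb_cma=4G") or a comma-separated list of per-node
 * "<nid>:<size>" entries (e.g. "hugetlb_cma=0:2G,1:2G") is accepted.
 */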
static int __init cmdline_parse_hugetlb_cma(char *p)
{
        int nid, count = 0;
        unsigned long tmp;
        char *s = p;

        while (*s) {
                if (sscanf(s, "%lu%n", &tmp, &count) != 1)
                        break;

                if (s[count] == ':') {
                        if (tmp >= MAX_NUMNODES)
                                break;
                        nid = array_index_nospec(tmp, MAX_NUMNODES);

                        s += count + 1;
                        tmp = memparse(s, &s);
                        hugetlb_cma_size_in_node[nid] = tmp;
                        hugetlb_cma_size += tmp;

                        /*
                         * Skip the separator if there is one, otherwise
                         * stop parsing.
                         */
                        if (*s == ',')
                                s++;
                        else
                                break;
                } else {
                        hugetlb_cma_size = memparse(p, &p);
                        break;
                }
        }

        return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

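/*
 * Parse the "hugetlb_cma_only=" boolean parameter, which restricts
 * hugetlb page allocations to the CMA areas reserved here.
 */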
static int __init cmdline_parse_hugetlb_cma_only(char *p)
{
        return kstrtobool(p, &hugetlb_cma_only);
}

early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);

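/*
 * Reserve the CMA areas configured via "hugetlb_cma=". @order is the
 * order of a gigantic page on this architecture; each per-node area is
 * rounded up to a multiple of that size.
 */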
void __init hugetlb_cma_reserve(int order)
{
        unsigned long size, reserved, per_node;
        bool node_specific_cma_alloc = false;
        int nid;

        /*
         * HugeTLB CMA reservation is required for gigantic
         * huge pages which cannot be allocated via the
         * page allocator. Just warn if there is any change
         * breaking this assumption.
         */
        VM_WARN_ON(order <= MAX_PAGE_ORDER);
        cma_reserve_called = true;

        if (!hugetlb_cma_size)
                return;

        hugetlb_bootmem_set_nodes();

        for (nid = 0; nid < MAX_NUMNODES; nid++) {
                if (hugetlb_cma_size_in_node[nid] == 0)
                        continue;

                if (!node_isset(nid, hugetlb_bootmem_nodes)) {
                        pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
                        hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
                        hugetlb_cma_size_in_node[nid] = 0;
                        continue;
                }

                if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
                        pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
                                nid, (PAGE_SIZE << order) / SZ_1M);
                        hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
                        hugetlb_cma_size_in_node[nid] = 0;
                } else {
                        node_specific_cma_alloc = true;
                }
        }

        /* Validate the CMA size again in case some invalid nodes were specified. */
        if (!hugetlb_cma_size)
                return;

        if (hugetlb_cma_size < (PAGE_SIZE << order)) {
                pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
                        (PAGE_SIZE << order) / SZ_1M);
                hugetlb_cma_size = 0;
                return;
        }

        if (!node_specific_cma_alloc) {
                /*
                 * If a 3 GB area is requested on a machine with 4 numa nodes,
                 * let's allocate 1 GB on the first three nodes and ignore the
                 * last one.
                 */
                per_node = DIV_ROUND_UP(hugetlb_cma_size,
                                        nodes_weight(hugetlb_bootmem_nodes));
                pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
                        hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
        }

        reserved = 0;
        for_each_node_mask(nid, hugetlb_bootmem_nodes) {
                int res;
                char name[CMA_MAX_NAME];

                if (node_specific_cma_alloc) {
                        if (hugetlb_cma_size_in_node[nid] == 0)
                                continue;

                        size = hugetlb_cma_size_in_node[nid];
                } else {
                        size = min(per_node, hugetlb_cma_size - reserved);
                }

                size = round_up(size, PAGE_SIZE << order);

                snprintf(name, sizeof(name), "hugetlb%d", nid);
                /*
                 * Note that 'order per bit' is based on the smallest size
                 * that may be returned to the CMA allocator in the case of
                 * huge page demotion.
                 */
                res = cma_declare_contiguous_multi(size, PAGE_SIZE << order,
                                                   HUGETLB_PAGE_ORDER, name,
                                                   &hugetlb_cma[nid], nid);
                if (res) {
                        pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
                                res, nid);
                        continue;
                }

                reserved += size;
                pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
                        size / SZ_1M, nid);

                if (reserved >= hugetlb_cma_size)
                        break;
        }

        if (!reserved)
                /*
                 * hugetlb_cma_size is used to determine if allocations from
                 * CMA are possible. Set to zero if no CMA regions are set up.
                 */
                hugetlb_cma_size = 0;
}

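/*
 * Warn if "hugetlb_cma=" was given but the architecture never called
 * hugetlb_cma_reserve().
 */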
void __init hugetlb_cma_check(void)
{
        if (!hugetlb_cma_size || cma_reserve_called)
                return;

        pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

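/* True if "hugetlb_cma_only" was set, i.e. allocations must come from CMA. */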
bool hugetlb_cma_exclusive_alloc(void)
{
        return hugetlb_cma_only;
}

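/* Total CMA size requested via "hugetlb_cma=", zero if none was set up. */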
unsigned long __init hugetlb_cma_total_size(void)
{
        return hugetlb_cma_size;
}

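/* "hugetlb_cma_only" is meaningless without a CMA size, so clear it. */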
void __init hugetlb_cma_validate_params(void)
{
        if (!hugetlb_cma_size)
                hugetlb_cma_only = false;
}

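/*
 * True if gigantic pages for @h should be allocated from CMA during early
 * boot rather than through the architecture's own boot-time allocator.
 */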
bool __init hugetlb_early_cma(struct hstate *h)
{
        if (arch_has_huge_bootmem_alloc())
                return false;

        return hstate_is_gigantic(h) && hugetlb_cma_only;
}