GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os/linux/cgroupSubsystem_linux.cpp
/*
 * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include <string.h>
#include <math.h>
#include <errno.h>
#include "cgroupSubsystem_linux.hpp"
#include "cgroupV1Subsystem_linux.hpp"
#include "cgroupV2Subsystem_linux.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"

// controller names have to match the *_IDX indices
static const char* cg_controller_name[] = { "cpu", "cpuset", "cpuacct", "memory", "pids" };

CgroupSubsystem* CgroupSubsystemFactory::create() {
  CgroupV1MemoryController* memory = NULL;
  CgroupV1Controller* cpuset = NULL;
  CgroupV1Controller* cpu = NULL;
  CgroupV1Controller* cpuacct = NULL;
  CgroupV1Controller* pids = NULL;
  CgroupInfo cg_infos[CG_INFO_LENGTH];
  u1 cg_type_flags = INVALID_CGROUPS_GENERIC;
  const char* proc_cgroups = "/proc/cgroups";
  const char* proc_self_cgroup = "/proc/self/cgroup";
  const char* proc_self_mountinfo = "/proc/self/mountinfo";

  bool valid_cgroup = determine_type(cg_infos, proc_cgroups, proc_self_cgroup, proc_self_mountinfo, &cg_type_flags);

  if (!valid_cgroup) {
    // Could not detect cgroup type
    return NULL;
  }
  assert(is_valid_cgroup(&cg_type_flags), "Expected valid cgroup type");

  if (is_cgroup_v2(&cg_type_flags)) {
    // Cgroups v2 case, we have all the info we need.
    // Construct the subsystem, free resources and return.
    // Note: any index in cg_infos will do as the path is the same for
    // all controllers.
    CgroupController* unified = new CgroupV2Controller(cg_infos[MEMORY_IDX]._mount_path, cg_infos[MEMORY_IDX]._cgroup_path);
    log_debug(os, container)("Detected cgroups v2 unified hierarchy");
    cleanup(cg_infos);
    return new CgroupV2Subsystem(unified);
  }

  /*
   * Cgroup v1 case:
   *
   * Use the info gathered previously from /proc/self/cgroup
   * and map the host mount point to the
   * local one via the /proc/self/mountinfo content above.
   *
   * Docker example:
   * 5:memory:/docker/6558aed8fc662b194323ceab5b964f69cf36b3e8af877a14b80256e93aecb044
   *
   * Host example:
   * 5:memory:/user.slice
   *
   * Construct a path to the process specific memory and cpuset
   * cgroup directory.
   *
   * For a container running under Docker, from the memory example above,
   * the path would be:
   *
   * /sys/fs/cgroup/memory
   *
   * For a host, from the memory example above, the path would be:
   *
   * /sys/fs/cgroup/memory/user.slice
   *
   */
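  //
  // Why the two paths differ (based on the mountinfo examples further below):
  // inside the Docker container the mountinfo "root" field of the memory mount
  // is the container's own cgroup path, so the local path reduces to the bare
  // mount point; on the host the root is "/" and the cgroup path from
  // /proc/self/cgroup is appended to the mount point.
  //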
  assert(is_cgroup_v1(&cg_type_flags), "Cgroup v1 expected");
  for (int i = 0; i < CG_INFO_LENGTH; i++) {
    CgroupInfo info = cg_infos[i];
    if (info._data_complete) { // pids controller might have incomplete data
      if (strcmp(info._name, "memory") == 0) {
        memory = new CgroupV1MemoryController(info._root_mount_path, info._mount_path);
        memory->set_subsystem_path(info._cgroup_path);
      } else if (strcmp(info._name, "cpuset") == 0) {
        cpuset = new CgroupV1Controller(info._root_mount_path, info._mount_path);
        cpuset->set_subsystem_path(info._cgroup_path);
      } else if (strcmp(info._name, "cpu") == 0) {
        cpu = new CgroupV1Controller(info._root_mount_path, info._mount_path);
        cpu->set_subsystem_path(info._cgroup_path);
      } else if (strcmp(info._name, "cpuacct") == 0) {
        cpuacct = new CgroupV1Controller(info._root_mount_path, info._mount_path);
        cpuacct->set_subsystem_path(info._cgroup_path);
      } else if (strcmp(info._name, "pids") == 0) {
        pids = new CgroupV1Controller(info._root_mount_path, info._mount_path);
        pids->set_subsystem_path(info._cgroup_path);
      }
    } else {
      log_debug(os, container)("CgroupInfo for %s not complete", cg_controller_name[i]);
    }
  }
  cleanup(cg_infos);
  return new CgroupV1Subsystem(cpuset, cpu, cpuacct, pids, memory);
}
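
// Illustrative usage sketch (not part of this file): callers obtain the
// subsystem once, typically from HotSpot's OSContainer initialization, and
// then query it, e.g.:
//
//   CgroupSubsystem* cg = CgroupSubsystemFactory::create();
//   if (cg != NULL) {
//     jlong mem_limit = cg->memory_limit_in_bytes();
//     int cpus = cg->active_processor_count();
//   }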

bool CgroupSubsystemFactory::determine_type(CgroupInfo* cg_infos,
                                            const char* proc_cgroups,
                                            const char* proc_self_cgroup,
                                            const char* proc_self_mountinfo,
                                            u1* flags) {
  FILE *mntinfo = NULL;
  FILE *cgroups = NULL;
  FILE *cgroup = NULL;
  char buf[MAXPATHLEN+1];
  char *p;
  bool is_cgroupsV2;
  // true iff all required controllers (memory, cpu, cpuset, cpuacct) are enabled
  // at the kernel level.
  // pids might not be enabled on older Linux distros (SLES 12.1, RHEL 7.1)
  bool all_required_controllers_enabled;

  /*
   * Read /proc/cgroups so as to be able to distinguish cgroups v2 vs cgroups v1.
   *
   * For a cgroups v1 hierarchy (hybrid or legacy), the cpu, cpuacct, cpuset and memory
   * controllers must have a non-zero hierarchy ID and the relevant controllers mounted.
   * Conversely, for cgroups v2 (unified hierarchy), those controllers must have
   * hierarchy ID 0 and the unified controller mounted.
   */
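  //
  // Illustrative /proc/cgroups content (columns per man 7 cgroups:
  // subsys_name, hierarchy, num_cgroups, enabled); numbers are examples only:
  //
  //   cgroups v1 (legacy/hybrid):  memory  9  123  1    <- non-zero hierarchy ID
  //   cgroups v2 (unified):        memory  0    1  1    <- hierarchy ID is 0
  //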
  cgroups = fopen(proc_cgroups, "r");
  if (cgroups == NULL) {
    log_debug(os, container)("Can't open %s, %s", proc_cgroups, os::strerror(errno));
    *flags = INVALID_CGROUPS_GENERIC;
    return false;
  }

  while ((p = fgets(buf, MAXPATHLEN, cgroups)) != NULL) {
    char name[MAXPATHLEN+1];
    int hierarchy_id;
    int enabled;

    // Format of /proc/cgroups documented via man 7 cgroups
    if (sscanf(p, "%s %d %*d %d", name, &hierarchy_id, &enabled) != 3) {
      continue;
    }
    if (strcmp(name, "memory") == 0) {
      cg_infos[MEMORY_IDX]._name = os::strdup(name);
      cg_infos[MEMORY_IDX]._hierarchy_id = hierarchy_id;
      cg_infos[MEMORY_IDX]._enabled = (enabled == 1);
    } else if (strcmp(name, "cpuset") == 0) {
      cg_infos[CPUSET_IDX]._name = os::strdup(name);
      cg_infos[CPUSET_IDX]._hierarchy_id = hierarchy_id;
      cg_infos[CPUSET_IDX]._enabled = (enabled == 1);
    } else if (strcmp(name, "cpu") == 0) {
      cg_infos[CPU_IDX]._name = os::strdup(name);
      cg_infos[CPU_IDX]._hierarchy_id = hierarchy_id;
      cg_infos[CPU_IDX]._enabled = (enabled == 1);
    } else if (strcmp(name, "cpuacct") == 0) {
      cg_infos[CPUACCT_IDX]._name = os::strdup(name);
      cg_infos[CPUACCT_IDX]._hierarchy_id = hierarchy_id;
      cg_infos[CPUACCT_IDX]._enabled = (enabled == 1);
    } else if (strcmp(name, "pids") == 0) {
      log_debug(os, container)("Detected optional pids controller entry in %s", proc_cgroups);
      cg_infos[PIDS_IDX]._name = os::strdup(name);
      cg_infos[PIDS_IDX]._hierarchy_id = hierarchy_id;
      cg_infos[PIDS_IDX]._enabled = (enabled == 1);
    }
  }
  fclose(cgroups);

  is_cgroupsV2 = true;
  all_required_controllers_enabled = true;
  for (int i = 0; i < CG_INFO_LENGTH; i++) {
    // pids controller is optional. All other controllers are required
    if (i != PIDS_IDX) {
      is_cgroupsV2 = is_cgroupsV2 && cg_infos[i]._hierarchy_id == 0;
      all_required_controllers_enabled = all_required_controllers_enabled && cg_infos[i]._enabled;
    }
    if (log_is_enabled(Debug, os, container) && !cg_infos[i]._enabled) {
      log_debug(os, container)("controller %s is not enabled\n", cg_controller_name[i]);
    }
  }

  if (!all_required_controllers_enabled) {
    // one or more required controllers disabled, disable container support
    log_debug(os, container)("One or more required controllers disabled at kernel level.");
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_GENERIC;
    return false;
  }

  /*
   * Read /proc/self/cgroup and determine:
   *  - the cgroup path for cgroups v2 or
   *  - on a cgroups v1 system, collect info for mapping
   *    the host mount point to the local one via /proc/self/mountinfo below.
   */
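  //
  // Illustrative /proc/self/cgroup lines (hierarchy-ID:controller-list:cgroup-path):
  //   cgroups v1:  5:memory:/user.slice
  //   cgroups v2:  0::/user.slice/session-2.scope
  // A v2 entry always has hierarchy ID 0 and an empty controller list; the
  // exact paths shown are examples only.
  //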
  cgroup = fopen(proc_self_cgroup, "r");
  if (cgroup == NULL) {
    log_debug(os, container)("Can't open %s, %s",
                             proc_self_cgroup, os::strerror(errno));
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_GENERIC;
    return false;
  }

  while ((p = fgets(buf, MAXPATHLEN, cgroup)) != NULL) {
    char *controllers;
    char *token;
    char *hierarchy_id_str;
    int hierarchy_id;
    char *cgroup_path;

    hierarchy_id_str = strsep(&p, ":");
    hierarchy_id = atoi(hierarchy_id_str);
    /* Get controllers and base */
    controllers = strsep(&p, ":");
    cgroup_path = strsep(&p, "\n");

    if (controllers == NULL) {
      continue;
    }

    while (!is_cgroupsV2 && (token = strsep(&controllers, ",")) != NULL) {
      if (strcmp(token, "memory") == 0) {
        assert(hierarchy_id == cg_infos[MEMORY_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for memory");
        cg_infos[MEMORY_IDX]._cgroup_path = os::strdup(cgroup_path);
      } else if (strcmp(token, "cpuset") == 0) {
        assert(hierarchy_id == cg_infos[CPUSET_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for cpuset");
        cg_infos[CPUSET_IDX]._cgroup_path = os::strdup(cgroup_path);
      } else if (strcmp(token, "cpu") == 0) {
        assert(hierarchy_id == cg_infos[CPU_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for cpu");
        cg_infos[CPU_IDX]._cgroup_path = os::strdup(cgroup_path);
      } else if (strcmp(token, "cpuacct") == 0) {
        assert(hierarchy_id == cg_infos[CPUACCT_IDX]._hierarchy_id, "/proc/cgroups and /proc/self/cgroup hierarchy mismatch for cpuacct");
        cg_infos[CPUACCT_IDX]._cgroup_path = os::strdup(cgroup_path);
      } else if (strcmp(token, "pids") == 0) {
        assert(hierarchy_id == cg_infos[PIDS_IDX]._hierarchy_id, "/proc/cgroups (%d) and /proc/self/cgroup (%d) hierarchy mismatch for pids",
               cg_infos[PIDS_IDX]._hierarchy_id, hierarchy_id);
        cg_infos[PIDS_IDX]._cgroup_path = os::strdup(cgroup_path);
      }
    }
    if (is_cgroupsV2) {
      // On some systems we have mixed cgroups v1 and cgroups v2 controllers (e.g. freezer on cg1 and
      // all relevant controllers on cg2). Only set the cgroup path when we see a hierarchy id of 0.
      if (hierarchy_id != 0) {
        continue;
      }
      for (int i = 0; i < CG_INFO_LENGTH; i++) {
        assert(cg_infos[i]._cgroup_path == NULL, "cgroup path must only be set once");
        cg_infos[i]._cgroup_path = os::strdup(cgroup_path);
      }
    }
  }
  fclose(cgroup);

  // Find various mount points by reading /proc/self/mountinfo
  // mountinfo format is documented at https://www.kernel.org/doc/Documentation/filesystems/proc.txt
  mntinfo = fopen(proc_self_mountinfo, "r");
  if (mntinfo == NULL) {
    log_debug(os, container)("Can't open %s, %s",
                             proc_self_mountinfo, os::strerror(errno));
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_GENERIC;
    return false;
  }

  bool cgroupv2_mount_point_found = false;
  bool any_cgroup_mounts_found = false;
  while ((p = fgets(buf, MAXPATHLEN, mntinfo)) != NULL) {
    char tmp_mount_point[MAXPATHLEN+1];
    char tmp_fs_type[MAXPATHLEN+1];
    char tmproot[MAXPATHLEN+1];
    char tmpmount[MAXPATHLEN+1];
    char tmpcgroups[MAXPATHLEN+1];
    char *cptr = tmpcgroups;
    char *token;

    // Cgroup v2 relevant info. We only look for the _mount_path iff is_cgroupsV2 so
    // as to avoid memory stomping of the _mount_path pointer later on in the cgroup v1
    // block in the hybrid case.
    //
    if (is_cgroupsV2 && sscanf(p, "%*d %*d %*d:%*d %*s %s %*[^-]- %s %*s %*s", tmp_mount_point, tmp_fs_type) == 2) {
      // The pattern above may also match a plain cgroup (v1) fs line, so be sure
      // the fs type really is cgroup2.
      if (!cgroupv2_mount_point_found && strcmp("cgroup2", tmp_fs_type) == 0) {
        cgroupv2_mount_point_found = true;
        any_cgroup_mounts_found = true;
        for (int i = 0; i < CG_INFO_LENGTH; i++) {
          assert(cg_infos[i]._mount_path == NULL, "_mount_path memory stomping");
          cg_infos[i]._mount_path = os::strdup(tmp_mount_point);
        }
      }
    }

    /* Cgroup v1 relevant info
     *
     * Find the cgroup mount point for memory, cpuset, cpu, cpuacct, pids
     *
     * Example for docker:
     * 219 214 0:29 /docker/7208cebd00fa5f2e342b1094f7bed87fa25661471a4637118e65f1c995be8a34 /sys/fs/cgroup/memory ro,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
     *
     * Example for host:
     * 34 28 0:29 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,memory
     *
     * 44 31 0:39 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:23 - cgroup cgroup rw,pids
     */
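    //
    // Field mapping of the sscanf below for the Docker example line above
    // (the format string skips everything between the mount point and the "-"
    // separator, then reads the fs type, skips the mount source, and reads
    // the super options):
    //   tmproot     = /docker/7208cebd00fa...   (root within the cgroup fs)
    //   tmpmount    = /sys/fs/cgroup/memory     (mount point)
    //   tmp_fs_type = cgroup
    //   tmpcgroups  = rw,memory                 (super options, scanned for controller names)
    //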
    if (sscanf(p, "%*d %*d %*d:%*d %s %s %*[^-]- %s %*s %s", tmproot, tmpmount, tmp_fs_type, tmpcgroups) == 4) {
      if (strcmp("cgroup", tmp_fs_type) != 0) {
        // Skip cgroup2 fs lines on hybrid or unified hierarchy.
        continue;
      }
      while ((token = strsep(&cptr, ",")) != NULL) {
        if (strcmp(token, "memory") == 0) {
          any_cgroup_mounts_found = true;
          assert(cg_infos[MEMORY_IDX]._mount_path == NULL, "stomping of _mount_path");
          cg_infos[MEMORY_IDX]._mount_path = os::strdup(tmpmount);
          cg_infos[MEMORY_IDX]._root_mount_path = os::strdup(tmproot);
          cg_infos[MEMORY_IDX]._data_complete = true;
        } else if (strcmp(token, "cpuset") == 0) {
          any_cgroup_mounts_found = true;
          if (cg_infos[CPUSET_IDX]._mount_path != NULL) {
            // On some systems duplicate cpuset controllers get mounted in addition to
            // the main cgroup controllers, most likely under /sys/fs/cgroup. In that
            // case pick the one under /sys/fs/cgroup and discard others.
            if (strstr(cg_infos[CPUSET_IDX]._mount_path, "/sys/fs/cgroup") != cg_infos[CPUSET_IDX]._mount_path) {
              log_warning(os, container)("Duplicate cpuset controllers detected. Picking %s, skipping %s.",
                                         tmpmount, cg_infos[CPUSET_IDX]._mount_path);
              os::free(cg_infos[CPUSET_IDX]._mount_path);
              cg_infos[CPUSET_IDX]._mount_path = os::strdup(tmpmount);
            } else {
              log_warning(os, container)("Duplicate cpuset controllers detected. Picking %s, skipping %s.",
                                         cg_infos[CPUSET_IDX]._mount_path, tmpmount);
            }
          } else {
            cg_infos[CPUSET_IDX]._mount_path = os::strdup(tmpmount);
          }
          cg_infos[CPUSET_IDX]._root_mount_path = os::strdup(tmproot);
          cg_infos[CPUSET_IDX]._data_complete = true;
        } else if (strcmp(token, "cpu") == 0) {
          any_cgroup_mounts_found = true;
          assert(cg_infos[CPU_IDX]._mount_path == NULL, "stomping of _mount_path");
          cg_infos[CPU_IDX]._mount_path = os::strdup(tmpmount);
          cg_infos[CPU_IDX]._root_mount_path = os::strdup(tmproot);
          cg_infos[CPU_IDX]._data_complete = true;
        } else if (strcmp(token, "cpuacct") == 0) {
          any_cgroup_mounts_found = true;
          assert(cg_infos[CPUACCT_IDX]._mount_path == NULL, "stomping of _mount_path");
          cg_infos[CPUACCT_IDX]._mount_path = os::strdup(tmpmount);
          cg_infos[CPUACCT_IDX]._root_mount_path = os::strdup(tmproot);
          cg_infos[CPUACCT_IDX]._data_complete = true;
        } else if (strcmp(token, "pids") == 0) {
          any_cgroup_mounts_found = true;
          assert(cg_infos[PIDS_IDX]._mount_path == NULL, "stomping of _mount_path");
          cg_infos[PIDS_IDX]._mount_path = os::strdup(tmpmount);
          cg_infos[PIDS_IDX]._root_mount_path = os::strdup(tmproot);
          cg_infos[PIDS_IDX]._data_complete = true;
        }
      }
    }
  }
  fclose(mntinfo);

  // Neither cgroup2 nor cgroup filesystems mounted via /proc/self/mountinfo.
  // No point in continuing.
  if (!any_cgroup_mounts_found) {
    log_trace(os, container)("No relevant cgroup controllers mounted.");
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_NO_MOUNT;
    return false;
  }

  if (is_cgroupsV2) {
    if (!cgroupv2_mount_point_found) {
      log_trace(os, container)("Mount point for cgroupv2 not found in /proc/self/mountinfo");
      cleanup(cg_infos);
      *flags = INVALID_CGROUPS_V2;
      return false;
    }
    // Cgroups v2 case, we have all the info we need.
    *flags = CGROUPS_V2;
    return true;
  }

  // What follows is cgroups v1
  log_debug(os, container)("Detected cgroups hybrid or legacy hierarchy, using cgroups v1 controllers");

  if (!cg_infos[MEMORY_IDX]._data_complete) {
    log_debug(os, container)("Required cgroup v1 memory subsystem not found");
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_V1;
    return false;
  }
  if (!cg_infos[CPUSET_IDX]._data_complete) {
    log_debug(os, container)("Required cgroup v1 cpuset subsystem not found");
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_V1;
    return false;
  }
  if (!cg_infos[CPU_IDX]._data_complete) {
    log_debug(os, container)("Required cgroup v1 cpu subsystem not found");
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_V1;
    return false;
  }
  if (!cg_infos[CPUACCT_IDX]._data_complete) {
    log_debug(os, container)("Required cgroup v1 cpuacct subsystem not found");
    cleanup(cg_infos);
    *flags = INVALID_CGROUPS_V1;
    return false;
  }
  if (log_is_enabled(Debug, os, container) && !cg_infos[PIDS_IDX]._data_complete) {
    log_debug(os, container)("Optional cgroup v1 pids subsystem not found");
    // keep the other controller info, pids is optional
  }
  // Cgroups v1 case, we have all the info we need.
  *flags = CGROUPS_V1;
  return true;
}
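
// Summary of the *flags values set by determine_type() above:
//   CGROUPS_V2               - unified hierarchy detected and a cgroup2 mount point found
//   CGROUPS_V1               - legacy/hybrid hierarchy with all required v1 controllers present
//   INVALID_CGROUPS_GENERIC  - proc files unreadable or required controllers disabled at kernel level
//   INVALID_CGROUPS_NO_MOUNT - no cgroup or cgroup2 filesystems mounted
//   INVALID_CGROUPS_V2       - v2 detected but no cgroup2 mount point found
//   INVALID_CGROUPS_V1       - a required v1 controller's mount data is incomplete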

void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
  assert(cg_infos != NULL, "Invariant");
  for (int i = 0; i < CG_INFO_LENGTH; i++) {
    os::free(cg_infos[i]._name);
    os::free(cg_infos[i]._cgroup_path);
    os::free(cg_infos[i]._root_mount_path);
    os::free(cg_infos[i]._mount_path);
  }
}

/* active_processor_count
 *
 * Calculate an appropriate number of active processors for the
 * VM to use based on these three inputs:
 *
 * cpu affinity
 * cgroup cpu quota & cpu period
 * cgroup cpu shares
 *
 * Algorithm:
 *
 * Determine the number of available CPUs from sched_getaffinity.
 *
 * If the user specified a quota (quota != -1), calculate the number of
 * required CPUs by dividing the quota by the period.
 *
 * If shares are in effect (shares != -1), calculate the number
 * of CPUs required for the shares by dividing the share value
 * by PER_CPU_SHARES.
 *
 * All results of division are rounded up to the next whole number.
 *
 * If neither shares nor quotas have been specified, return the
 * number of active processors in the system.
 *
 * If both shares and quotas have been specified, the result is
 * based on the flag PreferContainerQuotaForCPUCount. If true,
 * return the quota value; if false, return the smaller of the
 * share and quota counts.
 *
 * If shares and/or quotas have been specified, the resulting number
 * returned will never exceed the number of active processors.
 *
 * return:
 *   number of CPUs
 */
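/*
 * Worked example (illustrative numbers only; PER_CPU_SHARES is defined as
 * 1024 elsewhere in the cgroup code):
 *
 *   quota = 200000, period = 100000  ->  quota_count = ceil(2.0)         = 2
 *   shares = 1024                    ->  share_count = ceil(1024 / 1024) = 1
 *
 * With PreferContainerQuotaForCPUCount the result is the quota count (2);
 * otherwise it is MIN2(2, 1) = 1. In both cases the final value is capped by
 * the processor count reported by sched_getaffinity.
 */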
int CgroupSubsystem::active_processor_count() {
  int quota_count = 0, share_count = 0;
  int cpu_count, limit_count;
  int result;

  // We use a cache with a timeout to avoid performing expensive
  // computations in the event this function is called frequently.
  // [See 8227006].
  CachingCgroupController* contrl = cpu_controller();
  CachedMetric* cpu_limit = contrl->metrics_cache();
  if (!cpu_limit->should_check_metric()) {
    int val = (int)cpu_limit->value();
    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", val);
    return val;
  }

  cpu_count = limit_count = os::Linux::active_processor_count();
  int quota = cpu_quota();
  int period = cpu_period();

  // It's not a good idea to use cpu_shares() to limit the number
  // of CPUs used by the JVM. See JDK-8281181.
  int share = UseContainerCpuShares ? cpu_shares() : -1;

  if (quota > -1 && period > 0) {
    quota_count = ceilf((float)quota / (float)period);
    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
  }
  if (share > -1) {
    share_count = ceilf((float)share / (float)PER_CPU_SHARES);
    log_trace(os, container)("CPU Share count based on shares: %d", share_count);
  }

  // If both shares and quotas are set up, the result depends
  // on the flag PreferContainerQuotaForCPUCount.
  // If true, limit the CPU count to the quota.
  // If false, use the minimum of shares and quotas.
  if (quota_count != 0 && share_count != 0) {
    if (PreferContainerQuotaForCPUCount) {
      limit_count = quota_count;
    } else {
      limit_count = MIN2(quota_count, share_count);
    }
  } else if (quota_count != 0) {
    limit_count = quota_count;
  } else if (share_count != 0) {
    limit_count = share_count;
  }

  result = MIN2(cpu_count, limit_count);
  log_trace(os, container)("OSContainer::active_processor_count: %d", result);

  // Update cached metric to avoid re-reading container settings too often
  cpu_limit->set_value(result, OSCONTAINER_CACHE_TIMEOUT);

  return result;
}

/* memory_limit_in_bytes
 *
 * Return the limit of available memory for this process.
 *
 * return:
 *   memory limit in bytes or
 *   -1 for unlimited
 *   OSCONTAINER_ERROR for not supported
 */
jlong CgroupSubsystem::memory_limit_in_bytes() {
  CachingCgroupController* contrl = memory_controller();
  CachedMetric* memory_limit = contrl->metrics_cache();
  if (!memory_limit->should_check_metric()) {
    return memory_limit->value();
  }
  jlong mem_limit = read_memory_limit_in_bytes();
  // Update cached metric to avoid re-reading container settings too often
  memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT);
  return mem_limit;
}

jlong CgroupSubsystem::limit_from_str(char* limit_str) {
  if (limit_str == NULL) {
    return OSCONTAINER_ERROR;
  }
  // An unlimited value in cgroups is represented by the literal string 'max'
  // for some controllers, for example the pids controller.
  if (strcmp("max", limit_str) == 0) {
    os::free(limit_str);
    return (jlong)-1;
  }
  julong limit;
  if (sscanf(limit_str, JULONG_FORMAT, &limit) != 1) {
    os::free(limit_str);
    return OSCONTAINER_ERROR;
  }
  os::free(limit_str);
  return (jlong)limit;
}
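
// Illustrative behaviour of limit_from_str, following directly from the code
// above (the function takes ownership of limit_str and frees it):
//   limit_from_str(os::strdup("max"))         -> -1 (unlimited)
//   limit_from_str(os::strdup("2147483648"))  -> 2147483648
//   limit_from_str(NULL)                      -> OSCONTAINER_ERROR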