GitHub Repository: gitpod-io/gitpod
Path: blob/main/components/ws-daemon/pkg/cgroup/plugin_iolimit_v2.go
// Copyright (c) 2022 Gitpod GmbH. All rights reserved.
// Licensed under the GNU Affero General Public License (AGPL).
// See License.AGPL.txt in the project root for license information.

package cgroup

import (
	"context"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"

	v2 "github.com/containerd/cgroups/v2"

	"github.com/gitpod-io/gitpod/common-go/log"
)

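// IOLimiterV2 is a cgroup plugin that throttles disk I/O for a workspace
// via the cgroup v2 "io" controller (io.max).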
type IOLimiterV2 struct {
	// limits are the cgroup v2 resources written to the workspace cgroup
	limits *v2.Resources

	// cond wakes the Apply goroutine whenever limits change
	cond *sync.Cond

	// devices holds the major:minor numbers of the throttled block devices
	devices []string
}

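// NewIOLimiterV2 produces an IO limiter that applies the given rates to all
// block devices discovered under /sys/block.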
func NewIOLimiterV2(writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs int64) (*IOLimiterV2, error) {
	devices := buildDevices()
	log.WithField("devices", devices).Debug("io limiting devices")
	return &IOLimiterV2{
		limits: buildV2Limits(writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs, devices),

		cond:    sync.NewCond(&sync.Mutex{}),
		devices: devices,
	}, nil
}

func (c *IOLimiterV2) Name() string  { return "iolimiter-v2" }
func (c *IOLimiterV2) Type() Version { return Version2 }

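// Apply writes the current limits to the workspace cgroup and keeps them up
// to date: one goroutine forwards Update broadcasts, another rewrites the
// limits on every update and resets them to "unlimited" on shutdown.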
func (c *IOLimiterV2) Apply(ctx context.Context, opts *PluginOptions) error {
	update := make(chan struct{}, 1)
	go func() {
		// TODO(cw): this Go-routine will leak per workspace, until we update config or restart ws-daemon
		defer close(update)

		for {
			c.cond.L.Lock()
			c.cond.Wait()
			c.cond.L.Unlock()

			if ctx.Err() != nil {
				return
			}

			update <- struct{}{}
		}
	}()

	go func() {
		log.WithFields(log.OWI("", "", opts.InstanceId)).WithField("cgroupPath", opts.CgroupPath).Debug("starting io limiting")

		// creating the manager also writes c.limits to the cgroup
		_, err := v2.NewManager(opts.BasePath, filepath.Join("/", opts.CgroupPath), c.limits)
		if err != nil {
			log.WithError(err).WithFields(log.OWI("", "", opts.InstanceId)).WithField("basePath", opts.BasePath).WithField("cgroupPath", opts.CgroupPath).WithField("limits", c.limits).Warn("cannot write IO limits")
		}

		for {
			select {
			case <-update:
				_, err := v2.NewManager(opts.BasePath, filepath.Join("/", opts.CgroupPath), c.limits)
				if err != nil {
					log.WithError(err).WithFields(log.OWI("", "", opts.InstanceId)).WithField("basePath", opts.BasePath).WithField("cgroupPath", opts.CgroupPath).WithField("limits", c.limits).Error("cannot write IO limits")
				}
			case <-ctx.Done():
				// Prior to shutting down, we need to reset the IO limits to ensure we don't have
				// processes stuck in the uninterruptible "D" (disk sleep) state. This would prevent the
				// workspace pod from shutting down.
				_, err := v2.NewManager(opts.BasePath, filepath.Join("/", opts.CgroupPath), &v2.Resources{})
				if err != nil {
					log.WithError(err).WithFields(log.OWI("", "", opts.InstanceId)).WithField("cgroupPath", opts.CgroupPath).Error("cannot write IO limits")
				}
				log.WithFields(log.OWI("", "", opts.InstanceId)).WithField("cgroupPath", opts.CgroupPath).Debug("stopping io limiting")
				return
			}
		}
	}()

	return nil
}

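// Update rebuilds the limits from the given rates and wakes all Apply
// goroutines so the new limits get written to their cgroups.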
func (c *IOLimiterV2) Update(writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs int64) {
	c.cond.L.Lock()
	defer c.cond.L.Unlock()

	c.limits = buildV2Limits(writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs, c.devices)
	log.WithField("limits", c.limits.IO).Info("updating I/O cgroups v2 limits")

	c.cond.Broadcast()
}

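// buildV2Limits translates the configured rates into one io.max entry per
// device and limit type; non-positive rates are skipped, which leaves them
// unlimited. In the cgroup filesystem this corresponds to io.max lines such
// as (illustrative values):
//
//	8:0 rbps=1048576 wbps=1048576 riops=100 wiops=100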
func buildV2Limits(writeBytesPerSecond, readBytesPerSecond, writeIOPs, readIOPs int64, devices []string) *v2.Resources {
	resources := &v2.Resources{
		IO: &v2.IO{},
	}

	for _, device := range devices {
		majmin := strings.Split(device, ":")
		if len(majmin) != 2 {
			log.WithField("device", device).Error("invalid device")
			continue
		}

		major, err := strconv.ParseInt(majmin[0], 10, 64)
		if err != nil {
			log.WithError(err).Error("invalid major device")
			continue
		}

		minor, err := strconv.ParseInt(majmin[1], 10, 64)
		if err != nil {
			log.WithError(err).Error("invalid minor device")
			continue
		}

		if readBytesPerSecond > 0 {
			resources.IO.Max = append(resources.IO.Max, v2.Entry{Major: major, Minor: minor, Type: v2.ReadBPS, Rate: uint64(readBytesPerSecond)})
		}

		if readIOPs > 0 {
			resources.IO.Max = append(resources.IO.Max, v2.Entry{Major: major, Minor: minor, Type: v2.ReadIOPS, Rate: uint64(readIOPs)})
		}

		if writeBytesPerSecond > 0 {
			resources.IO.Max = append(resources.IO.Max, v2.Entry{Major: major, Minor: minor, Type: v2.WriteBPS, Rate: uint64(writeBytesPerSecond)})
		}

		if writeIOPs > 0 {
			resources.IO.Max = append(resources.IO.Max, v2.Entry{Major: major, Minor: minor, Type: v2.WriteIOPS, Rate: uint64(writeIOPs)})
		}
	}

	log.WithField("resources", resources).Debug("cgroups v2 limits")

	return resources
}

// TODO: enable custom configuration
var blockDevices = []string{"dm*", "sd*", "md*", "nvme*"}

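// buildDevices returns the major:minor numbers of all block devices matching
// the blockDevices patterns, read from /sys/block/<device>/dev.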
func buildDevices() []string {
	var devices []string
	for _, wc := range blockDevices {
		matches, err := filepath.Glob(filepath.Join("/sys/block", wc, "dev"))
		if err != nil {
			log.WithError(err).WithField("wc", wc).Warn("cannot glob devices")
			continue
		}

		for _, dev := range matches {
			fc, err := os.ReadFile(dev)
			if err != nil {
				log.WithField("dev", dev).WithError(err).Error("cannot read device file")
				// skip the device rather than appending an empty entry
				continue
			}
			devices = append(devices, strings.TrimSpace(string(fc)))
		}
	}

	return devices
}

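// Usage sketch (not part of the original file): how a caller might wire the
// limiter up. The PluginOptions field names are taken from their use in Apply
// above; the values, ctx, and workspaceInstanceID are illustrative
// assumptions, not Gitpod's actual wiring.
//
//	limiter, err := NewIOLimiterV2(100*1024*1024, 100*1024*1024, 1000, 1000)
//	if err != nil {
//		log.WithError(err).Fatal("cannot create IO limiter")
//	}
//	if err := limiter.Apply(ctx, &PluginOptions{
//		BasePath:   "/sys/fs/cgroup",          // assumed cgroup v2 mount point
//		CgroupPath: "kubepods/workspace.scope", // hypothetical workspace cgroup
//		InstanceId: workspaceInstanceID,        // hypothetical variable
//	}); err != nil {
//		log.WithError(err).Error("cannot apply IO limits")
//	}
//
//	// later, tighten the limits at runtime:
//	limiter.Update(50*1024*1024, 50*1024*1024, 500, 500)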