GitHub Repository: gitpod-io/gitpod
Path: blob/main/components/ws-daemon/pkg/diskguard/guard.go

// Copyright (c) 2020 Gitpod GmbH. All rights reserved.
// Licensed under the GNU Affero General Public License (AGPL).
// See License.AGPL.txt in the project root for license information.

package diskguard

import (
	"context"
	"syscall"
	"time"

	"golang.org/x/xerrors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"

	"github.com/gitpod-io/gitpod/common-go/log"
	"github.com/gitpod-io/gitpod/common-go/util"
)

const (
	// LabelDiskPressure is set on a node if any of the guarded disks have
	// too little space available.
	LabelDiskPressure = "gitpod.io/diskPressure"
)
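
// Nodes currently under disk pressure carry this label with the value "true"
// (see setLabel below), so they can be listed with a label selector, e.g.
// (illustrative):
//
//	kubectl get nodes -l gitpod.io/diskPressure=true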

// Config configures the disk guard
type Config struct {
	Enabled   bool             `json:"enabled"`
	Interval  util.Duration    `json:"interval"`
	Locations []LocationConfig `json:"locations"`
}

// LocationConfig configures a single guarded location on disk.
type LocationConfig struct {
	Path          string `json:"path"`
	MinBytesAvail uint64 `json:"minBytesAvail"`
}
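
// An illustrative configuration fragment for this package might look as follows.
// This is a sketch only: the field names come from the struct tags above, but the
// interval format depends on util.Duration's JSON unmarshalling (assumed here to
// accept Go duration strings), and the example path and size are made up:
//
//	{
//	    "enabled": true,
//	    "interval": "5m",
//	    "locations": [
//	        {"path": "/mnt/disks/ssd0", "minBytesAvail": 21474836480}
//	    ]
//	}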

// FromConfig produces a set of disk space guards from the configuration
func FromConfig(cfg Config, clientset kubernetes.Interface, nodeName string) []*Guard {
	if !cfg.Enabled {
		return nil
	}

	res := make([]*Guard, len(cfg.Locations))
	for i, loc := range cfg.Locations {
		res[i] = &Guard{
			Path:          loc.Path,
			MinBytesAvail: loc.MinBytesAvail,
			Interval:      time.Duration(cfg.Interval),
			Clientset:     clientset,
			Nodename:      nodeName,
		}
	}

	return res
}
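
// A minimal usage sketch (illustrative; the actual wiring lives in ws-daemon's
// startup code): because Start blocks indefinitely, each guard is typically run
// in its own goroutine.
//
//	for _, g := range diskguard.FromConfig(cfg, clientset, nodeName) {
//		go g.Start()
//	}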

// Guard regularly checks how much free space is left on a path/disk.
// If the available space drops below MinBytesAvail, we label the node
// accordingly - and remove the label once that condition subsides.
type Guard struct {
	Path          string
	MinBytesAvail uint64
	Interval      time.Duration
	Clientset     kubernetes.Interface
	Nodename      string
}

// Start starts the disk guard
func (g *Guard) Start() {
	t := time.NewTicker(g.Interval)
	defer t.Stop()
	for {
		bvail, err := getAvailableBytes(g.Path)
		if err != nil {
			log.WithError(err).WithField("path", g.Path).Error("cannot check how much space is available")
			// Wait for the next tick before retrying so that a persistent
			// error does not turn this into a busy loop.
			<-t.C
			continue
		}
		log.WithField("bvail", bvail).WithField("minBytesAvail", g.MinBytesAvail).Debug("checked for available disk space")

		addLabel := bvail <= g.MinBytesAvail
		err = g.setLabel(LabelDiskPressure, addLabel)
		if err != nil {
			log.WithError(err).Error("cannot update node label")
		}

		<-t.C
	}
}

// setLabel adds or removes the label from the node
func (g *Guard) setLabel(label string, add bool) error {
	// RetryOnConflict re-runs the closure if the update fails because another
	// client modified the node concurrently; since the closure re-fetches the
	// node, each attempt works on fresh data.
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		node, err := g.Clientset.CoreV1().Nodes().Get(ctx, g.Nodename, metav1.GetOptions{})
		if err != nil {
			return err
		}
		_, hasLabel := node.Labels[label]
		if add == hasLabel {
			// the label is already in the desired state - nothing to do
			return nil
		}

		if add {
			node.Labels[label] = "true"
			log.WithField("node", g.Nodename).WithField("label", label).Info("adding label to node")
		} else {
			delete(node.Labels, label)
			log.WithField("node", g.Nodename).WithField("label", label).Info("removing label from node")
		}
		_, err = g.Clientset.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
		if err != nil {
			return err
		}

		return nil
	})
}
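
// A minimal test sketch for setLabel (an assumption, not part of this package's
// test suite) usable from a package-internal test; it assumes the extra imports
// corev1 "k8s.io/api/core/v1" and fake "k8s.io/client-go/kubernetes/fake":
//
//	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", Labels: map[string]string{}}}
//	client := fake.NewSimpleClientset(node)
//	g := &Guard{Clientset: client, Nodename: "node-1"}
//	if err := g.setLabel(LabelDiskPressure, true); err != nil {
//		t.Fatal(err)
//	}
//	n, _ := client.CoreV1().Nodes().Get(context.Background(), "node-1", metav1.GetOptions{})
//	if n.Labels[LabelDiskPressure] != "true" {
//		t.Errorf("expected disk pressure label to be set")
//	}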

// getAvailableBytes returns the number of bytes available to unprivileged users
// on the filesystem that contains path.
func getAvailableBytes(path string) (bvail uint64, err error) {
	var stat syscall.Statfs_t
	err = syscall.Statfs(path, &stat)
	if err != nil {
		return 0, xerrors.Errorf("cannot stat %s: %w", path, err)
	}

	// Bavail counts only the blocks available to unprivileged users,
	// i.e. it excludes blocks reserved for the superuser (unlike Bfree).
	bvail = stat.Bavail * uint64(stat.Bsize)
	return
}
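
// For a quick host-side cross-check of the value computed above, GNU coreutils'
// df reports an equivalent "available" figure (illustrative command, assuming GNU df):
//
//	df -B1 --output=avail /path/to/guarded/location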