GitHub Repository: aos/grafana-agent
Path: blob/main/pkg/operator/resources_metrics.go
package operator

import (
	"context"
	"fmt"
	"strings"

	gragent "github.com/grafana/agent/pkg/operator/apis/monitoring/v1alpha1"
	prom_operator "github.com/prometheus-operator/prometheus-operator/pkg/operator"
	apps_v1 "k8s.io/api/apps/v1"
	core_v1 "k8s.io/api/core/v1"
	k8s_errors "k8s.io/apimachinery/pkg/api/errors"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/pointer"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	defaultPortName = "http-metrics"
)

var (
	minShards   int32 = 1
	minReplicas int32 = 1

	managedByOperatorLabel      = "app.kubernetes.io/managed-by"
	managedByOperatorLabelValue = "grafana-agent-operator"
	managedByOperatorLabels     = map[string]string{
		managedByOperatorLabel: managedByOperatorLabelValue,
	}

	shardLabelName     = "operator.agent.grafana.com/shard"
	agentNameLabelName = "operator.agent.grafana.com/name"
	agentTypeLabel     = "operator.agent.grafana.com/type"

	probeTimeoutSeconds int32 = 3
)

// deleteManagedResource deletes a managed resource. Ignores resources that are
// not managed.
func deleteManagedResource(ctx context.Context, cli client.Client, key client.ObjectKey, o client.Object) error {
	err := cli.Get(ctx, key, o)
	if k8s_errors.IsNotFound(err) || !isManagedResource(o) {
		return nil
	} else if err != nil {
		return fmt.Errorf("failed to find stale resource %s: %w", key, err)
	}
	err = cli.Delete(ctx, o)
	if err != nil {
		return fmt.Errorf("failed to delete stale resource %s: %w", key, err)
	}
	return nil
}

// isManagedResource returns true if the given object has a managed-by
// grafana-agent-operator label.
func isManagedResource(obj client.Object) bool {
	labelValue := obj.GetLabels()[managedByOperatorLabel]
	return labelValue == managedByOperatorLabelValue
}

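// governingServiceName returns the name of the governing (headless) Service
// created for the GrafanaAgent resource with the given name.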
func governingServiceName(agentName string) string {
	return fmt.Sprintf("%s-operated", agentName)
}

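// generateMetricsStatefulSetService returns the governing headless Service for
// the metrics StatefulSets of a GrafanaAgent deployment. The Service is owned
// by the GrafanaAgent resource so it is cleaned up together with it.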
func generateMetricsStatefulSetService(cfg *Config, d gragent.Deployment) *core_v1.Service {
	d = *d.DeepCopy()

	if d.Agent.Spec.PortName == "" {
		d.Agent.Spec.PortName = defaultPortName
	}

	return &core_v1.Service{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      governingServiceName(d.Agent.Name),
			Namespace: d.Agent.Namespace,
			OwnerReferences: []meta_v1.OwnerReference{{
				APIVersion:         d.Agent.APIVersion,
				Kind:               d.Agent.Kind,
				Name:               d.Agent.Name,
				BlockOwnerDeletion: pointer.Bool(true),
				Controller:         pointer.Bool(true),
				UID:                d.Agent.UID,
			}},
			Labels: cfg.Labels.Merge(map[string]string{
				managedByOperatorLabel: managedByOperatorLabelValue,
				agentNameLabelName:     d.Agent.Name,
				"operated-agent":       "true",
			}),
		},
		Spec: core_v1.ServiceSpec{
			ClusterIP: "None",
			Ports: []core_v1.ServicePort{{
				Name:       d.Agent.Spec.PortName,
				Port:       8080,
				TargetPort: intstr.FromString(d.Agent.Spec.PortName),
			}},
			Selector: map[string]string{
				"app.kubernetes.io/name": "grafana-agent",
				agentNameLabelName:       d.Agent.Name,
			},
		},
	}
}

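// generateMetricsStatefulSet generates the StatefulSet for a single metrics
// shard of a GrafanaAgent deployment. When the storage spec requests
// persistent storage, a volume claim template for the WAL is attached.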
func generateMetricsStatefulSet(
	cfg *Config,
	name string,
	d gragent.Deployment,
	shard int32,
) (*apps_v1.StatefulSet, error) {

	d = *d.DeepCopy()

	opts := metricsPodTemplateOptions(name, d, shard)
	templateSpec, selector, err := generatePodTemplate(cfg, d.Agent.Name, d, opts)
	if err != nil {
		return nil, err
	}

	spec := &apps_v1.StatefulSetSpec{
		ServiceName:         governingServiceName(d.Agent.Name),
		Replicas:            d.Agent.Spec.Metrics.Replicas,
		PodManagementPolicy: apps_v1.ParallelPodManagement,
		UpdateStrategy: apps_v1.StatefulSetUpdateStrategy{
			Type: apps_v1.RollingUpdateStatefulSetStrategyType,
		},
		Selector: selector,
		Template: templateSpec,
	}

	ss := &apps_v1.StatefulSet{
		ObjectMeta: metadataFromPodTemplate(name, d, templateSpec),
		Spec:       *spec,
	}

	if deploymentUseVolumeClaimTemplate(&d) {
		storageSpec := d.Agent.Spec.Storage
		pvcTemplate := prom_operator.MakeVolumeClaimTemplate(storageSpec.VolumeClaimTemplate)
		if pvcTemplate.Name == "" {
			pvcTemplate.Name = fmt.Sprintf("%s-wal", name)
		}
		if storageSpec.VolumeClaimTemplate.Spec.AccessModes == nil {
			pvcTemplate.Spec.AccessModes = []core_v1.PersistentVolumeAccessMode{core_v1.ReadWriteOnce}
		} else {
			pvcTemplate.Spec.AccessModes = storageSpec.VolumeClaimTemplate.Spec.AccessModes
		}
		pvcTemplate.Spec.Resources = storageSpec.VolumeClaimTemplate.Spec.Resources
		pvcTemplate.Spec.Selector = storageSpec.VolumeClaimTemplate.Spec.Selector
		ss.Spec.VolumeClaimTemplates = append(ss.Spec.VolumeClaimTemplates, *pvcTemplate)
	}

	return ss, nil
}

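// deploymentUseVolumeClaimTemplate returns true when the deployment has
// storage configured that is not backed by an EmptyDir, meaning the WAL should
// use a PersistentVolumeClaim template.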
func deploymentUseVolumeClaimTemplate(d *gragent.Deployment) bool {
	return d.Agent.Spec.Storage != nil && d.Agent.Spec.Storage.EmptyDir == nil
}

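// metricsPodTemplateOptions returns the pod template options for a metrics
// shard: shard-specific selector labels, the WAL volume mount, and the SHARD
// and SHARDS environment variables that expose the shard assignment to the
// pod.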
func metricsPodTemplateOptions(name string, d gragent.Deployment, shard int32) podTemplateOptions {
	shards := minShards
	if reqShards := d.Agent.Spec.Metrics.Shards; reqShards != nil && *reqShards > 1 {
		shards = *reqShards
	}

	walVolumeName := fmt.Sprintf("%s-wal", name)
	if d.Agent.Spec.Storage != nil {
		if d.Agent.Spec.Storage.VolumeClaimTemplate.Name != "" {
			walVolumeName = d.Agent.Spec.Storage.VolumeClaimTemplate.Name
		}
	}

	opts := podTemplateOptions{
		ExtraSelectorLabels: map[string]string{
			shardLabelName: fmt.Sprintf("%d", shard),
			agentTypeLabel: "metrics",
		},
		ExtraVolumeMounts: []core_v1.VolumeMount{{
			Name:      walVolumeName,
			ReadOnly:  false,
			MountPath: "/var/lib/grafana-agent/data",
		}},
		ExtraEnvVars: []core_v1.EnvVar{
			{
				Name:  "SHARD",
				Value: fmt.Sprintf("%d", shard),
			},
			{
				Name:  "SHARDS",
				Value: fmt.Sprintf("%d", shards),
			},
		},
	}

	// Add volumes if there's no PVC template
	storageSpec := d.Agent.Spec.Storage
	if storageSpec == nil {
		opts.ExtraVolumes = append(opts.ExtraVolumes, core_v1.Volume{
			Name: walVolumeName,
			VolumeSource: core_v1.VolumeSource{
				EmptyDir: &core_v1.EmptyDirVolumeSource{},
			},
		})
	} else if storageSpec.EmptyDir != nil {
		emptyDir := storageSpec.EmptyDir
		opts.ExtraVolumes = append(opts.ExtraVolumes, core_v1.Volume{
			Name: walVolumeName,
			VolumeSource: core_v1.VolumeSource{
				EmptyDir: emptyDir,
			},
		})
	}

	return opts
}

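// metadataFromPodTemplate builds the ObjectMeta for a generated StatefulSet,
// reusing the pod template's labels, filtering annotations through
// prepareAnnotations, and marking the GrafanaAgent resource as the controlling
// owner.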
func metadataFromPodTemplate(name string, d gragent.Deployment, tmpl core_v1.PodTemplateSpec) meta_v1.ObjectMeta {
	return meta_v1.ObjectMeta{
		Name:        name,
		Namespace:   d.Agent.Namespace,
		Labels:      tmpl.Labels,
		Annotations: prepareAnnotations(d.Agent.Annotations),
		OwnerReferences: []meta_v1.OwnerReference{{
			APIVersion:         d.Agent.APIVersion,
			Kind:               d.Agent.Kind,
			BlockOwnerDeletion: pointer.Bool(true),
			Controller:         pointer.Bool(true),
			Name:               d.Agent.Name,
			UID:                d.Agent.UID,
		}},
	}
}

// prepareAnnotations returns annotations that are safe to be added to a
// generated resource.
func prepareAnnotations(source map[string]string) map[string]string {
	res := make(map[string]string, len(source))
	for k, v := range source {
		// Ignore kubectl annotations so kubectl doesn't prune the resource we
		// generated.
		if !strings.HasPrefix(k, "kubectl.kubernetes.io/") {
			res[k] = v
		}
	}
	return res
}