GitHub Repository: gitpod-io/gitpod
Path: blob/main/components/ws-daemon/pkg/iws/iws.go

// Copyright (c) 2020 Gitpod GmbH. All rights reserved.
// Licensed under the GNU Affero General Public License (AGPL).
// See License.AGPL.txt in the project root for license information.

package iws

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"math"
	"net"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
	"golang.org/x/time/rate"
	"golang.org/x/xerrors"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	linuxproc "github.com/c9s/goprocinfo/linux"
	"github.com/gitpod-io/gitpod/common-go/cgroups"
	v2 "github.com/gitpod-io/gitpod/common-go/cgroups/v2"
	"github.com/gitpod-io/gitpod/common-go/log"
	"github.com/gitpod-io/gitpod/common-go/tracing"
	wsinit "github.com/gitpod-io/gitpod/content-service/pkg/initializer"
	"github.com/gitpod-io/gitpod/ws-daemon/api"
	"github.com/gitpod-io/gitpod/ws-daemon/pkg/container"
	"github.com/gitpod-io/gitpod/ws-daemon/pkg/internal/session"
	nsi "github.com/gitpod-io/gitpod/ws-daemon/pkg/nsinsider"
)

//
// BEWARE
// The code in this file, i.e. everything offered by InWorkspaceHelperServer, is accessible without further protection
// by user-reachable code. There's no server or ws-man in front of this interface. Keep this interface minimal, and
// be defensive!
//
// IWS services are made available to workspaces through the workspace dispatch.
// When a new workspace is added, the dispatch listener creates a new gRPC server socket,
// and tears it down when the workspace is removed (i.e. the workspace context is canceled).
//

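// A hypothetical in-workspace client could reach this service roughly as in the
// sketch below; the socket path and dial options are illustrative assumptions,
// not something this file defines:
//
//	conn, err := grpc.Dial("unix:///path/to/daemon.sock", grpc.WithInsecure())
//	if err != nil {
//		// handle dial error
//	}
//	defer conn.Close()
//	client := api.NewInWorkspaceServiceClient(conn)
//	info, err := client.WorkspaceInfo(ctx, &api.WorkspaceInfoRequest{})
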
var (
	// These *must* be kept in sync with moby/moby and kubernetes/kubernetes.
	// https://github.com/moby/moby/blob/master/oci/defaults.go#L116-L134
	// https://github.com/kubernetes/kubernetes/blob/master/pkg/securitycontext/util.go#L200-L218
	//
	// Compared to the origin of this list, we imply the /proc prefix.
	// That means we don't list the prefix, but we also only list files/dirs here
	// which reside in /proc (e.g. not /sys/firmware).
	procDefaultMaskedPaths = []string{
		"acpi",
		"kcore",
		"keys",
		"latency_stats",
		"timer_list",
		"timer_stats",
		"sched_debug",
		"scsi",
	}
	procDefaultReadonlyPaths = []string{
		"asound",
		"bus",
		"fs",
		"irq",
		"sys",
		"sysrq-trigger",
	}
	sysfsDefaultMaskedPaths = []string{
		"firmware",
	}
)

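// For example, "kcore" in procDefaultMaskedPaths causes MountProc to call
// maskPath(filepath.Join(stagingDir, "kcore")), bind-mounting /dev/null over
// what becomes /proc/kcore in the workspace.
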
// ServeWorkspace establishes the IWS server for a workspace
func ServeWorkspace(uidmapper *Uidmapper, fsshift api.FSShiftMethod, cgroupMountPoint string, workspaceCIDR string) func(ctx context.Context, ws *session.Workspace) error {
	return func(ctx context.Context, ws *session.Workspace) (err error) {
		span, _ := opentracing.StartSpanFromContext(ctx, "iws.ServeWorkspace")
		defer tracing.FinishSpan(span, &err)
		if _, running := ws.NonPersistentAttrs[session.AttrWorkspaceServer]; running {
			span.SetTag("alreadyRunning", true)
			return nil
		}

		iws := &InWorkspaceServiceServer{
			Uidmapper:            uidmapper,
			Session:              ws,
			FSShift:              fsshift,
			CGroupMountPoint:     cgroupMountPoint,
			WorkspaceCIDR:        workspaceCIDR,
			prepareForUserNSCond: sync.NewCond(&sync.Mutex{}),
		}
		err = iws.Start()
		if err != nil {
			return xerrors.Errorf("cannot start in-workspace-helper server: %w", err)
		}

		log.WithFields(ws.OWI()).Debug("established IWS server")
		ws.NonPersistentAttrs[session.AttrWorkspaceServer] = iws.Stop

		return nil
	}
}

// StopServingWorkspace stops a previously started workspace server
func StopServingWorkspace(ctx context.Context, ws *session.Workspace) (err error) {
	//nolint:ineffassign
	span, _ := opentracing.StartSpanFromContext(ctx, "iws.StopServingWorkspace")
	defer tracing.FinishSpan(span, &err)

	rawStop, ok := ws.NonPersistentAttrs[session.AttrWorkspaceServer]
	if !ok {
		return nil
	}

	stopFn, ok := rawStop.(func())
	if !ok {
		return nil
	}

	stopFn()
	log.WithFields(ws.OWI()).Debug("stopped IWS server")
	return nil
}

// InWorkspaceServiceServer implements the workspace facing backup services
type InWorkspaceServiceServer struct {
	Uidmapper        *Uidmapper
	Session          *session.Workspace
	FSShift          api.FSShiftMethod
	CGroupMountPoint string

	WorkspaceCIDR string

	srv  *grpc.Server
	sckt io.Closer

	// prepareForUserNSCond allows synchronizing around the PrepareForUserNS method.
	// !!! ONLY USE FOR WipingTeardown() !!!
	prepareForUserNSCond *sync.Cond

	api.UnimplementedInWorkspaceServiceServer
}

// Start creates the unix socket the IWS server listens on, and starts the gRPC server on it
func (wbs *InWorkspaceServiceServer) Start() error {
	// It's possible that the kubelet hasn't created the ServiceLocDaemon directory yet.
	err := os.MkdirAll(wbs.Session.ServiceLocDaemon, 0755)
	if err != nil && !os.IsExist(err) {
		return xerrors.Errorf("cannot create ServiceLocDaemon: %w", err)
	}

	socketFN := filepath.Join(wbs.Session.ServiceLocDaemon, "daemon.sock")
	if _, err := os.Stat(socketFN); err == nil {
		// A former ws-daemon instance left its socket lying around.
		// Let's clean up after it.
		_ = os.Remove(socketFN)
	}
	sckt, err := net.Listen("unix", socketFN)
	if err != nil {
		return xerrors.Errorf("cannot create IWS socket: %w", err)
	}

	err = os.Chmod(socketFN, 0777)
	if err != nil {
		return xerrors.Errorf("cannot chmod IWS socket: %w", err)
	}

	wbs.sckt = sckt

	limits := ratelimitingInterceptor{
		"/iws.InWorkspaceService/PrepareForUserNS": ratelimit{
			UseOnce: true,
		},
		"/iws.InWorkspaceService/EvacuateCGroup": ratelimit{
			UseOnce: true,
		},
		"/iws.InWorkspaceService/WriteIDMapping": ratelimit{
			Limiter: rate.NewLimiter(rate.Every(2500*time.Millisecond), 4),
		},
		"/iws.InWorkspaceService/Teardown": ratelimit{
			UseOnce: true,
		},
		"/iws.InWorkspaceService/WipingTeardown": ratelimit{
			Limiter: rate.NewLimiter(rate.Every(2500*time.Millisecond), 4),
		},
		"/iws.InWorkspaceService/WorkspaceInfo": ratelimit{
			Limiter: rate.NewLimiter(rate.Every(1500*time.Millisecond), 4),
		},
	}
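	// Illustration of the limits above: rate.Every(2500*time.Millisecond) with a
	// burst of 4 admits up to four calls back to back, then sustains one call per
	// 2.5s. UseOnce entries (PrepareForUserNS, EvacuateCGroup, Teardown) are
	// rejected with ResourceExhausted after their first invocation, as enforced
	// by UnaryInterceptor below.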

	wbs.srv = grpc.NewServer(grpc.ChainUnaryInterceptor(limits.UnaryInterceptor()))
	api.RegisterInWorkspaceServiceServer(wbs.srv, wbs)
	go func() {
		err := wbs.srv.Serve(sckt)
		if err != nil {
			log.WithError(err).WithFields(wbs.Session.OWI()).Error("IWS server failed")
		}
	}()
	return nil
}

// Stop stops the service and closes the socket
func (wbs *InWorkspaceServiceServer) Stop() {
	defer wbs.sckt.Close()
	wbs.srv.GracefulStop()
}

// PrepareForUserNS mounts the workspace's shiftfs mark
func (wbs *InWorkspaceServiceServer) PrepareForUserNS(ctx context.Context, req *api.PrepareForUserNSRequest) (*api.PrepareForUserNSResponse, error) {
	wbs.prepareForUserNSCond.L.Lock()
	defer wbs.prepareForUserNSCond.L.Unlock()

	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("PrepareForUserNS: cannot find workspace container")
		return nil, status.Errorf(codes.Internal, "cannot find workspace container")
	}

	rootfs, err := rt.ContainerRootfs(ctx, wscontainerID, container.OptsContainerRootfs{Unmapped: true})
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("PrepareForUserNS: cannot find workspace rootfs")
		return nil, status.Errorf(codes.Internal, "cannot find workspace rootfs")
	}

	containerPID, err := rt.ContainerPID(ctx, wscontainerID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("PrepareForUserNS: cannot find workspace container PID")
		return nil, status.Errorf(codes.Internal, "cannot find workspace container PID")
	}

	log.WithField("type", wbs.FSShift).WithFields(wbs.Session.OWI()).Debug("FSShift")

	// user namespace support for FUSE landed in Linux 4.18:
	// - http://lkml.iu.edu/hypermail/linux/kernel/1806.0/04385.html
	// Development leading up to this point:
	// - https://lists.linuxcontainers.org/pipermail/lxc-devel/2014-July/009797.html
	// - https://lists.linuxcontainers.org/pipermail/lxc-users/2014-October/007948.html
	err = nsi.Nsinsider(wbs.Session.InstanceID, int(containerPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "prepare-dev", "--uid", strconv.Itoa(wsinit.GitpodUID), "--gid", strconv.Itoa(wsinit.GitpodGID))
	})
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("PrepareForUserNS: cannot prepare /dev")
		return nil, status.Errorf(codes.Internal, "cannot prepare /dev")
	}

	// create overlayfs directories to be used in ring2 as rootfs and also upper layer to track changes in the workspace
	_ = os.MkdirAll(filepath.Join(wbs.Session.ServiceLocDaemon, "upper"), 0755)
	_ = os.MkdirAll(filepath.Join(wbs.Session.ServiceLocDaemon, "work"), 0755)
	_ = os.MkdirAll(filepath.Join(wbs.Session.ServiceLocDaemon, "mark"), 0755)

	mountpoint := filepath.Join(wbs.Session.ServiceLocNode, "mark")

	// We cannot use the nsenter syscall here because mount namespaces affect the whole process, not just the current thread.
	// That's why we resort to exec'ing "nsenter ... mount ...".
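	//
	// For illustration only (an assumed sketch of the in-process alternative, not
	// code that runs here), entering the mount namespace directly would be:
	//
	//	runtime.LockOSThread()
	//	defer runtime.UnlockOSThread()
	//	f, _ := os.Open("/proc/1/ns/mnt")
	//	err := unix.Setns(int(f.Fd()), unix.CLONE_NEWNS) // fails (EINVAL) in a multithreaded process like the Go runtime
	//
	// hence the work is delegated to the separate nsinsider binary.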
	err = nsi.Nsinsider(wbs.Session.InstanceID, int(1), func(c *exec.Cmd) {
		c.Args = append(c.Args, "make-shared", "--target", "/")
	})
	if err != nil {
		log.WithField("containerPID", containerPID).WithFields(wbs.Session.OWI()).WithError(err).Error("cannot make container's rootfs shared")
		return nil, status.Errorf(codes.Internal, "cannot make container's rootfs shared")
	}

	err = nsi.Nsinsider(wbs.Session.InstanceID, int(1), func(c *exec.Cmd) {
		c.Args = append(c.Args, "mount-shiftfs-mark", "--source", rootfs, "--target", mountpoint)
	})
	if err != nil {
		log.WithField("rootfs", rootfs).WithFields(wbs.Session.OWI()).WithField("mountpoint", mountpoint).WithError(err).Error("cannot mount shiftfs mark")
		return nil, status.Errorf(codes.Internal, "cannot mount shiftfs mark")
	}

	if err := wbs.createWorkspaceCgroup(ctx, wscontainerID); err != nil {
		return nil, err
	}

	return &api.PrepareForUserNSResponse{
		FsShift: api.FSShiftMethod_SHIFTFS,
	}, nil
}

func (wbs *InWorkspaceServiceServer) createWorkspaceCgroup(ctx context.Context, wscontainerID container.ID) error {
	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}

	unified, err := cgroups.IsUnifiedCgroupSetup()
	if err != nil {
		// log error and do not expose it to the user
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("could not determine cgroup setup")
		return status.Errorf(codes.FailedPrecondition, "could not determine cgroup setup")
	}

	if !unified {
		return nil
	}

	cgroupBase, err := rt.ContainerCGroupPath(ctx, wscontainerID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("cannot find workspace container CGroup path")
		return status.Errorf(codes.NotFound, "cannot find workspace container cgroup")
	}

	err = evacuateToCGroup(ctx, log.WithFields(wbs.Session.OWI()), wbs.CGroupMountPoint, cgroupBase, "workspace")
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("cannot create workspace cgroup")
		return status.Errorf(codes.FailedPrecondition, "cannot create workspace cgroup")
	}

	return nil
}

// SetupPairVeths creates a veth pair connecting the workspace container's network
// namespace with the network namespace of the process identified by req.Pid.
func (wbs *InWorkspaceServiceServer) SetupPairVeths(ctx context.Context, req *api.SetupPairVethsRequest) (*api.SetupPairVethsResponse, error) {
	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("SetupPairVeths: cannot find workspace container")
		return nil, status.Errorf(codes.Internal, "cannot find workspace container")
	}

	containerPID, err := rt.ContainerPID(ctx, wscontainerID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("SetupPairVeths: cannot find workspace container PID")
		return nil, status.Errorf(codes.Internal, "cannot set up a pair of veths")
	}

	err = nsi.Nsinsider(wbs.Session.InstanceID, int(containerPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "setup-pair-veths",
			"--target-pid", strconv.Itoa(int(req.Pid)),
			fmt.Sprintf("--workspace-cidr=%v", wbs.WorkspaceCIDR),
		)
	}, nsi.EnterMountNS(true), nsi.EnterPidNS(true), nsi.EnterNetNS(true))
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("SetupPairVeths: cannot set up a pair of veths")
		return nil, status.Errorf(codes.Internal, "cannot set up a pair of veths")
	}

	pid, err := wbs.Uidmapper.findHostPID(containerPID, uint64(req.Pid))
	if err != nil {
		return nil, xerrors.Errorf("cannot map in-container PID %d (container PID: %d): %w", req.Pid, containerPID, err)
	}
	err = nsi.Nsinsider(wbs.Session.InstanceID, int(pid), func(c *exec.Cmd) {
		c.Args = append(c.Args, "setup-peer-veth",
			fmt.Sprintf("--workspace-cidr=%v", wbs.WorkspaceCIDR),
		)
	}, nsi.EnterMountNS(true), nsi.EnterPidNS(true), nsi.EnterNetNS(true))
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("SetupPairVeths: cannot set up the peer veth")

		nsi.Nsinsider(wbs.Session.InstanceID, int(containerPID), func(c *exec.Cmd) {
			c.Args = append(c.Args, "dump-network-info",
				fmt.Sprintf("--tag=%v", "pod"))
		}, nsi.EnterMountNS(true), nsi.EnterPidNS(true), nsi.EnterNetNS(true))

		nsi.Nsinsider(wbs.Session.InstanceID, int(pid), func(c *exec.Cmd) {
			c.Args = append(c.Args, "dump-network-info",
				fmt.Sprintf("--tag=%v", "workspace"))
		}, nsi.EnterMountNS(true), nsi.EnterPidNS(true), nsi.EnterNetNS(true))

		return nil, status.Errorf(codes.Internal, "cannot set up the peer veth")
	}

	err = nsi.Nsinsider(wbs.Session.InstanceID, int(containerPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "enable-ip-forward")
	}, nsi.EnterNetNS(true), nsi.EnterMountNSPid(1))
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("SetupPairVeths: cannot enable IP forwarding")
		return nil, status.Errorf(codes.Internal, "cannot enable IP forwarding")
	}

	return &api.SetupPairVethsResponse{}, nil
}

func evacuateToCGroup(ctx context.Context, log *logrus.Entry, mountpoint, oldGroup, child string) error {
	newGroup := filepath.Join(oldGroup, child)
	oldPath := filepath.Join(mountpoint, oldGroup)
	newPath := filepath.Join(mountpoint, newGroup)

	if err := os.MkdirAll(newPath, 0755); err != nil {
		return err
	}

	// evacuate existing procs from oldGroup to newGroup, so that we can enable all controllers including threaded ones
	cgroupProcsBytes, err := os.ReadFile(filepath.Join(oldPath, "cgroup.procs"))
	if err != nil {
		return err
	}
	for _, pidStr := range strings.Split(string(cgroupProcsBytes), "\n") {
		if pidStr == "" || pidStr == "0" {
			continue
		}
		if err := os.WriteFile(filepath.Join(newPath, "cgroup.procs"), []byte(pidStr), 0644); err != nil {
			log.WithError(err).Warnf("failed to move process %s to cgroup %q", pidStr, newGroup)
		}
	}

	// enable controllers for all subgroups under the oldGroup
	controllerBytes, err := os.ReadFile(filepath.Join(oldPath, "cgroup.controllers"))
	if err != nil {
		return err
	}
	for _, controller := range strings.Fields(string(controllerBytes)) {
		log.Debugf("enabling controller %q", controller)
		if err := os.WriteFile(filepath.Join(oldPath, "cgroup.subtree_control"), []byte("+"+controller), 0644); err != nil {
			log.WithError(err).Warnf("failed to enable controller %q", controller)
		}
	}

	return nil
}
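
// For example (path names illustrative): evacuating "kubepods/podX/containerY"
// into child "workspace" moves every PID listed in .../containerY/cgroup.procs
// into .../containerY/workspace/cgroup.procs, then writes "+cpu", "+memory",
// etc. to .../containerY/cgroup.subtree_control. This satisfies cgroup v2's
// "no internal processes" rule before controllers are enabled for subgroups.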

// MountProc mounts a proc filesystem
func (wbs *InWorkspaceServiceServer) MountProc(ctx context.Context, req *api.MountProcRequest) (resp *api.MountProcResponse, err error) {
	var (
		reqPID  = req.Pid
		procPID uint64
	)
	defer func() {
		if err == nil {
			return
		}

		log.WithError(err).WithField("procPID", procPID).WithField("reqPID", reqPID).WithFields(wbs.Session.OWI()).Error("cannot mount proc")
		if _, ok := status.FromError(err); !ok {
			err = status.Error(codes.Internal, "cannot mount proc")
		}
	}()

	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find workspace container")
	}

	containerPID, err := rt.ContainerPID(ctx, wscontainerID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find container PID for containerID %v: %w", wscontainerID, err)
	}

	procPID, err = wbs.Uidmapper.findHostPID(containerPID, uint64(req.Pid))
	if err != nil {
		return nil, xerrors.Errorf("cannot map in-container PID %d (container PID: %d): %w", req.Pid, containerPID, err)
	}

	nodeStaging, err := os.MkdirTemp("", "proc-staging")
	if err != nil {
		return nil, xerrors.Errorf("cannot prepare proc staging: %w", err)
	}
	err = nsi.Nsinsider(wbs.Session.InstanceID, int(procPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "mount-proc", "--target", nodeStaging)
	}, nsi.EnterMountNS(false), nsi.EnterPidNS(true), nsi.EnterNetNS(true))
	if err != nil {
		return nil, xerrors.Errorf("mount new proc at %s: %w", nodeStaging, err)
	}

	for _, mask := range procDefaultMaskedPaths {
		err = maskPath(filepath.Join(nodeStaging, mask))
		if err != nil {
			return nil, xerrors.Errorf("cannot mask %s: %w", mask, err)
		}
	}
	for _, rdonly := range procDefaultReadonlyPaths {
		err = readonlyPath(filepath.Join(nodeStaging, rdonly))
		if err != nil {
			return nil, xerrors.Errorf("cannot mount readonly %s: %w", rdonly, err)
		}
	}

	err = moveMount(wbs.Session.InstanceID, int(procPID), nodeStaging, req.Target)
	if err != nil {
		return nil, err
	}

	// now that we've moved the mount (which we've done with OPEN_TREE_CLONE), we'll
	// need to unmount the mask mounts again to not leave them dangling.
	var masks []string
	masks = append(masks, procDefaultMaskedPaths...)
	masks = append(masks, procDefaultReadonlyPaths...)
	cleanupMaskedMount(wbs.Session.OWI(), nodeStaging, masks)

	err = nsi.Nsinsider(wbs.Session.InstanceID, int(procPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "disable-ipv6")
	}, nsi.EnterNetNS(true), nsi.EnterMountNSPid(1))
	if err != nil {
		return nil, status.Errorf(codes.Internal, "cannot disable IPv6")
	}

	return &api.MountProcResponse{}, nil
}

// UmountProc unmounts a previously mounted proc filesystem
func (wbs *InWorkspaceServiceServer) UmountProc(ctx context.Context, req *api.UmountProcRequest) (resp *api.UmountProcResponse, err error) {
	var (
		reqPID  = req.Pid
		procPID uint64
	)
	defer func() {
		if err == nil {
			return
		}

		log.WithError(err).WithFields(wbs.Session.OWI()).WithField("procPID", procPID).WithField("reqPID", reqPID).Error("UmountProc failed")
		if _, ok := status.FromError(err); !ok {
			err = status.Error(codes.Internal, "cannot umount proc")
		}
	}()

	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find workspace container")
	}

	containerPID, err := rt.ContainerPID(ctx, wscontainerID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find container PID for containerID %v: %w", wscontainerID, err)
	}

	procPID, err = wbs.Uidmapper.findHostPID(containerPID, uint64(req.Pid))
	if err != nil {
		return nil, xerrors.Errorf("cannot map in-container PID %d (container PID: %d): %w", req.Pid, containerPID, err)
	}

	nodeStaging, err := os.MkdirTemp("", "proc-umount")
	if err != nil {
		return nil, xerrors.Errorf("cannot prepare proc staging: %w", err)
	}
	scktPath := filepath.Join(nodeStaging, "sckt")
	l, err := net.Listen("unix", scktPath)
	if err != nil {
		return nil, xerrors.Errorf("cannot listen for mountfd: %w", err)
	}
	defer l.Close()

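	// The nsinsider "open-tree" call below runs inside the workspace's namespaces,
	// opens a detached clone of the mount tree at req.Target, and passes the
	// resulting mount fd back over this unix socket as an SCM_RIGHTS control
	// message; the goroutine below extracts that fd from the ancillary data.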
	type fdresp struct {
		FD  int
		Err error
	}
	fdrecv := make(chan fdresp)
	go func() {
		defer close(fdrecv)

		rconn, err := l.Accept()
		if err != nil {
			fdrecv <- fdresp{Err: err}
			return
		}
		defer rconn.Close()

		conn := rconn.(*net.UnixConn)
		err = conn.SetDeadline(time.Now().Add(5 * time.Second))
		if err != nil {
			fdrecv <- fdresp{Err: err}
			return
		}

		f, err := conn.File()
		if err != nil {
			fdrecv <- fdresp{Err: err}
			return
		}
		defer f.Close()
		connfd := int(f.Fd())

		// make room in the ancillary data buffer for exactly one 4-byte file descriptor
		buf := make([]byte, unix.CmsgSpace(4))
		_, _, _, _, err = unix.Recvmsg(connfd, nil, buf, 0)
		if err != nil {
			fdrecv <- fdresp{Err: err}
			return
		}

		msgs, err := unix.ParseSocketControlMessage(buf)
		if err != nil {
			fdrecv <- fdresp{Err: err}
			return
		}
		if len(msgs) != 1 {
			fdrecv <- fdresp{Err: xerrors.Errorf("expected a single socket control message")}
			return
		}

		fds, err := unix.ParseUnixRights(&msgs[0])
		if err != nil {
			fdrecv <- fdresp{Err: err}
			return
		}
		if len(fds) == 0 {
			fdrecv <- fdresp{Err: xerrors.Errorf("expected a single socket FD")}
			return
		}

		fdrecv <- fdresp{FD: fds[0]}
	}()

	rconn, err := net.Dial("unix", scktPath)
	if err != nil {
		return nil, err
	}
	defer rconn.Close()
	conn := rconn.(*net.UnixConn)
	connFD, err := conn.File()
	if err != nil {
		return nil, err
	}

	err = nsi.Nsinsider(wbs.Session.InstanceID, int(procPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "open-tree", "--target", req.Target, "--pipe-fd", "3")
		c.ExtraFiles = append(c.ExtraFiles, connFD)
	})
	if err != nil {
		return nil, xerrors.Errorf("cannot open-tree at %s (container PID: %d): %w", req.Target, containerPID, err)
	}

	fdr := <-fdrecv
	if fdr.Err != nil {
		return nil, fdr.Err
	}
	if fdr.FD == 0 {
		return nil, xerrors.Errorf("received nil as mountfd (container PID: %d)", containerPID)
	}

	base, err := os.Executable()
	if err != nil {
		return nil, err
	}

	// run move-mount in a freshly unshared mount namespace (Unshareflags: CLONE_NEWNS)
	// so the staging mount does not leak into the host mount namespace
	cmd := exec.Command(filepath.Join(filepath.Dir(base), "nsinsider"), "move-mount", "--target", nodeStaging, "--pipe-fd", "3")
	cmd.ExtraFiles = append(cmd.ExtraFiles, os.NewFile(uintptr(fdr.FD), ""))
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Unshareflags: syscall.CLONE_NEWNS,
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, xerrors.Errorf("cannot move-mount: %w: %s", err, string(out))
	}

	return &api.UmountProcResponse{}, nil
}

// MountSysfs mounts a sysfs filesystem
func (wbs *InWorkspaceServiceServer) MountSysfs(ctx context.Context, req *api.MountProcRequest) (resp *api.MountProcResponse, err error) {
	var (
		reqPID  = req.Pid
		procPID uint64
	)
	defer func() {
		if err == nil {
			return
		}

		log.WithError(err).WithFields(wbs.Session.OWI()).WithField("procPID", procPID).WithField("reqPID", reqPID).Error("cannot mount sysfs")
		if _, ok := status.FromError(err); !ok {
			err = status.Error(codes.Internal, "cannot mount sysfs")
		}
	}()

	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find workspace container")
	}

	containerPID, err := rt.ContainerPID(ctx, wscontainerID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find container PID for containerID %v: %w", wscontainerID, err)
	}

	procPID, err = wbs.Uidmapper.findHostPID(containerPID, uint64(req.Pid))
	if err != nil {
		return nil, xerrors.Errorf("cannot map in-container PID %d (container PID: %d): %w", req.Pid, containerPID, err)
	}

	nodeStaging, err := os.MkdirTemp("", "sysfs-staging")
	if err != nil {
		return nil, xerrors.Errorf("cannot prepare sysfs staging: %w", err)
	}
	err = nsi.Nsinsider(wbs.Session.InstanceID, int(procPID), func(c *exec.Cmd) {
		c.Args = append(c.Args, "mount-sysfs", "--target", nodeStaging)
	}, nsi.EnterMountNS(false), nsi.EnterNetNS(true))
	if err != nil {
		return nil, xerrors.Errorf("mount new sysfs at %s: %w", nodeStaging, err)
	}

	for _, mask := range sysfsDefaultMaskedPaths {
		err = maskPath(filepath.Join(nodeStaging, mask))
		if err != nil {
			return nil, xerrors.Errorf("cannot mask %s: %w", mask, err)
		}
	}

	err = moveMount(wbs.Session.InstanceID, int(procPID), nodeStaging, req.Target)
	if err != nil {
		return nil, err
	}

	cleanupMaskedMount(wbs.Session.OWI(), nodeStaging, sysfsDefaultMaskedPaths)

	return &api.MountProcResponse{}, nil
}

// MountNfs mounts an NFS share into the workspace
func (wbs *InWorkspaceServiceServer) MountNfs(ctx context.Context, req *api.MountNfsRequest) (resp *api.MountNfsResponse, err error) {
	var (
		reqPID        = req.Pid
		supervisorPID uint64
	)
	defer func() {
		if err == nil {
			return
		}

		log.WithError(err).WithFields(wbs.Session.OWI()).WithField("supervisorPID", supervisorPID).WithField("reqPID", reqPID).Error("cannot mount nfs")
		if _, ok := status.FromError(err); !ok {
			err = status.Error(codes.Internal, "cannot mount nfs")
		}
	}()

	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find workspace container")
	}

	containerPID, err := rt.ContainerPID(ctx, wscontainerID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find container PID for containerID %v: %w", wscontainerID, err)
	}

	supervisorPID, err = wbs.Uidmapper.findSupervisorPID(containerPID)
	if err != nil {
		return nil, xerrors.Errorf("cannot find supervisor PID (container PID: %d): %w", containerPID, err)
	}

	nodeStaging, err := os.MkdirTemp("", "nfs-staging")
	if err != nil {
		return nil, xerrors.Errorf("cannot prepare nfs staging: %w", err)
	}

	log.WithField("source", req.Source).WithField("target", req.Target).WithField("staging", nodeStaging).WithField("args", req.Args).Info("Mounting nfs")
	cmd := exec.CommandContext(ctx, "nsenter", "-n", "-t", strconv.Itoa(int(supervisorPID)), "mount", "-t", "nfs4", "-o", req.Args, req.Source, nodeStaging)
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin

	err = cmd.Run()
	if err != nil {
		return nil, xerrors.Errorf("cannot mount nfs: %w", err)
	}

	stat, err := os.Stat(nodeStaging)
	if err != nil {
		return nil, xerrors.Errorf("cannot stat staging: %w", err)
	}

	sys, ok := stat.Sys().(*syscall.Stat_t)
	if !ok {
		return nil, xerrors.Errorf("cast to stat failed")
	}

	if sys.Uid != 133332 || sys.Gid != 133332 {
		err = os.Chown(nodeStaging, 133332, 133332)
		if err != nil {
			return nil, xerrors.Errorf("cannot chown %s for %s", nodeStaging, req.Source)
		}
	}

	err = moveMount(wbs.Session.InstanceID, int(supervisorPID), nodeStaging, req.Target)
	if err != nil {
		return nil, err
	}

	return &api.MountNfsResponse{}, nil
}

func moveMount(instanceID string, targetPid int, source, target string) error {
	mntfd, err := syscallOpenTree(unix.AT_FDCWD, source, flagOpenTreeClone|flagAtRecursive)
	if err != nil {
		return xerrors.Errorf("cannot open tree: %w", err)
	}
	mntf := os.NewFile(mntfd, "")
	defer mntf.Close()

	// Note(cw): we also need to enter the target PID namespace because the mount target
	// might refer to proc.
	err = nsi.Nsinsider(instanceID, targetPid, func(c *exec.Cmd) {
		c.Args = append(c.Args, "move-mount", "--target", target, "--pipe-fd", "3")
		c.ExtraFiles = append(c.ExtraFiles, mntf)
	}, nsi.EnterPidNS(true))
	if err != nil {
		return xerrors.Errorf("cannot move mount: %w", err)
	}
	return nil
}
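
// Conceptually (a sketch of the kernel API involved, not code that runs here):
//
//	fd := open_tree(AT_FDCWD, source, OPEN_TREE_CLONE|AT_RECURSIVE)
//	move_mount(fd, "", AT_FDCWD, target, MOVE_MOUNT_F_EMPTY_PATH)
//
// The open_tree half runs in this process; the move_mount half runs inside
// nsinsider, with the mount fd handed over via ExtraFiles.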

// cleanupMaskedMount will unmount and remove the paths joined with the basedir.
// Errors are logged instead of returned.
// This is useful when we've moved a mount (which we did with OPEN_TREE_CLONE) and
// need to unmount the mask mounts again so as not to leave them dangling.
func cleanupMaskedMount(owi map[string]interface{}, base string, paths []string) {
	for _, mask := range paths {
		// Note: if errors happen while unmounting or removing the masks this does not mean
		// that the final unmount won't happen. I.e. we can ignore those errors here
		// because they would not be actionable anyways. Only if the final removal or
		// unmount fails did we leak a mount.

		fn := filepath.Join(base, mask)
		err := unix.Unmount(fn, 0)
		if err != nil {
			continue
		}
		_ = os.RemoveAll(fn)
	}

	err := unix.Unmount(base, 0)
	if err != nil {
		log.WithError(err).WithField("fn", base).WithFields(owi).Warn("cannot unmount dangling base mount")
		err = unix.Unmount(base, syscall.MNT_DETACH)
		if err != nil {
			log.WithError(err).WithField("fn", base).WithFields(owi).Warn("cannot detach dangling base mount")
		}
		return
	}

	err = os.RemoveAll(base)
	if err != nil {
		log.WithError(err).WithField("fn", base).WithFields(owi).Warn("cannot remove dangling base mount")
		return
	}
}

// maskPath masks the top of the specified path inside a container to avoid
// security issues from processes reading information from non-namespace aware
// mounts (proc/kcore).
// For files, maskPath bind mounts /dev/null over the top of the specified path.
// For directories, maskPath mounts read-only tmpfs over the top of the specified path.
//
// Blatant copy from runc: https://github.com/opencontainers/runc/blob/master/libcontainer/rootfs_linux.go#L946-L959
func maskPath(path string) error {
	if err := unix.Mount("/dev/null", path, "", unix.MS_BIND, ""); err != nil && !errors.Is(err, fs.ErrNotExist) {
		if err == unix.ENOTDIR {
			return unix.Mount("tmpfs", path, "tmpfs", unix.MS_RDONLY, "")
		}
		return err
	}
	return nil
}

// readonlyPath will make a path read only.
//
// Blatant copy from runc: https://github.com/opencontainers/runc/blob/master/libcontainer/rootfs_linux.go#L907-L916
func readonlyPath(path string) error {
	if err := unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return nil
		}
		return err
	}
	return unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_REC, "")
}

// WriteIDMapping writes /proc/.../uid_map and /proc/.../gid_map for a workspace container
func (wbs *InWorkspaceServiceServer) WriteIDMapping(ctx context.Context, req *api.WriteIDMappingRequest) (*api.WriteIDMappingResponse, error) {
	cid, err := wbs.Uidmapper.Runtime.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		log.WithFields(wbs.Session.OWI()).WithError(err).Error("cannot write ID mapping, because we cannot find the container")
		return nil, status.Error(codes.FailedPrecondition, "cannot find container")
	}

	err = wbs.Uidmapper.HandleUIDMappingRequest(ctx, req, cid, wbs.Session.InstanceID)
	if err != nil {
		return nil, err
	}

	return &api.WriteIDMappingResponse{}, nil
}

// EvacuateCGroup allows workspace users to manipulate the cgroups to which the user
// processes belong, by constructing cgroups of the following form:
//
//	<container-cgroup> drwxr-xr-x 3 root      root
//	└── workspace      drwxr-xr-x 5 gitpodUid gitpodGid
//	    └── user       drwxr-xr-x 5 gitpodUid gitpodGid
func (wbs *InWorkspaceServiceServer) EvacuateCGroup(ctx context.Context, req *api.EvacuateCGroupRequest) (*api.EvacuateCGroupResponse, error) {
	unified, err := cgroups.IsUnifiedCgroupSetup()
	if err != nil {
		// log error and do not expose it to the user
		log.WithFields(wbs.Session.OWI()).WithError(err).Error("could not determine cgroup setup")
		return nil, status.Errorf(codes.FailedPrecondition, "could not determine cgroup setup")
	}
	if !unified {
		return &api.EvacuateCGroupResponse{}, nil
	}

	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("EvacuateCGroup: cannot find workspace container")
		return nil, status.Errorf(codes.NotFound, "cannot find workspace container")
	}

	cgroupBase, err := rt.ContainerCGroupPath(ctx, wscontainerID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("EvacuateCGroup: cannot find workspace container CGroup path")
		return nil, status.Errorf(codes.NotFound, "cannot find workspace container cgroup")
	}

	workspaceCGroup := filepath.Join(cgroupBase, "workspace")
	if _, err := os.Stat(filepath.Join(wbs.CGroupMountPoint, workspaceCGroup)); err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).WithField("path", workspaceCGroup).Error("EvacuateCGroup: workspace cgroup error")
		return nil, status.Errorf(codes.FailedPrecondition, "cannot find workspace cgroup")
	}

	err = evacuateToCGroup(ctx, log.WithFields(wbs.Session.OWI()), wbs.CGroupMountPoint, workspaceCGroup, "user")
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).WithField("path", workspaceCGroup).Error("EvacuateCGroup: cannot produce user cgroup")
		return nil, status.Errorf(codes.FailedPrecondition, "cannot produce user cgroup")
	}

	out, err := exec.CommandContext(ctx, "chown", "-R", fmt.Sprintf("%d:%d", wsinit.GitpodUID, wsinit.GitpodGID), filepath.Join(wbs.CGroupMountPoint, workspaceCGroup)).CombinedOutput()
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).WithField("path", workspaceCGroup).WithField("out", string(out)).Error("EvacuateCGroup: cannot chown workspace cgroup")
		return nil, status.Errorf(codes.FailedPrecondition, "cannot chown workspace cgroup")
	}

	return &api.EvacuateCGroupResponse{}, nil
}

// Teardown triggers the final live backup and possibly shiftfs mark unmount
func (wbs *InWorkspaceServiceServer) Teardown(ctx context.Context, req *api.TeardownRequest) (*api.TeardownResponse, error) {
	owi := wbs.Session.OWI()
	log := log.WithFields(owi)

	var (
		success = true
		err     error
	)

	err = wbs.unPrepareForUserNS()
	if err != nil {
		log.WithError(err).Error("mark FS unmount failed")
		success = false
	}

	return &api.TeardownResponse{Success: success}, nil
}

// WipingTeardown tears down all state we created using IWS
func (wbs *InWorkspaceServiceServer) WipingTeardown(ctx context.Context, req *api.WipingTeardownRequest) (*api.WipingTeardownResponse, error) {
	log := log.WithFields(wbs.Session.OWI())
	log.WithField("doWipe", req.DoWipe).Debug("iws.WipingTeardown")
	defer log.WithField("doWipe", req.DoWipe).Debug("iws.WipingTeardown done")

	if !req.DoWipe {
		return &api.WipingTeardownResponse{Success: true}, nil
	}

	wbs.prepareForUserNSCond.L.Lock()
	defer wbs.prepareForUserNSCond.L.Unlock()

	// Sometimes the Teardown() call in ring0 is not executed successfully, and we leave the mark-mount dangling.
	// Here we just try to unmount it (again) best-effort-style. Testing shows it works reliably!
	success := true
	err := wbs.unPrepareForUserNS()
	if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
		log.WithError(err).Warnf("error trying to unmount mark")
		success = false
	}

	return &api.WipingTeardownResponse{Success: success}, nil
}

func (wbs *InWorkspaceServiceServer) unPrepareForUserNS() error {
	mountpoint := filepath.Join(wbs.Session.ServiceLocNode, "mark")
	err := nsi.Nsinsider(wbs.Session.InstanceID, 1, func(c *exec.Cmd) {
		c.Args = append(c.Args, "unmount", "--target", mountpoint)
	})
	if err != nil {
		return xerrors.Errorf("cannot unmount mark at %s: %w", mountpoint, err)
	}

	return nil
}

func (wbs *InWorkspaceServiceServer) WorkspaceInfo(ctx context.Context, req *api.WorkspaceInfoRequest) (*api.WorkspaceInfoResponse, error) {
	log.WithFields(wbs.Session.OWI()).Debug("Received workspace info request")
	rt := wbs.Uidmapper.Runtime
	if rt == nil {
		return nil, status.Errorf(codes.FailedPrecondition, "not connected to container runtime")
	}
	wscontainerID, err := rt.WaitForContainer(ctx, wbs.Session.InstanceID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("WorkspaceInfo: cannot find workspace container")
		return nil, status.Errorf(codes.NotFound, "cannot find workspace container")
	}

	cgroupPath, err := rt.ContainerCGroupPath(ctx, wscontainerID)
	if err != nil {
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("WorkspaceInfo: cannot find workspace container CGroup path")
		return nil, status.Errorf(codes.NotFound, "cannot find workspace container cgroup")
	}

	unified, err := cgroups.IsUnifiedCgroupSetup()
	if err != nil {
		// log error and do not expose it to the user
		log.WithError(err).WithFields(wbs.Session.OWI()).Error("could not determine cgroup setup")
		return nil, status.Errorf(codes.FailedPrecondition, "could not determine cgroup setup")
	}

	if !unified {
		return nil, status.Errorf(codes.FailedPrecondition, "only cgroups v2 is supported")
	}

	resources, err := getWorkspaceResourceInfo(wbs.CGroupMountPoint, cgroupPath)
	if err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			log.WithError(err).WithFields(wbs.Session.OWI()).Error("could not get resource information")
		}
		return nil, status.Error(codes.Unknown, err.Error())
	}

	return &api.WorkspaceInfoResponse{
		Resources: resources,
	}, nil
}

func getWorkspaceResourceInfo(mountPoint, cgroupPath string) (*api.Resources, error) {
	cpu, err := getCpuResourceInfoV2(mountPoint, cgroupPath)
	if err != nil {
		return nil, err
	}

	memory, err := getMemoryResourceInfoV2(mountPoint, cgroupPath)
	if err != nil {
		return nil, err
	}

	return &api.Resources{
		Cpu:    cpu,
		Memory: memory,
	}, nil
}

func getCpuResourceInfoV2(mountPoint, cgroupPath string) (*api.Cpu, error) {
	cpu := v2.NewCpuControllerWithMount(mountPoint, cgroupPath)

	t, err := resolveCPUStatV2(cpu)
	if err != nil {
		return nil, err
	}

	time.Sleep(time.Second)

	t2, err := resolveCPUStatV2(cpu)
	if err != nil {
		return nil, err
	}

	cpuUsage := t2.usage - t.usage
	totalTime := t2.uptime - t.uptime
	used := cpuUsage / totalTime * 1000

	quota, period, err := cpu.Max()
	if errors.Is(err, os.ErrNotExist) {
		quota = math.MaxUint64
	} else if err != nil {
		return nil, err
	}

	// if no cpu limit has been specified, use the number of cores
	var limit uint64
	if quota == math.MaxUint64 {
		// TODO(toru): we have to check a parent cgroup instead of the host resources
		cpuInfo, err := linuxproc.ReadCPUInfo("/proc/cpuinfo")
		if err != nil {
			return nil, err
		}

		limit = uint64(cpuInfo.NumCore()) * 1000
	} else {
		limit = quota / period * 1000
	}

	return &api.Cpu{
		Used:  int64(used),
		Limit: int64(limit),
	}, nil
}
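
// Worked example with illustrative numbers: if the cgroup consumed 0.5 CPU-seconds
// between the two samples taken one second apart, used = 0.5 / 1.0 * 1000 = 500
// millicores; with cpu.max = "200000 100000" (quota, period), limit =
// 200000 / 100000 * 1000 = 2000 millicores.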

func getMemoryResourceInfoV2(mountPoint, cgroupPath string) (*api.Memory, error) {
	memory := v2.NewMemoryControllerWithMount(mountPoint, cgroupPath)
	memoryLimit, err := memory.Max()
	if err != nil {
		return nil, xerrors.Errorf("could not retrieve memory max: %w", err)
	}

	memInfo, err := linuxproc.ReadMemInfo("/proc/meminfo")
	if err != nil {
		return nil, xerrors.Errorf("failed to read meminfo: %w", err)
	}

	// if no memory limit has been specified, use the total available memory
	if memoryLimit == math.MaxUint64 || memoryLimit > memInfo.MemTotal*1024 {
		// total memory is specified in kilobytes -> convert to bytes
		memoryLimit = memInfo.MemTotal * 1024
	}

	usedMemory, err := memory.Current()
	if err != nil {
		return nil, xerrors.Errorf("failed to read current memory usage: %w", err)
	}

	stats, err := memory.Stat()
	if err != nil {
		return nil, xerrors.Errorf("failed to read memory stats: %w", err)
	}

	if stats.InactiveFileTotal > 0 {
		if usedMemory < stats.InactiveFileTotal {
			usedMemory = 0
		} else {
			usedMemory -= stats.InactiveFileTotal
		}
	}

	return &api.Memory{
		Limit: int64(memoryLimit),
		Used:  int64(usedMemory),
	}, nil
}
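
// Illustrative numbers: with memory.max set to "max" and MemTotal = 16000000 kB,
// the reported limit is 16000000 * 1024 bytes. If memory.current is 2 GiB and
// inactive_file totals 0.5 GiB, Used comes out at 1.5 GiB, since page cache the
// kernel can reclaim cheaply is not counted against the workspace.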

type cpuStat struct {
	usage  float64
	uptime float64
}

func resolveCPUStatV2(cpu *v2.Cpu) (*cpuStat, error) {
	stats, err := cpu.Stat()
	if err != nil {
		return nil, xerrors.Errorf("failed to get cpu usage: %w", err)
	}

	usage := float64(stats.UsageTotal) * 1e-6
	uptime, err := readProcUptime()
	if err != nil {
		return nil, err
	}

	return &cpuStat{
		usage:  usage,
		uptime: uptime,
	}, nil
}

func readProcUptime() (float64, error) {
	content, err := os.ReadFile("/proc/uptime")
	if err != nil {
		return 0, xerrors.Errorf("failed to read uptime: %w", err)
	}
	values := strings.Split(strings.TrimSpace(string(content)), " ")
	uptime, err := strconv.ParseFloat(values[0], 64)
	if err != nil {
		return 0, xerrors.Errorf("failed to parse uptime: %w", err)
	}

	return uptime, nil
}

type ratelimitingInterceptor map[string]ratelimit

type ratelimit struct {
	Limiter *rate.Limiter
	UseOnce bool
}

func (rli ratelimitingInterceptor) UnaryInterceptor() grpc.UnaryServerInterceptor {
	var (
		mu   sync.Mutex
		used = make(map[string]struct{})
	)
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
		limit, ok := rli[info.FullMethod]
		if ok {
			if limit.UseOnce {
				mu.Lock()
				_, ran := used[info.FullMethod]
				used[info.FullMethod] = struct{}{}
				mu.Unlock()

				if ran {
					return nil, status.Error(codes.ResourceExhausted, "can be used only once")
				}
			}

			if limit.Limiter != nil && !limit.Limiter.Allow() {
				return nil, status.Error(codes.ResourceExhausted, "too many requests")
			}
		}

		return handler(ctx, req)
	}
}