Path: blob/main/pkg/integrations/node_exporter/node_exporter.go
//go:build !windows

package node_exporter //nolint:golint

import (
	"context"
	"fmt"
	"net/http"
	"sort"
	"strings"

	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/grafana/agent/pkg/build"
	"github.com/grafana/agent/pkg/integrations/config"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/node_exporter/collector"
	"gopkg.in/alecthomas/kingpin.v2"
)

// Integration is the node_exporter integration. The integration scrapes metrics
// from the host Linux-based system.
type Integration struct {
	c      *Config
	logger log.Logger
	nc     *collector.NodeCollector

	exporterMetricsRegistry *prometheus.Registry
}

// New creates a new node_exporter integration.
func New(log log.Logger, c *Config) (*Integration, error) {
	// NOTE(rfratto): this works as long as node_exporter is the only thing using
	// kingpin across the codebase. node_exporter may need a PR eventually to pass
	// in a custom kingpin application or expose methods to explicitly enable/disable
	// collectors that we can use instead of this command line hack.
	flags, _ := MapConfigToNodeExporterFlags(c)
	level.Debug(log).Log("msg", "initializing node_exporter with flags converted from agent config", "flags", strings.Join(flags, " "))

	for _, warn := range c.UnmarshalWarnings {
		level.Warn(log).Log("msg", warn)
	}

	_, err := kingpin.CommandLine.Parse(flags)
	if err != nil {
		return nil, fmt.Errorf("failed to parse flags for generating node_exporter configuration: %w", err)
	}

	nc, err := collector.NewNodeCollector(log)
	if err != nil {
		return nil, fmt.Errorf("failed to create node_exporter: %w", err)
	}

	level.Info(log).Log("msg", "Enabled node_exporter collectors")
	collectors := []string{}
	for n := range nc.Collectors {
		collectors = append(collectors, n)
	}
	sort.Strings(collectors)
	for _, c := range collectors {
		level.Info(log).Log("collector", c)
	}

	return &Integration{
		c:      c,
		logger: log,
		nc:     nc,

		exporterMetricsRegistry: prometheus.NewRegistry(),
	}, nil
}

// MetricsHandler implements Integration.
func (i *Integration) MetricsHandler() (http.Handler, error) {
	r := prometheus.NewRegistry()
	if err := r.Register(i.nc); err != nil {
		return nil, fmt.Errorf("couldn't register node_exporter node collector: %w", err)
	}
	handler := promhttp.HandlerFor(
		prometheus.Gatherers{i.exporterMetricsRegistry, r},
		promhttp.HandlerOpts{
			ErrorHandling:       promhttp.ContinueOnError,
			MaxRequestsInFlight: 0,
			Registry:            i.exporterMetricsRegistry,
		},
	)

	// Register node_exporter_build_info metrics, generally useful for
	// dashboards that depend on them for discovering targets.
	if err := r.Register(build.NewCollector(i.c.Name())); err != nil {
		return nil, fmt.Errorf("couldn't register %s: %w", i.c.Name(), err)
	}

	if i.c.IncludeExporterMetrics {
		// Note that we have to use reg here to use the same promhttp metrics for
		// all expositions.
		handler = promhttp.InstrumentMetricHandler(i.exporterMetricsRegistry, handler)
	}

	return handler, nil
}

// ScrapeConfigs satisfies Integration.ScrapeConfigs.
func (i *Integration) ScrapeConfigs() []config.ScrapeConfig {
	return []config.ScrapeConfig{{
		JobName:     i.c.Name(),
		MetricsPath: "/metrics",
	}}
}

// Run satisfies Integration.Run.
func (i *Integration) Run(ctx context.Context) error {
	// We don't need to do anything here, so we can just wait for the context to
	// finish.
	<-ctx.Done()
	return ctx.Err()
}
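
For reference, a minimal sketch (not part of the file above) of how a caller might wire this integration into a standalone HTTP server. The Config literal and listen address are assumptions for illustration: the real Config type is defined elsewhere in this package and is normally populated from agent YAML, and only the IncludeExporterMetrics field is taken from the file above.

package main

import (
	"context"
	"net/http"
	"os"

	"github.com/go-kit/log"
	node_exporter "github.com/grafana/agent/pkg/integrations/node_exporter"
)

func main() {
	logger := log.NewLogfmtLogger(os.Stderr)

	// Assumption: constructing Config directly with only IncludeExporterMetrics
	// set; other fields are left at their zero values here.
	cfg := &node_exporter.Config{IncludeExporterMetrics: true}

	integration, err := node_exporter.New(logger, cfg)
	if err != nil {
		panic(err)
	}

	handler, err := integration.MetricsHandler()
	if err != nil {
		panic(err)
	}

	mux := http.NewServeMux()
	mux.Handle("/metrics", handler)

	// Run blocks until its context is canceled, so it runs in the background.
	go integration.Run(context.Background())

	// Hypothetical listen address for the example.
	http.ListenAndServe(":9090", mux)
}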