package otelcol

import (
	"fmt"

	"github.com/grafana/agent/pkg/river"
	otelexporterhelper "go.opentelemetry.io/collector/exporter/exporterhelper"
)

// QueueArguments holds shared settings for components which can queue
// requests.
type QueueArguments struct {
	Enabled      bool `river:"enabled,attr,optional"`
	NumConsumers int  `river:"num_consumers,attr,optional"`
	QueueSize    int  `river:"queue_size,attr,optional"`

	// TODO(rfratto): queues can send to persistent storage through an extension.
}

var _ river.Unmarshaler = (*QueueArguments)(nil)

// DefaultQueueArguments holds default settings for QueueArguments.
var DefaultQueueArguments = QueueArguments{
	Enabled:      true,
	NumConsumers: 10,

	// Copied from [upstream]:
	//
	// 5000 queue elements at 100 requests/sec gives about 50 seconds of survival
	// of destination outage. This is a pretty decent value for production. Users
	// should calculate this from the perspective of how many seconds to buffer
	// in case of a backend outage and multiply that by the number of requests
	// per second.
	//
	// [upstream]: https://github.com/open-telemetry/opentelemetry-collector/blob/ff73e49f74d8fd8c57a849aa3ff23ae1940cc16a/exporter/exporterhelper/queued_retry.go#L62-L65
	QueueSize: 5000,
}

// UnmarshalRiver implements river.Unmarshaler. It applies
// DefaultQueueArguments before decoding so that unset optional attributes
// keep their default values.
func (args *QueueArguments) UnmarshalRiver(f func(interface{}) error) error {
	*args = DefaultQueueArguments

	// The local type alias drops QueueArguments' method set, preventing f from
	// recursively re-invoking UnmarshalRiver on the same value.
	type arguments QueueArguments
	return f((*arguments)(args))
}

// Convert converts args into the upstream type. A nil receiver converts to a
// nil upstream settings pointer.
func (args *QueueArguments) Convert() *otelexporterhelper.QueueSettings {
	if args == nil {
		return nil
	}

	return &otelexporterhelper.QueueSettings{
		Enabled:      args.Enabled,
		NumConsumers: args.NumConsumers,
		QueueSize:    args.QueueSize,
	}
}

// Validate returns an error if args is invalid. A nil or disabled queue is
// always valid; an enabled queue must have a positive queue_size.
func (args *QueueArguments) Validate() error {
	if args == nil || !args.Enabled {
		return nil
	}

	if args.QueueSize <= 0 {
		return fmt.Errorf("queue_size must be greater than zero")
	}

	return nil
}