From b9de2aab953842c17c2d7a7ae21ef2b315effad5 Mon Sep 17 00:00:00 2001 From: Donal Hurley Date: Thu, 26 Sep 2024 10:15:12 +0100 Subject: [PATCH 1/3] Update extensions config for OTel collector (#843) --- internal/collector/otelcol.tmpl | 17 ++++++++++---- internal/config/config.go | 16 +++++++------- internal/config/config_test.go | 18 +++++++++------ internal/config/flags.go | 2 +- internal/config/testdata/nginx-agent.conf | 9 ++++---- internal/config/types.go | 22 ++++++++++++++----- test/config/agent/nginx-agent-otel-load.conf | 10 +++++---- .../test-opentelemetry-collector-agent.yaml | 1 - test/mock/collector/nginx-agent.conf | 9 ++++---- test/types/config.go | 12 ++++++---- 10 files changed, 73 insertions(+), 43 deletions(-) diff --git a/internal/collector/otelcol.tmpl b/internal/collector/otelcol.tmpl index bd34cf733b..cc16d74197 100644 --- a/internal/collector/otelcol.tmpl +++ b/internal/collector/otelcol.tmpl @@ -113,13 +113,22 @@ exporters: {{- end }} {{- end }} +{{- if ne .Extensions nil }} extensions: + {{- if ne .Extensions.Health nil }} health_check: - {{- if .Health }} - endpoint: "{{ .Health.Host -}}:{{- .Health.Port }}" - {{- else }} - endpoint: "localhost:13133" + endpoint: "{{ .Extensions.Health.Server.Host -}}:{{- .Extensions.Health.Server.Port }}" + {{- if ne .Extensions.Health.Path "" }} + path: "{{ .Extensions.Health.Path -}}" {{- end }} + {{- if ne .Extensions.Health.TLS nil }} + tls: + ca_cert: "{{ .Extensions.Health.Server.TLS.Ca -}}" + cert_file: "{{ .Extensions.Health.Server.TLS.Cert -}}" + key_file: "{{ .Extensions.Health.Server.TLS.Key -}}" + {{- end }} + {{- end }} +{{- end }} service: {{- if .Log.Path}} diff --git a/internal/config/config.go b/internal/config/config.go index faddd11d3a..f28f1e7ccd 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -354,12 +354,12 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { } var ( - err error - exporters []Exporter - processors []Processor - receivers Receivers - healthCheck ServerConfig - log Log + err error + exporters []Exporter + processors []Processor + receivers Receivers + extensions Extensions + log Log ) err = errors.Join( @@ -367,7 +367,7 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { resolveMapStructure(CollectorExportersKey, &exporters), resolveMapStructure(CollectorProcessorsKey, &processors), resolveMapStructure(CollectorReceiversKey, &receivers), - resolveMapStructure(CollectorHealthKey, &healthCheck), + resolveMapStructure(CollectorExtensionsKey, &extensions), resolveMapStructure(CollectorLogKey, &log), ) if err != nil { @@ -387,7 +387,7 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { Exporters: exporters, Processors: processors, Receivers: receivers, - Health: &healthCheck, + Extensions: extensions, Log: &log, } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 3daec1c292..e4f292cf9a 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -53,7 +53,7 @@ func TestResolveConfig(t *testing.T) { assert.True(t, viperInstance.IsSet(CollectorExportersKey)) assert.True(t, viperInstance.IsSet(CollectorProcessorsKey)) assert.True(t, viperInstance.IsSet(CollectorReceiversKey)) - assert.True(t, viperInstance.IsSet(CollectorHealthKey)) + assert.True(t, viperInstance.IsSet(CollectorExtensionsKey)) actual, err := ResolveConfig() require.NoError(t, err) @@ -70,7 +70,7 @@ func TestResolveConfig(t *testing.T) { assert.NotEmpty(t, actual.Collector.Receivers) assert.NotEmpty(t, 
actual.Collector.Processors) assert.NotEmpty(t, actual.Collector.Exporters) - assert.NotEmpty(t, actual.Collector.Health) + assert.NotEmpty(t, actual.Collector.Extensions) assert.Equal(t, 10*time.Second, actual.Client.Timeout) @@ -226,7 +226,7 @@ func TestResolveCollector(t *testing.T) { viperInstance.Set(CollectorReceiversKey, test.expected.Receivers) viperInstance.Set(CollectorProcessorsKey, test.expected.Processors) viperInstance.Set(CollectorExportersKey, test.expected.Exporters) - viperInstance.Set(CollectorHealthKey, test.expected.Health) + viperInstance.Set(CollectorExtensionsKey, test.expected.Extensions) viperInstance.Set(CollectorLogKey, test.expected.Log) actual, err := resolveCollector(testDefault.AllowedDirectories) @@ -396,10 +396,14 @@ func getAgentConfig() *Config { }, }, }, - Health: &ServerConfig{ - Host: "localhost", - Port: 1337, - Type: 0, + Extensions: Extensions{ + Health: Health{ + Server: &ServerConfig{ + Host: "localhost", + Port: 1337, + Type: 0, + }, + }, }, Log: &Log{ Level: "INFO", diff --git a/internal/config/flags.go b/internal/config/flags.go index 1f8c4bcfbf..1474fa6537 100644 --- a/internal/config/flags.go +++ b/internal/config/flags.go @@ -34,7 +34,7 @@ var ( CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" CollectorExportersKey = pre(CollectorRootKey) + "exporters" CollectorProcessorsKey = pre(CollectorRootKey) + "processors" - CollectorHealthKey = pre(CollectorRootKey) + "health" + CollectorExtensionsKey = pre(CollectorRootKey) + "extensions" CollectorReceiversKey = pre(CollectorRootKey) + "receivers" CollectorLogKey = pre(CollectorRootKey) + "log" CollectorLogLevelKey = pre(CollectorLogKey) + "level" diff --git a/internal/config/testdata/nginx-agent.conf b/internal/config/testdata/nginx-agent.conf index 81ed647b37..f67e1b6608 100644 --- a/internal/config/testdata/nginx-agent.conf +++ b/internal/config/testdata/nginx-agent.conf @@ -56,9 +56,10 @@ collector: cert: /path/to/server-cert.pem key: /path/to/server-key.pem ca: /path/to/server-cert.pem - health: - host: "127.0.0.1" - port: 1234 - type: 0 + extensions: + health: + server: + host: "127.0.0.1" + port: 1234 config_dirs: "/etc/nginx:/usr/local/etc/nginx:/var/run/nginx:/usr/share/nginx/modules:/var/log/nginx:invalid/path" diff --git a/internal/config/types.go b/internal/config/types.go index 7c29e24ca0..acc5dd9842 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -86,12 +86,22 @@ type ( } Collector struct { - ConfigPath string `yaml:"-" mapstructure:"config_path"` - Log *Log `yaml:"-" mapstructure:"log"` - Exporters []Exporter `yaml:"-" mapstructure:"exporters"` - Health *ServerConfig `yaml:"-" mapstructure:"health"` - Processors []Processor `yaml:"-" mapstructure:"processors"` - Receivers Receivers `yaml:"-" mapstructure:"receivers"` + ConfigPath string `yaml:"-" mapstructure:"config_path"` + Log *Log `yaml:"-" mapstructure:"log"` + Exporters []Exporter `yaml:"-" mapstructure:"exporters"` + Extensions Extensions `yaml:"-" mapstructure:"extensions"` + Processors []Processor `yaml:"-" mapstructure:"processors"` + Receivers Receivers `yaml:"-" mapstructure:"receivers"` + } + + Extensions struct { + Health Health `yaml:"-" mapstructure:"health"` + } + + Health struct { + Server *ServerConfig `yaml:"-" mapstructure:"server"` + TLS *TLSConfig `yaml:"-" mapstructure:"tls"` + Path string `yaml:"-" mapstructure:"path"` } // OTel Collector Exporter configuration. 
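For reference, a minimal sketch (assumed, not part of this patch) of how the new extensions.health keys defined by the Extensions and Health structs above would appear in an agent config file; the path and tls keys are optional and every value shown here is illustrative only:

collector:
  extensions:
    health:
      server:
        host: "127.0.0.1"
        port: 1234
      # optional: rendered as the health_check extension's path when non-empty
      path: "/health"
      # optional: TLS settings for the health_check endpoint (see the tls block in otelcol.tmpl above)
      tls:
        ca: /path/to/ca.pem
        cert: /path/to/cert.pem
        key: /path/to/key.pem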
diff --git a/test/config/agent/nginx-agent-otel-load.conf b/test/config/agent/nginx-agent-otel-load.conf index 546d250cf8..b328dab3c0 100644 --- a/test/config/agent/nginx-agent-otel-load.conf +++ b/test/config/agent/nginx-agent-otel-load.conf @@ -30,7 +30,9 @@ collector: host: "127.0.0.1" port: 5643 type: 0 - health: - host: "127.0.0.1" - port: 1337 - type: 0 + + extensions: + health: + server: + host: "127.0.0.1" + port: 1337 diff --git a/test/config/collector/test-opentelemetry-collector-agent.yaml b/test/config/collector/test-opentelemetry-collector-agent.yaml index 9deb5be556..ae30185a8c 100644 --- a/test/config/collector/test-opentelemetry-collector-agent.yaml +++ b/test/config/collector/test-opentelemetry-collector-agent.yaml @@ -45,7 +45,6 @@ exporters: verbosity: detailed sampling_initial: 5 sampling_thereafter: 200 - extensions: health_check: endpoint: "localhost:1337" diff --git a/test/mock/collector/nginx-agent.conf b/test/mock/collector/nginx-agent.conf index b2e16388d1..a1074b7ecc 100644 --- a/test/mock/collector/nginx-agent.conf +++ b/test/mock/collector/nginx-agent.conf @@ -58,7 +58,8 @@ collector: host: "otel-collector" port: 4317 type: 0 - health: - host: "127.0.0.1" - port: 1337 - type: 0 + extensions: + health: + server: + host: "127.0.0.1" + port: 1337 diff --git a/test/types/config.go b/test/types/config.go index 68f8830cf4..ad93f325ce 100644 --- a/test/types/config.go +++ b/test/types/config.go @@ -83,10 +83,14 @@ func AgentConfig() *config.Config { }, }, }, - Health: &config.ServerConfig{ - Host: "localhost", - Port: randomPort3, - Type: 0, + Extensions: config.Extensions{ + Health: config.Health{ + Server: &config.ServerConfig{ + Host: "localhost", + Port: randomPort3, + Type: 0, + }, + }, }, Log: &config.Log{ Level: "INFO", From 682e27d4bff2b6130e69b82e2125d7a2c67384f8 Mon Sep 17 00:00:00 2001 From: Donal Hurley Date: Thu, 26 Sep 2024 11:15:51 +0100 Subject: [PATCH 2/3] Update processors and exporters config for Otel collector (#839) --- .../generated_component_test.go | 12 +- .../internal/metadata/generated_metrics.go | 54 +++---- .../metadata/generated_metrics_test.go | 18 +-- .../generated_component_test.go | 12 +- .../internal/metadata/generated_metrics.go | 54 +++---- .../metadata/generated_metrics_test.go | 76 ++++----- internal/collector/otelcol.tmpl | 36 +++-- internal/collector/settings_test.go | 15 +- internal/config/config.go | 87 ++++++---- internal/config/config_test.go | 149 +++++++----------- internal/config/defaults.go | 4 + internal/config/flags.go | 68 ++++---- internal/config/testdata/nginx-agent.conf | 27 ++-- internal/config/types.go | 79 ++++------ test/config/agent/nginx-agent-otel-load.conf | 12 +- .../test-opentelemetry-collector-agent.yaml | 8 +- test/mock/collector/nginx-agent.conf | 11 +- test/types/config.go | 30 ++-- 18 files changed, 356 insertions(+), 396 deletions(-) diff --git a/internal/collector/nginxossreceiver/generated_component_test.go b/internal/collector/nginxossreceiver/generated_component_test.go index 60de562050..3007dc92b4 100644 --- a/internal/collector/nginxossreceiver/generated_component_test.go +++ b/internal/collector/nginxossreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t 
*testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go index c560fe1bc8..e74642bc94 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go @@ -313,25 +313,17 @@ type MetricsBuilder struct { metricNginxHTTPResponseStatus metricNginxHTTPResponseStatus } -// MetricBuilderOption applies changes to default metrics builder. -type MetricBuilderOption interface { - apply(*MetricsBuilder) -} - -type metricBuilderOptionFunc func(mb *MetricsBuilder) - -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) -} +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -344,7 +336,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -357,28 +349,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. 
func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -392,7 +376,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -400,7 +384,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. -func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("otelcol/nginxreceiver") @@ -411,8 +395,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxHTTPRequests.emit(ils.Metrics()) mb.metricNginxHTTPResponseStatus.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } if ils.Metrics().Len() > 0 { @@ -424,8 +408,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -453,9 +437,9 @@ func (mb *MetricsBuilder) RecordNginxHTTPResponseStatusDataPoint(ts pcommon.Time // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
-func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go index 2581a33ef3..261655b70a 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -78,7 +78,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -88,10 +88,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll { + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -104,7 +104,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -136,7 +136,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -150,7 +150,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses, grouped by status code range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := 
ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/internal/collector/nginxplusreceiver/generated_component_test.go b/internal/collector/nginxplusreceiver/generated_component_test.go index 9fbb9944ca..170385ba71 100644 --- a/internal/collector/nginxplusreceiver/generated_component_test.go +++ b/internal/collector/nginxplusreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go index 15dbba1752..866fb5a735 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go @@ -3620,25 +3620,17 @@ type MetricsBuilder struct { metricNginxStreamUpstreamZombieCount metricNginxStreamUpstreamZombieCount } -// MetricBuilderOption applies changes to default metrics builder. -type MetricBuilderOption interface { - apply(*MetricsBuilder) -} - -type metricBuilderOptionFunc func(mb *MetricsBuilder) - -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) -} +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -3705,7 +3697,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -3718,28 +3710,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -3753,7 +3737,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -3761,7 +3745,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. 
-func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("otelcol/nginxplusreceiver") @@ -3826,8 +3810,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxStreamUpstreamPeerUnavailable.emit(ils.Metrics()) mb.metricNginxStreamUpstreamZombieCount.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } if ils.Metrics().Len() > 0 { @@ -3839,8 +3823,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -4138,9 +4122,9 @@ func (mb *MetricsBuilder) RecordNginxStreamUpstreamZombieCountDataPoint(ts pcomm // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. -func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go index 6d048f6442..8e8f64cbb9 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -294,7 +294,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -304,10 +304,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll 
{ + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -320,7 +320,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of bytes read from the cache or proxied server.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -370,7 +370,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of responses read from the cache or proxied server.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -390,7 +390,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of NGINX config reloads.", ms.At(i).Description()) assert.Equal(t, "reloads", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -404,7 +404,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -436,7 +436,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections to an endpoint with a limit_conn directive.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -456,7 +456,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests to an endpoint with a limit_req directive.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -476,7 +476,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number 
of HTTP byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -499,7 +499,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests completed without sending a response.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -537,7 +537,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -569,7 +569,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses, grouped by status code range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -592,7 +592,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -630,7 +630,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of byte IO per HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -752,7 +752,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of health check requests made to a HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, 
ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -781,7 +781,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests forwarded to the HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -831,7 +831,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of responses obtained from the HTTP upstream peer grouped by status range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -887,7 +887,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of times the server became unavailable for client requests (“unavail”).", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -931,7 +931,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests rejected due to the queue overflow.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1036,7 +1036,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("nginx.zone.name") assert.True(t, ok) assert.EqualValues(t, "nginx.zone.name-val", attrVal.Str()) @@ -1047,7 +1047,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of attempts to allocate memory of specified size.", ms.At(i).Description()) assert.Equal(t, "allocations", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1106,7 +1106,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of SSL certificate 
verification failures.", ms.At(i).Description()) assert.Equal(t, "certificates", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1123,7 +1123,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of SSL handshakes.", ms.At(i).Description()) assert.Equal(t, "handshakes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1143,7 +1143,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of Stream byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1163,7 +1163,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections accepted from clients.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1180,7 +1180,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Total number of connections completed without creating a session.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1212,7 +1212,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of completed sessions.", ms.At(i).Description()) assert.Equal(t, "sessions", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1232,7 +1232,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of Stream Upstream Peer byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) 
assert.Equal(t, start, dp.StartTimestamp()) @@ -1309,7 +1309,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client connections forwarded to this stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1335,7 +1335,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of stream upstream peers grouped by state.", ms.At(i).Description()) assert.Equal(t, "peers", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1358,7 +1358,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of unsuccessful attempts to communicate with the stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "peers", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1381,7 +1381,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of health check requests made to the stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1434,7 +1434,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. 
If no upstream peer is a match then the value will be 0.", ms.At(i).Description()) assert.Equal(t, "deployments", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1487,7 +1487,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/internal/collector/otelcol.tmpl b/internal/collector/otelcol.tmpl index cc16d74197..0cc28a215a 100644 --- a/internal/collector/otelcol.tmpl +++ b/internal/collector/otelcol.tmpl @@ -75,17 +75,17 @@ receivers: collection_interval: 10s {{- end }} -{{ if gt (len .Processors) 0 -}} processors: -{{- range .Processors }} - {{ .Type }}: -{{- end }} +{{- if ne .Processors.Batch nil }} + batch: + send_batch_size: {{ .Processors.Batch.SendBatchSize }} + timeout: {{ .Processors.Batch.Timeout }} + send_batch_max_size: {{ .Processors.Batch.SendBatchMaxSize }} {{- end }} exporters: -{{- range .Exporters }} -{{- if eq .Type "otlp" }} - otlp: +{{- range $index, $otlpExporter := .Exporters.OtlpExporters }} + otlp/{{$index}}: endpoint: "{{ .Server.Host -}}:{{- .Server.Port }}" compression: none timeout: 10s @@ -101,17 +101,17 @@ exporters: {{ if gt (len .TLS.Cert) 0 -}}cert_file: "{{- .TLS.Cert -}}"{{- end }} {{ if gt (len .TLS.Key) 0 -}}key_file: "{{- .TLS.Key -}}"{{- end }} {{- end }} -{{- else if eq .Type "prometheus" }} +{{- end }} +{{- if ne .Exporters.PrometheusExporter nil }} prometheus: - endpoint: "{{ .Server.Host -}}:{{- .Server.Port }}" - namespace: "nginx-agent" -{{- else if eq .Type "debug" }} + endpoint: "{{ .Exporters.PrometheusExporter.Server.Host -}}:{{- .Exporters.PrometheusExporter.Server.Port }}" +{{- end }} +{{- if ne .Exporters.Debug nil }} debug: verbosity: detailed sampling_initial: 5 sampling_thereafter: 200 {{- end }} -{{- end }} {{- if ne .Extensions nil }} extensions: @@ -156,8 +156,16 @@ service: - nginxplus/{{- .InstanceID -}} {{- end }} processors: + {{- if ne .Processors.Batch nil }} - batch + {{- end }} exporters: - {{- range .Exporters }} - - {{ .Type }} + {{- range $index, $otlpExporter := .Exporters.OtlpExporters }} + - otlp/{{$index}} + {{- end }} + {{- if ne .Exporters.PrometheusExporter nil }} + - prometheus {{- end }} + {{- if ne .Exporters.Debug nil }} + - debug + {{- end }} diff --git a/internal/collector/settings_test.go b/internal/collector/settings_test.go index 1cf282af63..fb65198941 100644 --- a/internal/collector/settings_test.go +++ b/internal/collector/settings_test.go @@ -53,21 +53,16 @@ func TestTemplateWrite(t *testing.T) { actualConfPath := filepath.Join("/tmp/", "nginx-agent-otelcol-test.yaml") cfg.Collector.ConfigPath = actualConfPath - cfg.Collector.Exporters = append(cfg.Collector.Exporters, config.Exporter{ - Type: "prometheus", + cfg.Collector.Exporters.PrometheusExporter = 
&config.PrometheusExporter{ Server: &config.ServerConfig{ Host: "localhost", Port: 9876, Type: 0, }, - Auth: nil, // Auth and TLS not supported yet. - TLS: nil, - }, config.Exporter{ - Type: "debug", - Server: nil, // not relevant to the debug exporter - Auth: nil, - TLS: nil, - }) + TLS: nil, + } + + cfg.Collector.Exporters.Debug = &config.DebugExporter{} cfg.Collector.Receivers.HostMetrics = config.HostMetrics{ CollectionInterval: time.Minute, diff --git a/internal/config/config.go b/internal/config/config.go index f28f1e7ccd..4e581d3d27 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -218,27 +218,6 @@ func registerFlags() { "How often the NGINX Agent will check for file changes.", ) - fs.String( - CollectorConfigPathKey, - DefCollectorConfigPath, - "The path to the Opentelemetry Collector configuration file.", - ) - - fs.String( - CollectorLogLevelKey, - DefCollectorLogLevel, - `The desired verbosity level for logging messages from nginx-agent OTel collector. - Available options, in order of severity from highest to lowest, are: - ERROR, WARN, INFO and DEBUG.`, - ) - - fs.String( - CollectorLogPathKey, - DefCollectorLogPath, - `The path to output OTel collector log messages to. - If the default path doesn't exist, log messages are output to stdout/stderr.`, - ) - fs.Int( ClientMaxMessageSizeKey, DefMaxMessageSize, @@ -246,7 +225,7 @@ func registerFlags() { ) fs.Int( - ClientMaxMessageRecieveSizeKey, + ClientMaxMessageReceiveSizeKey, DefMaxMessageRecieveSize, "Updates the client grpc setting MaxRecvMsgSize with the specific value in MB.", ) @@ -257,6 +236,8 @@ func registerFlags() { "Updates the client grpc setting MaxSendMsgSize with the specific value in MB.", ) + registerCollectorFlags(fs) + fs.SetNormalizeFunc(normalizeFunc) fs.VisitAll(func(flag *flag.Flag) { @@ -270,6 +251,47 @@ func registerFlags() { }) } +func registerCollectorFlags(fs *flag.FlagSet) { + fs.String( + CollectorConfigPathKey, + DefCollectorConfigPath, + "The path to the Opentelemetry Collector configuration file.", + ) + + fs.String( + CollectorLogLevelKey, + DefCollectorLogLevel, + `The desired verbosity level for logging messages from nginx-agent OTel collector. + Available options, in order of severity from highest to lowest, are: + ERROR, WARN, INFO and DEBUG.`, + ) + + fs.String( + CollectorLogPathKey, + DefCollectorLogPath, + `The path to output OTel collector log messages to. 
+ If the default path doesn't exist, log messages are output to stdout/stderr.`, + ) + + fs.Uint32( + CollectorBatchProcessorSendBatchSizeKey, + DefCollectorBatchProcessorSendBatchSize, + `Number of metric data points after which a batch will be sent regardless of the timeout.`, + ) + + fs.Uint32( + CollectorBatchProcessorSendBatchMaxSizeKey, + DefCollectorBatchProcessorSendBatchMaxSize, + `The upper limit of the batch size.`, + ) + + fs.Duration( + CollectorBatchProcessorTimeoutKey, + DefCollectorBatchProcessorTimeout, + `Time duration after which a batch will be sent regardless of size.`, + ) +} + func seekFileInPaths(fileName string, directories ...string) (string, error) { for _, directory := range directories { f := filepath.Join(directory, fileName) @@ -340,7 +362,7 @@ func resolveClient() *Client { Time: viperInstance.GetDuration(ClientTimeKey), PermitWithoutStream: viperInstance.GetBool(ClientPermitWithoutStreamKey), MaxMessageSize: viperInstance.GetInt(ClientMaxMessageSizeKey), - MaxMessageRecieveSize: viperInstance.GetInt(ClientMaxMessageRecieveSizeKey), + MaxMessageRecieveSize: viperInstance.GetInt(ClientMaxMessageReceiveSizeKey), MaxMessageSendSize: viperInstance.GetInt(ClientMaxMessageSendSizeKey), } } @@ -355,8 +377,7 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { var ( err error - exporters []Exporter - processors []Processor + exporters Exporters receivers Receivers extensions Extensions log Log @@ -365,7 +386,6 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { err = errors.Join( err, resolveMapStructure(CollectorExportersKey, &exporters), - resolveMapStructure(CollectorProcessorsKey, &processors), resolveMapStructure(CollectorReceiversKey, &receivers), resolveMapStructure(CollectorExtensionsKey, &extensions), resolveMapStructure(CollectorLogKey, &log), @@ -385,7 +405,7 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { col := &Collector{ ConfigPath: viperInstance.GetString(CollectorConfigPathKey), Exporters: exporters, - Processors: processors, + Processors: resolveProcessors(), Receivers: receivers, Extensions: extensions, Log: &log, @@ -404,6 +424,19 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { return col, nil } +func resolveProcessors() Processors { + processors := Processors{} + + if viperInstance.IsSet(CollectorBatchProcessorKey) { + processors.Batch = &Batch{} + processors.Batch.SendBatchSize = viperInstance.GetUint32(CollectorBatchProcessorSendBatchSizeKey) + processors.Batch.SendBatchMaxSize = viperInstance.GetUint32(CollectorBatchProcessorSendBatchMaxSizeKey) + processors.Batch.Timeout = viperInstance.GetDuration(CollectorBatchProcessorTimeoutKey) + } + + return processors +} + // generate self-signed certificate for OTEL receiver // nolint: revive func handleSelfSignedCertificates(col *Collector) error { diff --git a/internal/config/config_test.go b/internal/config/config_test.go index e4f292cf9a..836ebb7ce0 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -25,8 +25,8 @@ const accessLogFormat = `$remote_addr - $remote_user [$time_local] \"$request\" func TestRegisterConfigFile(t *testing.T) { viperInstance = viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)) file, err := os.Create("nginx-agent.conf") - defer helpers.RemoveFileWithErrorCheck(t, file.Name()) require.NoError(t, err) + defer helpers.RemoveFileWithErrorCheck(t, file.Name()) currentDirectory, err := os.Getwd() require.NoError(t, err) @@ -68,7 +68,7 @@ func TestResolveConfig(t 
*testing.T) { require.NotNil(t, actual.Collector) assert.Equal(t, "/etc/nginx-agent/nginx-agent-otelcol.yaml", actual.Collector.ConfigPath) assert.NotEmpty(t, actual.Collector.Receivers) - assert.NotEmpty(t, actual.Collector.Processors) + assert.Equal(t, Processors{Batch: &Batch{}}, actual.Collector.Processors) assert.NotEmpty(t, actual.Collector.Exporters) assert.NotEmpty(t, actual.Collector.Extensions) @@ -172,73 +172,40 @@ func TestResolveClient(t *testing.T) { func TestResolveCollector(t *testing.T) { testDefault := getAgentConfig() - tests := []struct { - expected *Collector - name string - errMsg string - shouldErr bool - }{ - { - name: "Test 1: Happy path", - expected: testDefault.Collector, - }, - { - name: "Test 2: Non allowed path", - expected: &Collector{ - ConfigPath: "/path/to/secret", - }, - shouldErr: true, - errMsg: "collector path /path/to/secret not allowed", - }, - { - name: "Test 3: Unsupported Exporter", - expected: &Collector{ - ConfigPath: testDefault.Collector.ConfigPath, - Exporters: []Exporter{ - { - Type: "not-allowed", - }, - }, - }, - shouldErr: true, - errMsg: "unsupported exporter type: not-allowed", - }, - { - name: "Test 4: Unsupported Processor", - expected: &Collector{ - ConfigPath: testDefault.Collector.ConfigPath, - Exporters: testDefault.Collector.Exporters, - Processors: []Processor{ - { - Type: "custom-processor", - }, - }, - }, - shouldErr: true, - errMsg: "unsupported processor type: custom-processor", - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - viperInstance = viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)) - viperInstance.Set(CollectorConfigPathKey, test.expected.ConfigPath) - viperInstance.Set(CollectorReceiversKey, test.expected.Receivers) - viperInstance.Set(CollectorProcessorsKey, test.expected.Processors) - viperInstance.Set(CollectorExportersKey, test.expected.Exporters) - viperInstance.Set(CollectorExtensionsKey, test.expected.Extensions) - viperInstance.Set(CollectorLogKey, test.expected.Log) - - actual, err := resolveCollector(testDefault.AllowedDirectories) - if test.shouldErr { - require.Error(t, err) - assert.Contains(t, err.Error(), test.errMsg) - } else { - require.NoError(t, err) - assert.Equal(t, test.expected, actual) - } - }) - } + t.Run("Test 1: Happy path", func(t *testing.T) { + expected := testDefault.Collector + + viperInstance = viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)) + viperInstance.Set(CollectorConfigPathKey, expected.ConfigPath) + viperInstance.Set(CollectorReceiversKey, expected.Receivers) + viperInstance.Set(CollectorBatchProcessorKey, expected.Processors.Batch) + viperInstance.Set(CollectorBatchProcessorSendBatchSizeKey, expected.Processors.Batch.SendBatchSize) + viperInstance.Set(CollectorBatchProcessorSendBatchMaxSizeKey, expected.Processors.Batch.SendBatchMaxSize) + viperInstance.Set(CollectorBatchProcessorTimeoutKey, expected.Processors.Batch.Timeout) + viperInstance.Set(CollectorExportersKey, expected.Exporters) + viperInstance.Set(CollectorExtensionsKey, expected.Extensions) + viperInstance.Set(CollectorLogKey, expected.Log) + + actual, err := resolveCollector(testDefault.AllowedDirectories) + require.NoError(t, err) + assert.Equal(t, expected, actual) + }) + + t.Run("Test 2: Non allowed path", func(t *testing.T) { + expected := &Collector{ + ConfigPath: "/path/to/secret", + } + errMsg := "collector path /path/to/secret not allowed" + + viperInstance = viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)) + viperInstance.Set(CollectorConfigPathKey, 
expected.ConfigPath) + + _, err := resolveCollector(testDefault.AllowedDirectories) + + require.Error(t, err) + assert.Contains(t, err.Error(), errMsg) + }) } func TestCommand(t *testing.T) { @@ -306,10 +273,10 @@ func TestClient(t *testing.T) { // root keys for sections are set appropriately assert.True(t, viperInstance.IsSet(ClientMaxMessageSizeKey)) - assert.False(t, viperInstance.IsSet(ClientMaxMessageRecieveSizeKey)) + assert.False(t, viperInstance.IsSet(ClientMaxMessageReceiveSizeKey)) assert.False(t, viperInstance.IsSet(ClientMaxMessageSendSizeKey)) - viperInstance.Set(ClientMaxMessageRecieveSizeKey, expected.MaxMessageRecieveSize) + viperInstance.Set(ClientMaxMessageReceiveSizeKey, expected.MaxMessageRecieveSize) viperInstance.Set(ClientMaxMessageSendSizeKey, expected.MaxMessageSendSize) result := resolveClient() @@ -337,29 +304,31 @@ func getAgentConfig() *Config { }, Collector: &Collector{ ConfigPath: "/etc/nginx-agent/nginx-agent-otelcol.yaml", - Exporters: []Exporter{ - { - Type: "otlp", - Server: &ServerConfig{ - Host: "127.0.0.1", - Port: 1234, - Type: 0, - }, - Auth: &AuthConfig{ - Token: "super-secret-token", - }, - TLS: &TLSConfig{ - Cert: "/path/to/server-cert.pem", - Key: "/path/to/server-cert.pem", - Ca: "/path/to/server-cert.pem", - SkipVerify: true, - ServerName: "remote-saas-server", + Exporters: Exporters{ + OtlpExporters: []OtlpExporter{ + { + Server: &ServerConfig{ + Host: "127.0.0.1", + Port: 1234, + }, + Auth: &AuthConfig{ + Token: "super-secret-token", + }, + TLS: &TLSConfig{ + Cert: "/path/to/server-cert.pem", + Key: "/path/to/server-cert.pem", + Ca: "/path/to/server-cert.pem", + SkipVerify: true, + ServerName: "remote-saas-server", + }, }, }, }, - Processors: []Processor{ - { - Type: "batch", + Processors: Processors{ + Batch: &Batch{ + SendBatchMaxSize: DefCollectorBatchProcessorSendBatchMaxSize, + SendBatchSize: DefCollectorBatchProcessorSendBatchSize, + Timeout: DefCollectorBatchProcessorTimeout, }, }, Receivers: Receivers{ diff --git a/internal/config/defaults.go b/internal/config/defaults.go index 5a2b6b7f4b..cede509067 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -50,4 +50,8 @@ const ( DefMaxMessageRecieveSize = 4194304 // math.MaxInt32 DefMaxMessageSendSize = math.MaxInt32 + + DefCollectorBatchProcessorSendBatchSize = 8192 + DefCollectorBatchProcessorSendBatchMaxSize = 0 + DefCollectorBatchProcessorTimeout = 200 * time.Millisecond ) diff --git a/internal/config/flags.go b/internal/config/flags.go index 1474fa6537..92a19839b1 100644 --- a/internal/config/flags.go +++ b/internal/config/flags.go @@ -25,39 +25,41 @@ const ( var ( // child flags saved as vars to enable easier prefixing. 
- ClientPermitWithoutStreamKey = pre(ClientRootKey) + "permit_without_stream" - ClientTimeKey = pre(ClientRootKey) + "time" - ClientTimeoutKey = pre(ClientRootKey) + "timeout" - ClientMaxMessageSendSizeKey = pre(ClientRootKey) + "max_message_send_size" - ClientMaxMessageRecieveSizeKey = pre(ClientRootKey) + "max_message_receive_size" - ClientMaxMessageSizeKey = pre(ClientRootKey) + "max_message_size" - CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" - CollectorExportersKey = pre(CollectorRootKey) + "exporters" - CollectorProcessorsKey = pre(CollectorRootKey) + "processors" - CollectorExtensionsKey = pre(CollectorRootKey) + "extensions" - CollectorReceiversKey = pre(CollectorRootKey) + "receivers" - CollectorLogKey = pre(CollectorRootKey) + "log" - CollectorLogLevelKey = pre(CollectorLogKey) + "level" - CollectorLogPathKey = pre(CollectorLogKey) + "path" - CommandAuthKey = pre(CommandRootKey) + "auth" - CommandAuthTokenKey = pre(CommandAuthKey) + "token" - CommandServerHostKey = pre(CommandServerKey) + "host" - CommandServerKey = pre(CommandRootKey) + "server" - CommandServerPortKey = pre(CommandServerKey) + "port" - CommandServerTypeKey = pre(CommandServerKey) + "type" - CommandTLSCaKey = pre(CommandTLSKey) + "ca" - CommandTLSCertKey = pre(CommandTLSKey) + "cert" - CommandTLSKey = pre(CommandRootKey) + "tls" - CommandTLSKeyKey = pre(CommandTLSKey) + "key" - CommandTLSServerNameKey = pre(CommandRootKey) + "server_name" - CommandTLSSkipVerifyKey = pre(CommandTLSKey) + "skip_verify" - LogLevelKey = pre(LogLevelRootKey) + "level" - LogPathKey = pre(LogLevelRootKey) + "path" - NginxReloadMonitoringPeriodKey = pre(DataPlaneConfigRootKey, "nginx") + "reload_monitoring_period" - NginxTreatWarningsAsErrorsKey = pre(DataPlaneConfigRootKey, "nginx") + "treat_warnings_as_errors" - NginxExcludeLogsKey = pre(DataPlaneConfigRootKey, "nginx") + "exclude_logs" - OTLPExportURLKey = pre(CollectorRootKey) + "otlp_export_url" - OTLPReceiverURLKey = pre(CollectorRootKey) + "otlp_receiver_url" + ClientPermitWithoutStreamKey = pre(ClientRootKey) + "permit_without_stream" + ClientTimeKey = pre(ClientRootKey) + "time" + ClientTimeoutKey = pre(ClientRootKey) + "timeout" + ClientMaxMessageSendSizeKey = pre(ClientRootKey) + "max_message_send_size" + ClientMaxMessageReceiveSizeKey = pre(ClientRootKey) + "max_message_receive_size" + ClientMaxMessageSizeKey = pre(ClientRootKey) + "max_message_size" + CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" + CollectorExportersKey = pre(CollectorRootKey) + "exporters" + CollectorProcessorsKey = pre(CollectorRootKey) + "processors" + CollectorBatchProcessorKey = pre(CollectorProcessorsKey) + "batch" + CollectorBatchProcessorSendBatchSizeKey = pre(CollectorBatchProcessorKey) + "send_batch_size" + CollectorBatchProcessorSendBatchMaxSizeKey = pre(CollectorBatchProcessorKey) + "send_batch_max_size" + CollectorBatchProcessorTimeoutKey = pre(CollectorBatchProcessorKey) + "timeout" + CollectorExtensionsKey = pre(CollectorRootKey) + "extensions" + CollectorReceiversKey = pre(CollectorRootKey) + "receivers" + CollectorLogKey = pre(CollectorRootKey) + "log" + CollectorLogLevelKey = pre(CollectorLogKey) + "level" + CollectorLogPathKey = pre(CollectorLogKey) + "path" + CommandAuthKey = pre(CommandRootKey) + "auth" + CommandAuthTokenKey = pre(CommandAuthKey) + "token" + CommandServerHostKey = pre(CommandServerKey) + "host" + CommandServerKey = pre(CommandRootKey) + "server" + CommandServerPortKey = pre(CommandServerKey) + "port" + CommandServerTypeKey = 
pre(CommandServerKey) + "type" + CommandTLSCaKey = pre(CommandTLSKey) + "ca" + CommandTLSCertKey = pre(CommandTLSKey) + "cert" + CommandTLSKey = pre(CommandRootKey) + "tls" + CommandTLSKeyKey = pre(CommandTLSKey) + "key" + CommandTLSServerNameKey = pre(CommandRootKey) + "server_name" + CommandTLSSkipVerifyKey = pre(CommandTLSKey) + "skip_verify" + LogLevelKey = pre(LogLevelRootKey) + "level" + LogPathKey = pre(LogLevelRootKey) + "path" + NginxReloadMonitoringPeriodKey = pre(DataPlaneConfigRootKey, "nginx") + "reload_monitoring_period" + NginxTreatWarningsAsErrorsKey = pre(DataPlaneConfigRootKey, "nginx") + "treat_warnings_as_errors" + NginxExcludeLogsKey = pre(DataPlaneConfigRootKey, "nginx") + "exclude_logs" ) func pre(prefixes ...string) string { diff --git a/internal/config/testdata/nginx-agent.conf b/internal/config/testdata/nginx-agent.conf index f67e1b6608..398d75d9cf 100644 --- a/internal/config/testdata/nginx-agent.conf +++ b/internal/config/testdata/nginx-agent.conf @@ -41,21 +41,20 @@ collector: - file_path: "/var/log/nginx/access-custom.conf" log_format: "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent \"$http_referer\" \"$http_user_agent\" \"$http_x_forwarded_for\"\"$upstream_cache_status\"" processors: - - type: batch + batch: {} exporters: - - type: otlp - server: - host: "127.0.0.1" - port: 5643 - type: 0 - auth: - Token: "secret-saas-token" - tls: - server_name: "test-saas-server" - skip_verify: false - cert: /path/to/server-cert.pem - key: /path/to/server-key.pem - ca: /path/to/server-cert.pem + otlp_exporters: + - server: + host: "127.0.0.1" + port: 5643 + auth: + Token: "secret-saas-token" + tls: + server_name: "test-saas-server" + skip_verify: false + cert: /path/to/server-cert.pem + key: /path/to/server-key.pem + ca: /path/to/server-cert.pem extensions: health: server: diff --git a/internal/config/types.go b/internal/config/types.go index acc5dd9842..cfa80cf453 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -15,18 +15,6 @@ import ( "github.com/google/uuid" ) -var ( - supportedExporters = map[string]struct{}{ - "debug": {}, - "otlp": {}, - "prometheus": {}, - } - - supportedProcessors = map[string]struct{}{ - "batch": {}, - } -) - type ServerType int const ( @@ -86,12 +74,24 @@ type ( } Collector struct { - ConfigPath string `yaml:"-" mapstructure:"config_path"` - Log *Log `yaml:"-" mapstructure:"log"` - Exporters []Exporter `yaml:"-" mapstructure:"exporters"` - Extensions Extensions `yaml:"-" mapstructure:"extensions"` - Processors []Processor `yaml:"-" mapstructure:"processors"` - Receivers Receivers `yaml:"-" mapstructure:"receivers"` + ConfigPath string `yaml:"-" mapstructure:"config_path"` + Log *Log `yaml:"-" mapstructure:"log"` + Exporters Exporters `yaml:"-" mapstructure:"exporters"` + Extensions Extensions `yaml:"-" mapstructure:"extensions"` + Processors Processors `yaml:"-" mapstructure:"processors"` + Receivers Receivers `yaml:"-" mapstructure:"receivers"` + } + + Exporters struct { + Debug *DebugExporter `yaml:"-" mapstructure:"debug"` + PrometheusExporter *PrometheusExporter `yaml:"-" mapstructure:"prometheus_exporter"` + OtlpExporters []OtlpExporter `yaml:"-" mapstructure:"otlp_exporters"` + } + + OtlpExporter struct { + Server *ServerConfig `yaml:"-" mapstructure:"server"` + Auth *AuthConfig `yaml:"-" mapstructure:"auth"` + TLS *TLSConfig `yaml:"-" mapstructure:"tls"` } Extensions struct { @@ -104,18 +104,24 @@ type ( Path string `yaml:"-" mapstructure:"path"` } - // OTel Collector Exporter 
configuration. - Exporter struct { + DebugExporter struct{} + + PrometheusExporter struct { Server *ServerConfig `yaml:"-" mapstructure:"server"` - Auth *AuthConfig `yaml:"-" mapstructure:"auth"` TLS *TLSConfig `yaml:"-" mapstructure:"tls"` - Type string `yaml:"-" mapstructure:"type"` } - // OTel Collector Processor configuration. - Processor struct { - Type string `yaml:"-" mapstructure:"type"` + // OTel Collector Processors configuration. + Processors struct { + Batch *Batch `yaml:"-" mapstructure:"batch"` } + + Batch struct { + SendBatchSize uint32 `yaml:"-" mapstructure:"send_batch_size"` + SendBatchMaxSize uint32 `yaml:"-" mapstructure:"send_batch_max_size"` + Timeout time.Duration `yaml:"-" mapstructure:"timeout"` + } + // OTel Collector Receiver configuration. Receivers struct { OtlpReceivers []OtlpReceiver `yaml:"-" mapstructure:"otlp_receivers"` @@ -250,29 +256,6 @@ func (col *Collector) Validate(allowedDirectories []string) error { err = errors.Join(err, nginxReceiver.Validate(allowedDirectories)) } - for _, exp := range col.Exporters { - t := strings.ToLower(exp.Type) - - if _, ok := supportedExporters[t]; !ok { - err = errors.Join(err, fmt.Errorf("unsupported exporter type: %s", exp.Type)) - continue - } - - // normalize field too - exp.Type = t - } - - for _, proc := range col.Processors { - t := strings.ToLower(proc.Type) - - if _, ok := supportedProcessors[t]; !ok { - err = errors.Join(err, fmt.Errorf("unsupported processor type: %s", proc.Type)) - continue - } - - proc.Type = t - } - return err } diff --git a/test/config/agent/nginx-agent-otel-load.conf b/test/config/agent/nginx-agent-otel-load.conf index b328dab3c0..0b8ff73178 100644 --- a/test/config/agent/nginx-agent-otel-load.conf +++ b/test/config/agent/nginx-agent-otel-load.conf @@ -23,14 +23,12 @@ collector: port: 4317 type: 0 processors: - - type: batch + batch: {} exporters: - - type: otlp - server: - host: "127.0.0.1" - port: 5643 - type: 0 - + otlp_exporters: + - server: + host: "127.0.0.1" + port: 5643 extensions: health: server: diff --git a/test/config/collector/test-opentelemetry-collector-agent.yaml b/test/config/collector/test-opentelemetry-collector-agent.yaml index ae30185a8c..4e74a9207f 100644 --- a/test/config/collector/test-opentelemetry-collector-agent.yaml +++ b/test/config/collector/test-opentelemetry-collector-agent.yaml @@ -25,9 +25,12 @@ receivers: processors: batch: + send_batch_size: 8192 + timeout: 200ms + send_batch_max_size: 0 exporters: - otlp: + otlp/0: endpoint: "127.0.0.1:1234" compression: none timeout: 10s @@ -40,7 +43,6 @@ exporters: insecure: true prometheus: endpoint: "localhost:9876" - namespace: "nginx-agent" debug: verbosity: detailed sampling_initial: 5 @@ -66,6 +68,6 @@ service: processors: - batch exporters: - - otlp + - otlp/0 - prometheus - debug diff --git a/test/mock/collector/nginx-agent.conf b/test/mock/collector/nginx-agent.conf index a1074b7ecc..d67410115a 100644 --- a/test/mock/collector/nginx-agent.conf +++ b/test/mock/collector/nginx-agent.conf @@ -51,13 +51,12 @@ collector: key: /tmp/key.pem generate_self_signed_cert: true processors: - - type: batch + batch: exporters: - - type: otlp - server: - host: "otel-collector" - port: 4317 - type: 0 + otlp_exporters: + - server: + host: "otel-collector" + port: 4317 extensions: health: server: diff --git a/test/types/config.go b/test/types/config.go index ad93f325ce..15d82c278d 100644 --- a/test/types/config.go +++ b/test/types/config.go @@ -14,9 +14,7 @@ import ( ) const ( - apiPort = 8980 commandPort = 8981 - metricsPort 
= 8982 clientPermitWithoutStream = true clientTime = 50 * time.Second @@ -51,22 +49,24 @@ func AgentConfig() *config.Config { AllowedDirectories: []string{"/tmp/"}, Collector: &config.Collector{ ConfigPath: "/etc/nginx-agent/nginx-agent-otelcol.yaml", - Exporters: []config.Exporter{ - { - Type: "otlp", - Server: &config.ServerConfig{ - Host: "127.0.0.1", - Port: randomPort1, - Type: 0, - }, - Auth: &config.AuthConfig{ - Token: "super-secret-token", + Exporters: config.Exporters{ + OtlpExporters: []config.OtlpExporter{ + { + Server: &config.ServerConfig{ + Host: "127.0.0.1", + Port: randomPort1, + }, + Auth: &config.AuthConfig{ + Token: "super-secret-token", + }, }, }, }, - Processors: []config.Processor{ - { - Type: "batch", + Processors: config.Processors{ + Batch: &config.Batch{ + SendBatchSize: config.DefCollectorBatchProcessorSendBatchSize, + SendBatchMaxSize: config.DefCollectorBatchProcessorSendBatchMaxSize, + Timeout: config.DefCollectorBatchProcessorTimeout, }, }, Receivers: config.Receivers{ From ead9df833282ba4a6ffef8c4e9ef0880d708c3a5 Mon Sep 17 00:00:00 2001 From: Donal Hurley Date: Thu, 26 Sep 2024 11:41:28 +0100 Subject: [PATCH 3/3] Fix nginx response status metrics (#854) --- .../internal/record/recorder.go | 68 -- .../internal/record/recorder_test.go | 160 ---- .../basic-nginx.http.response.status.yaml | 26 - .../multicode-nginx.http.response.status.yaml | 73 -- .../scraper/accesslog/nginx_log_scraper.go | 78 +- .../scraper/accesslog/testdata/expected.yaml | 33 +- .../accesslog/testdata/test-access.log | 6 + internal/config/defaults.go | 17 +- scripts/testing/load/Dockerfile | 2 +- .../provisioning/dashboards/host-metrics.json | 4 +- .../dashboards/nginx-dashboard.json | 876 ++++++++---------- 11 files changed, 481 insertions(+), 862 deletions(-) delete mode 100644 internal/collector/nginxossreceiver/internal/record/recorder.go delete mode 100644 internal/collector/nginxossreceiver/internal/record/recorder_test.go delete mode 100644 internal/collector/nginxossreceiver/internal/record/testdata/basic-nginx.http.response.status.yaml delete mode 100644 internal/collector/nginxossreceiver/internal/record/testdata/multicode-nginx.http.response.status.yaml diff --git a/internal/collector/nginxossreceiver/internal/record/recorder.go b/internal/collector/nginxossreceiver/internal/record/recorder.go deleted file mode 100644 index ee8bee3103..0000000000 --- a/internal/collector/nginxossreceiver/internal/record/recorder.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) F5, Inc. -// -// This source code is licensed under the Apache License, Version 2.0 license found in the -// LICENSE file in the root directory of this source tree. - -package record - -import ( - "fmt" - "strconv" - "time" - - "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/metadata" - "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/model" - "go.opentelemetry.io/collector/pdata/pcommon" -) - -const ( - // Needed for "magic number" linter. - status100 = 100 - status200 = 200 - status300 = 300 - status400 = 400 - status500 = 500 - - percent = 100 -) - -// Item extracts data from NGINX Access Items and records them using the given MetricsBuilder. 
-func Item(ai *model.NginxAccessItem, mb *metadata.MetricsBuilder) error { - now := pcommon.NewTimestampFromTime(time.Now()) - - if ai.Status != "" { - codeRange, err := mapCodeRange(ai.Status) - if err != nil { - return fmt.Errorf("parse status range: %w", err) - } - - mb.RecordNginxHTTPResponseStatusDataPoint(now, 1, codeRange) - } - - return nil -} - -func mapCodeRange(statusCode string) (metadata.AttributeNginxStatusRange, error) { - number, err := strconv.Atoi(statusCode) - if err != nil { - return 0, fmt.Errorf("cast status code to int: %w", err) - } - - // We want to "floor" the response code, so we can map it to the correct range (i.e. to 1xx, 2xx, 4xx or 5xx). - codeRange := (number / percent) * percent - - switch codeRange { - case status100: - return metadata.AttributeNginxStatusRange1xx, nil - case status200: - return metadata.AttributeNginxStatusRange2xx, nil - case status300: - return metadata.AttributeNginxStatusRange3xx, nil - case status400: - return metadata.AttributeNginxStatusRange4xx, nil - case status500: - return metadata.AttributeNginxStatusRange5xx, nil - default: - return 0, fmt.Errorf("unknown code range: %d", codeRange) - } -} diff --git a/internal/collector/nginxossreceiver/internal/record/recorder_test.go b/internal/collector/nginxossreceiver/internal/record/recorder_test.go deleted file mode 100644 index a99c7a725a..0000000000 --- a/internal/collector/nginxossreceiver/internal/record/recorder_test.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) F5, Inc. -// -// This source code is licensed under the Apache License, Version 2.0 license found in the -// LICENSE file in the root directory of this source tree. - -package record - -import ( - "errors" - "path/filepath" - "testing" - - "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/metadata" - "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/model" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/receiver/receivertest" -) - -const testDataDir = "testdata" - -func TestRecordAccessItem(t *testing.T) { - tests := []struct { - name string - expectedPath string - expErrMsg string - input []*model.NginxAccessItem - shouldErr bool - }{ - { - name: "Test 1: basic nginx.http.response.status case", - input: []*model.NginxAccessItem{ - { - BodyBytesSent: "615", - Status: "200", - RemoteAddress: "127.0.0.1", - HTTPUserAgent: "PostmanRuntime/7.36.1", - Request: "GET / HTTP/1.1", - BytesSent: "853", - RequestLength: "226", - RequestTime: "0.000", - GzipRatio: "-", - ServerProtocol: "HTTP/1.1", - UpstreamConnectTime: "-", - UpstreamHeaderTime: "-", - UpstreamResponseTime: "-", - UpstreamResponseLength: "-", - UpstreamStatus: "", - UpstreamCacheStatus: "", - }, - { - BodyBytesSent: "28", - Status: "200", - RemoteAddress: "127.0.0.1", - HTTPUserAgent: "PostmanRuntime/7.36.1", - Request: "GET /frontend1 HTTP/1.1", - BytesSent: "190", - RequestLength: "235", - RequestTime: "0.004", - GzipRatio: "-", - ServerProtocol: "HTTP/1.1", - UpstreamConnectTime: "0.003", - UpstreamHeaderTime: "0.003", - UpstreamResponseTime: "0.003", - UpstreamResponseLength: "28", - UpstreamStatus: "", - UpstreamCacheStatus: "", - }, - }, - expectedPath: "basic-nginx.http.response.status.yaml", - }, - { - name: "Test 2: all nginx.http.response.status status codes", - input: 
[]*model.NginxAccessItem{ - { // The recorder only parses the status code for this metric, omitting other fields for brevity. - Status: "100", - }, - { - Status: "103", - }, - { - Status: "200", - }, - { - Status: "202", - }, - { - Status: "300", - }, - { - Status: "302", - }, - { - Status: "400", - }, - { - Status: "404", - }, - { - Status: "500", - }, - { - Status: "502", - }, - }, - expectedPath: "multicode-nginx.http.response.status.yaml", - }, - { - name: "Test 3: random string in status code", - input: []*model.NginxAccessItem{ - { - Status: "not-a-status-code", - }, - }, - shouldErr: true, - expErrMsg: "cast status code to int", - }, - { - name: "Test 4: non-existent status code range", - input: []*model.NginxAccessItem{ - { - Status: "700", - }, - }, - shouldErr: true, - expErrMsg: "unknown code range: 700", - }, - } - - mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsBuilderConfig(), receivertest.NewNopSettings()) - - for _, test := range tests { - t.Run(test.name, func(tt *testing.T) { - var err error - for _, item := range test.input { - recordErr := Item(item, mb) - err = errors.Join(err, recordErr) - } - - if test.shouldErr { - require.Error(tt, err) - assert.Contains(tt, err.Error(), test.expErrMsg) - } else { - require.NoError(tt, err) - expectedFile := filepath.Join(testDataDir, test.expectedPath) - expected, readErr := golden.ReadMetrics(expectedFile) - require.NoError(t, readErr) - - actual := mb.Emit() - require.NoError(tt, pmetrictest.CompareMetrics(expected, actual, - pmetrictest.IgnoreStartTimestamp(), - pmetrictest.IgnoreMetricDataPointsOrder(), - pmetrictest.IgnoreTimestamp(), - pmetrictest.IgnoreMetricsOrder())) - } - }) - } -} diff --git a/internal/collector/nginxossreceiver/internal/record/testdata/basic-nginx.http.response.status.yaml b/internal/collector/nginxossreceiver/internal/record/testdata/basic-nginx.http.response.status.yaml deleted file mode 100644 index 994992e871..0000000000 --- a/internal/collector/nginxossreceiver/internal/record/testdata/basic-nginx.http.response.status.yaml +++ /dev/null @@ -1,26 +0,0 @@ -resourceMetrics: - - resource: {} - scopeMetrics: - - metrics: - - description: The number of responses, grouped by status code range. - name: nginx.http.response.status - sum: - aggregationTemporality: 2 # Cumulative - isMonotonic: true - dataPoints: - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "2xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "2xx" - timeUnixNano: "1000000" - unit: responses - scope: - name: otelcol/nginxreceiver - version: latest diff --git a/internal/collector/nginxossreceiver/internal/record/testdata/multicode-nginx.http.response.status.yaml b/internal/collector/nginxossreceiver/internal/record/testdata/multicode-nginx.http.response.status.yaml deleted file mode 100644 index 38ee8518dc..0000000000 --- a/internal/collector/nginxossreceiver/internal/record/testdata/multicode-nginx.http.response.status.yaml +++ /dev/null @@ -1,73 +0,0 @@ -resourceMetrics: - - resource: {} - scopeMetrics: - - metrics: - - description: The number of responses, grouped by status code range. 
- name: nginx.http.response.status - sum: - aggregationTemporality: 2 # Cumulative - isMonotonic: true - dataPoints: - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "1xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "1xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "2xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "2xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "3xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "3xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "4xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "4xx" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "5xx" - timeUnixNano: "1000000" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "5xx" - timeUnixNano: "1000000" - unit: responses - scope: - name: otelcol/nginxreceiver - version: latest diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go index 93391655d9..1868fa0e68 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go @@ -8,7 +8,11 @@ package accesslog import ( "context" "fmt" + "strconv" "sync" + "time" + + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" @@ -27,10 +31,11 @@ import ( "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/config" "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/metadata" "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/model" - "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/record" "github.com/nginx/agent/v3/internal/collector/nginxossreceiver/internal/scraper/accesslog/operator/input/file" ) +const Percentage = 100 + type ( NginxLogScraper struct { outChan <-chan []*entry.Entry @@ -43,6 +48,18 @@ type ( entries []*entry.Entry mut sync.Mutex } + + NginxMetrics struct { + responseStatuses ResponseStatuses + } + + ResponseStatuses struct { + oneHundredStatusRange int64 + twoHundredStatusRange int64 + threeHundredStatusRange int64 + fourHundredStatusRange int64 + fiveHundredStatusRange int64 + } ) var _ scraperhelper.Scraper = (*NginxLogScraper)(nil) @@ -55,7 +72,7 @@ func NewScraper( logger.Info("Creating NGINX access log scraper") mb := metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings) - operators := []operator.Config{} + operators := make([]operator.Config, 0) for _, accessLog := range cfg.AccessLogs { logger.Info("Adding access log file operator", zap.String("file_path", accessLog.FilePath)) @@ -106,21 +123,66 @@ func (nls *NginxLogScraper) Start(parentCtx context.Context, _ component.Host) e func (nls *NginxLogScraper) Scrape(_ context.Context) (pmetric.Metrics, error) { nls.mut.Lock() defer nls.mut.Unlock() + + nginxMetrics := NginxMetrics{} + for _, ent := range nls.entries 
{ - nls.logger.Info("Scraping NGINX access log", zap.Any("entity", ent)) + nls.logger.Debug("Scraping NGINX access log", zap.Any("entity", ent)) item, ok := ent.Body.(*model.NginxAccessItem) if !ok { - nls.logger.Info("Failed to cast log entry to *model.NginxAccessItem", zap.Any("entry", ent.Body)) + nls.logger.Warn("Failed to cast log entry to *model.NginxAccessItem", zap.Any("entry", ent.Body)) continue } - err := record.Item(item, nls.mb) - if err != nil { - nls.logger.Info("Recording metric failed", zap.Any("item", item), zap.Error(err)) - continue + if v, err := strconv.Atoi(item.Status); err == nil { + codeRange := fmt.Sprintf("%dxx", v/Percentage) + + switch codeRange { + case "1xx": + nginxMetrics.responseStatuses.oneHundredStatusRange++ + case "2xx": + nginxMetrics.responseStatuses.twoHundredStatusRange++ + case "3xx": + nginxMetrics.responseStatuses.threeHundredStatusRange++ + case "4xx": + nginxMetrics.responseStatuses.fourHundredStatusRange++ + case "5xx": + nginxMetrics.responseStatuses.fiveHundredStatusRange++ + default: + nls.logger.Error("Unknown status range", zap.String("codeRange", codeRange)) + continue + } } } + nls.entries = make([]*entry.Entry, 0) + timeNow := pcommon.NewTimestampFromTime(time.Now()) + + nls.mb.RecordNginxHTTPResponseStatusDataPoint( + timeNow, + nginxMetrics.responseStatuses.oneHundredStatusRange, + metadata.AttributeNginxStatusRange1xx, + ) + nls.mb.RecordNginxHTTPResponseStatusDataPoint( + timeNow, + nginxMetrics.responseStatuses.twoHundredStatusRange, + metadata.AttributeNginxStatusRange2xx, + ) + nls.mb.RecordNginxHTTPResponseStatusDataPoint( + timeNow, + nginxMetrics.responseStatuses.threeHundredStatusRange, + metadata.AttributeNginxStatusRange3xx, + ) + nls.mb.RecordNginxHTTPResponseStatusDataPoint( + timeNow, + nginxMetrics.responseStatuses.fourHundredStatusRange, + metadata.AttributeNginxStatusRange4xx, + ) + nls.mb.RecordNginxHTTPResponseStatusDataPoint( + timeNow, + nginxMetrics.responseStatuses.fiveHundredStatusRange, + metadata.AttributeNginxStatusRange5xx, + ) return nls.mb.Emit(), nil } diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml index 94a4d72368..edc97f287a 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml @@ -8,48 +8,27 @@ resourceMetrics: aggregationTemporality: 2 # Cumulative isMonotonic: true dataPoints: - - asInt: 1 + - asInt: 0 attributes: - key: nginx.status_range value: - stringValue: 2xx - timeUnixNano: "1000000" - - asInt: 1 + stringValue: "1xx" + - asInt: 4 attributes: - key: nginx.status_range value: stringValue: "2xx" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "3xx" - - asInt: 1 + - asInt: 2 attributes: - key: nginx.status_range value: stringValue: "3xx" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "4xx" - - asInt: 1 + - asInt: 6 attributes: - key: nginx.status_range value: stringValue: "4xx" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "5xx" - - asInt: 1 - attributes: - - key: nginx.status_range - value: - stringValue: "5xx" - - asInt: 1 + - asInt: 3 attributes: - key: nginx.status_range value: diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/test-access.log 
b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/test-access.log index a9d8ed8349..9ebbe29294 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/test-access.log +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/test-access.log @@ -1,8 +1,14 @@ 127.0.0.1 - - [16/Apr/2024:09:00:45 +0100] "GET /example HTTP/1.0" 200 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" +127.0.0.1 - - [16/Apr/2024:09:00:45 +0100] "GET /example HTTP/1.0" 200 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" +127.0.0.1 - - [16/Apr/2024:09:00:45 +0100] "GET /example HTTP/1.0" 200 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" 127.0.0.1 - - [16/Apr/2024:09:00:45 +0100] "GET /example HTTP/1.1" 203 28 "-" "PostmanRuntime/7.36.1" "-" "190" "235" "0.004" "-" "HTTP/1.1" "0.003""0.003" "28" "0.003" 127.0.0.1 - - [16/Apr/2024:09:03:02 +0100] "GET /example HTTP/1.0" 300 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" 127.0.0.1 - - [16/Apr/2024:09:03:02 +0100] "GET /example HTTP/1.1" 303 28 "-" "PostmanRuntime/7.36.1" "-" "190" "235" "0.002" "-" "HTTP/1.1" "0.000""0.003" "28" "0.003" 127.0.0.1 - - [16/Apr/2024:10:57:45 +0100] "GET /example HTTP/1.0" 400 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" +127.0.0.1 - - [16/Apr/2024:10:57:45 +0100] "GET /example HTTP/1.0" 400 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" +127.0.0.1 - - [16/Apr/2024:10:57:45 +0100] "GET /example HTTP/1.0" 400 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" +127.0.0.1 - - [16/Apr/2024:10:57:45 +0100] "GET /example HTTP/1.0" 400 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" +127.0.0.1 - - [16/Apr/2024:10:57:45 +0100] "GET /example HTTP/1.0" 400 28 "-" "PostmanRuntime/7.36.1" "-" "185" "222" "0.000" "-" "HTTP/1.0" "-""-" "-" "-" 127.0.0.1 - - [16/Apr/2024:10:57:45 +0100] "GET /example HTTP/1.1" 401 28 "-" "PostmanRuntime/7.36.1" "-" "190" "235" "0.406" "-" "HTTP/1.1" "0.297""0.407" "28" "0.407" 127.0.0.1 - - [16/Apr/2024:10:57:55 +0100] "GET / HTTP/1.1" 500 615 "-" "PostmanRuntime/7.36.1" "-" "853" "226" "0.000" "-" "HTTP/1.1" "-""-" "-" "-" 127.0.0.1 - - [16/Apr/2024:10:58:25 +0100] "GET / HTTP/1.1" 502 615 "-" "PostmanRuntime/7.36.1" "-" "853" "226" "0.000" "-" "HTTP/1.1" "-""-" "-" "-" diff --git a/internal/config/defaults.go b/internal/config/defaults.go index cede509067..57d7903eb5 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -14,15 +14,14 @@ const ( DefNginxReloadMonitoringPeriod = 10 * time.Second DefTreatErrorsAsWarnings = true - DefCollectorConfigPath = "/var/run/nginx-agent/opentelemetry-collector-agent.yaml" - DefCollectorTLSGenSelfSignedCert = false - DefCollectorLogLevel = "INFO" - DefCollectorLogPath = "/var/log/nginx-agent/opentelemetry-collector-agent.log" - DefConfigDirectories = "/etc/nginx:/usr/local/etc/nginx:/usr/share/nginx/modules" - DefCollectorTLSCertPath = "/var/lib/nginx-agent/cert.pem" - DefCollectorTLSKeyPath = "/var/lib/nginx-agent/key.pem" - DefCollectorTLSCAPath = "/var/lib/nginx-agent/ca.pem" - DefCollectorTLSSANNames = "127.0.0.1,::1,localhost" + DefCollectorConfigPath = "/etc/nginx-agent/opentelemetry-collector-agent.yaml" + DefCollectorLogLevel = "INFO" + DefCollectorLogPath = 
"/var/log/nginx-agent/opentelemetry-collector-agent.log" + DefConfigDirectories = "/etc/nginx:/usr/local/etc/nginx:/usr/share/nginx/modules" + DefCollectorTLSCertPath = "/var/lib/nginx-agent/cert.pem" + DefCollectorTLSKeyPath = "/var/lib/nginx-agent/key.pem" + DefCollectorTLSCAPath = "/var/lib/nginx-agent/ca.pem" + DefCollectorTLSSANNames = "127.0.0.1,::1,localhost" DefCommandServerHostKey = "" DefCommandServerPortKey = 0 diff --git a/scripts/testing/load/Dockerfile b/scripts/testing/load/Dockerfile index c4fd26a277..af065a08fe 100644 --- a/scripts/testing/load/Dockerfile +++ b/scripts/testing/load/Dockerfile @@ -80,7 +80,7 @@ ENV PATH="/usr/local/go/bin:${PATH}" ENV PATH=$PATH:/usr/local/go/bin RUN mv /agent/test/config/agent/nginx-agent-otel-load.conf /agent/test/load/nginx-agent.conf -RUN mkdir /var/run/nginx-agent/ /var/log/nginx-agent/ +RUN mkdir /var/run/nginx-agent/ /var/log/nginx-agent/ /etc/nginx-agent/ WORKDIR /agent/ CMD make install-tools diff --git a/test/mock/collector/grafana/provisioning/dashboards/host-metrics.json b/test/mock/collector/grafana/provisioning/dashboards/host-metrics.json index 7ba158a2cc..2fd9cee461 100644 --- a/test/mock/collector/grafana/provisioning/dashboards/host-metrics.json +++ b/test/mock/collector/grafana/provisioning/dashboards/host-metrics.json @@ -209,7 +209,7 @@ "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "{{nginx_conn_outcome}}", + "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false @@ -308,7 +308,7 @@ "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "Requests", + "legendFormat": "__auto", "range": true, "refId": "A", "useBackend": false diff --git a/test/mock/collector/grafana/provisioning/dashboards/nginx-dashboard.json b/test/mock/collector/grafana/provisioning/dashboards/nginx-dashboard.json index a9224370a8..1744513236 100644 --- a/test/mock/collector/grafana/provisioning/dashboards/nginx-dashboard.json +++ b/test/mock/collector/grafana/provisioning/dashboards/nginx-dashboard.json @@ -1,540 +1,440 @@ { "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] }, "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, "links": [], "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 
null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "id": 5, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { + { "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" + "type": "prometheus", + "uid": "otel-prometheus-scraper" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "nginx_http_response_status", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "HTTP Response Status", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { - "color": "red", - "value": 80 + "datasource": { + "type": "prometheus", + "uid": "otel-prometheus-scraper" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "nginx_http_conn_count", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{nginx_conn_outcome}}", + "range": true, + "refId": "A", + "useBackend": false } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 + ], + "title": "HTTP Connections Count", + "type": "timeseries" }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, 
- "mode": "single", - "sort": "none" - } - }, - "targets": [ - { + { "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" + "type": "prometheus", + "uid": "otel-prometheus-scraper" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "nginx_connections_current", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "NGINX Connections Current", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { - "color": "red", - "value": 80 + "datasource": { + "type": "prometheus", + "uid": "otel-prometheus-scraper" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "nginx_http_conn", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{nginx_conn_outcome}}", + "range": true, + "refId": "A", + "useBackend": false } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 8 + ], + "title": "HTTP Connections", + "type": "timeseries" }, - "id": 1, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { + { "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" + "type": "prometheus", + "uid": "otel-prometheus-scraper" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"nginx_http_requests", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "NGINX Requests Total", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ { - "color": "red", - "value": 80 + "datasource": { + "type": "prometheus", + "uid": "otel-prometheus-scraper" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "nginx_http_requests", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 8 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } + ], + "title": "Total HTTP Requests", + "type": "timeseries" }, - "targets": [ - { + { "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" + "type": "prometheus", + "uid": "otel-prometheus-scraper" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "nginx_http_conn_count", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "NGINX Connections Current Count", - "type": "timeseries" - }, - { - "datasource": { - 
"type": "prometheus", - "uid": "otel-prometheus-scraper" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, - { - "color": "red", - "value": 80 + "tooltip": { + "mode": "single", + "sort": "none" } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 16 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "maxHeight": 600, - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "otel-prometheus-scraper" }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "nginx_http_conn", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "__auto", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "NGINX Connection Totals", - "type": "timeseries" - } + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "otel-prometheus-scraper" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "nginx_http_response_status", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "HTTP Response Status", + "type": "timeseries" + } ], "refresh": "5s", "schemaVersion": 39, "tags": [], "templating": { - "list": [] + "list": [] }, "time": { - "from": "now-5m", - "to": "now" + "from": "now-5m", + "to": "now" }, "timepicker": {}, "timezone": "browser", "title": "NGINX OSS", "uid": "bdogpq9khs9hcb", - "version": 1, + "version": 6, "weekStart": "" - } +}