From a3c015f1cb16d8c514199287b1c34a522c95ab88 Mon Sep 17 00:00:00 2001 From: Christopher van de Sande Date: Fri, 11 Oct 2024 13:07:06 +0100 Subject: [PATCH 01/12] Improve comments for docs, make private objects public --- pkg/tls/self_signed_cert.go | 72 ++++++++++++++++++++----------------- 1 file changed, 39 insertions(+), 33 deletions(-) diff --git a/pkg/tls/self_signed_cert.go b/pkg/tls/self_signed_cert.go index 65e57c4402..d531754e73 100644 --- a/pkg/tls/self_signed_cert.go +++ b/pkg/tls/self_signed_cert.go @@ -3,7 +3,7 @@ // This source code is licensed under the Apache License, Version 2.0 license found in the // LICENSE file in the root directory of this source tree. -// Package gencert generates self-signed TLS certificates. +// Package gencert generates a certificate authority (CA) and a server certificate signed by it. package tls import ( @@ -20,28 +20,32 @@ import ( "time" ) +// Predefined constants for Org and file permissions const ( - caOrganization = "F5 Inc. CA" - certOrganization = "F5 Inc." - certFilePermissions = 0o600 - keyFilePermissions = 0o600 + CaOrganization = "F5 Inc. CA" + CertOrganization = "F5 Inc." + CertFilePermissions = 0o600 + KeyFilePermissions = 0o600 ) -type certReq struct { - template *x509.Certificate - parent *x509.Certificate - publicKey *ecdsa.PublicKey - privateKey *ecdsa.PrivateKey +// CertReq contains a ECDSA key pair and 2 x509.Certificate templates, a server and parent. +// When generating a CA, template and parent are identical, making the CA "self-signed". +// When generating a server certificate, the `parent` is the CA template and `template` is the server. +type CertReq struct { + Template *x509.Certificate + Parent *x509.Certificate + PublicKey *ecdsa.PublicKey + PrivateKey *ecdsa.PrivateKey } // Returns x509 Certificate object and bytes in PEM format -func genCert(req *certReq) (*x509.Certificate, []byte, error) { +func GenCert(req *CertReq) (*x509.Certificate, []byte, error) { certBytes, createCertErr := x509.CreateCertificate( rand.Reader, - req.template, - req.parent, - req.publicKey, - req.privateKey, + req.Template, + req.Parent, + req.PublicKey, + req.PrivateKey, ) if createCertErr != nil { @@ -70,7 +74,7 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri // Create CA certificate template caTemplate := x509.Certificate{ SerialNumber: big.NewInt(1), - Subject: pkix.Name{Organization: []string{certOrganization}}, + Subject: pkix.Name{Organization: []string{CertOrganization}}, NotBefore: now.Add(-time.Minute), NotAfter: now.AddDate(1, 0, 0), // 1 year KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, @@ -81,14 +85,14 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri } // CA is self signed - caRequest := certReq{ - template: &caTemplate, - parent: &caTemplate, - publicKey: &caKeyPair.PublicKey, - privateKey: caKeyPair, + caRequest := CertReq{ + Template: &caTemplate, + Parent: &caTemplate, + PublicKey: &caKeyPair.PublicKey, + PrivateKey: caKeyPair, } - caCert, caCertPEM, caErr := genCert(&caRequest) + caCert, caCertPEM, caErr := GenCert(&caRequest) if caErr != nil { return &x509.Certificate{}, &ecdsa.PrivateKey{}, fmt.Errorf( "error generating certificate authority: %w", @@ -96,7 +100,7 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri } // Write the CA certificate to a file - writeCAErr := os.WriteFile(caCertPath, caCertPEM, certFilePermissions) + writeCAErr := os.WriteFile(caCertPath, caCertPEM, CertFilePermissions) if 
writeCAErr != nil { return &x509.Certificate{}, &ecdsa.PrivateKey{}, fmt.Errorf( "failed to write ca file: %w", @@ -107,7 +111,9 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri return caCert, caKeyPair, nil } -// Writes CA, Cert, Key to specified destinations. If cert files are already present, does nothing, returns true +// Writes CA, Cert, Key to specified destinations. +// Hostnames are a list of subject alternative names. +// If cert files are already present, does nothing, returns true. // nolint: revive func GenerateServerCerts(hostnames []string, caPath, certPath, keyPath string) (existingCert bool, err error) { // Check for and return existing cert if it already exists @@ -141,7 +147,7 @@ func GenerateServerCerts(hostnames []string, caPath, certPath, keyPath string) ( servTemplate := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ - Organization: []string{caOrganization}, + Organization: []string{CaOrganization}, }, NotBefore: now.Add(-time.Minute), NotAfter: now.AddDate(1, 0, 0), // 1 year @@ -150,21 +156,21 @@ func GenerateServerCerts(hostnames []string, caPath, certPath, keyPath string) ( DNSNames: hostnames, } - servRequest := certReq{ - template: &servTemplate, - parent: caCert, - publicKey: &servKeyPair.PublicKey, - privateKey: caKeyPair, + servRequest := CertReq{ + Template: &servTemplate, + Parent: caCert, + PublicKey: &servKeyPair.PublicKey, + PrivateKey: caKeyPair, } // Generate server certficated signed by the CA - _, servCertPEM, servCertErr := genCert(&servRequest) + _, servCertPEM, servCertErr := GenCert(&servRequest) if servCertErr != nil { return false, fmt.Errorf("error generating server certificate: %w", servCertErr) } // Write the certificate to a file - writeCertErr := os.WriteFile(certPath, servCertPEM, certFilePermissions) + writeCertErr := os.WriteFile(certPath, servCertPEM, CertFilePermissions) if writeCertErr != nil { return false, fmt.Errorf("failed to write certificate file: %w", writeCertErr) } @@ -176,7 +182,7 @@ func GenerateServerCerts(hostnames []string, caPath, certPath, keyPath string) ( } b := pem.Block{Type: "EC PRIVATE KEY", Bytes: servKeyBytes} servKeyPEM := pem.EncodeToMemory(&b) - writeKeyErr := os.WriteFile(keyPath, servKeyPEM, keyFilePermissions) + writeKeyErr := os.WriteFile(keyPath, servKeyPEM, KeyFilePermissions) if writeKeyErr != nil { return false, fmt.Errorf("failed to write key file: %w", writeKeyErr) } From 111d06e9246f9df02ed15a10f855ad3b9ca5a92a Mon Sep 17 00:00:00 2001 From: Chris <99910348+CVanF5@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:17:21 +0100 Subject: [PATCH 02/12] Update pkg/tls/self_signed_cert.go Co-authored-by: Donal Hurley --- pkg/tls/self_signed_cert.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/tls/self_signed_cert.go b/pkg/tls/self_signed_cert.go index d531754e73..1233b82054 100644 --- a/pkg/tls/self_signed_cert.go +++ b/pkg/tls/self_signed_cert.go @@ -39,7 +39,7 @@ type CertReq struct { } // Returns x509 Certificate object and bytes in PEM format -func GenCert(req *CertReq) (*x509.Certificate, []byte, error) { +func GenerateCertificate(req *CertReq) (*x509.Certificate, []byte, error) { certBytes, createCertErr := x509.CreateCertificate( rand.Reader, req.Template, From 7b733e4a7bfe1820c4fbdddb546382be92f1e045 Mon Sep 17 00:00:00 2001 From: Chris <99910348+CVanF5@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:17:29 +0100 Subject: [PATCH 03/12] Update pkg/tls/self_signed_cert.go Co-authored-by: Donal Hurley --- 
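The exported entry point for callers of this package is GenerateServerCerts, which creates the CA, server certificate, and key and writes them to the given paths. A minimal usage sketch follows; it assumes the package is importable as github.com/nginx/agent/v3/pkg/tls (consistent with the module path used elsewhere in this series), and the import alias and output paths are illustrative only.

package main

import (
	"log"

	selfsignedtls "github.com/nginx/agent/v3/pkg/tls"
)

func main() {
	// GenerateServerCerts creates a CA plus a server certificate and key for
	// the given hostnames (subject alternative names) and writes them to the
	// supplied paths. If the certificate files already exist, it does nothing
	// and reports existing as true.
	existing, err := selfsignedtls.GenerateServerCerts(
		[]string{"localhost"},
		"/tmp/agent-ca.pem",     // illustrative path
		"/tmp/agent-server.crt", // illustrative path
		"/tmp/agent-server.key", // illustrative path
	)
	if err != nil {
		log.Fatalf("generating self-signed certificates: %v", err)
	}
	log.Printf("certificate files already present: %v", existing)
}
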
pkg/tls/self_signed_cert.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/tls/self_signed_cert.go b/pkg/tls/self_signed_cert.go index 1233b82054..160fc9de5d 100644 --- a/pkg/tls/self_signed_cert.go +++ b/pkg/tls/self_signed_cert.go @@ -111,7 +111,7 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri return caCert, caKeyPair, nil } -// Writes CA, Cert, Key to specified destinations. +// GenerateServerCerts creates a server CA, Cert and Key and writes them to specified destinations. // Hostnames are a list of subject alternative names. // If cert files are already present, does nothing, returns true. // nolint: revive From ea608000c41b024657728344f2f6ca052ebc9f34 Mon Sep 17 00:00:00 2001 From: RRashmit <132996156+RRashmit@users.noreply.github.com> Date: Tue, 15 Oct 2024 13:34:00 +0100 Subject: [PATCH 04/12] chore: enabling publishing packages (#903) --- .github/workflows/ci.yml | 46 +-- api/grpc/mpi/v1/command.pb.go | 686 +++++++--------------------------- api/grpc/mpi/v1/common.pb.go | 48 +-- api/grpc/mpi/v1/files.pb.go | 290 +++----------- 4 files changed, 204 insertions(+), 866 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d70db7d41..efe33d215a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -307,34 +307,22 @@ jobs: if: ${{ success() && github.ref_name == 'v3'}} run: git push 'https://github-actions:${{ secrets.GITHUB_TOKEN }}@github.com/nginx/agent.git' benchmark-results:benchmark-results - publish-packages-vars: - name: Set workflow variables + publish-packages: + name: Publish NGINX Agent v3 packages if: ${{ github.ref_name == 'v3' && !github.event.pull_request.head.repo.fork }} - runs-on: ubuntu-22.04 - outputs: - package_build_num: ${{ steps.get_build_num.outputs.build_num }} - steps: - - name: Get the build number - id: get_build_num - run: echo "build_num=${{ github.run_number }}-$(echo ${{ github.sha }} | cut -c1-7)" >> $GITHUB_OUTPUT - -# publish-packages: -# name: Publish NGINX Agent v3 packages -# if: ${{ github.ref_name == 'v3' && -# !github.event.pull_request.head.repo.fork }} -# needs: [ lint, unit-test, performance-tests, -# load-tests, official-oss-image-integration-tests, -# official-plus-image-integration-tests, -# race-condition-test, publish-packages-vars ] -# uses: ./.github/workflows/release-branch.yml -# secrets: inherit -# permissions: -# id-token: write -# contents: read -# with: -# packageVersion: "3.0.0" -# packageBuildNo: "${{ needs.publish-packages-vars.outputs.package_build_num }}" -# uploadAzure: true -# publishPackages: true -# releaseBranch: "v3" + needs: [ lint, unit-test, performance-tests, + load-tests, official-oss-image-integration-tests, + official-plus-image-integration-tests, + race-condition-test ] + uses: ./.github/workflows/release-branch.yml + secrets: inherit + permissions: + id-token: write + contents: read + with: + packageVersion: "3.0.0" + packageBuildNo: "${{ github.run_number }}}" + uploadAzure: true + publishPackages: true + releaseBranch: "v3" diff --git a/api/grpc/mpi/v1/command.pb.go b/api/grpc/mpi/v1/command.pb.go index 76ea4a02fd..7f6e52f857 100644 --- a/api/grpc/mpi/v1/command.pb.go +++ b/api/grpc/mpi/v1/command.pb.go @@ -8,7 +8,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: mpi/v1/command.proto @@ -162,11 +162,9 @@ type CreateConnectionRequest struct { func (x *CreateConnectionRequest) Reset() { *x = CreateConnectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateConnectionRequest) String() string { @@ -177,7 +175,7 @@ func (*CreateConnectionRequest) ProtoMessage() {} func (x *CreateConnectionRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -227,11 +225,9 @@ type Resource struct { func (x *Resource) Reset() { *x = Resource{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Resource) String() string { @@ -242,7 +238,7 @@ func (*Resource) ProtoMessage() {} func (x *Resource) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -326,11 +322,9 @@ type HostInfo struct { func (x *HostInfo) Reset() { *x = HostInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HostInfo) String() string { @@ -341,7 +335,7 @@ func (*HostInfo) ProtoMessage() {} func (x *HostInfo) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -397,11 +391,9 @@ type ReleaseInfo struct { func (x *ReleaseInfo) Reset() { *x = ReleaseInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReleaseInfo) String() string { @@ -412,7 +404,7 @@ func (*ReleaseInfo) ProtoMessage() {} func (x *ReleaseInfo) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -474,11 +466,9 @@ type ContainerInfo struct { func (x *ContainerInfo) Reset() { *x = ContainerInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[4] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ContainerInfo) String() string { @@ -489,7 +479,7 @@ func (*ContainerInfo) ProtoMessage() {} func (x *ContainerInfo) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -525,11 +515,9 @@ type CreateConnectionResponse struct { func (x *CreateConnectionResponse) Reset() { *x = CreateConnectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CreateConnectionResponse) String() string { @@ -540,7 +528,7 @@ func (*CreateConnectionResponse) ProtoMessage() {} func (x *CreateConnectionResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -583,11 +571,9 @@ type UpdateDataPlaneStatusRequest struct { func (x *UpdateDataPlaneStatusRequest) Reset() { *x = UpdateDataPlaneStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateDataPlaneStatusRequest) String() string { @@ -598,7 +584,7 @@ func (*UpdateDataPlaneStatusRequest) ProtoMessage() {} func (x *UpdateDataPlaneStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -636,11 +622,9 @@ type UpdateDataPlaneStatusResponse struct { func (x *UpdateDataPlaneStatusResponse) Reset() { *x = UpdateDataPlaneStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateDataPlaneStatusResponse) String() string { @@ -651,7 +635,7 @@ func (*UpdateDataPlaneStatusResponse) ProtoMessage() {} func (x *UpdateDataPlaneStatusResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -680,11 +664,9 @@ type InstanceHealth struct { func (x *InstanceHealth) Reset() { *x = InstanceHealth{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceHealth) String() string { @@ -695,7 +677,7 @@ func (*InstanceHealth) 
ProtoMessage() {} func (x *InstanceHealth) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -745,11 +727,9 @@ type UpdateDataPlaneHealthRequest struct { func (x *UpdateDataPlaneHealthRequest) Reset() { *x = UpdateDataPlaneHealthRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateDataPlaneHealthRequest) String() string { @@ -760,7 +740,7 @@ func (*UpdateDataPlaneHealthRequest) ProtoMessage() {} func (x *UpdateDataPlaneHealthRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -798,11 +778,9 @@ type UpdateDataPlaneHealthResponse struct { func (x *UpdateDataPlaneHealthResponse) Reset() { *x = UpdateDataPlaneHealthResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateDataPlaneHealthResponse) String() string { @@ -813,7 +791,7 @@ func (*UpdateDataPlaneHealthResponse) ProtoMessage() {} func (x *UpdateDataPlaneHealthResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -844,11 +822,9 @@ type DataPlaneResponse struct { func (x *DataPlaneResponse) Reset() { *x = DataPlaneResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DataPlaneResponse) String() string { @@ -859,7 +835,7 @@ func (*DataPlaneResponse) ProtoMessage() {} func (x *DataPlaneResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -916,11 +892,9 @@ type ManagementPlaneRequest struct { func (x *ManagementPlaneRequest) Reset() { *x = ManagementPlaneRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ManagementPlaneRequest) String() string { @@ -931,7 +905,7 @@ func (*ManagementPlaneRequest) ProtoMessage() {} func (x *ManagementPlaneRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && 
x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1058,11 +1032,9 @@ type StatusRequest struct { func (x *StatusRequest) Reset() { *x = StatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StatusRequest) String() string { @@ -1073,7 +1045,7 @@ func (*StatusRequest) ProtoMessage() {} func (x *StatusRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1097,11 +1069,9 @@ type HealthRequest struct { func (x *HealthRequest) Reset() { *x = HealthRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthRequest) String() string { @@ -1112,7 +1082,7 @@ func (*HealthRequest) ProtoMessage() {} func (x *HealthRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1139,11 +1109,9 @@ type ConfigApplyRequest struct { func (x *ConfigApplyRequest) Reset() { *x = ConfigApplyRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConfigApplyRequest) String() string { @@ -1154,7 +1122,7 @@ func (*ConfigApplyRequest) ProtoMessage() {} func (x *ConfigApplyRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1188,11 +1156,9 @@ type ConfigUploadRequest struct { func (x *ConfigUploadRequest) Reset() { *x = ConfigUploadRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConfigUploadRequest) String() string { @@ -1203,7 +1169,7 @@ func (*ConfigUploadRequest) ProtoMessage() {} func (x *ConfigUploadRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1234,11 +1200,9 @@ type APIActionRequest struct { func (x *APIActionRequest) Reset() { *x = APIActionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[17] - 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *APIActionRequest) String() string { @@ -1249,7 +1213,7 @@ func (*APIActionRequest) ProtoMessage() {} func (x *APIActionRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1273,11 +1237,9 @@ type CommandStatusRequest struct { func (x *CommandStatusRequest) Reset() { *x = CommandStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CommandStatusRequest) String() string { @@ -1288,7 +1250,7 @@ func (*CommandStatusRequest) ProtoMessage() {} func (x *CommandStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1319,11 +1281,9 @@ type Instance struct { func (x *Instance) Reset() { *x = Instance{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Instance) String() string { @@ -1334,7 +1294,7 @@ func (*Instance) ProtoMessage() {} func (x *Instance) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1386,11 +1346,9 @@ type InstanceMeta struct { func (x *InstanceMeta) Reset() { *x = InstanceMeta{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceMeta) String() string { @@ -1401,7 +1359,7 @@ func (*InstanceMeta) ProtoMessage() {} func (x *InstanceMeta) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1453,11 +1411,9 @@ type InstanceConfig struct { func (x *InstanceConfig) Reset() { *x = InstanceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceConfig) String() string { @@ -1468,7 +1424,7 @@ func (*InstanceConfig) ProtoMessage() {} func (x *InstanceConfig) ProtoReflect() protoreflect.Message 
{ mi := &file_mpi_v1_command_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1539,11 +1495,9 @@ type InstanceRuntime struct { func (x *InstanceRuntime) Reset() { *x = InstanceRuntime{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceRuntime) String() string { @@ -1554,7 +1508,7 @@ func (*InstanceRuntime) ProtoMessage() {} func (x *InstanceRuntime) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1647,11 +1601,9 @@ type InstanceChild struct { func (x *InstanceChild) Reset() { *x = InstanceChild{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceChild) String() string { @@ -1662,7 +1614,7 @@ func (*InstanceChild) ProtoMessage() {} func (x *InstanceChild) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1704,11 +1656,9 @@ type NGINXRuntimeInfo struct { func (x *NGINXRuntimeInfo) Reset() { *x = NGINXRuntimeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NGINXRuntimeInfo) String() string { @@ -1719,7 +1669,7 @@ func (*NGINXRuntimeInfo) ProtoMessage() {} func (x *NGINXRuntimeInfo) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1791,11 +1741,9 @@ type NGINXPlusRuntimeInfo struct { func (x *NGINXPlusRuntimeInfo) Reset() { *x = NGINXPlusRuntimeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NGINXPlusRuntimeInfo) String() string { @@ -1806,7 +1754,7 @@ func (*NGINXPlusRuntimeInfo) ProtoMessage() {} func (x *NGINXPlusRuntimeInfo) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1872,11 +1820,9 @@ type InstanceAction struct { func (x *InstanceAction) Reset() { *x = InstanceAction{} - 
if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceAction) String() string { @@ -1887,7 +1833,7 @@ func (*InstanceAction) ProtoMessage() {} func (x *InstanceAction) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1924,11 +1870,9 @@ type AgentConfig struct { func (x *AgentConfig) Reset() { *x = AgentConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AgentConfig) String() string { @@ -1939,7 +1883,7 @@ func (*AgentConfig) ProtoMessage() {} func (x *AgentConfig) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1949,9 @@ type CommandServer struct { func (x *CommandServer) Reset() { *x = CommandServer{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CommandServer) String() string { @@ -2020,7 +1962,7 @@ func (*CommandServer) ProtoMessage() {} func (x *CommandServer) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2044,11 +1986,9 @@ type MetricsServer struct { func (x *MetricsServer) Reset() { *x = MetricsServer{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetricsServer) String() string { @@ -2059,7 +1999,7 @@ func (*MetricsServer) ProtoMessage() {} func (x *MetricsServer) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2083,11 +2023,9 @@ type FileServer struct { func (x *FileServer) Reset() { *x = FileServer{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_command_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_command_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileServer) String() string { @@ -2098,7 +2036,7 @@ func (*FileServer) ProtoMessage() {} func (x *FileServer) 
ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_command_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2555,380 +2493,6 @@ func file_mpi_v1_command_proto_init() { } file_mpi_v1_common_proto_init() file_mpi_v1_files_proto_init() - if !protoimpl.UnsafeEnabled { - file_mpi_v1_command_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*CreateConnectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*Resource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*HostInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ReleaseInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ContainerInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*CreateConnectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*UpdateDataPlaneStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*UpdateDataPlaneStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*InstanceHealth); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*UpdateDataPlaneHealthRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*UpdateDataPlaneHealthResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*DataPlaneResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*ManagementPlaneRequest); i { - case 0: - return &v.state - 
case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*StatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*HealthRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*ConfigApplyRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ConfigUploadRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*APIActionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*CommandStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*Instance); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*InstanceMeta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*InstanceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*InstanceRuntime); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*InstanceChild); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*NGINXRuntimeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*NGINXPlusRuntimeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*InstanceAction); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_mpi_v1_command_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*AgentConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*CommandServer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*MetricsServer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_command_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FileServer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_mpi_v1_command_proto_msgTypes[1].OneofWrappers = []any{ (*Resource_HostInfo)(nil), (*Resource_ContainerInfo)(nil), diff --git a/api/grpc/mpi/v1/common.pb.go b/api/grpc/mpi/v1/common.pb.go index ca47776c11..3d71214d35 100644 --- a/api/grpc/mpi/v1/common.pb.go +++ b/api/grpc/mpi/v1/common.pb.go @@ -5,7 +5,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: mpi/v1/common.proto @@ -104,11 +104,9 @@ type MessageMeta struct { func (x *MessageMeta) Reset() { *x = MessageMeta{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_common_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageMeta) String() string { @@ -119,7 +117,7 @@ func (*MessageMeta) ProtoMessage() {} func (x *MessageMeta) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_common_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -171,11 +169,9 @@ type CommandResponse struct { func (x *CommandResponse) Reset() { *x = CommandResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_common_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CommandResponse) String() string { @@ -186,7 +182,7 @@ func (*CommandResponse) ProtoMessage() {} func (x *CommandResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_common_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -296,32 +292,6 @@ func file_mpi_v1_common_proto_init() { if File_mpi_v1_common_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_mpi_v1_common_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*MessageMeta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_common_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*CommandResponse); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/api/grpc/mpi/v1/files.pb.go b/api/grpc/mpi/v1/files.pb.go index a9d6743ecd..b02e2c6cf8 100644 --- a/api/grpc/mpi/v1/files.pb.go +++ b/api/grpc/mpi/v1/files.pb.go @@ -5,7 +5,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc (unknown) // source: mpi/v1/files.proto @@ -102,11 +102,9 @@ type GetOverviewRequest struct { func (x *GetOverviewRequest) Reset() { *x = GetOverviewRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetOverviewRequest) String() string { @@ -117,7 +115,7 @@ func (*GetOverviewRequest) ProtoMessage() {} func (x *GetOverviewRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -158,11 +156,9 @@ type GetOverviewResponse struct { func (x *GetOverviewResponse) Reset() { *x = GetOverviewResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetOverviewResponse) String() string { @@ -173,7 +169,7 @@ func (*GetOverviewResponse) ProtoMessage() {} func (x *GetOverviewResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -209,11 +205,9 @@ type UpdateOverviewRequest struct { func (x *UpdateOverviewRequest) Reset() { *x = UpdateOverviewRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateOverviewRequest) String() string { @@ -224,7 +218,7 @@ func (*UpdateOverviewRequest) ProtoMessage() {} func (x *UpdateOverviewRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -262,11 +256,9 @@ type UpdateOverviewResponse struct { func (x *UpdateOverviewResponse) Reset() { *x = UpdateOverviewResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateOverviewResponse) String() string { @@ -277,7 +269,7 @@ func (*UpdateOverviewResponse) ProtoMessage() {} func (x *UpdateOverviewResponse) ProtoReflect() 
protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -306,11 +298,9 @@ type ConfigVersion struct { func (x *ConfigVersion) Reset() { *x = ConfigVersion{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConfigVersion) String() string { @@ -321,7 +311,7 @@ func (*ConfigVersion) ProtoMessage() {} func (x *ConfigVersion) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -364,11 +354,9 @@ type FileOverview struct { func (x *FileOverview) Reset() { *x = FileOverview{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOverview) String() string { @@ -379,7 +367,7 @@ func (*FileOverview) ProtoMessage() {} func (x *FileOverview) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -422,11 +410,9 @@ type File struct { func (x *File) Reset() { *x = File{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *File) String() string { @@ -437,7 +423,7 @@ func (*File) ProtoMessage() {} func (x *File) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -480,11 +466,9 @@ type GetFileRequest struct { func (x *GetFileRequest) Reset() { *x = GetFileRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetFileRequest) String() string { @@ -495,7 +479,7 @@ func (*GetFileRequest) ProtoMessage() {} func (x *GetFileRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -536,11 +520,9 @@ type GetFileResponse struct { func (x *GetFileResponse) Reset() { *x = GetFileResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - 
} + mi := &file_mpi_v1_files_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetFileResponse) String() string { @@ -551,7 +533,7 @@ func (*GetFileResponse) ProtoMessage() {} func (x *GetFileResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -585,11 +567,9 @@ type FileContents struct { func (x *FileContents) Reset() { *x = FileContents{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileContents) String() string { @@ -600,7 +580,7 @@ func (*FileContents) ProtoMessage() {} func (x *FileContents) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -642,11 +622,9 @@ type FileMeta struct { func (x *FileMeta) Reset() { *x = FileMeta{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileMeta) String() string { @@ -657,7 +635,7 @@ func (*FileMeta) ProtoMessage() {} func (x *FileMeta) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -723,11 +701,9 @@ type UpdateFileRequest struct { func (x *UpdateFileRequest) Reset() { *x = UpdateFileRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateFileRequest) String() string { @@ -738,7 +714,7 @@ func (*UpdateFileRequest) ProtoMessage() {} func (x *UpdateFileRequest) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -786,11 +762,9 @@ type UpdateFileResponse struct { func (x *UpdateFileResponse) Reset() { *x = UpdateFileResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_mpi_v1_files_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_mpi_v1_files_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateFileResponse) String() string { @@ -801,7 +775,7 @@ func (*UpdateFileResponse) ProtoMessage() {} func (x *UpdateFileResponse) ProtoReflect() protoreflect.Message { mi := &file_mpi_v1_files_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1021,164 +995,6 @@ func file_mpi_v1_files_proto_init() { return } file_mpi_v1_common_proto_init() - if !protoimpl.UnsafeEnabled { - file_mpi_v1_files_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GetOverviewRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*GetOverviewResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*UpdateOverviewRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*UpdateOverviewResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ConfigVersion); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*FileOverview); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*File); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*GetFileRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*GetFileResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*FileContents); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileMeta); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*UpdateFileRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_mpi_v1_files_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*UpdateFileResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_mpi_v1_files_proto_msgTypes[6].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ From c5724e7a25341c702d9d25ec277cba685a5a6473 Mon Sep 17 
00:00:00 2001 From: Christopher van de Sande Date: Tue, 15 Oct 2024 13:43:22 +0100 Subject: [PATCH 05/12] Apply feedback --- pkg/tls/self_signed_cert.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/tls/self_signed_cert.go b/pkg/tls/self_signed_cert.go index 160fc9de5d..a7220549af 100644 --- a/pkg/tls/self_signed_cert.go +++ b/pkg/tls/self_signed_cert.go @@ -92,7 +92,7 @@ func GenerateCA(now time.Time, caCertPath string) (*x509.Certificate, *ecdsa.Pri PrivateKey: caKeyPair, } - caCert, caCertPEM, caErr := GenCert(&caRequest) + caCert, caCertPEM, caErr := GenerateCertificate(&caRequest) if caErr != nil { return &x509.Certificate{}, &ecdsa.PrivateKey{}, fmt.Errorf( "error generating certificate authority: %w", @@ -164,7 +164,7 @@ func GenerateServerCerts(hostnames []string, caPath, certPath, keyPath string) ( } // Generate server certficated signed by the CA - _, servCertPEM, servCertErr := GenCert(&servRequest) + _, servCertPEM, servCertErr := GenerateCertificate(&servRequest) if servCertErr != nil { return false, fmt.Errorf("error generating server certificate: %w", servCertErr) } From 0ea4a5fbef88ddcd1b4489b47e61e058d46e3cd5 Mon Sep 17 00:00:00 2001 From: Donal Hurley Date: Thu, 17 Oct 2024 11:10:26 +0100 Subject: [PATCH 06/12] Add default config for collector (#899) --- internal/collector/otel_collector_plugin.go | 8 + .../collector/otel_collector_plugin_test.go | 29 ++- internal/collector/otelcol.tmpl | 4 +- internal/collector/settings_test.go | 2 +- internal/config/config.go | 218 ++++++++++++++---- internal/config/config_test.go | 9 +- internal/config/defaults.go | 50 ++-- internal/config/flags.go | 91 +++++--- internal/config/types.go | 23 +- internal/plugin/plugin_manager.go | 7 +- internal/plugin/plugin_manager_test.go | 6 +- nginx-agent.conf | 12 +- test/config/agent/nginx-agent-otel-load.conf | 8 +- test/load/nginx_agent_process_collector.go | 6 + test/types/config.go | 2 +- 15 files changed, 352 insertions(+), 123 deletions(-) diff --git a/internal/collector/otel_collector_plugin.go b/internal/collector/otel_collector_plugin.go index 160ec3e113..5a77432086 100644 --- a/internal/collector/otel_collector_plugin.go +++ b/internal/collector/otel_collector_plugin.go @@ -63,6 +63,7 @@ func New(conf *config.Config) (*Collector, error) { return &Collector{ config: conf, service: oTelCollector, + stopped: true, }, nil } @@ -73,6 +74,13 @@ func (oc *Collector) Init(ctx context.Context, mp bus.MessagePipeInterface) erro var runCtx context.Context runCtx, oc.cancel = context.WithCancel(ctx) + if !oc.config.AreReceiversConfigured() { + slog.InfoContext(runCtx, "No receivers configured for OTel Collector. 
"+ + "Waiting to discover a receiver before starting OTel collector.") + + return nil + } + err := writeCollectorConfig(oc.config.Collector) if err != nil { return fmt.Errorf("write OTel Collector config: %w", err) diff --git a/internal/collector/otel_collector_plugin_test.go b/internal/collector/otel_collector_plugin_test.go index c852b6a82e..3b3ea10015 100644 --- a/internal/collector/otel_collector_plugin_test.go +++ b/internal/collector/otel_collector_plugin_test.go @@ -5,11 +5,15 @@ package collector import ( + "bytes" "context" "fmt" + "strings" "testing" "time" + "github.com/nginx/agent/v3/test/stub" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/otelcol" @@ -29,6 +33,27 @@ func TestCollector_New(t *testing.T) { require.NoError(t, err, "NewCollector should not return an error with valid config") } +func TestCollector_Init(t *testing.T) { + conf := types.OTelConfig(t) + conf.Collector = &config.Collector{} + + logBuf := &bytes.Buffer{} + stub.StubLoggerWith(logBuf) + + collector, err := New(conf) + require.NoError(t, err, "NewCollector should not return an error with valid config") + + initError := collector.Init(context.Background(), nil) + require.NoError(t, initError) + + if s := logBuf.String(); !strings.Contains(s, "No receivers configured for OTel Collector. "+ + "Waiting to discover a receiver before starting OTel collector.") { + t.Errorf("Unexpected log %s", s) + } + + assert.True(t, collector.stopped) +} + func TestCollector_InitAndClose(t *testing.T) { conf := types.OTelConfig(t) conf.Collector.Log.Path = "" @@ -83,7 +108,7 @@ func TestCollector_Process(t *testing.T) { }, }, receivers: config.Receivers{ - HostMetrics: config.HostMetrics{ + HostMetrics: &config.HostMetrics{ CollectionInterval: time.Minute, InitialDelay: time.Second, Scrapers: &config.HostMetricsScrapers{ @@ -119,7 +144,7 @@ func TestCollector_Process(t *testing.T) { }, }, receivers: config.Receivers{ - HostMetrics: config.HostMetrics{ + HostMetrics: &config.HostMetrics{ CollectionInterval: time.Minute, InitialDelay: time.Second, Scrapers: &config.HostMetricsScrapers{ diff --git a/internal/collector/otelcol.tmpl b/internal/collector/otelcol.tmpl index 44b454ab8e..acbe8df7c8 100644 --- a/internal/collector/otelcol.tmpl +++ b/internal/collector/otelcol.tmpl @@ -1,5 +1,5 @@ receivers: -{{- if ne .Receivers.HostMetrics.CollectionInterval 0 }} +{{- if ne .Receivers.HostMetrics nil }} hostmetrics: collection_interval: {{ .Receivers.HostMetrics.CollectionInterval }} initial_delay: {{ .Receivers.HostMetrics.InitialDelay }} @@ -148,7 +148,7 @@ service: pipelines: metrics: receivers: - {{- if ne .Receivers.HostMetrics.CollectionInterval 0 }} + {{- if ne .Receivers.HostMetrics nil }} - hostmetrics {{- end }} {{- range $index, $otlpReceiver := .Receivers.OtlpReceivers }} diff --git a/internal/collector/settings_test.go b/internal/collector/settings_test.go index fb65198941..0757771116 100644 --- a/internal/collector/settings_test.go +++ b/internal/collector/settings_test.go @@ -64,7 +64,7 @@ func TestTemplateWrite(t *testing.T) { cfg.Collector.Exporters.Debug = &config.DebugExporter{} - cfg.Collector.Receivers.HostMetrics = config.HostMetrics{ + cfg.Collector.Receivers.HostMetrics = &config.HostMetrics{ CollectionInterval: time.Minute, InitialDelay: time.Second, Scrapers: &config.HostMetricsScrapers{ diff --git a/internal/config/config.go b/internal/config/config.go index c3836af80a..3d5b190fdf 100644 --- a/internal/config/config.go +++ 
b/internal/config/config.go @@ -127,15 +127,15 @@ func registerFlags() { fs.String( LogLevelKey, "info", - `The desired verbosity level for logging messages from nginx-agent. - Available options, in order of severity from highest to lowest, are: - panic, fatal, error, info and debug.`, + "The desired verbosity level for logging messages from nginx-agent. "+ + "Available options, in order of severity from highest to lowest, are: "+ + "panic, fatal, error, info and debug.", ) fs.String( LogPathKey, "", - `The path to output log messages to. - If the default path doesn't exist, log messages are output to stdout/stderr.`, + "The path to output log messages to. "+ + "If the default path doesn't exist, log messages are output to stdout/stderr.", ) fs.Duration( @@ -257,7 +257,7 @@ func registerCommandFlags(fs *flag.FlagSet) { fs.Bool( CommandTLSSkipVerifyKey, DefCommandTLSSkipVerifyKey, - "Testing only. SkipVerify controls client verification of a server's certificate chain and host name.", + "Testing only. Skip verify controls client verification of a server's certificate chain and host name.", ) fs.String( CommandTLSServerNameKey, @@ -276,16 +276,16 @@ func registerCollectorFlags(fs *flag.FlagSet) { fs.String( CollectorLogLevelKey, DefCollectorLogLevel, - `The desired verbosity level for logging messages from nginx-agent OTel collector. - Available options, in order of severity from highest to lowest, are: - ERROR, WARN, INFO and DEBUG.`, + "The desired verbosity level for logging messages from nginx-agent OTel collector. "+ + "Available options, in order of severity from highest to lowest, are: "+ + "ERROR, WARN, INFO and DEBUG.", ) fs.String( CollectorLogPathKey, DefCollectorLogPath, - `The path to output OTel collector log messages to. - If the default path doesn't exist, log messages are output to stdout/stderr.`, + "The path to output OTel collector log messages to. "+ + "If the default path doesn't exist, log messages are output to stdout/stderr.", ) fs.Uint32( @@ -305,6 +305,51 @@ func registerCollectorFlags(fs *flag.FlagSet) { DefCollectorBatchProcessorTimeout, `Time duration after which a batch will be sent regardless of size.`, ) + + fs.String( + CollectorExtensionsHealthServerHostKey, + DefCollectorExtensionsHealthServerHost, + `The hostname of the address to publish the OTel collector health check status.`, + ) + + fs.Int32( + CollectorExtensionsHealthServerPortKey, + DefCollectorExtensionsHealthServerPort, + `The port of the address to publish the OTel collector health check status.`, + ) + + fs.String( + CollectorExtensionsHealthPathKey, + DefCollectorExtensionsHealthPath, + `The path to be configured for the OTel collector health check server`, + ) + + fs.String( + CollectorExtensionsHealthTLSCertKey, + DefCollectorExtensionsHealthTLSCertPath, + "The path to the certificate file to use for TLS communication with the OTel collector health check server.", + ) + fs.String( + CollectorExtensionsHealthTLSKeyKey, + DefCollectorExtensionsHealthTLSKeyPath, + "The path to the certificate key file to use for TLS communication "+ + "with the OTel collector health check server.", + ) + fs.String( + CollectorExtensionsHealthTLSCaKey, + DefCollectorExtensionsHealthTLSCAPath, + "The path to CA certificate file to use for TLS communication with the OTel collector health check server.", + ) + fs.Bool( + CollectorExtensionsHealthTLSSkipVerifyKey, + DefCollectorExtensionsHealthTLSSkipVerify, + "Testing only. 
Skip verify controls client verification of a server's certificate chain and host name.", + ) + fs.String( + CollectorExtensionsHealthTLSServerNameKey, + DefCollectorExtensionsHealthTLServerNameKey, + "Specifies the name of the server sent in the TLS configuration.", + ) } func seekFileInPaths(fileName string, directories ...string) (string, error) { @@ -383,38 +428,16 @@ func resolveClient() *Client { } func resolveCollector(allowedDirs []string) (*Collector, error) { - // We do not want to return a sentinel error because we are joining all returned errors - // from config resolution and returning them without pattern matching. - // nolint: nilnil - if !viperInstance.IsSet(CollectorRootKey) { - return nil, nil - } - - var ( - err error - exporters Exporters - receivers Receivers - extensions Extensions - log Log - ) + var receivers Receivers - err = errors.Join( - err, - resolveMapStructure(CollectorExportersKey, &exporters), - resolveMapStructure(CollectorReceiversKey, &receivers), - resolveMapStructure(CollectorExtensionsKey, &extensions), - resolveMapStructure(CollectorLogKey, &log), - ) + err := resolveMapStructure(CollectorReceiversKey, &receivers) if err != nil { - return nil, fmt.Errorf("unmarshal collector config: %w", err) + return nil, fmt.Errorf("unmarshal collector receivers config: %w", err) } - if log.Level == "" { - log.Level = DefCollectorLogLevel - } - - if log.Path == "" { - log.Path = DefCollectorLogPath + exporters, err := resolveExporters() + if err != nil { + return nil, fmt.Errorf("unmarshal collector exporters config: %w", err) } col := &Collector{ @@ -422,8 +445,8 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { Exporters: exporters, Processors: resolveProcessors(), Receivers: receivers, - Extensions: extensions, - Log: &log, + Extensions: resolveExtensions(), + Log: resolveCollectorLog(), } // Check for self-signed certificate true in Agent conf @@ -439,17 +462,57 @@ func resolveCollector(allowedDirs []string) (*Collector, error) { return col, nil } -func resolveProcessors() Processors { - processors := Processors{} +func resolveExporters() (Exporters, error) { + var otlpExporters []OtlpExporter + exporters := Exporters{} + + if viperInstance.IsSet(CollectorDebugExporterKey) { + exporters.Debug = &DebugExporter{} + } + + if isPrometheusExporterSet() { + exporters.PrometheusExporter = &PrometheusExporter{ + Server: &ServerConfig{ + Host: viperInstance.GetString(CollectorPrometheusExporterServerHostKey), + Port: viperInstance.GetInt(CollectorPrometheusExporterServerPortKey), + }, + } - if viperInstance.IsSet(CollectorBatchProcessorKey) { - processors.Batch = &Batch{} - processors.Batch.SendBatchSize = viperInstance.GetUint32(CollectorBatchProcessorSendBatchSizeKey) - processors.Batch.SendBatchMaxSize = viperInstance.GetUint32(CollectorBatchProcessorSendBatchMaxSizeKey) - processors.Batch.Timeout = viperInstance.GetDuration(CollectorBatchProcessorTimeoutKey) + if arePrometheusExportTLSSettingsSet() { + exporters.PrometheusExporter.TLS = &TLSConfig{ + Cert: viperInstance.GetString(CollectorPrometheusExporterTLSCertKey), + Key: viperInstance.GetString(CollectorPrometheusExporterTLSKeyKey), + Ca: viperInstance.GetString(CollectorPrometheusExporterTLSCaKey), + SkipVerify: viperInstance.GetBool(CollectorPrometheusExporterTLSSkipVerifyKey), + ServerName: viperInstance.GetString(CollectorPrometheusExporterTLSServerNameKey), + } + } + } + + err := resolveMapStructure(CollectorOtlpExportersKey, &otlpExporters) + if err != nil { + return exporters, err } - 
return processors + exporters.OtlpExporters = otlpExporters + + return exporters, nil +} + +func isPrometheusExporterSet() bool { + return viperInstance.IsSet(CollectorPrometheusExporterKey) || + (viperInstance.IsSet(CollectorPrometheusExporterServerHostKey) && + viperInstance.IsSet(CollectorPrometheusExporterServerPortKey)) +} + +func resolveProcessors() Processors { + return Processors{ + Batch: &Batch{ + SendBatchSize: viperInstance.GetUint32(CollectorBatchProcessorSendBatchSizeKey), + SendBatchMaxSize: viperInstance.GetUint32(CollectorBatchProcessorSendBatchMaxSizeKey), + Timeout: viperInstance.GetDuration(CollectorBatchProcessorTimeoutKey), + }, + } } // generate self-signed certificate for OTEL receiver @@ -503,6 +566,47 @@ func processOtlpReceivers(tlsConfig *OtlpTLSConfig) error { return nil } +func resolveExtensions() Extensions { + if isHealthExtensionSet() { + health := &Health{ + Server: &ServerConfig{ + Host: viperInstance.GetString(CollectorExtensionsHealthServerHostKey), + Port: viperInstance.GetInt(CollectorExtensionsHealthServerPortKey), + }, + Path: viperInstance.GetString(CollectorExtensionsHealthPathKey), + } + + if areHealthExtensionTLSSettingsSet() { + health.TLS = &TLSConfig{ + Cert: viperInstance.GetString(CollectorExtensionsHealthTLSCertKey), + Key: viperInstance.GetString(CollectorExtensionsHealthTLSKeyKey), + Ca: viperInstance.GetString(CollectorExtensionsHealthTLSCaKey), + SkipVerify: viperInstance.GetBool(CollectorExtensionsHealthTLSSkipVerifyKey), + ServerName: viperInstance.GetString(CollectorExtensionsHealthTLSServerNameKey), + } + } + + return Extensions{ + Health: health, + } + } + + return Extensions{} +} + +func isHealthExtensionSet() bool { + return viperInstance.IsSet(CollectorExtensionsHealthKey) || + (viperInstance.IsSet(CollectorExtensionsHealthServerHostKey) && + viperInstance.IsSet(CollectorExtensionsHealthServerPortKey)) +} + +func resolveCollectorLog() *Log { + return &Log{ + Level: viperInstance.GetString(CollectorLogLevelKey), + Path: viperInstance.GetString(CollectorLogPathKey), + } +} + func resolveCommand() *Command { serverType, ok := parseServerType(viperInstance.GetString(CommandServerTypeKey)) if !ok { @@ -548,6 +652,22 @@ func areTLSSettingsSet() bool { viperInstance.IsSet(CommandTLSServerNameKey) } +func areHealthExtensionTLSSettingsSet() bool { + return viperInstance.IsSet(CollectorExtensionsHealthTLSCertKey) || + viperInstance.IsSet(CollectorExtensionsHealthTLSKeyKey) || + viperInstance.IsSet(CollectorExtensionsHealthTLSCaKey) || + viperInstance.IsSet(CollectorExtensionsHealthTLSSkipVerifyKey) || + viperInstance.IsSet(CollectorExtensionsHealthTLSServerNameKey) +} + +func arePrometheusExportTLSSettingsSet() bool { + return viperInstance.IsSet(CollectorPrometheusExporterTLSCertKey) || + viperInstance.IsSet(CollectorPrometheusExporterTLSKeyKey) || + viperInstance.IsSet(CollectorPrometheusExporterTLSCaKey) || + viperInstance.IsSet(CollectorPrometheusExporterTLSSkipVerifyKey) || + viperInstance.IsSet(CollectorPrometheusExporterTLSServerNameKey) +} + func resolveCommon() *CommonSettings { return &CommonSettings{ InitialInterval: DefBackoffInitialInterval, diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 52949d1523..ca188c3e56 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -179,14 +179,18 @@ func TestResolveCollector(t *testing.T) { viperInstance = viper.NewWithOptions(viper.KeyDelimiter(KeyDelimiter)) viperInstance.Set(CollectorConfigPathKey, expected.ConfigPath) + 
viperInstance.Set(CollectorLogPathKey, expected.Log.Path) + viperInstance.Set(CollectorLogLevelKey, expected.Log.Level) viperInstance.Set(CollectorReceiversKey, expected.Receivers) viperInstance.Set(CollectorBatchProcessorKey, expected.Processors.Batch) viperInstance.Set(CollectorBatchProcessorSendBatchSizeKey, expected.Processors.Batch.SendBatchSize) viperInstance.Set(CollectorBatchProcessorSendBatchMaxSizeKey, expected.Processors.Batch.SendBatchMaxSize) viperInstance.Set(CollectorBatchProcessorTimeoutKey, expected.Processors.Batch.Timeout) viperInstance.Set(CollectorExportersKey, expected.Exporters) - viperInstance.Set(CollectorExtensionsKey, expected.Extensions) - viperInstance.Set(CollectorLogKey, expected.Log) + viperInstance.Set(CollectorOtlpExportersKey, expected.Exporters.OtlpExporters) + viperInstance.Set(CollectorExtensionsHealthServerHostKey, expected.Extensions.Health.Server.Host) + viperInstance.Set(CollectorExtensionsHealthServerPortKey, expected.Extensions.Health.Server.Port) + viperInstance.Set(CollectorExtensionsHealthPathKey, expected.Extensions.Health.Path) actual, err := resolveCollector(testDefault.AllowedDirectories) require.NoError(t, err) @@ -370,6 +374,7 @@ func getAgentConfig() *Config { Port: 1337, Type: 0, }, + Path: "/", }, }, Log: &Log{ diff --git a/internal/config/defaults.go b/internal/config/defaults.go index ac20722c47..526ea914c9 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -16,14 +16,6 @@ const ( DefNginxReloadMonitoringPeriod = 10 * time.Second DefTreatErrorsAsWarnings = false - DefCollectorConfigPath = "/etc/nginx-agent/opentelemetry-collector-agent.yaml" - DefCollectorLogLevel = "INFO" - DefCollectorLogPath = "/var/log/nginx-agent/opentelemetry-collector-agent.log" - DefCollectorTLSCertPath = "/var/lib/nginx-agent/cert.pem" - DefCollectorTLSKeyPath = "/var/lib/nginx-agent/key.pem" - DefCollectorTLSCAPath = "/var/lib/nginx-agent/ca.pem" - DefCollectorTLSSANNames = "127.0.0.1,::1,localhost" - DefCommandServerHostKey = "" DefCommandServerPortKey = 0 DefCommandServerTypeKey = "grpc" @@ -34,27 +26,51 @@ const ( DefCommandTLSSkipVerifyKey = false DefCommandTLServerNameKey = "" - DefBackoffInitialInterval = 50 * time.Millisecond - // the value is 0 <= and < 1 - DefBackoffRandomizationFactor = 0.1 + DefMaxMessageSize = 0 // 0 = unset + DefMaxMessageRecieveSize = 4194304 // default 4 MB + DefMaxMessageSendSize = math.MaxInt32 + + // Backoff defaults + DefBackoffInitialInterval = 50 * time.Millisecond + DefBackoffRandomizationFactor = 0.1 // the value is 0 <= and < 1 DefBackoffMultiplier = 1.5 DefBackoffMaxInterval = 200 * time.Millisecond DefBackoffMaxElapsedTime = 3 * time.Second + // Watcher defaults DefInstanceWatcherMonitoringFrequency = 5 * time.Second DefInstanceHealthWatcherMonitoringFrequency = 5 * time.Second DefFileWatcherMonitoringFrequency = 5 * time.Second - // 0 = unset - DefMaxMessageSize = 0 - // default 4 MB - DefMaxMessageRecieveSize = 4194304 - // math.MaxInt32 - DefMaxMessageSendSize = math.MaxInt32 + // Collector defaults + DefCollectorConfigPath = "/etc/nginx-agent/opentelemetry-collector-agent.yaml" + DefCollectorLogLevel = "INFO" + DefCollectorLogPath = "/var/log/nginx-agent/opentelemetry-collector-agent.log" + DefCollectorTLSCertPath = "/var/lib/nginx-agent/cert.pem" + DefCollectorTLSKeyPath = "/var/lib/nginx-agent/key.pem" + DefCollectorTLSCAPath = "/var/lib/nginx-agent/ca.pem" + DefCollectorTLSSANNames = "127.0.0.1,::1,localhost" DefCollectorBatchProcessorSendBatchSize = 8192 
DefCollectorBatchProcessorSendBatchMaxSize = 0 DefCollectorBatchProcessorTimeout = 200 * time.Millisecond + + DefCollectorExtensionsHealthServerHost = "localhost" + DefCollectorExtensionsHealthServerPort = 13133 + DefCollectorExtensionsHealthPath = "/" + DefCollectorExtensionsHealthTLSCertPath = "" + DefCollectorExtensionsHealthTLSKeyPath = "" + DefCollectorExtensionsHealthTLSCAPath = "" + DefCollectorExtensionsHealthTLSSkipVerify = false + DefCollectorExtensionsHealthTLServerNameKey = "" + + DefCollectorPrometheusExporterServerHost = "" + DefCollectorPrometheusExporterServerPort = 0 + DefCollectorPrometheusExporterTLSCertPath = "" + DefCollectorPrometheusExporterTLSKeyPath = "" + DefCollectorPrometheusExporterTLSCAPath = "" + DefCollectorPrometheusExporterTLSSkipVerify = false + DefCollectorPrometheusExporterTLServerNameKey = "" ) func DefaultFeatures() []string { diff --git a/internal/config/flags.go b/internal/config/flags.go index ccd4393575..22d04236b7 100644 --- a/internal/config/flags.go +++ b/internal/config/flags.go @@ -26,41 +26,62 @@ const ( var ( // child flags saved as vars to enable easier prefixing. - ClientPermitWithoutStreamKey = pre(ClientRootKey) + "permit_without_stream" - ClientTimeKey = pre(ClientRootKey) + "time" - ClientTimeoutKey = pre(ClientRootKey) + "timeout" - ClientMaxMessageSendSizeKey = pre(ClientRootKey) + "max_message_send_size" - ClientMaxMessageReceiveSizeKey = pre(ClientRootKey) + "max_message_receive_size" - ClientMaxMessageSizeKey = pre(ClientRootKey) + "max_message_size" - CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" - CollectorExportersKey = pre(CollectorRootKey) + "exporters" - CollectorProcessorsKey = pre(CollectorRootKey) + "processors" - CollectorBatchProcessorKey = pre(CollectorProcessorsKey) + "batch" - CollectorBatchProcessorSendBatchSizeKey = pre(CollectorBatchProcessorKey) + "send_batch_size" - CollectorBatchProcessorSendBatchMaxSizeKey = pre(CollectorBatchProcessorKey) + "send_batch_max_size" - CollectorBatchProcessorTimeoutKey = pre(CollectorBatchProcessorKey) + "timeout" - CollectorExtensionsKey = pre(CollectorRootKey) + "extensions" - CollectorReceiversKey = pre(CollectorRootKey) + "receivers" - CollectorLogKey = pre(CollectorRootKey) + "log" - CollectorLogLevelKey = pre(CollectorLogKey) + "level" - CollectorLogPathKey = pre(CollectorLogKey) + "path" - CommandAuthKey = pre(CommandRootKey) + "auth" - CommandAuthTokenKey = pre(CommandAuthKey) + "token" - CommandServerHostKey = pre(CommandServerKey) + "host" - CommandServerKey = pre(CommandRootKey) + "server" - CommandServerPortKey = pre(CommandServerKey) + "port" - CommandServerTypeKey = pre(CommandServerKey) + "type" - CommandTLSCaKey = pre(CommandTLSKey) + "ca" - CommandTLSCertKey = pre(CommandTLSKey) + "cert" - CommandTLSKey = pre(CommandRootKey) + "tls" - CommandTLSKeyKey = pre(CommandTLSKey) + "key" - CommandTLSServerNameKey = pre(CommandRootKey) + "server_name" - CommandTLSSkipVerifyKey = pre(CommandTLSKey) + "skip_verify" - LogLevelKey = pre(LogLevelRootKey) + "level" - LogPathKey = pre(LogLevelRootKey) + "path" - NginxReloadMonitoringPeriodKey = pre(DataPlaneConfigRootKey, "nginx") + "reload_monitoring_period" - NginxTreatWarningsAsErrorsKey = pre(DataPlaneConfigRootKey, "nginx") + "treat_warnings_as_errors" - NginxExcludeLogsKey = pre(DataPlaneConfigRootKey, "nginx") + "exclude_logs" + ClientPermitWithoutStreamKey = pre(ClientRootKey) + "permit_without_stream" + ClientTimeKey = pre(ClientRootKey) + "time" + ClientTimeoutKey = pre(ClientRootKey) + "timeout" + 
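A minimal illustrative sketch (not from the patch): with the health extension defaults introduced above (server host "localhost", port 13133, path "/"), the collector's health check extension serves a plain HTTP status endpoint. The probe below assumes the agent is running locally with the collector started, those defaults unchanged, and no TLS configured for the health server.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Probe the health check endpoint assembled from the defaults above:
	// http://<server_host>:<server_port><path>.
	resp, err := http.Get("http://localhost:13133/")
	if err != nil {
		fmt.Println("collector health check unreachable:", err)
		return
	}
	defer resp.Body.Close()

	// A 200 response indicates the collector pipelines are up.
	fmt.Println("collector health status:", resp.Status)
}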
ClientMaxMessageSendSizeKey = pre(ClientRootKey) + "max_message_send_size" + ClientMaxMessageReceiveSizeKey = pre(ClientRootKey) + "max_message_receive_size" + ClientMaxMessageSizeKey = pre(ClientRootKey) + "max_message_size" + CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" + CollectorExportersKey = pre(CollectorRootKey) + "exporters" + CollectorDebugExporterKey = pre(CollectorExportersKey) + "debug" + CollectorPrometheusExporterKey = pre(CollectorExportersKey) + "prometheus_exporter" + CollectorPrometheusExporterServerHostKey = pre(CollectorPrometheusExporterKey) + "server_host" + CollectorPrometheusExporterServerPortKey = pre(CollectorPrometheusExporterKey) + "server_port" + CollectorPrometheusExporterTLSKey = pre(CollectorPrometheusExporterKey) + "tls" + CollectorPrometheusExporterTLSCertKey = pre(CollectorPrometheusExporterTLSKey) + "cert" + CollectorPrometheusExporterTLSKeyKey = pre(CollectorPrometheusExporterTLSKey) + "key" + CollectorPrometheusExporterTLSCaKey = pre(CollectorPrometheusExporterTLSKey) + "ca" + CollectorPrometheusExporterTLSSkipVerifyKey = pre(CollectorPrometheusExporterTLSKey) + "skip_verify" + CollectorPrometheusExporterTLSServerNameKey = pre(CollectorPrometheusExporterTLSKey) + "server_name" + CollectorOtlpExportersKey = pre(CollectorExportersKey) + "otlp_exporters" + CollectorProcessorsKey = pre(CollectorRootKey) + "processors" + CollectorBatchProcessorKey = pre(CollectorProcessorsKey) + "batch" + CollectorBatchProcessorSendBatchSizeKey = pre(CollectorBatchProcessorKey) + "send_batch_size" + CollectorBatchProcessorSendBatchMaxSizeKey = pre(CollectorBatchProcessorKey) + "send_batch_max_size" + CollectorBatchProcessorTimeoutKey = pre(CollectorBatchProcessorKey) + "timeout" + CollectorExtensionsKey = pre(CollectorRootKey) + "extensions" + CollectorExtensionsHealthKey = pre(CollectorExtensionsKey) + "health" + CollectorExtensionsHealthServerHostKey = pre(CollectorExtensionsHealthKey) + "server_host" + CollectorExtensionsHealthServerPortKey = pre(CollectorExtensionsHealthKey) + "server_port" + CollectorExtensionsHealthPathKey = pre(CollectorExtensionsHealthKey) + "path" + CollectorExtensionsHealthTLSKey = pre(CollectorExtensionsHealthKey) + "tls" + CollectorExtensionsHealthTLSCaKey = pre(CollectorExtensionsHealthTLSKey) + "ca" + CollectorExtensionsHealthTLSCertKey = pre(CollectorExtensionsHealthTLSKey) + "cert" + CollectorExtensionsHealthTLSKeyKey = pre(CollectorExtensionsHealthTLSKey) + "key" + CollectorExtensionsHealthTLSServerNameKey = pre(CollectorExtensionsHealthTLSKey) + "server_name" + CollectorExtensionsHealthTLSSkipVerifyKey = pre(CollectorExtensionsHealthTLSKey) + "skip_verify" + CollectorReceiversKey = pre(CollectorRootKey) + "receivers" + CollectorLogKey = pre(CollectorRootKey) + "log" + CollectorLogLevelKey = pre(CollectorLogKey) + "level" + CollectorLogPathKey = pre(CollectorLogKey) + "path" + CommandAuthKey = pre(CommandRootKey) + "auth" + CommandAuthTokenKey = pre(CommandAuthKey) + "token" + CommandServerHostKey = pre(CommandServerKey) + "host" + CommandServerKey = pre(CommandRootKey) + "server" + CommandServerPortKey = pre(CommandServerKey) + "port" + CommandServerTypeKey = pre(CommandServerKey) + "type" + CommandTLSKey = pre(CommandRootKey) + "tls" + CommandTLSCaKey = pre(CommandTLSKey) + "ca" + CommandTLSCertKey = pre(CommandTLSKey) + "cert" + CommandTLSKeyKey = pre(CommandTLSKey) + "key" + CommandTLSServerNameKey = pre(CommandTLSKey) + "server_name" + CommandTLSSkipVerifyKey = pre(CommandTLSKey) + "skip_verify" + LogLevelKey = 
pre(LogLevelRootKey) + "level" + LogPathKey = pre(LogLevelRootKey) + "path" + NginxReloadMonitoringPeriodKey = pre(DataPlaneConfigRootKey, "nginx") + "reload_monitoring_period" + NginxTreatWarningsAsErrorsKey = pre(DataPlaneConfigRootKey, "nginx") + "treat_warnings_as_errors" + NginxExcludeLogsKey = pre(DataPlaneConfigRootKey, "nginx") + "exclude_logs" ) func pre(prefixes ...string) string { diff --git a/internal/config/types.go b/internal/config/types.go index 4d328d8da9..291546c002 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -124,10 +124,10 @@ type ( // OTel Collector Receiver configuration. Receivers struct { + HostMetrics *HostMetrics `yaml:"-" mapstructure:"host_metrics"` OtlpReceivers []OtlpReceiver `yaml:"-" mapstructure:"otlp_receivers"` NginxReceivers []NginxReceiver `yaml:"-" mapstructure:"nginx_receivers"` NginxPlusReceivers []NginxPlusReceiver `yaml:"-" mapstructure:"nginx_plus_receivers"` - HostMetrics HostMetrics `yaml:"-" mapstructure:"host_metrics"` } OtlpReceiver struct { @@ -294,6 +294,27 @@ func (c *Config) IsFeatureEnabled(feature string) bool { return false } +func (c *Config) IsACollectorExporterConfigured() bool { + if c.Collector == nil { + return false + } + + return c.Collector.Exporters.PrometheusExporter != nil || + c.Collector.Exporters.OtlpExporters != nil || + c.Collector.Exporters.Debug != nil +} + +func (c *Config) AreReceiversConfigured() bool { + if c.Collector == nil { + return false + } + + return c.Collector.Receivers.NginxPlusReceivers != nil || + c.Collector.Receivers.OtlpReceivers != nil || + c.Collector.Receivers.NginxReceivers != nil || + c.Collector.Receivers.HostMetrics != nil +} + func isAllowedDir(dir string, allowedDirs []string) bool { for _, allowedDirectory := range allowedDirs { if strings.HasPrefix(dir, allowedDirectory) { diff --git a/internal/plugin/plugin_manager.go b/internal/plugin/plugin_manager.go index 5288bf53f9..7744ce7833 100644 --- a/internal/plugin/plugin_manager.go +++ b/internal/plugin/plugin_manager.go @@ -58,13 +58,16 @@ func addCommandAndFilePlugins(ctx context.Context, plugins []bus.Plugin, agentCo } func addCollectorPlugin(ctx context.Context, agentConfig *config.Config, plugins []bus.Plugin) []bus.Plugin { - if agentConfig.Collector != nil { + if agentConfig.IsACollectorExporterConfigured() { oTelCollector, err := collector.New(agentConfig) if err == nil { plugins = append(plugins, oTelCollector) } else { - slog.ErrorContext(ctx, "init collector plugin", "error", err) + slog.ErrorContext(ctx, "Failed to initialize collector plugin", "error", err) } + } else { + slog.InfoContext(ctx, "Agent OTel collector isn't started. 
"+ + "Configure a collector to begin collecting metrics.") } return plugins diff --git a/internal/plugin/plugin_manager_test.go b/internal/plugin/plugin_manager_test.go index 779d38efd7..89b86668b5 100644 --- a/internal/plugin/plugin_manager_test.go +++ b/internal/plugin/plugin_manager_test.go @@ -56,7 +56,11 @@ func TestLoadPlugins(t *testing.T) { }, { name: "Test 3: Load metrics collector plugin", input: &config.Config{ - Collector: &config.Collector{}, + Collector: &config.Collector{ + Exporters: config.Exporters{ + Debug: &config.DebugExporter{}, + }, + }, }, expected: []bus.Plugin{ &resource.Resource{}, diff --git a/nginx-agent.conf b/nginx-agent.conf index a90be6a9eb..24dd050694 100644 --- a/nginx-agent.conf +++ b/nginx-agent.conf @@ -28,9 +28,9 @@ allowed_directories: ## collector metrics settings # collector: -# exporters: # exporters -# - type: otlp # exporter type -# server: -# host: "127.0.0.1" # OTLP exporter server host -# port: 5643 # OTLP exporter server port -# tls: {} +# exporters: # exporters +# otlp_exporters: +# - server: +# host: "127.0.0.1" # OTLP exporter server host +# port: 5643 # OTLP exporter server port +# tls: {} diff --git a/test/config/agent/nginx-agent-otel-load.conf b/test/config/agent/nginx-agent-otel-load.conf index 8154721024..f2a2269cff 100644 --- a/test/config/agent/nginx-agent-otel-load.conf +++ b/test/config/agent/nginx-agent-otel-load.conf @@ -15,6 +15,7 @@ allowed_directories: - /usr/local/etc/nginx - /usr/share/nginx/modules - /var/run/nginx + - /var/log/nginx client: timeout: 10s @@ -22,10 +23,9 @@ client: collector: receivers: otlp_receivers: - - server: - host: "127.0.0.1" - port: 4317 - type: 0 + - server: + host: "127.0.0.1" + port: 4317 processors: batch: {} exporters: diff --git a/test/load/nginx_agent_process_collector.go b/test/load/nginx_agent_process_collector.go index 45de33a5c3..6293b7849d 100644 --- a/test/load/nginx_agent_process_collector.go +++ b/test/load/nginx_agent_process_collector.go @@ -244,6 +244,12 @@ func (cp *agentProcessCollector) Stop() (stopped bool, err error) { cp.isStopped = true + out, catError := exec.Command("cat", "/var/log/nginx-agent/agent.log").Output() + if catError != nil { + log.Println("Error reading /var/log/nginx-agent/agent.log: %w", catError) + } + log.Printf("\nNGINX agent logs:\n%s\n", out) + log.Printf("Gracefully terminating %s pid=%d, sending SIGTEM...", cp.name, cp.cmd.Process.Pid) // Notify resource monitor to stop. 
diff --git a/test/types/config.go b/test/types/config.go index 71d79fa7c6..99b622f045 100644 --- a/test/types/config.go +++ b/test/types/config.go @@ -70,7 +70,7 @@ func AgentConfig() *config.Config { }, Receivers: config.Receivers{ OtlpReceivers: OtlpReceivers(), - HostMetrics: config.HostMetrics{ + HostMetrics: &config.HostMetrics{ CollectionInterval: time.Minute, InitialDelay: time.Second, Scrapers: &config.HostMetricsScrapers{ From d53fff85f4bcf7e7c66861b5873cba443c9301ba Mon Sep 17 00:00:00 2001 From: RRashmit <132996156+RRashmit@users.noreply.github.com> Date: Thu, 17 Oct 2024 11:50:10 +0100 Subject: [PATCH 07/12] chore: remove extra bracket (#906) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index efe33d215a..34ff72b517 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -322,7 +322,7 @@ jobs: contents: read with: packageVersion: "3.0.0" - packageBuildNo: "${{ github.run_number }}}" + packageBuildNo: "${{ github.run_number }}" uploadAzure: true publishPackages: true releaseBranch: "v3" From 22a5103f2495aa968b22b36abf74302b1e22511e Mon Sep 17 00:00:00 2001 From: Sean Breen <101327931+sean-breen@users.noreply.github.com> Date: Mon, 21 Oct 2024 15:28:31 +0100 Subject: [PATCH 08/12] OpenTelemetry: add resource id (#878) * add attribute processor to config, handle resource update to get id * write configuration before reloading * update template file handling of attributes processor * add attribute processor to mock config * add more nil checks for attribute processor * fix linting errors * commit generated files * fix debug exporter in template * Use slice of Actions in Attributes processor * fix lint: tag alignment * remove docker login, use prod container repo for OSS nginx * add unit test for processing ResourceUpdateTopic * Update mdatagen install instructions in README.md (#887) * Update go version to 1.23 (#865) * Agent Config Changes: rename & update config_dirs & update exclude_logs (#882) * max api version (#880) * add log to inetgration test (#893) * add lock for restarting OTel collector * fix struct alignment * new proto defs * resource-id -> resource.id * fix lint: remove trailing newline * fix unit test * add unit test for negative case * address pr feedback * add updateResourceAttributes function + unit tests * add nolint for cognitive-complexity, pending review * fix nolint * more feedback * fix test condition * PR feedback --------- Co-authored-by: Donal Hurley Co-authored-by: oliveromahony Co-authored-by: aphralG <108004222+aphralG@users.noreply.github.com> --- .../generated_component_test.go | 12 +- .../internal/metadata/generated_metrics.go | 54 ++++-- .../metadata/generated_metrics_test.go | 18 +- .../generated_component_test.go | 12 +- .../internal/metadata/generated_metrics.go | 54 ++++-- .../metadata/generated_metrics_test.go | 76 ++++---- internal/collector/otel_collector_plugin.go | 73 +++++++ .../collector/otel_collector_plugin_test.go | 179 +++++++++++++++++- internal/collector/otelcol.tmpl | 16 ++ internal/config/config.go | 11 +- internal/config/flags.go | 1 + internal/config/types.go | 13 +- 12 files changed, 419 insertions(+), 100 deletions(-) diff --git a/internal/collector/nginxossreceiver/generated_component_test.go b/internal/collector/nginxossreceiver/generated_component_test.go index 3007dc92b4..60de562050 100644 --- a/internal/collector/nginxossreceiver/generated_component_test.go +++ 
b/internal/collector/nginxossreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, test := range tests { - t.Run(test.name+"-shutdown", func(t *testing.T) { - c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(test.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go index e74642bc94..c560fe1bc8 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go @@ -313,17 +313,25 @@ type MetricsBuilder struct { metricNginxHTTPResponseStatus metricNginxHTTPResponseStatus } -// metricBuilderOption applies changes to default metrics builder. -type metricBuilderOption func(*MetricsBuilder) +// MetricBuilderOption applies changes to default metrics builder. +type MetricBuilderOption interface { + apply(*MetricsBuilder) +} + +type metricBuilderOptionFunc func(mb *MetricsBuilder) + +func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { + mbof(mb) +} // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { - return func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { + return metricBuilderOptionFunc(func(mb *MetricsBuilder) { mb.startTime = startTime - } + }) } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -336,7 +344,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op(mb) + op.apply(mb) } return mb } @@ -349,20 +357,28 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. 
-type ResourceMetricsOption func(pmetric.ResourceMetrics) +type ResourceMetricsOption interface { + apply(pmetric.ResourceMetrics) +} + +type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) + +func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { + rmof(rm) +} // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { + return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - } + }) } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { + return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -376,7 +392,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - } + }) } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -384,7 +400,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. -func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("otelcol/nginxreceiver") @@ -395,8 +411,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricNginxHTTPRequests.emit(ils.Metrics()) mb.metricNginxHTTPResponseStatus.emit(ils.Metrics()) - for _, op := range rmo { - op(rm) + for _, op := range options { + op.apply(rm) } if ils.Metrics().Len() > 0 { @@ -408,8 +424,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(rmo...) +func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(options...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -437,9 +453,9 @@ func (mb *MetricsBuilder) RecordNginxHTTPResponseStatusDataPoint(ts pcommon.Time // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
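A self-contained sketch (not from the patch) of the pattern the regenerated mdatagen code above adopts: the bare function option types become exported interfaces with an unexported apply method plus a func adapter, so callers can name the option type while only the generating package can implement it. The names below are illustrative, not the generated ones.

package main

import "fmt"

type builder struct {
	startTime int64
}

// Option is the exported option interface; apply is unexported, so only this
// package can implement it, mirroring MetricBuilderOption above.
type Option interface {
	apply(*builder)
}

// optionFunc adapts an ordinary function to the Option interface.
type optionFunc func(*builder)

func (f optionFunc) apply(b *builder) { f(b) }

// WithStartTime returns an Option; existing call sites are unaffected by the
// refactor because they only ever call this constructor.
func WithStartTime(t int64) Option {
	return optionFunc(func(b *builder) { b.startTime = t })
}

func newBuilder(opts ...Option) *builder {
	b := &builder{}
	for _, op := range opts {
		op.apply(b)
	}
	return b
}

func main() {
	fmt.Println(newBuilder(WithStartTime(42)).startTime)
}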
-func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op(mb) + op.apply(mb) } } diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go index 261655b70a..2581a33ef3 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -78,7 +78,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if test.expectEmpty { + if tt.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -88,10 +88,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if test.metricsSet == testDataSetDefault { + if tt.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if test.metricsSet == testDataSetAll { + if tt.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -104,7 +104,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -136,7 +136,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -150,7 +150,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses, grouped by status code range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := 
ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/internal/collector/nginxplusreceiver/generated_component_test.go b/internal/collector/nginxplusreceiver/generated_component_test.go index 170385ba71..9fbb9944ca 100644 --- a/internal/collector/nginxplusreceiver/generated_component_test.go +++ b/internal/collector/nginxplusreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, test := range tests { - t.Run(test.name+"-shutdown", func(t *testing.T) { - c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, tt := range tests { + t.Run(tt.name+"-shutdown", func(t *testing.T) { + c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(test.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(tt.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go index 866fb5a735..15dbba1752 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go @@ -3620,17 +3620,25 @@ type MetricsBuilder struct { metricNginxStreamUpstreamZombieCount metricNginxStreamUpstreamZombieCount } -// metricBuilderOption applies changes to default metrics builder. -type metricBuilderOption func(*MetricsBuilder) +// MetricBuilderOption applies changes to default metrics builder. +type MetricBuilderOption interface { + apply(*MetricsBuilder) +} + +type metricBuilderOptionFunc func(mb *MetricsBuilder) + +func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { + mbof(mb) +} // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { - return func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { + return metricBuilderOptionFunc(func(mb *MetricsBuilder) { mb.startTime = startTime - } + }) } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -3697,7 +3705,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op(mb) + op.apply(mb) } return mb } @@ -3710,20 +3718,28 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption func(pmetric.ResourceMetrics) +type ResourceMetricsOption interface { + apply(pmetric.ResourceMetrics) +} + +type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) + +func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { + rmof(rm) +} // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { + return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - } + }) } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return func(rm pmetric.ResourceMetrics) { + return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -3737,7 +3753,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - } + }) } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -3745,7 +3761,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. 
-func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("otelcol/nginxplusreceiver") @@ -3810,8 +3826,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricNginxStreamUpstreamPeerUnavailable.emit(ils.Metrics()) mb.metricNginxStreamUpstreamZombieCount.emit(ils.Metrics()) - for _, op := range rmo { - op(rm) + for _, op := range options { + op.apply(rm) } if ils.Metrics().Len() > 0 { @@ -3823,8 +3839,8 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(rmo...) +func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(options...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -4122,9 +4138,9 @@ func (mb *MetricsBuilder) RecordNginxStreamUpstreamZombieCountDataPoint(ts pcomm // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. -func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op(mb) + op.apply(mb) } } diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go index 8e8f64cbb9..6d048f6442 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -294,7 +294,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if test.expectEmpty { + if tt.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -304,10 +304,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if test.metricsSet == testDataSetDefault { + if tt.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if test.metricsSet == testDataSetAll { + if 
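The regenerated tests above and below also tighten the testify idioms, replacing assert.Equal with assert.True for booleans and with assert.InDelta for floating-point values. A minimal standalone sketch (not from the patch) of the two assertions:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAssertionIdioms(t *testing.T) {
	got := 0.1 + 0.2

	// assert.Equal(t, 0.3, got) would fail here because of binary rounding;
	// InDelta passes as long as the difference stays within the given delta.
	assert.InDelta(t, 0.3, got, 1e-9)

	// Boolean conditions read better, and report better on failure, with
	// assert.True than with assert.Equal(t, true, ...).
	assert.True(t, got > 0)
}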
tt.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -320,7 +320,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of bytes read from the cache or proxied server.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -370,7 +370,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of responses read from the cache or proxied server.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -390,7 +390,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of NGINX config reloads.", ms.At(i).Description()) assert.Equal(t, "reloads", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -404,7 +404,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -436,7 +436,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections to an endpoint with a limit_conn directive.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -456,7 +456,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests to an endpoint with a limit_req directive.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -476,7 +476,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of HTTP 
byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -499,7 +499,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests completed without sending a response.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -537,7 +537,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -569,7 +569,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses, grouped by status code range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -592,7 +592,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -630,7 +630,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of byte IO per HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -752,7 +752,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of health check requests made to a HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, 
ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -781,7 +781,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests forwarded to the HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -831,7 +831,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of responses obtained from the HTTP upstream peer grouped by status range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -887,7 +887,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of times the server became unavailable for client requests (“unavail”).", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -931,7 +931,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests rejected due to the queue overflow.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1036,7 +1036,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.Equal(t, float64(1), dp.DoubleValue()) + assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) attrVal, ok := dp.Attributes().Get("nginx.zone.name") assert.True(t, ok) assert.EqualValues(t, "nginx.zone.name-val", attrVal.Str()) @@ -1047,7 +1047,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of attempts to allocate memory of specified size.", ms.At(i).Description()) assert.Equal(t, "allocations", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1106,7 +1106,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of SSL certificate 
verification failures.", ms.At(i).Description()) assert.Equal(t, "certificates", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1123,7 +1123,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of SSL handshakes.", ms.At(i).Description()) assert.Equal(t, "handshakes", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1143,7 +1143,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of Stream byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1163,7 +1163,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections accepted from clients.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1180,7 +1180,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Total number of connections completed without creating a session.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1212,7 +1212,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of completed sessions.", ms.At(i).Description()) assert.Equal(t, "sessions", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1232,7 +1232,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of Stream Upstream Peer byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) 
assert.Equal(t, start, dp.StartTimestamp()) @@ -1309,7 +1309,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client connections forwarded to this stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1335,7 +1335,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of stream upstream peers grouped by state.", ms.At(i).Description()) assert.Equal(t, "peers", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1358,7 +1358,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of unsuccessful attempts to communicate with the stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "peers", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1381,7 +1381,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of health check requests made to the stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1434,7 +1434,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. 
If no upstream peer is a match then the value will be 0.", ms.At(i).Description()) assert.Equal(t, "deployments", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1487,7 +1487,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) + assert.True(t, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/internal/collector/otel_collector_plugin.go b/internal/collector/otel_collector_plugin.go index 5a77432086..eb2179c670 100644 --- a/internal/collector/otel_collector_plugin.go +++ b/internal/collector/otel_collector_plugin.go @@ -11,8 +11,10 @@ import ( "log/slog" "os" "strings" + "sync" "time" + "github.com/nginx/agent/v3/api/grpc/mpi/v1" "github.com/nginx/agent/v3/internal/backoff" "github.com/nginx/agent/v3/internal/bus" "github.com/nginx/agent/v3/internal/config" @@ -31,6 +33,7 @@ type ( service *otelcol.Collector cancel context.CancelFunc config *config.Config + mu *sync.Mutex stopped bool } ) @@ -64,6 +67,7 @@ func New(conf *config.Config) (*Collector, error) { config: conf, service: oTelCollector, stopped: true, + mu: &sync.Mutex{}, }, nil } @@ -200,6 +204,8 @@ func (oc *Collector) Process(ctx context.Context, msg *bus.Message) { switch msg.Topic { case bus.NginxConfigUpdateTopic: oc.handleNginxConfigUpdate(ctx, msg) + case bus.ResourceUpdateTopic: + oc.handleResourceUpdate(ctx, msg) default: slog.DebugContext(ctx, "OTel collector plugin unknown topic", "topic", msg.Topic) } @@ -208,6 +214,7 @@ func (oc *Collector) Process(ctx context.Context, msg *bus.Message) { // Subscriptions returns the list of topics the plugin is subscribed to func (oc *Collector) Subscriptions() []string { return []string{ + bus.ResourceUpdateTopic, bus.NginxConfigUpdateTopic, } } @@ -233,7 +240,48 @@ func (oc *Collector) handleNginxConfigUpdate(ctx context.Context, msg *bus.Messa } } +func (oc *Collector) handleResourceUpdate(ctx context.Context, msg *bus.Message) { + var reloadCollector bool + resourceUpdateContext, ok := msg.Data.(*v1.Resource) + if !ok { + slog.ErrorContext(ctx, "Unable to cast message payload to *v1.Resource", "payload", msg.Data) + return + } + + if oc.config.Collector.Processors.Attribute == nil { + oc.config.Collector.Processors.Attribute = &config.Attribute{ + Actions: make([]config.Action, 0), + } + } + + if oc.config.Collector.Processors.Attribute != nil && + resourceUpdateContext.GetResourceId() != "" { + reloadCollector = oc.updateAttributeActions( + []config.Action{ + { + Key: "resource.id", + Action: "insert", + Value: resourceUpdateContext.GetResourceId(), + }, + }, + ) + } + + if reloadCollector { + slog.InfoContext(ctx, "Reloading OTel collector config") + err := writeCollectorConfig(oc.config.Collector) + if err != nil { + slog.ErrorContext(ctx, "Failed to write OTel Collector config", "error", err) + return + } + 
+ oc.restartCollector(ctx) + } +} + func (oc *Collector) restartCollector(ctx context.Context) { + oc.mu.Lock() + defer oc.mu.Unlock() err := oc.Close(ctx) if err != nil { slog.ErrorContext(ctx, "Failed to shutdown OTel Collector", "error", err) @@ -353,6 +401,31 @@ func (oc *Collector) updateExistingNginxOSSReceiver( return nginxReceiverFound, reloadCollector } +// nolint: revive +func (oc *Collector) updateAttributeActions( + actionsToAdd []config.Action, +) (reloadCollector bool) { + reloadCollector = false + + if oc.config.Collector.Processors.Attribute.Actions != nil { + OUTER: + for _, toAdd := range actionsToAdd { + for _, action := range oc.config.Collector.Processors.Attribute.Actions { + if action.Key == toAdd.Key { + continue OUTER + } + } + oc.config.Collector.Processors.Attribute.Actions = append( + oc.config.Collector.Processors.Attribute.Actions, + toAdd, + ) + reloadCollector = true + } + } + + return reloadCollector +} + func isOSSReceiverChanged(nginxReceiver config.NginxReceiver, nginxConfigContext *model.NginxConfigContext) bool { return nginxReceiver.StubStatus != nginxConfigContext.StubStatus || len(nginxReceiver.AccessLogs) != len(nginxConfigContext.AccessLogs) diff --git a/internal/collector/otel_collector_plugin_test.go b/internal/collector/otel_collector_plugin_test.go index 3b3ea10015..adac2094e5 100644 --- a/internal/collector/otel_collector_plugin_test.go +++ b/internal/collector/otel_collector_plugin_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/nginx/agent/v3/test/protos" "github.com/nginx/agent/v3/test/stub" "github.com/stretchr/testify/assert" @@ -86,7 +87,7 @@ func TestCollector_InitAndClose(t *testing.T) { } // nolint: revive -func TestCollector_Process(t *testing.T) { +func TestCollector_ProcessNginxConfigUpdateTopic(t *testing.T) { nginxPlusMock := helpers.NewMockNGINXPlusAPIServer(t) defer nginxPlusMock.Close() @@ -225,6 +226,130 @@ func TestCollector_Process(t *testing.T) { } } +func TestCollector_ProcessResourceUpdateTopic(t *testing.T) { + conf := types.OTelConfig(t) + conf.Collector.Log.Path = "" + conf.Collector.Processors.Batch = nil + conf.Collector.Processors.Attribute = nil + + tests := []struct { + message *bus.Message + processors config.Processors + name string + }{ + { + name: "Test 1: Resource update adds resource id action", + message: &bus.Message{ + Topic: bus.ResourceUpdateTopic, + Data: protos.GetHostResource(), + }, + processors: config.Processors{ + Attribute: &config.Attribute{ + Actions: []config.Action{ + { + Key: "resource.id", + Action: "insert", + Value: "1234", + }, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + collector, err := New(conf) + require.NoError(tt, err, "NewCollector should not return an error with valid config") + + ctx := context.Background() + messagePipe := bus.NewMessagePipe(10) + err = messagePipe.Register(10, []bus.Plugin{collector}) + + require.NoError(tt, err) + require.NoError(tt, collector.Init(ctx, messagePipe), "Init should not return an error") + defer collector.Close(ctx) + + assert.Eventually( + tt, + func() bool { return collector.service.GetState() == otelcol.StateRunning }, + 5*time.Second, + 100*time.Millisecond, + ) + + collector.Process(ctx, test.message) + + assert.Eventually( + tt, + func() bool { return collector.service.GetState() == otelcol.StateRunning }, + 5*time.Second, + 100*time.Millisecond, + ) + + assert.Equal(tt, test.processors, collector.config.Collector.Processors) + }) + } +} + +func 
TestCollector_ProcessResourceUpdateTopicFails(t *testing.T) { + conf := types.OTelConfig(t) + conf.Collector.Log.Path = "" + conf.Collector.Processors.Batch = nil + conf.Collector.Processors.Attribute = nil + + tests := []struct { + message *bus.Message + processors config.Processors + name string + }{ + { + name: "Test 1: Message cannot be parsed to v1.Resource", + message: &bus.Message{ + Topic: bus.ResourceUpdateTopic, + Data: struct{}{}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + collector, err := New(conf) + require.NoError(tt, err, "NewCollector should not return an error with valid config") + + ctx := context.Background() + messagePipe := bus.NewMessagePipe(10) + err = messagePipe.Register(10, []bus.Plugin{collector}) + + require.NoError(tt, err) + require.NoError(tt, collector.Init(ctx, messagePipe), "Init should not return an error") + defer collector.Close(ctx) + + assert.Eventually( + tt, + func() bool { return collector.service.GetState() == otelcol.StateRunning }, + 5*time.Second, + 100*time.Millisecond, + ) + + collector.Process(ctx, test.message) + + assert.Eventually( + tt, + func() bool { return collector.service.GetState() == otelcol.StateRunning }, + 5*time.Second, + 100*time.Millisecond, + ) + + assert.Equal(tt, + config.Processors{ + Batch: nil, + Attribute: nil, + }, + collector.config.Collector.Processors) + }) + } +} + // nolint: dupl func TestCollector_updateExistingNginxOSSReceiver(t *testing.T) { conf := types.OTelConfig(t) @@ -380,3 +505,55 @@ func TestCollector_updateExistingNginxPlusReceiver(t *testing.T) { }) } } + +func TestCollector_updateResourceAttributes(t *testing.T) { + conf := types.OTelConfig(t) + conf.Collector.Log.Path = "" + conf.Collector.Processors.Batch = nil + + tests := []struct { + name string + setupActions []config.Action + actions []config.Action + expectedAttribs []config.Action + expectedReloadRequired bool + }{ + { + name: "Test 1: No Actions returns false", + setupActions: []config.Action{}, + actions: []config.Action{}, + expectedReloadRequired: false, + expectedAttribs: []config.Action{}, + }, + { + name: "Test 2: Adding an action returns true", + setupActions: []config.Action{}, + actions: []config.Action{{Key: "test", Action: "insert", Value: "test value"}}, + expectedReloadRequired: true, + expectedAttribs: []config.Action{{Key: "test", Action: "insert", Value: "test value"}}, + }, + { + name: "Test 3: Adding a duplicate key doesn't append", + setupActions: []config.Action{{Key: "test", Action: "insert", Value: "test value 1"}}, + actions: []config.Action{{Key: "test", Action: "insert", Value: "updated value 2"}}, + expectedReloadRequired: false, + expectedAttribs: []config.Action{{Key: "test", Action: "insert", Value: "test value 1"}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(tt *testing.T) { + collector, err := New(conf) + require.NoError(tt, err, "NewCollector should not return an error with valid config") + + // set up Actions + conf.Collector.Processors.Attribute = &config.Attribute{Actions: test.setupActions} + + reloadRequired := collector.updateAttributeActions(test.actions) + assert.Equal(tt, + test.expectedAttribs, + conf.Collector.Processors.Attribute.Actions) + assert.Equal(tt, test.expectedReloadRequired, reloadRequired) + }) + } +} diff --git a/internal/collector/otelcol.tmpl b/internal/collector/otelcol.tmpl index acbe8df7c8..d988a21eda 100644 --- a/internal/collector/otelcol.tmpl +++ b/internal/collector/otelcol.tmpl @@ -76,6 +76,17 @@ 
receivers: {{- end }} processors: +{{- if ne .Processors.Attribute nil }} +{{- if .Processors.Attribute.Actions }} + attributes: + actions: +{{- range .Processors.Attribute.Actions }} + - key: {{ .Key }} + action: {{ .Action }} + value: {{ .Value }} +{{- end }} +{{- end }} +{{- end }} {{- if ne .Processors.Batch nil }} batch: send_batch_size: {{ .Processors.Batch.SendBatchSize }} @@ -164,6 +175,11 @@ service: {{- if ne .Processors.Batch nil }} - batch {{- end }} + {{- if ne .Processors.Attribute nil }} + {{- if .Processors.Attribute.Actions }} + - attributes + {{- end }} + {{- end }} exporters: {{- range $index, $otlpExporter := .Exporters.OtlpExporters }} - otlp/{{$index}} diff --git a/internal/config/config.go b/internal/config/config.go index 3d5b190fdf..05f5887861 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -506,13 +506,22 @@ func isPrometheusExporterSet() bool { } func resolveProcessors() Processors { - return Processors{ + processors := Processors{ Batch: &Batch{ SendBatchSize: viperInstance.GetUint32(CollectorBatchProcessorSendBatchSizeKey), SendBatchMaxSize: viperInstance.GetUint32(CollectorBatchProcessorSendBatchMaxSizeKey), Timeout: viperInstance.GetDuration(CollectorBatchProcessorTimeoutKey), }, } + + if viperInstance.IsSet(CollectorAttributeProcessorKey) { + err := resolveMapStructure(CollectorAttributeProcessorKey, &processors.Attribute) + if err != nil { + return processors + } + } + + return processors } // generate self-signed certificate for OTEL receiver diff --git a/internal/config/flags.go b/internal/config/flags.go index 22d04236b7..a14ba8fcc1 100644 --- a/internal/config/flags.go +++ b/internal/config/flags.go @@ -34,6 +34,7 @@ var ( ClientMaxMessageSizeKey = pre(ClientRootKey) + "max_message_size" CollectorConfigPathKey = pre(CollectorRootKey) + "config_path" CollectorExportersKey = pre(CollectorRootKey) + "exporters" + CollectorAttributeProcessorKey = pre(CollectorProcessorsKey) + "attribute" CollectorDebugExporterKey = pre(CollectorExportersKey) + "debug" CollectorPrometheusExporterKey = pre(CollectorExportersKey) + "prometheus_exporter" CollectorPrometheusExporterServerHostKey = pre(CollectorPrometheusExporterKey) + "server_host" diff --git a/internal/config/types.go b/internal/config/types.go index 291546c002..e35b804c78 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -113,7 +113,18 @@ type ( // OTel Collector Processors configuration. 
Processors struct { - Batch *Batch `yaml:"-" mapstructure:"batch"` + Attribute *Attribute `yaml:"-" mapstructure:"attribute"` + Batch *Batch `yaml:"-" mapstructure:"batch"` + } + + Attribute struct { + Actions []Action `yaml:"-" mapstructure:"actions"` + } + + Action struct { + Key string `yaml:"key" mapstructure:"key"` + Action string `yaml:"action" mapstructure:"action"` + Value string `yaml:"value" mapstructure:"value"` } Batch struct { From 9999bc5f5bd7d581d051caa9f7e646039c880b7e Mon Sep 17 00:00:00 2001 From: Donal Hurley Date: Wed, 23 Oct 2024 11:52:36 +0100 Subject: [PATCH 09/12] Add header_setter extension for OTel collector (#905) --- Makefile | 5 + .../generated_component_test.go | 12 +- .../internal/metadata/generated_metrics.go | 54 +- .../metadata/generated_metrics_test.go | 18 +- .../generated_component_test.go | 12 +- .../internal/metadata/generated_metrics.go | 54 +- .../metadata/generated_metrics_test.go | 76 +-- internal/collector/otelcol.tmpl | 25 + internal/collector/settings_test.go | 16 + internal/config/config_test.go | 3 - internal/config/types.go | 21 +- .../test-opentelemetry-collector-agent.yaml | 11 + test/mock/collector/docker-compose.yaml | 4 +- test/mock/collector/mock-collector/Dockerfile | 6 + .../collector/mock-collector/auth/auth.go | 71 +++ .../collector/mock-collector/auth/config.go | 24 + test/mock/collector/mock-collector/go.mod | 141 +++++ test/mock/collector/mock-collector/go.sum | 539 ++++++++++++++++++ test/mock/collector/mock-collector/main.go | 114 ++++ test/mock/collector/nginx-agent.conf | 8 + test/mock/collector/otel-collector.yaml | 14 +- test/types/config.go | 3 - 22 files changed, 1086 insertions(+), 145 deletions(-) create mode 100644 test/mock/collector/mock-collector/Dockerfile create mode 100644 test/mock/collector/mock-collector/auth/auth.go create mode 100644 test/mock/collector/mock-collector/auth/config.go create mode 100644 test/mock/collector/mock-collector/go.mod create mode 100644 test/mock/collector/mock-collector/go.sum create mode 100644 test/mock/collector/mock-collector/main.go diff --git a/Makefile b/Makefile index 8307a677af..07761d857f 100644 --- a/Makefile +++ b/Makefile @@ -207,6 +207,11 @@ build-test-oss-image: --build-arg PACKAGES_REPO=$(OSS_PACKAGES_REPO) \ --build-arg BASE_IMAGE=$(BASE_IMAGE) \ --build-arg ENTRY_POINT=./test/docker/entrypoint.sh + +.PHONY: build-mock-collector-image +build-mock-collector-image: + $(CONTAINER_BUILDENV) $(CONTAINER_CLITOOL) build -t mock-collector . 
\ + --no-cache -f ./test/mock/collector/mock-collector/Dockerfile .PHONY: run-mock-management-otel-collector run-mock-management-otel-collector: ## Run mock management plane OTel collector diff --git a/internal/collector/nginxossreceiver/generated_component_test.go b/internal/collector/nginxossreceiver/generated_component_test.go index 60de562050..3007dc92b4 100644 --- a/internal/collector/nginxossreceiver/generated_component_test.go +++ b/internal/collector/nginxossreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go index c560fe1bc8..e74642bc94 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go @@ -313,25 +313,17 @@ type MetricsBuilder struct { metricNginxHTTPResponseStatus metricNginxHTTPResponseStatus } -// MetricBuilderOption applies changes to default metrics builder. -type MetricBuilderOption interface { - apply(*MetricsBuilder) -} - -type metricBuilderOptionFunc func(mb *MetricsBuilder) - -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) -} +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -344,7 +336,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -357,28 +349,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -392,7 +376,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -400,7 +384,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. -func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("otelcol/nginxreceiver") @@ -411,8 +395,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxHTTPRequests.emit(ils.Metrics()) mb.metricNginxHTTPResponseStatus.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } if ils.Metrics().Len() > 0 { @@ -424,8 +408,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. 
This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -453,9 +437,9 @@ func (mb *MetricsBuilder) RecordNginxHTTPResponseStatusDataPoint(ts pcommon.Time // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. -func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go index 2581a33ef3..261655b70a 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -78,7 +78,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -88,10 +88,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll { + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -104,7 +104,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -136,7 +136,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) 
assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -150,7 +150,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses, grouped by status code range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/internal/collector/nginxplusreceiver/generated_component_test.go b/internal/collector/nginxplusreceiver/generated_component_test.go index 9fbb9944ca..170385ba71 100644 --- a/internal/collector/nginxplusreceiver/generated_component_test.go +++ b/internal/collector/nginxplusreceiver/generated_component_test.go @@ -46,21 +46,21 @@ func TestComponentLifecycle(t *testing.T) { require.NoError(t, err) require.NoError(t, sub.Unmarshal(&cfg)) - for _, tt := range tests { - t.Run(tt.name+"-shutdown", func(t *testing.T) { - c, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + for _, test := range tests { + t.Run(test.name+"-shutdown", func(t *testing.T) { + c, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) err = c.Shutdown(context.Background()) require.NoError(t, err) }) - t.Run(tt.name+"-lifecycle", func(t *testing.T) { - firstRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + t.Run(test.name+"-lifecycle", func(t *testing.T) { + firstRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) host := componenttest.NewNopHost() require.NoError(t, err) require.NoError(t, firstRcvr.Start(context.Background(), host)) require.NoError(t, firstRcvr.Shutdown(context.Background())) - secondRcvr, err := tt.createFn(context.Background(), receivertest.NewNopSettings(), cfg) + secondRcvr, err := test.createFn(context.Background(), receivertest.NewNopSettings(), cfg) require.NoError(t, err) require.NoError(t, secondRcvr.Start(context.Background(), host)) require.NoError(t, secondRcvr.Shutdown(context.Background())) diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go index 15dbba1752..866fb5a735 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go @@ -3620,25 +3620,17 @@ type MetricsBuilder struct { metricNginxStreamUpstreamZombieCount metricNginxStreamUpstreamZombieCount } -// MetricBuilderOption applies changes to default metrics builder. -type MetricBuilderOption interface { - apply(*MetricsBuilder) -} - -type metricBuilderOptionFunc func(mb *MetricsBuilder) - -func (mbof metricBuilderOptionFunc) apply(mb *MetricsBuilder) { - mbof(mb) -} +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption { - return metricBuilderOptionFunc(func(mb *MetricsBuilder) { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { mb.startTime = startTime - }) + } } -func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder { +func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), @@ -3705,7 +3697,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt } for _, op := range options { - op.apply(mb) + op(mb) } return mb } @@ -3718,28 +3710,20 @@ func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { } // ResourceMetricsOption applies changes to provided resource metrics. -type ResourceMetricsOption interface { - apply(pmetric.ResourceMetrics) -} - -type resourceMetricsOptionFunc func(pmetric.ResourceMetrics) - -func (rmof resourceMetricsOptionFunc) apply(rm pmetric.ResourceMetrics) { - rmof(rm) -} +type ResourceMetricsOption func(pmetric.ResourceMetrics) // WithResource sets the provided resource on the emitted ResourceMetrics. // It's recommended to use ResourceBuilder to create the resource. func WithResource(res pcommon.Resource) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { res.CopyTo(rm.Resource()) - }) + } } // WithStartTimeOverride overrides start time for all the resource metrics data points. // This option should be only used if different start time has to be set on metrics coming from different resources. func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { - return resourceMetricsOptionFunc(func(rm pmetric.ResourceMetrics) { + return func(rm pmetric.ResourceMetrics) { var dps pmetric.NumberDataPointSlice metrics := rm.ScopeMetrics().At(0).Metrics() for i := 0; i < metrics.Len(); i++ { @@ -3753,7 +3737,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { dps.At(j).SetStartTimestamp(start) } } - }) + } } // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for @@ -3761,7 +3745,7 @@ func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. // Resource attributes should be provided as ResourceMetricsOption arguments. 
-func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { rm := pmetric.NewResourceMetrics() ils := rm.ScopeMetrics().AppendEmpty() ils.Scope().SetName("otelcol/nginxplusreceiver") @@ -3826,8 +3810,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { mb.metricNginxStreamUpstreamPeerUnavailable.emit(ils.Metrics()) mb.metricNginxStreamUpstreamZombieCount.emit(ils.Metrics()) - for _, op := range options { - op.apply(rm) + for _, op := range rmo { + op(rm) } if ils.Metrics().Len() > 0 { @@ -3839,8 +3823,8 @@ func (mb *MetricsBuilder) EmitForResource(options ...ResourceMetricsOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user config, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(options ...ResourceMetricsOption) pmetric.Metrics { - mb.EmitForResource(options...) +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) metrics := mb.metricsBuffer mb.metricsBuffer = pmetric.NewMetrics() return metrics @@ -4138,9 +4122,9 @@ func (mb *MetricsBuilder) RecordNginxStreamUpstreamZombieCountDataPoint(ts pcomm // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. -func (mb *MetricsBuilder) Reset(options ...MetricBuilderOption) { +func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { - op.apply(mb) + op(mb) } } diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go index 6d048f6442..8e8f64cbb9 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go @@ -43,14 +43,14 @@ func TestMetricsBuilder(t *testing.T) { expectEmpty: true, }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { start := pcommon.Timestamp(1_000_000_000) ts := pcommon.Timestamp(1_000_001_000) observedZapCore, observedLogs := observer.New(zap.WarnLevel) settings := receivertest.NewNopSettings() settings.Logger = zap.New(observedZapCore) - mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, tt.name), settings, WithStartTime(start)) + mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 @@ -294,7 +294,7 @@ func TestMetricsBuilder(t *testing.T) { res := pcommon.NewResource() metrics := mb.Emit(WithResource(res)) - if tt.expectEmpty { + if test.expectEmpty { assert.Equal(t, 0, metrics.ResourceMetrics().Len()) return } @@ -304,10 +304,10 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, res, rm.Resource()) assert.Equal(t, 1, rm.ScopeMetrics().Len()) ms := rm.ScopeMetrics().At(0).Metrics() - if tt.metricsSet == testDataSetDefault { + if test.metricsSet == testDataSetDefault { assert.Equal(t, defaultMetricsCount, ms.Len()) } - if tt.metricsSet == testDataSetAll 
{ + if test.metricsSet == testDataSetAll { assert.Equal(t, allMetricsCount, ms.Len()) } validatedMetrics := make(map[string]bool) @@ -320,7 +320,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of bytes read from the cache or proxied server.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -370,7 +370,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of responses read from the cache or proxied server.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -390,7 +390,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of NGINX config reloads.", ms.At(i).Description()) assert.Equal(t, "reloads", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -404,7 +404,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -436,7 +436,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections to an endpoint with a limit_conn directive.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -456,7 +456,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests to an endpoint with a limit_req directive.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -476,7 +476,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number 
of HTTP byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -499,7 +499,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests completed without sending a response.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -537,7 +537,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -569,7 +569,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of responses, grouped by status code range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -592,7 +592,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests received from clients.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -630,7 +630,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of byte IO per HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -752,7 +752,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of health check requests made to a HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, 
ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -781,7 +781,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client requests forwarded to the HTTP upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -831,7 +831,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of responses obtained from the HTTP upstream peer grouped by status range.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -887,7 +887,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Number of times the server became unavailable for client requests (“unavail”).", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -931,7 +931,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of requests rejected due to the queue overflow.", ms.At(i).Description()) assert.Equal(t, "responses", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1036,7 +1036,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) - assert.InDelta(t, float64(1), dp.DoubleValue(), 0.01) + assert.Equal(t, float64(1), dp.DoubleValue()) attrVal, ok := dp.Attributes().Get("nginx.zone.name") assert.True(t, ok) assert.EqualValues(t, "nginx.zone.name-val", attrVal.Str()) @@ -1047,7 +1047,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The number of attempts to allocate memory of specified size.", ms.At(i).Description()) assert.Equal(t, "allocations", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1106,7 +1106,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of SSL certificate 
verification failures.", ms.At(i).Description()) assert.Equal(t, "certificates", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1123,7 +1123,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of SSL handshakes.", ms.At(i).Description()) assert.Equal(t, "handshakes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1143,7 +1143,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of Stream byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1163,7 +1163,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of connections accepted from clients.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1180,7 +1180,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Total number of connections completed without creating a session.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1212,7 +1212,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of completed sessions.", ms.At(i).Description()) assert.Equal(t, "sessions", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1232,7 +1232,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of Stream Upstream Peer byte IO.", ms.At(i).Description()) assert.Equal(t, "bytes", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) 
assert.Equal(t, start, dp.StartTimestamp()) @@ -1309,7 +1309,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of client connections forwarded to this stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "connections", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1335,7 +1335,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of stream upstream peers grouped by state.", ms.At(i).Description()) assert.Equal(t, "peers", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1358,7 +1358,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of unsuccessful attempts to communicate with the stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "peers", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1381,7 +1381,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "The total number of health check requests made to the stream upstream peer.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1434,7 +1434,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "Current state of upstream peers in deployment. If any of the upstream peers in the deployment match the given state then the value will be 1. 
If no upstream peer is a match then the value will be 0.", ms.At(i).Description()) assert.Equal(t, "deployments", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) @@ -1487,7 +1487,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len()) assert.Equal(t, "How many times the server became unavailable for client connections (state “unavail”) due to the number of unsuccessful attempts reaching the max_fails threshold.", ms.At(i).Description()) assert.Equal(t, "requests", ms.At(i).Unit()) - assert.True(t, ms.At(i).Sum().IsMonotonic()) + assert.Equal(t, true, ms.At(i).Sum().IsMonotonic()) assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality()) dp := ms.At(i).Sum().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) diff --git a/internal/collector/otelcol.tmpl b/internal/collector/otelcol.tmpl index d988a21eda..b30e62181c 100644 --- a/internal/collector/otelcol.tmpl +++ b/internal/collector/otelcol.tmpl @@ -112,6 +112,10 @@ exporters: {{ if gt (len .TLS.Cert) 0 -}}cert_file: "{{- .TLS.Cert -}}"{{- end }} {{ if gt (len .TLS.Key) 0 -}}key_file: "{{- .TLS.Key -}}"{{- end }} {{- end }} + {{- if .Authenticator }} + auth: + authenticator: {{ .Authenticator -}} + {{- end }} {{- end }} {{- if ne .Exporters.PrometheusExporter nil }} prometheus: @@ -139,6 +143,24 @@ extensions: key_file: "{{ .Extensions.Health.Server.TLS.Key -}}" {{- end }} {{- end }} + + {{- if ne .Extensions.HeadersSetter nil }} + headers_setter: + headers: + {{- range $index, $header := .Extensions.HeadersSetter.Headers }} + - action: "{{ .Action -}}" + key: "{{ .Key -}}" + {{- if .Value }} + value: "{{ .Value -}}" + {{- end }} + {{- if .DefaultValue }} + default_value: "{{ .DefaultValue -}}" + {{- end }} + {{- if .FromContext }} + from_context: "{{ .FromContext -}}" + {{- end }} + {{- end }} + {{- end }} {{- end }} service: @@ -155,6 +177,9 @@ service: {{- if ne .Extensions.Health nil }} - health_check {{- end}} + {{- if ne .Extensions.HeadersSetter nil }} + - headers_setter + {{- end}} {{- end}} pipelines: metrics: diff --git a/internal/collector/settings_test.go b/internal/collector/settings_test.go index 0757771116..a512a5bc26 100644 --- a/internal/collector/settings_test.go +++ b/internal/collector/settings_test.go @@ -100,6 +100,22 @@ func TestTemplateWrite(t *testing.T) { }, }) + cfg.Collector.Extensions.HeadersSetter = &config.HeadersSetter{ + Headers: []config.Header{ + { + Action: "insert", + Key: "authorization", + Value: "key1", + }, { + Action: "upsert", + Key: "uuid", + Value: "1234", + }, + }, + } + + cfg.Collector.Exporters.OtlpExporters[0].Authenticator = "headers_setter" + require.NotNil(t, cfg) err := writeCollectorConfig(cfg.Collector) diff --git a/internal/config/config_test.go b/internal/config/config_test.go index ca188c3e56..ad58a7a287 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -313,9 +313,6 @@ func getAgentConfig() *Config { Host: "127.0.0.1", Port: 1234, }, - Auth: &AuthConfig{ - Token: "super-secret-token", - }, TLS: &TLSConfig{ Cert: "/path/to/server-cert.pem", Key: "/path/to/server-cert.pem", diff --git a/internal/config/types.go b/internal/config/types.go index e35b804c78..c44585d511 100644 --- a/internal/config/types.go +++ 
b/internal/config/types.go @@ -89,13 +89,14 @@ type ( } OtlpExporter struct { - Server *ServerConfig `yaml:"-" mapstructure:"server"` - Auth *AuthConfig `yaml:"-" mapstructure:"auth"` - TLS *TLSConfig `yaml:"-" mapstructure:"tls"` + Server *ServerConfig `yaml:"-" mapstructure:"server"` + TLS *TLSConfig `yaml:"-" mapstructure:"tls"` + Authenticator string `yaml:"-" mapstructure:"authenticator"` } Extensions struct { - Health *Health `yaml:"-" mapstructure:"health"` + Health *Health `yaml:"-" mapstructure:"health"` + HeadersSetter *HeadersSetter `yaml:"-" mapstructure:"headers_setter"` } Health struct { @@ -104,6 +105,18 @@ type ( Path string `yaml:"-" mapstructure:"path"` } + HeadersSetter struct { + Headers []Header `yaml:"-" mapstructure:"headers"` + } + + Header struct { + Action string `yaml:"-" mapstructure:"action"` + Key string `yaml:"-" mapstructure:"key"` + Value string `yaml:"-" mapstructure:"value"` + DefaultValue string `yaml:"-" mapstructure:"default_value"` + FromContext string `yaml:"-" mapstructure:"from_context"` + } + DebugExporter struct{} PrometheusExporter struct { diff --git a/test/config/collector/test-opentelemetry-collector-agent.yaml b/test/config/collector/test-opentelemetry-collector-agent.yaml index 4e74a9207f..337e14dae5 100644 --- a/test/config/collector/test-opentelemetry-collector-agent.yaml +++ b/test/config/collector/test-opentelemetry-collector-agent.yaml @@ -41,6 +41,8 @@ exporters: max_elapsed_time: 10m tls: insecure: true + auth: + authenticator: headers_setter prometheus: endpoint: "localhost:9876" debug: @@ -50,6 +52,14 @@ exporters: extensions: health_check: endpoint: "localhost:1337" + headers_setter: + headers: + - action: "insert" + key: "authorization" + value: "key1" + - action: "upsert" + key: "uuid" + value: "1234" service: telemetry: @@ -59,6 +69,7 @@ service: error_output_paths: ["/var/log/nginx-agent/opentelemetry-collector-agent.log"] extensions: - health_check + - headers_setter pipelines: metrics: receivers: diff --git a/test/mock/collector/docker-compose.yaml b/test/mock/collector/docker-compose.yaml index 895e6dda1b..9330c9b4e6 100644 --- a/test/mock/collector/docker-compose.yaml +++ b/test/mock/collector/docker-compose.yaml @@ -26,10 +26,8 @@ services: - metrics otel-collector: - image: otel/opentelemetry-collector-contrib:0.99.0 + image: mock-collector container_name: mock-collector-otel-collector - restart: on-failure - command: [ "--config=/etc/otel-collector.yaml" ] ports: - 4320:4317 - 9775:9090 diff --git a/test/mock/collector/mock-collector/Dockerfile b/test/mock/collector/mock-collector/Dockerfile new file mode 100644 index 0000000000..d220e9f2f2 --- /dev/null +++ b/test/mock/collector/mock-collector/Dockerfile @@ -0,0 +1,6 @@ +FROM golang:bookworm + +WORKDIR /mock-collector +COPY ./test/mock/collector/mock-collector/ ./ + +CMD ["go", "run", "main.go"] diff --git a/test/mock/collector/mock-collector/auth/auth.go b/test/mock/collector/mock-collector/auth/auth.go new file mode 100644 index 0000000000..9b30bfb26c --- /dev/null +++ b/test/mock/collector/mock-collector/auth/auth.go @@ -0,0 +1,71 @@ +// Copyright (c) F5, Inc. +// +// This source code is licensed under the Apache License, Version 2.0 license found in the +// LICENSE file in the root directory of this source tree. 
+ +package auth + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/extension/auth" + "go.uber.org/zap" +) + +const ( + AuthenticatorName = "headers_check" +) + +var ( + aType = component.MustNewType(AuthenticatorName) + HeadersCheckID = component.MustNewID(AuthenticatorName) +) + +type HeadersCheck struct { + AuthenticatorID component.ID `mapstructure:"authenticator"` + logger *zap.SugaredLogger +} + +type Option func(*HeadersCheck) + +// Ensure that the authenticator implements the auth.Server interface. +var _ auth.Server = (*HeadersCheck)(nil) + +func NewFactory() extension.Factory { + return extension.NewFactory( + aType, + CreateDefaultConfig, + CreateAuthExtensionFunc, + component.StabilityLevelBeta, + ) +} + +func (a *HeadersCheck) Start(_ context.Context, _ component.Host) error { + return nil +} + +func (a *HeadersCheck) Shutdown(_ context.Context) error { + return nil +} + +func (a *HeadersCheck) Authenticate(ctx context.Context, headers map[string][]string) (context.Context, error) { + a.logger.Info("Headers", zap.Any("headers", headers)) + return ctx, nil +} + +func CreateAuthExtensionFunc( + _ context.Context, + setting extension.Settings, + _ component.Config, +) (extension.Extension, error) { + logger := setting.Logger.Sugar() + + a := &HeadersCheck{ + AuthenticatorID: setting.ID, + logger: logger, + } + + return a, nil +} diff --git a/test/mock/collector/mock-collector/auth/config.go b/test/mock/collector/mock-collector/auth/config.go new file mode 100644 index 0000000000..95f6d08922 --- /dev/null +++ b/test/mock/collector/mock-collector/auth/config.go @@ -0,0 +1,24 @@ +// Copyright (c) F5, Inc. +// +// This source code is licensed under the Apache License, Version 2.0 license found in the +// LICENSE file in the root directory of this source tree. 
+ +package auth + +import ( + "go.opentelemetry.io/collector/component" +) + +type ServerConfig struct { + AuthenticatorID string `mapstructure:"authenticator"` +} + +type Config struct { + AuthenticatorID component.ID `mapstructure:",squash"` +} + +func CreateDefaultConfig() component.Config { + return &Config{ + AuthenticatorID: HeadersCheckID, + } +} diff --git a/test/mock/collector/mock-collector/go.mod b/test/mock/collector/mock-collector/go.mod new file mode 100644 index 0000000000..adc289f63c --- /dev/null +++ b/test/mock/collector/mock-collector/go.mod @@ -0,0 +1,141 @@ +module go.opentelemetry.io/collector/cmd/builder + +go 1.22.0 + +toolchain go1.23.2 + +require ( + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.111.0 + go.opentelemetry.io/collector/component v0.111.0 + go.opentelemetry.io/collector/confmap v1.17.0 + go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 + go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0 + go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.17.0 + go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0 + go.opentelemetry.io/collector/connector v0.111.0 + go.opentelemetry.io/collector/exporter v0.111.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.111.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0 + go.opentelemetry.io/collector/extension v0.111.0 + go.opentelemetry.io/collector/extension/auth v0.111.0 + go.opentelemetry.io/collector/otelcol v0.111.0 + go.opentelemetry.io/collector/processor v0.111.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.111.0 + go.opentelemetry.io/collector/receiver v0.111.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/ebitengine/purego v0.8.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.10 // indirect + github.com/knadh/koanf/maps v0.1.1 // indirect + github.com/knadh/koanf/providers/confmap v0.1.0 // indirect + github.com/knadh/koanf/v2 v2.1.1 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mostynb/go-grpc-compression v1.2.3 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rs/cors v1.11.1 // indirect + github.com/shirou/gopsutil/v4 v4.24.9 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.9.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/collector v0.111.0 // indirect + go.opentelemetry.io/collector/client v1.17.0 // indirect + go.opentelemetry.io/collector/component/componentprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.111.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.111.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.17.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.111.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.111.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.17.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.17.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.17.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.111.0 // indirect + go.opentelemetry.io/collector/config/configtls v1.17.0 // indirect + go.opentelemetry.io/collector/config/internal v0.111.0 // indirect + go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/consumer v0.111.0 // indirect + go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.111.0 // indirect + go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 // indirect + go.opentelemetry.io/collector/featuregate v1.17.0 // indirect + go.opentelemetry.io/collector/internal/globalgates v0.111.0 // indirect + go.opentelemetry.io/collector/internal/globalsignal v0.111.0 // indirect + go.opentelemetry.io/collector/pdata v1.17.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.111.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.111.0 // indirect + go.opentelemetry.io/collector/pipeline v0.111.0 // indirect + go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 // indirect + go.opentelemetry.io/collector/semconv v0.111.0 // indirect + go.opentelemetry.io/collector/service v0.111.0 // indirect + go.opentelemetry.io/contrib/config v0.10.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect + go.opentelemetry.io/contrib/propagators/b3 v1.30.0 // 
indirect + go.opentelemetry.io/otel v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 // indirect + go.opentelemetry.io/otel/log v0.6.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/sdk v1.30.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.6.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect + go.opentelemetry.io/otel/trace v1.30.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + gonum.org/v1/gonum v0.15.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/test/mock/collector/mock-collector/go.sum b/test/mock/collector/mock-collector/go.sum new file mode 100644 index 0000000000..bc470f44fa --- /dev/null +++ b/test/mock/collector/mock-collector/go.sum @@ -0,0 +1,539 @@ +cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= +cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I= 
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= +github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= +github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= +github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4= +github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= 
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.0 h1:JbqvnEzRvPpxhCJzJJ2y0RbiZ8nyjccVUrSM3q+GvvE= +github.com/ebitengine/purego v0.8.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod 
h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0= +github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw= +github.com/hashicorp/consul/api v1.29.2/go.mod 
h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w= +github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I= +github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= +github.com/knadh/koanf/maps v0.1.1/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= +github.com/knadh/koanf/providers/confmap v0.1.0 h1:gOkxhHkemwG4LezxxN8DMOFopOPghxRVp7JbIvdvqzU= +github.com/knadh/koanf/providers/confmap v0.1.0/go.mod h1:2uLhxQzJnyHKfxG927awZC7+fyHFdQkd697K4MdLnIU= +github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= +github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso= +github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= +github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= +github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.111.0 h1:VAXpIvh4yqPUgvsEP6rX5/GMuowJgL8gMoEVr3cgh5E= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.111.0/go.mod h1:J3jMd98FFiMSiYsk2VX5SV821hcaif/juOVke+nLvP4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0 h1:QhEwQTGTXitMPbmyloNfLVz1r9YzZ8izJUJivI8obzs= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.111.0/go.mod h1:I7nEkR7TDPFw162jYtPJZVevkniQfQ0FLIFuu2RGK3A= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 h1:Hh3Lt6GIw/jMfCSJ5XjBoZRmjZ1pbJJu6Xi7WrDTUi0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0/go.mod h1:rQ9lQhijXIJIT5UGuwiKoEcWW6bdWJ4fnO+PndfuYEw= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0 h1:kUUO8VNv/d9Tpx0NvOsRnUsz/JvZ8SWRnK+vT0cNjuU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.111.0/go.mod h1:SstR8PglIFBVGCZHS69bwJGl6TaCQQ5aLSEoas/8SRA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0 h1:RSbk3ty1D9zeBC/elcqVdJoZjpAa331Wha99yNHnH6w= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.111.0/go.mod h1:iDBwbN0by4Y75X6j5PuRoJL5MpoaDv0l7s8dHFQHJPU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0 h1:BCev4nJfHH2u9AsWFfxR1o1Vt5HoW9myN4jaktZInRA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.111.0/go.mod h1:xJ8w6JN/tfRpUXTU6jx/bYmTIcy7OTz7PVFVR/SdqC8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.111.0 h1:abeLe2WANVWpnNua41Aa+RTmYYGs0gk1oQRd2/XH7Uo= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver 
v0.111.0/go.mod h1:Nij85WmJr/+q0HeAvGulEYxFE+PMlhFelPWN6yzCuuw= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= +github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= +github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod 
h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/shirou/gopsutil/v4 v4.24.9 h1:KIV+/HaHD5ka5f570RZq+2SaeFsb/pq+fp2DGNWYoOI= +github.com/shirou/gopsutil/v4 v4.24.9/go.mod h1:3fkaHNeYsUFCGZ8+9vZVWtbyM1k2eRnlL+bWO8Bxa/Q= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector v0.111.0 h1:D3LJTYrrK2ac94E2PXPSbVkArqxbklbCLsE4MAJQdRo= +go.opentelemetry.io/collector v0.111.0/go.mod h1:eZi4Z1DmHy+sVqbUI8dZNvhrH7HZIlX+0AKorOtv6nE= +go.opentelemetry.io/collector/client v1.17.0 h1:eJB4r4nPY0WrQ6IQEEbOPCOfQU7N15yzZud9y5fKfms= +go.opentelemetry.io/collector/client v1.17.0/go.mod h1:egG3tOG68zvC04hgl6cW2H/oWCUCCdDWtL4WpbcSUys= +go.opentelemetry.io/collector/component v0.111.0 h1:AiDIrhkq6sbHnU9Rhq6t4DC4Gal43bryd1+NTJNojAQ= +go.opentelemetry.io/collector/component v0.111.0/go.mod h1:wYwbRuhzK5bm5x1bX+ukm1tT50QXYLs4MKwzyfiVGoE= +go.opentelemetry.io/collector/component/componentprofiles v0.111.0 h1:yT3Sa833G9GMiXkAOuYi30afd/5vTmDQpZo6+X/XjXM= +go.opentelemetry.io/collector/component/componentprofiles v0.111.0/go.mod h1:v9cm6ndumcbCSqZDBs0vRReRW7KSYax1RZVhs/CiZCo= +go.opentelemetry.io/collector/component/componentstatus v0.111.0 h1:DojO8TbkysTtEoxzN6fJqhgCsu0QhxgJ9R+1bitnowM= +go.opentelemetry.io/collector/component/componentstatus v0.111.0/go.mod h1:wKozN6s9dykUB9aLSBXSPT9SJ2fckNvGSFZx4fRZbSY= +go.opentelemetry.io/collector/config/configauth v0.111.0 h1:0CcgX4TzK5iu2YtryIu3al8lNI+9fqjbGoyvAFk9ZCw= +go.opentelemetry.io/collector/config/configauth v0.111.0/go.mod h1:5oyYNL3gnYMYNdNsEjFvA2Tdc1yjG8L+HQFIjPo6kK8= +go.opentelemetry.io/collector/config/configcompression v1.17.0 h1:5CzLHTPOgHaKod1ZQLYs0o7GZDBhdsLQRm8Lcbo79vU= +go.opentelemetry.io/collector/config/configcompression v1.17.0/go.mod h1:pnxkFCLUZLKWzYJvfSwZnPrnm0twX14CYj2ADth5xiU= +go.opentelemetry.io/collector/config/configgrpc v0.111.0 
h1:XwHBWCP0m/d6YZ0VZltzVvnz5hDB9ik7sPRjJIdmjUk= +go.opentelemetry.io/collector/config/configgrpc v0.111.0/go.mod h1:K9OLwZM8dGNL1Jul/FGxlRsnLd1umgDyA+yxq2BNXUs= +go.opentelemetry.io/collector/config/confighttp v0.111.0 h1:nZJFHKYYeCasyhhFC71iZf6GAs6pfFcNOga6b8+lFvc= +go.opentelemetry.io/collector/config/confighttp v0.111.0/go.mod h1:heE5JjcLDiH8fMULf55QL2oI9+8Ct58Vq/QfP7TV684= +go.opentelemetry.io/collector/config/confignet v1.17.0 h1:cBmDdiPuIVrHiecgCKyXhRYmDOz9Do5IM7O1JhbB3es= +go.opentelemetry.io/collector/config/confignet v1.17.0/go.mod h1:o3v4joAEjvLwntqexg5ixMqRrU1+Vst+jWuCUaBNgOg= +go.opentelemetry.io/collector/config/configopaque v1.17.0 h1:wHhUgJhmDgNd6M7GW8IU5HjWi/pNmBEe9jBhavoR45g= +go.opentelemetry.io/collector/config/configopaque v1.17.0/go.mod h1:6zlLIyOoRpJJ+0bEKrlZOZon3rOp5Jrz9fMdR4twOS4= +go.opentelemetry.io/collector/config/configretry v1.17.0 h1:9GaiNKgUDx5by+A0aHKojw1BilHSK+8wq2LOmnynN00= +go.opentelemetry.io/collector/config/configretry v1.17.0/go.mod h1:KvQF5cfphq1rQm1dKR4eLDNQYw6iI2fY72NMZVa+0N0= +go.opentelemetry.io/collector/config/configtelemetry v0.111.0 h1:Q3TJRM2A3FIDjIvzWa3uFArsdFN0I/0GzcWynHjC+oY= +go.opentelemetry.io/collector/config/configtelemetry v0.111.0/go.mod h1:R0MBUxjSMVMIhljuDHWIygzzJWQyZHXXWIgQNxcFwhc= +go.opentelemetry.io/collector/config/configtls v1.17.0 h1:5DPgmBgpKEopLGmkjaihZHVA/8yH0LGoOrUZlb86T0Q= +go.opentelemetry.io/collector/config/configtls v1.17.0/go.mod h1:xUV5/xAHJbwrCuT2rGurBGSUqyFFAVVBcQ5DJAENeCc= +go.opentelemetry.io/collector/config/internal v0.111.0 h1:HTrN9xCpX42xlyDskWbhA/2NkSjMasxNEuGkmjjq7Q8= +go.opentelemetry.io/collector/config/internal v0.111.0/go.mod h1:yC7E4h1Uj0SubxcFImh6OvBHFTjMh99+A5PuyIgDWqc= +go.opentelemetry.io/collector/confmap v1.17.0 h1:5UKHtPGtzNGaOGBsJ6aFpvsKElNUXOVuErBfC0eTWLM= +go.opentelemetry.io/collector/confmap v1.17.0/go.mod h1:GrIZ12P/9DPOuTpe2PIS51a0P/ZM6iKtByVee1Uf3+k= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0 h1:KH0ABOBfSPp5XZtHkoXeI9wKoOD9B0eN6TDo08SwN/c= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.17.0/go.mod h1:jyFbV9hLrYJf2zNjqcpzkzB6zmPj/Ohr+S+vmPuxyMY= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0 h1:UyMO2ddtO7GKuFjrkR51IxmeBuRJrb1KKatu60oosxI= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.17.0/go.mod h1:SCJ8zvuuaOwQJk+zI87XSuc+HbquP2tsYb9aPlfeeRg= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0 h1:R/U0uWAyppNrxvF+piqhnhcrPSNz3wnwHyEIRCbrmh0= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.17.0/go.mod h1:3mtUk7wwDQyPUsHtCOLi2v0uSZWfC00BhOhqHs4CWs4= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.17.0 h1:lI1uHXqTklVCFXgTPIKwUb8PTP/EpMF2VxwT48fQ54w= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.17.0/go.mod h1:1Vhweh5dDeTUOmcw5WSGHPgHUwZzouf3y2dQr4yFWjA= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0 h1:FtKwwHS8NSNJWrhE7JsFlYhe+2GojENfOQbhQMSTyRo= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.17.0/go.mod h1:9/R8ucfVQEEEHMv9b7M6rSB8nF2k+MfIO93vbDEsaMU= +go.opentelemetry.io/collector/connector v0.111.0 h1:dOaJRO27LyX4ZnkZA51namo2V5idRWvWoMVf4b7obro= +go.opentelemetry.io/collector/connector v0.111.0/go.mod h1:gPwxA1SK+uraSTpX20MG/cNc+axhkBm8+B6z6hh6hYg= +go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0 h1:tJ4+hcWRhknw+cRw6d6dI4CyX3/puqnd1Rg9+mWdwHU= +go.opentelemetry.io/collector/connector/connectorprofiles v0.111.0/go.mod h1:LdfE8hNYcEb+fI5kZp4w3ZGlTLFAmvHAPtTZxS6TZ38= 
+go.opentelemetry.io/collector/consumer v0.111.0 h1:d2kRTDnu+p0q4D5fTU+Pk59KRm5F2JRYrk30Ep5j0xI= +go.opentelemetry.io/collector/consumer v0.111.0/go.mod h1:FjY9bPbVkFZLKKxnNbGsIqaz3lcFDKGf+7wxA1uCugs= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0 h1:w9kGdTaXdwD/ZtbxVOvuYQEFKBX3THQgEz/enQnMt9s= +go.opentelemetry.io/collector/consumer/consumerprofiles v0.111.0/go.mod h1:Ebt1jDdrQb3G2sNHrWHNr5wS3UJ9k3h8LHCqUPTbxLY= +go.opentelemetry.io/collector/consumer/consumertest v0.111.0 h1:ZEikGRPdrhVAq7xhJVc8WapRBVN/CdPnMEnXgpRGu1U= +go.opentelemetry.io/collector/consumer/consumertest v0.111.0/go.mod h1:EHPrn8ovcTGdTDlCEi1grOXSP3jUUYU0zvl92uA5L+4= +go.opentelemetry.io/collector/exporter v0.111.0 h1:NpiP6xXGOmSi59RlB5gGTB+PtCLldVeK3vCQBJPW0sU= +go.opentelemetry.io/collector/exporter v0.111.0/go.mod h1:FjO80zGWZjqXil8vM1MS8gyxxzZ29WmChTNV2y9xjHo= +go.opentelemetry.io/collector/exporter/debugexporter v0.111.0 h1:KiypGuW+JG1gV9l6pvSEIMKwn+MLJn0Ol62HMe5ytr4= +go.opentelemetry.io/collector/exporter/debugexporter v0.111.0/go.mod h1:7ihw3KDcvrY5kXIRNxB64Pz6kguf5Q0x9mJAvbBLT5Y= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0 h1:fpIRPzqsaEtbVip/wsU6h/GMGISo7UjiiYV61MOMEpQ= +go.opentelemetry.io/collector/exporter/exporterprofiles v0.111.0/go.mod h1:NGUTQd1fminFnw289fVQFN4dxdyedK4GTTrJUc9gCtw= +go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0 h1:eOyd1InTuymfIP4oMzJki28JjpGQzOEK6Y0YlI6pwgA= +go.opentelemetry.io/collector/exporter/otlpexporter v0.111.0/go.mod h1:nOUveQ4KWFqlCA6b0L5DXMosZCcNtit8abEuLHwBaUM= +go.opentelemetry.io/collector/extension v0.111.0 h1:oagGQS3k6Etnm5N5OEkfIWrX4/77t/ZP+B0xfTPUVm8= +go.opentelemetry.io/collector/extension v0.111.0/go.mod h1:ELCpDNpS2qb/31Z8pCMmqTkzfnUV3CanQZMwLW+GCMI= +go.opentelemetry.io/collector/extension/auth v0.111.0 h1:V9DfnMsKdVfsQMeGR5H/nAYHlZnr1Td75kkJOKbCevk= +go.opentelemetry.io/collector/extension/auth v0.111.0/go.mod h1:4O5JQqEdAWuq4giicIy6DKlgkKTC0qgVEJm44RhviZY= +go.opentelemetry.io/collector/extension/experimental/storage v0.111.0 h1:kUJSFjm6IQ6nmcJlfSFPvcEO/XeOP9gJY0Qz9O98DKg= +go.opentelemetry.io/collector/extension/experimental/storage v0.111.0/go.mod h1:qQGvl8Kz2W8b7QywtE8GNqWJMDBo47cjoiIXYuE+/zM= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0 h1:Ps2/2TUbAkxgZu1YxSxDweZDLJx5x7CyNKCINZkLFtY= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.111.0/go.mod h1:q4kBSWsOX62hAp7si+Y0Y0ZXWyCpXjiRuWWz7IL/MDI= +go.opentelemetry.io/collector/extension/zpagesextension v0.111.0 h1:X+YXkJ3kX8c3xN/Mfiqc/gKB7NaQnG4Cge9R60lKOyw= +go.opentelemetry.io/collector/extension/zpagesextension v0.111.0/go.mod h1:v5u5Ots6HgbhKsvRXB+SF9cmVTgkUATNiejHbpsa0rY= +go.opentelemetry.io/collector/featuregate v1.17.0 h1:vpfXyWe7DFqCsDArsR9rAKKtVpt72PKjzjeqPegViws= +go.opentelemetry.io/collector/featuregate v1.17.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs= +go.opentelemetry.io/collector/internal/globalgates v0.111.0 h1:pPf/U401i/bEJ8ucbYMyqOdkujyZ92Gbm6RFkJrDvBc= +go.opentelemetry.io/collector/internal/globalgates v0.111.0/go.mod h1:HqIBKc8J5Vccn93gkN1uaVK42VbVsuVyjmo5b1MORZo= +go.opentelemetry.io/collector/internal/globalsignal v0.111.0 h1:oq0nSD+7K2Q1Fx5d3s6lPRdKZeTL0FEg4sIaR7ZJzIc= +go.opentelemetry.io/collector/internal/globalsignal v0.111.0/go.mod h1:GqMXodPWOxK5uqpX8MaMXC2389y2XJTa5nPwf8FYDK8= +go.opentelemetry.io/collector/otelcol v0.111.0 h1:RcS1/BDsEBGdI4YjosdElxYwsA2tTtiYEuWjEF0p8vk= +go.opentelemetry.io/collector/otelcol v0.111.0/go.mod h1:B/ri/CwsW7zeLXkCcB3XtarxjJ80eIC+z8guGhFFpis= 
+go.opentelemetry.io/collector/pdata v1.17.0 h1:z8cjjT2FThAehWu5fbF48OnZyK5q8xd1UhC4XszDo0w= +go.opentelemetry.io/collector/pdata v1.17.0/go.mod h1:yZaQ9KZAm/qie96LTygRKxOXMq0/54h8OW7330ycuvQ= +go.opentelemetry.io/collector/pdata/pprofile v0.111.0 h1:4if6rItcX8a6X4bIh6lwQnlE+ncKXQaIim7F5O7ZA58= +go.opentelemetry.io/collector/pdata/pprofile v0.111.0/go.mod h1:iBwrNFB6za1qspy46ZE41H3MmcxUogn2AuYbrWdoMd8= +go.opentelemetry.io/collector/pdata/testdata v0.111.0 h1:Fqyf1NJ0az+HbsvKSCNw8pfa1Y6c4FhZwlMK4ZulG0s= +go.opentelemetry.io/collector/pdata/testdata v0.111.0/go.mod h1:7SypOzbVtRsCkns6Yxa4GztnkVGkk7b9fW24Ow75q5s= +go.opentelemetry.io/collector/pipeline v0.111.0 h1:qENDGvWWnDXguEfmj8eO+5kr8Y6XFKytU5SuMinz3Ls= +go.opentelemetry.io/collector/pipeline v0.111.0/go.mod h1:ZZMU3019geEU283rTW5M/LkcqLqHp/YI2Nl6/Vp68PQ= +go.opentelemetry.io/collector/processor v0.111.0 h1:85Llb9ekzzvzAXgFaw/n7LHFJ5QAjeOulGJlDLEAR3g= +go.opentelemetry.io/collector/processor v0.111.0/go.mod h1:78Z4f96j9trPFZIRCiQk6nVRo6vua4cW9VYNfHTBsvo= +go.opentelemetry.io/collector/processor/batchprocessor v0.111.0 h1:JoBjX0LjmQ3n22o54sxAN9T6sgxumBLDqq0RElvYAVc= +go.opentelemetry.io/collector/processor/batchprocessor v0.111.0/go.mod h1:8Dw89aInFh4dX3A0iyIcpbQ1A/8hVWtxjrJKyAOb9TQ= +go.opentelemetry.io/collector/processor/processorprofiles v0.111.0 h1:QxnwbqClJvS7zDWgsIaqqDs5YsmHgFvmZKQsmoLTqJM= +go.opentelemetry.io/collector/processor/processorprofiles v0.111.0/go.mod h1:8qPd8Af0XX7Wlupe8JHmdhkKMiiJ5AO7OEFYW3fN0CQ= +go.opentelemetry.io/collector/receiver v0.111.0 h1:6cRHZ9cUxYfRPkArUCkIhoo7Byf6tq/2qvbMIKlhG3s= +go.opentelemetry.io/collector/receiver v0.111.0/go.mod h1:QSl/n9ikDP+6n39QcRY/VLjwQI0qbT1RQp512uBQl3g= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0 h1:VsQ55DvHvjYop+wbpY6qCSF0cfoMNMZEd0pANa5l+9Y= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.111.0/go.mod h1:/zUX2GHa7CIeqGRl+hpQk3zQ1QCaUpBK42XGqrXAbzQ= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0 h1:oYLAdGMQQR7gB6wVkbV0G4EMsrmiOs3O0qf3hh/3avw= +go.opentelemetry.io/collector/receiver/receiverprofiles v0.111.0/go.mod h1:M/OfdEGnvyB+fSTSW4RPKj5N06FXL8oKSIf60FlrKmM= +go.opentelemetry.io/collector/semconv v0.111.0 h1:ELleMtLBzeZ3xhfhYPmFcLc0hJMqRxhOB0eY60WLivw= +go.opentelemetry.io/collector/semconv v0.111.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= +go.opentelemetry.io/collector/service v0.111.0 h1:6yGjjbZvlYbir+vzi/9ACF965m8i96ScPTjpVvki3ms= +go.opentelemetry.io/collector/service v0.111.0/go.mod h1:tti8TAosPuRj51/bbrSvf6OIJoSyTkywEvTdY/fAuwY= +go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= +go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0 h1:hCq2hNMwsegUvPzI7sPOvtO9cqyy5GbWt/Ybp2xrx8Q= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.55.0/go.mod h1:LqaApwGx/oUmzsbqxkzuBvyoPpkxk3JQWnqfVrJ3wCA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/contrib/propagators/b3 v1.30.0 h1:vumy4r1KMyaoQRltX7cJ37p3nluzALX9nugCjNNefuY= +go.opentelemetry.io/contrib/propagators/b3 v1.30.0/go.mod h1:fRbvRsaeVZ82LIl3u0rIvusIel2UUf+JcaaIpy5taho= +go.opentelemetry.io/contrib/zpages v0.55.0 
h1:F+xj261Ulwl79QC+2O+IO1b3NbwppUDwN+7LbDSdQcY= +go.opentelemetry.io/contrib/zpages v0.55.0/go.mod h1:dDqDGDfbXSjt/k9orZk4Huulvz1letX1YWTKts5GQpo= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0 h1:QSKmLBzbFULSyHzOdO9JsN9lpE4zkrz1byYGmJecdVE= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.6.0/go.mod h1:sTQ/NH8Yrirf0sJ5rWqVu+oT82i4zL9FaF6rWcqnptM= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 h1:WypxHH02KX2poqqbaadmkMYalGyy/vil4HE4PM4nRJc= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0/go.mod h1:U79SV99vtvGSEBeeHnpgGJfTsnsdkWLpPN/CcHAzBSI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 h1:VrMAbeJz4gnVDg2zEzjHG4dEH86j4jO6VYB+NgtGD8s= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0/go.mod h1:qqN/uFdpeitTvm+JDqqnjm517pmQRYxTORbETHq5tOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ= +go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0 h1:bZHOb8k/CwwSt0DgvgaoOhBXWNdWqFWaIsGTtg1H3KE= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.6.0/go.mod h1:XlV163j81kDdIt5b5BXCjdqVfqJFy/LJrHA697SorvQ= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0 h1:IyFlqNsi8VT/nwYlLJfdM0y1gavxGpEvnf6FtVfZ6X4= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.30.0/go.mod h1:bxiX8eUeKoAEQmbq/ecUT8UqZwCjZW52yJrXJUSozsk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0 h1:kn1BudCgwtE7PxLqcZkErpD8GKqLZ6BSzeW9QihQJeM= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.30.0/go.mod h1:ljkUDtAMdleoi9tIG1R6dJUpVwDcYjw3J2Q6Q/SuiC0= +go.opentelemetry.io/otel/log v0.6.0 h1:nH66tr+dmEgW5y+F9LanGJUBYPrRgP4g2EkmPE3LeK8= +go.opentelemetry.io/otel/log v0.6.0/go.mod h1:KdySypjQHhP069JX0z/t26VHwa8vSwzgaKmXtIB3fJM= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk/log v0.6.0 h1:4J8BwXY4EeDE9Mowg+CyhWVBhTSLXVXodiXxS/+PGqI= +go.opentelemetry.io/otel/sdk/log v0.6.0/go.mod h1:L1DN8RMAduKkrwRAFDEX3E3TLOq46+XMGSbUfHU/+vE= +go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= +go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod 
h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= +gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/test/mock/collector/mock-collector/main.go b/test/mock/collector/mock-collector/main.go new file mode 100644 index 0000000000..b7e948467b --- /dev/null +++ b/test/mock/collector/mock-collector/main.go @@ -0,0 +1,114 @@ +// Copyright (c) F5, Inc. +// +// This source code is licensed under the Apache License, Version 2.0 license found in the +// LICENSE file in the root directory of this source tree. 
+ +package main + +import ( + "log" + + "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter" + "go.opentelemetry.io/collector/cmd/builder/auth" + "go.opentelemetry.io/collector/connector" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/debugexporter" + "go.opentelemetry.io/collector/exporter/otlpexporter" + "go.opentelemetry.io/collector/extension" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/batchprocessor" + "go.opentelemetry.io/collector/receiver" + "go.opentelemetry.io/collector/receiver/otlpreceiver" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + envprovider "go.opentelemetry.io/collector/confmap/provider/envprovider" + fileprovider "go.opentelemetry.io/collector/confmap/provider/fileprovider" + httpprovider "go.opentelemetry.io/collector/confmap/provider/httpprovider" + httpsprovider "go.opentelemetry.io/collector/confmap/provider/httpsprovider" + yamlprovider "go.opentelemetry.io/collector/confmap/provider/yamlprovider" + "go.opentelemetry.io/collector/otelcol" +) + +func main() { + println("Starting mock collector") + + info := component.BuildInfo{ + Command: "mock-collector", + Description: "Mock OTel Collector distribution for Developers", + Version: "1.0.0", + } + + set := otelcol.CollectorSettings{ + BuildInfo: info, + Factories: components, + ConfigProviderSettings: otelcol.ConfigProviderSettings{ + ResolverSettings: confmap.ResolverSettings{ + ProviderFactories: []confmap.ProviderFactory{ + envprovider.NewFactory(), + fileprovider.NewFactory(), + httpprovider.NewFactory(), + httpsprovider.NewFactory(), + yamlprovider.NewFactory(), + }, + URIs: []string{"/etc/otel-collector.yaml"}, + }, + }, + } + + if err := runInteractive(set); err != nil { + log.Fatal(err) + } +} + +func runInteractive(params otelcol.CollectorSettings) error { + cmd := otelcol.NewCommand(params) + if err := cmd.Execute(); err != nil { + log.Fatalf("collector server run finished with error: %v", err) + } + + return nil +} + +func components() (otelcol.Factories, error) { + var err error + factories := otelcol.Factories{} + + factories.Extensions, err = extension.MakeFactoryMap( + auth.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Receivers, err = receiver.MakeFactoryMap( + otlpreceiver.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Exporters, err = exporter.MakeFactoryMap( + debugexporter.NewFactory(), + otlpexporter.NewFactory(), + prometheusexporter.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + + factories.Processors, err = processor.MakeFactoryMap( + batchprocessor.NewFactory(), + ) + if err != nil { + return otelcol.Factories{}, err + } + factories.ProcessorModules = make(map[component.Type]string, len(factories.Processors)) + + factories.Connectors, err = connector.MakeFactoryMap() + if err != nil { + return otelcol.Factories{}, err + } + + return factories, nil +} diff --git a/test/mock/collector/nginx-agent.conf b/test/mock/collector/nginx-agent.conf index 0ed4119425..c24e7d208e 100644 --- a/test/mock/collector/nginx-agent.conf +++ b/test/mock/collector/nginx-agent.conf @@ -31,6 +31,8 @@ client: timeout: 10s collector: + log: + level: DEBUG receivers: host_metrics: collection_interval: 1m0s @@ -61,8 +63,14 @@ collector: - server: host: "otel-collector" port: 4317 + authenticator: headers_setter extensions: health: server: host: 
"127.0.0.1" port: 1337 + headers_setter: + headers: + - action: insert + key: "authorization" + value: "fake-authorization" diff --git a/test/mock/collector/otel-collector.yaml b/test/mock/collector/otel-collector.yaml index 815b38dad8..1372d21df1 100644 --- a/test/mock/collector/otel-collector.yaml +++ b/test/mock/collector/otel-collector.yaml @@ -3,6 +3,8 @@ receivers: protocols: grpc: endpoint: 0.0.0.0:4317 + auth: + authenticator: headers_check exporters: debug: verbosity: detailed @@ -13,11 +15,17 @@ exporters: processors: batch: +extensions: + headers_check: + service: + telemetry: + logs: + level: "debug" + output_paths: ["/mock-collector/collector.log"] + error_output_paths: ["/mock-collector/collector.log"] + extensions: [ headers_check ] pipelines: - traces: - receivers: [otlp] - exporters: [debug] metrics: receivers: [otlp] processors: [batch] diff --git a/test/types/config.go b/test/types/config.go index 99b622f045..0f093d3410 100644 --- a/test/types/config.go +++ b/test/types/config.go @@ -55,9 +55,6 @@ func AgentConfig() *config.Config { Host: "127.0.0.1", Port: randomPort1, }, - Auth: &config.AuthConfig{ - Token: "super-secret-token", - }, }, }, }, From c28b4107f1198e9e35c9aaa473a8323c3afcfad8 Mon Sep 17 00:00:00 2001 From: oliveromahony Date: Thu, 24 Oct 2024 13:43:32 +0100 Subject: [PATCH 10/12] updated files.proto to include cert meta (#890) --- api/grpc/mpi/v1/files.pb.go | 605 +++++++++++++++++++++++---- api/grpc/mpi/v1/files.pb.validate.go | 420 +++++++++++++++++++ api/grpc/mpi/v1/files.proto | 100 +++++ docs/proto/protos.md | 86 ++++ test/helpers/os_utils.go | 21 - 5 files changed, 1122 insertions(+), 110 deletions(-) diff --git a/api/grpc/mpi/v1/files.pb.go b/api/grpc/mpi/v1/files.pb.go index b02e2c6cf8..bb2a47f361 100644 --- a/api/grpc/mpi/v1/files.pb.go +++ b/api/grpc/mpi/v1/files.pb.go @@ -27,6 +27,115 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Enum to represent the possible signature algorithms used for certificates +type SignatureAlgorithm int32 + +const ( + // Default, unknown or unsupported algorithm + SignatureAlgorithm_SIGNATURE_ALGORITHM_UNKNOWN SignatureAlgorithm = 0 + // MD2 with RSA (Unsupported) + SignatureAlgorithm_MD2_WITH_RSA SignatureAlgorithm = 1 + // MD5 with RSA (Only supported for signing, not verification) + SignatureAlgorithm_MD5_WITH_RSA SignatureAlgorithm = 2 + // SHA-1 with RSA (Only supported for signing and for verification of CRLs, CSRs, and OCSP responses) + SignatureAlgorithm_SHA1_WITH_RSA SignatureAlgorithm = 3 + // SHA-256 with RSA + SignatureAlgorithm_SHA256_WITH_RSA SignatureAlgorithm = 4 + // SHA-384 with RSA + SignatureAlgorithm_SHA384_WITH_RSA SignatureAlgorithm = 5 + // SHA-512 with RSA + SignatureAlgorithm_SHA512_WITH_RSA SignatureAlgorithm = 6 + // DSA with SHA-1 (Unsupported) + SignatureAlgorithm_DSA_WITH_SHA1 SignatureAlgorithm = 7 + // DSA with SHA-256 (Unsupported) + SignatureAlgorithm_DSA_WITH_SHA256 SignatureAlgorithm = 8 + // ECDSA with SHA-1 (Only supported for signing and for verification of CRLs, CSRs, and OCSP responses) + SignatureAlgorithm_ECDSA_WITH_SHA1 SignatureAlgorithm = 9 + // ECDSA with SHA-256 + SignatureAlgorithm_ECDSA_WITH_SHA256 SignatureAlgorithm = 10 + // ECDSA with SHA-384 + SignatureAlgorithm_ECDSA_WITH_SHA384 SignatureAlgorithm = 11 + // ECDSA with SHA-512 + SignatureAlgorithm_ECDSA_WITH_SHA512 SignatureAlgorithm = 12 + // SHA-256 with RSA-PSS + SignatureAlgorithm_SHA256_WITH_RSA_PSS SignatureAlgorithm = 13 + // SHA-384 with RSA-PSS + 
SignatureAlgorithm_SHA384_WITH_RSA_PSS SignatureAlgorithm = 14 + // SHA-512 with RSA-PSS + SignatureAlgorithm_SHA512_WITH_RSA_PSS SignatureAlgorithm = 15 + // Pure Ed25519 + SignatureAlgorithm_PURE_ED25519 SignatureAlgorithm = 16 +) + +// Enum value maps for SignatureAlgorithm. +var ( + SignatureAlgorithm_name = map[int32]string{ + 0: "SIGNATURE_ALGORITHM_UNKNOWN", + 1: "MD2_WITH_RSA", + 2: "MD5_WITH_RSA", + 3: "SHA1_WITH_RSA", + 4: "SHA256_WITH_RSA", + 5: "SHA384_WITH_RSA", + 6: "SHA512_WITH_RSA", + 7: "DSA_WITH_SHA1", + 8: "DSA_WITH_SHA256", + 9: "ECDSA_WITH_SHA1", + 10: "ECDSA_WITH_SHA256", + 11: "ECDSA_WITH_SHA384", + 12: "ECDSA_WITH_SHA512", + 13: "SHA256_WITH_RSA_PSS", + 14: "SHA384_WITH_RSA_PSS", + 15: "SHA512_WITH_RSA_PSS", + 16: "PURE_ED25519", + } + SignatureAlgorithm_value = map[string]int32{ + "SIGNATURE_ALGORITHM_UNKNOWN": 0, + "MD2_WITH_RSA": 1, + "MD5_WITH_RSA": 2, + "SHA1_WITH_RSA": 3, + "SHA256_WITH_RSA": 4, + "SHA384_WITH_RSA": 5, + "SHA512_WITH_RSA": 6, + "DSA_WITH_SHA1": 7, + "DSA_WITH_SHA256": 8, + "ECDSA_WITH_SHA1": 9, + "ECDSA_WITH_SHA256": 10, + "ECDSA_WITH_SHA384": 11, + "ECDSA_WITH_SHA512": 12, + "SHA256_WITH_RSA_PSS": 13, + "SHA384_WITH_RSA_PSS": 14, + "SHA512_WITH_RSA_PSS": 15, + "PURE_ED25519": 16, + } +) + +func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { + p := new(SignatureAlgorithm) + *p = x + return p +} + +func (x SignatureAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_mpi_v1_files_proto_enumTypes[0].Descriptor() +} + +func (SignatureAlgorithm) Type() protoreflect.EnumType { + return &file_mpi_v1_files_proto_enumTypes[0] +} + +func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SignatureAlgorithm.Descriptor instead. 
+func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_mpi_v1_files_proto_rawDescGZIP(), []int{0} +} + // Action enumeration type File_FileAction int32 @@ -72,11 +181,11 @@ func (x File_FileAction) String() string { } func (File_FileAction) Descriptor() protoreflect.EnumDescriptor { - return file_mpi_v1_files_proto_enumTypes[0].Descriptor() + return file_mpi_v1_files_proto_enumTypes[1].Descriptor() } func (File_FileAction) Type() protoreflect.EnumType { - return &file_mpi_v1_files_proto_enumTypes[0] + return &file_mpi_v1_files_proto_enumTypes[1] } func (x File_FileAction) Number() protoreflect.EnumNumber { @@ -618,6 +727,12 @@ type FileMeta struct { Permissions string `protobuf:"bytes,4,opt,name=permissions,proto3" json:"permissions,omitempty"` // The size of the file in bytes Size int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` + // additional file information + // + // Types that are assignable to FileType: + // + // *FileMeta_CertificateMeta + FileType isFileMeta_FileType `protobuf_oneof:"file_type"` } func (x *FileMeta) Reset() { @@ -685,6 +800,30 @@ func (x *FileMeta) GetSize() int64 { return 0 } +func (m *FileMeta) GetFileType() isFileMeta_FileType { + if m != nil { + return m.FileType + } + return nil +} + +func (x *FileMeta) GetCertificateMeta() *CertificateMeta { + if x, ok := x.GetFileType().(*FileMeta_CertificateMeta); ok { + return x.CertificateMeta + } + return nil +} + +type isFileMeta_FileType interface { + isFileMeta_FileType() +} + +type FileMeta_CertificateMeta struct { + CertificateMeta *CertificateMeta `protobuf:"bytes,6,opt,name=certificate_meta,json=certificateMeta,proto3,oneof"` +} + +func (*FileMeta_CertificateMeta) isFileMeta_FileType() {} + // Represents the update file request type UpdateFileRequest struct { state protoimpl.MessageState @@ -797,6 +936,220 @@ func (x *UpdateFileResponse) GetFileMeta() *FileMeta { return nil } +// Define the certificate message based on https://pkg.go.dev/crypto/x509#Certificate +// and https://github.com/googleapis/googleapis/blob/005df4681b89bd204a90b76168a6dc9d9e7bf4fe/google/cloud/iot/v1/resources.proto#L341 +type CertificateMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Serial number of the certificate, usually a unique identifier, RFC5280 states the upper limit for serial number is 20 octets + SerialNumber []byte `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"` + // Issuer details (who issued the certificate) + Issuer string `protobuf:"bytes,2,opt,name=issuer,proto3" json:"issuer,omitempty"` + // Subject details (to whom the certificate is issued) + Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"` + // Subject Alternative Names (SAN) including DNS names and IP addresses + Sans *SubjectAlternativeNames `protobuf:"bytes,4,opt,name=sans,proto3" json:"sans,omitempty"` + // Timestamps representing the start of certificate validity (Not Before, Not After) + Dates *CertificateDates `protobuf:"bytes,5,opt,name=dates,proto3" json:"dates,omitempty"` + // The algorithm used to sign the certificate (e.g., SHA256-RSA) + SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,6,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=mpi.v1.SignatureAlgorithm" json:"signature_algorithm,omitempty"` + // The type of public key in the certificate. 
+ PublicKeyAlgorithm string `protobuf:"bytes,7,opt,name=public_key_algorithm,json=publicKeyAlgorithm,proto3" json:"public_key_algorithm,omitempty"` +} + +func (x *CertificateMeta) Reset() { + *x = CertificateMeta{} + mi := &file_mpi_v1_files_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CertificateMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateMeta) ProtoMessage() {} + +func (x *CertificateMeta) ProtoReflect() protoreflect.Message { + mi := &file_mpi_v1_files_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateMeta.ProtoReflect.Descriptor instead. +func (*CertificateMeta) Descriptor() ([]byte, []int) { + return file_mpi_v1_files_proto_rawDescGZIP(), []int{13} +} + +func (x *CertificateMeta) GetSerialNumber() []byte { + if x != nil { + return x.SerialNumber + } + return nil +} + +func (x *CertificateMeta) GetIssuer() string { + if x != nil { + return x.Issuer + } + return "" +} + +func (x *CertificateMeta) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *CertificateMeta) GetSans() *SubjectAlternativeNames { + if x != nil { + return x.Sans + } + return nil +} + +func (x *CertificateMeta) GetDates() *CertificateDates { + if x != nil { + return x.Dates + } + return nil +} + +func (x *CertificateMeta) GetSignatureAlgorithm() SignatureAlgorithm { + if x != nil { + return x.SignatureAlgorithm + } + return SignatureAlgorithm_SIGNATURE_ALGORITHM_UNKNOWN +} + +func (x *CertificateMeta) GetPublicKeyAlgorithm() string { + if x != nil { + return x.PublicKeyAlgorithm + } + return "" +} + +// Represents the dates for which a certificate is valid +type CertificateDates struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The start date that for when the certificate is valid + NotBefore int64 `protobuf:"varint,1,opt,name=not_before,json=notBefore,proto3" json:"not_before,omitempty"` + // The end date that for when the certificate is valid + NotAfter int64 `protobuf:"varint,2,opt,name=not_after,json=notAfter,proto3" json:"not_after,omitempty"` +} + +func (x *CertificateDates) Reset() { + *x = CertificateDates{} + mi := &file_mpi_v1_files_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CertificateDates) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CertificateDates) ProtoMessage() {} + +func (x *CertificateDates) ProtoReflect() protoreflect.Message { + mi := &file_mpi_v1_files_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CertificateDates.ProtoReflect.Descriptor instead. 
+func (*CertificateDates) Descriptor() ([]byte, []int) { + return file_mpi_v1_files_proto_rawDescGZIP(), []int{14} +} + +func (x *CertificateDates) GetNotBefore() int64 { + if x != nil { + return x.NotBefore + } + return 0 +} + +func (x *CertificateDates) GetNotAfter() int64 { + if x != nil { + return x.NotAfter + } + return 0 +} + +// Represents the Subject Alternative Names for a certificate +type SubjectAlternativeNames struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // List of DNS names in the Subject Alternative Name (SAN) extension + DnsNames []string `protobuf:"bytes,1,rep,name=dns_names,json=dnsNames,proto3" json:"dns_names,omitempty"` + // List of ip addresses in the SAN extension + IpAddresses []string `protobuf:"bytes,2,rep,name=ip_addresses,json=ipAddresses,proto3" json:"ip_addresses,omitempty"` +} + +func (x *SubjectAlternativeNames) Reset() { + *x = SubjectAlternativeNames{} + mi := &file_mpi_v1_files_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SubjectAlternativeNames) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubjectAlternativeNames) ProtoMessage() {} + +func (x *SubjectAlternativeNames) ProtoReflect() protoreflect.Message { + mi := &file_mpi_v1_files_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubjectAlternativeNames.ProtoReflect.Descriptor instead. +func (*SubjectAlternativeNames) Descriptor() ([]byte, []int) { + return file_mpi_v1_files_proto_rawDescGZIP(), []int{15} +} + +func (x *SubjectAlternativeNames) GetDnsNames() []string { + if x != nil { + return x.DnsNames + } + return nil +} + +func (x *SubjectAlternativeNames) GetIpAddresses() []string { + if x != nil { + return x.IpAddresses + } + return nil +} + var File_mpi_v1_files_proto protoreflect.FileDescriptor var file_mpi_v1_files_proto_rawDesc = []byte{ @@ -873,7 +1226,7 @@ var file_mpi_v1_files_proto_rawDesc = []byte{ 0x65, 0x6e, 0x74, 0x73, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x2a, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x08, 0x46, + 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xa2, 0x02, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x08, 0xba, 0x48, 0x05, 0x72, 0x03, 0x3a, 0x01, 0x2f, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, @@ -886,42 +1239,105 @@ var file_mpi_v1_files_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x10, 0xba, 0x48, 0x0d, 0x72, 0x0b, 0x32, 0x09, 0x30, 0x5b, 0x30, 0x2d, 0x37, 0x5d, 0x7b, 0x33, 0x7d, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x9f, 0x01, 0x0a, - 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x69, 0x6c, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x04, - 0x66, 0x69, 0x6c, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x08, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6d, - 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x22, 0x43, - 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x32, 0xaf, 0x02, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, - 0x65, 0x77, 0x12, 0x1a, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, - 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, - 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x76, - 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, - 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x12, - 0x1d, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, - 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, - 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x76, - 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x3c, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x70, - 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, - 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x19, 0x2e, 0x6d, - 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, 0x6d, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x44, 0x0a, 0x10, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 
0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x48, + 0x00, 0x52, 0x0f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, + 0x9f, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, + 0x65, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x30, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x52, + 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x0c, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x22, 0x43, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x6d, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x08, 0x66, 0x69, + 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xe9, 0x02, 0x0a, 0x0f, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0d, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x09, 0xba, 0x48, 0x06, 0x7a, 0x04, 0x10, 0x01, 0x18, 0x15, 0x52, 0x0c, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x06, 0x69, 0x73, + 0x73, 0x75, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, + 0x02, 0x10, 0x01, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x07, 0x73, + 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, + 0x0a, 0x04, 0x73, 0x61, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x04, 0x73, + 0x61, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x05, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x65, 0x73, 0x52, 0x05, 0x64, 0x61, + 0x74, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1a, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 
0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x30, 0x0a, 0x14, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x61, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x22, 0x4e, 0x0a, 0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x44, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x6f, 0x74, 0x5f, 0x62, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6e, 0x6f, 0x74, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x74, 0x5f, 0x61, 0x66, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6e, 0x6f, 0x74, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x22, 0x59, 0x0a, 0x17, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x70, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0b, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x2a, 0x8a, 0x03, + 0x0a, 0x12, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, + 0x69, 0x74, 0x68, 0x6d, 0x12, 0x1f, 0x0a, 0x1b, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x41, 0x4c, 0x47, 0x4f, 0x52, 0x49, 0x54, 0x48, 0x4d, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x44, 0x32, 0x5f, 0x57, 0x49, 0x54, + 0x48, 0x5f, 0x52, 0x53, 0x41, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x44, 0x35, 0x5f, 0x57, + 0x49, 0x54, 0x48, 0x5f, 0x52, 0x53, 0x41, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x48, 0x41, + 0x31, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, 0x53, 0x41, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, 0x53, 0x41, 0x10, + 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x5f, 0x57, 0x49, 0x54, 0x48, + 0x5f, 0x52, 0x53, 0x41, 0x10, 0x05, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, 0x53, 0x41, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x44, + 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x07, 0x12, 0x13, + 0x0a, 0x0f, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, + 0x36, 0x10, 0x08, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, + 0x48, 0x5f, 0x53, 0x48, 0x41, 0x31, 0x10, 0x09, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x43, 0x44, 0x53, + 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x0a, 0x12, + 0x15, 0x0a, 0x11, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x53, 0x48, + 0x41, 0x33, 0x38, 0x34, 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, + 0x57, 0x49, 0x54, 0x48, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x0c, 0x12, 0x17, 0x0a, + 0x13, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x50, 0x53, 0x53, 0x10, 0x0d, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 
0x0e, 0x12, + 0x17, 0x0a, 0x13, 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x52, + 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x55, 0x52, 0x45, + 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x10, 0x32, 0xaf, 0x02, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x47, 0x65, + 0x74, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1a, 0x2e, 0x6d, 0x70, 0x69, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x76, + 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1d, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x76, 0x69, 0x65, 0x77, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x46, 0x69, + 0x6c, 0x65, 0x12, 0x16, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x46, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x19, 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, + 0x2e, 0x6d, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x08, 0x5a, 0x06, + 0x6d, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -936,57 +1352,65 @@ func file_mpi_v1_files_proto_rawDescGZIP() []byte { return file_mpi_v1_files_proto_rawDescData } -var file_mpi_v1_files_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_mpi_v1_files_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_mpi_v1_files_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_mpi_v1_files_proto_msgTypes = make([]protoimpl.MessageInfo, 16) var file_mpi_v1_files_proto_goTypes = []any{ - (File_FileAction)(0), // 0: mpi.v1.File.FileAction - (*GetOverviewRequest)(nil), // 1: mpi.v1.GetOverviewRequest - (*GetOverviewResponse)(nil), // 2: mpi.v1.GetOverviewResponse - (*UpdateOverviewRequest)(nil), // 3: mpi.v1.UpdateOverviewRequest - (*UpdateOverviewResponse)(nil), // 4: mpi.v1.UpdateOverviewResponse - (*ConfigVersion)(nil), // 5: mpi.v1.ConfigVersion - (*FileOverview)(nil), // 6: mpi.v1.FileOverview - (*File)(nil), // 7: mpi.v1.File - (*GetFileRequest)(nil), // 8: mpi.v1.GetFileRequest - (*GetFileResponse)(nil), // 9: mpi.v1.GetFileResponse - (*FileContents)(nil), // 10: mpi.v1.FileContents - (*FileMeta)(nil), // 11: mpi.v1.FileMeta - (*UpdateFileRequest)(nil), // 12: mpi.v1.UpdateFileRequest - (*UpdateFileResponse)(nil), // 13: mpi.v1.UpdateFileResponse - (*MessageMeta)(nil), // 14: mpi.v1.MessageMeta - 
(*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (SignatureAlgorithm)(0), // 0: mpi.v1.SignatureAlgorithm + (File_FileAction)(0), // 1: mpi.v1.File.FileAction + (*GetOverviewRequest)(nil), // 2: mpi.v1.GetOverviewRequest + (*GetOverviewResponse)(nil), // 3: mpi.v1.GetOverviewResponse + (*UpdateOverviewRequest)(nil), // 4: mpi.v1.UpdateOverviewRequest + (*UpdateOverviewResponse)(nil), // 5: mpi.v1.UpdateOverviewResponse + (*ConfigVersion)(nil), // 6: mpi.v1.ConfigVersion + (*FileOverview)(nil), // 7: mpi.v1.FileOverview + (*File)(nil), // 8: mpi.v1.File + (*GetFileRequest)(nil), // 9: mpi.v1.GetFileRequest + (*GetFileResponse)(nil), // 10: mpi.v1.GetFileResponse + (*FileContents)(nil), // 11: mpi.v1.FileContents + (*FileMeta)(nil), // 12: mpi.v1.FileMeta + (*UpdateFileRequest)(nil), // 13: mpi.v1.UpdateFileRequest + (*UpdateFileResponse)(nil), // 14: mpi.v1.UpdateFileResponse + (*CertificateMeta)(nil), // 15: mpi.v1.CertificateMeta + (*CertificateDates)(nil), // 16: mpi.v1.CertificateDates + (*SubjectAlternativeNames)(nil), // 17: mpi.v1.SubjectAlternativeNames + (*MessageMeta)(nil), // 18: mpi.v1.MessageMeta + (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp } var file_mpi_v1_files_proto_depIdxs = []int32{ - 14, // 0: mpi.v1.GetOverviewRequest.message_meta:type_name -> mpi.v1.MessageMeta - 5, // 1: mpi.v1.GetOverviewRequest.config_version:type_name -> mpi.v1.ConfigVersion - 6, // 2: mpi.v1.GetOverviewResponse.overview:type_name -> mpi.v1.FileOverview - 14, // 3: mpi.v1.UpdateOverviewRequest.message_meta:type_name -> mpi.v1.MessageMeta - 6, // 4: mpi.v1.UpdateOverviewRequest.overview:type_name -> mpi.v1.FileOverview - 7, // 5: mpi.v1.FileOverview.files:type_name -> mpi.v1.File - 5, // 6: mpi.v1.FileOverview.config_version:type_name -> mpi.v1.ConfigVersion - 11, // 7: mpi.v1.File.file_meta:type_name -> mpi.v1.FileMeta - 0, // 8: mpi.v1.File.action:type_name -> mpi.v1.File.FileAction - 14, // 9: mpi.v1.GetFileRequest.message_meta:type_name -> mpi.v1.MessageMeta - 11, // 10: mpi.v1.GetFileRequest.file_meta:type_name -> mpi.v1.FileMeta - 10, // 11: mpi.v1.GetFileResponse.contents:type_name -> mpi.v1.FileContents - 15, // 12: mpi.v1.FileMeta.modified_time:type_name -> google.protobuf.Timestamp - 7, // 13: mpi.v1.UpdateFileRequest.file:type_name -> mpi.v1.File - 10, // 14: mpi.v1.UpdateFileRequest.contents:type_name -> mpi.v1.FileContents - 14, // 15: mpi.v1.UpdateFileRequest.message_meta:type_name -> mpi.v1.MessageMeta - 11, // 16: mpi.v1.UpdateFileResponse.file_meta:type_name -> mpi.v1.FileMeta - 1, // 17: mpi.v1.FileService.GetOverview:input_type -> mpi.v1.GetOverviewRequest - 3, // 18: mpi.v1.FileService.UpdateOverview:input_type -> mpi.v1.UpdateOverviewRequest - 8, // 19: mpi.v1.FileService.GetFile:input_type -> mpi.v1.GetFileRequest - 12, // 20: mpi.v1.FileService.UpdateFile:input_type -> mpi.v1.UpdateFileRequest - 2, // 21: mpi.v1.FileService.GetOverview:output_type -> mpi.v1.GetOverviewResponse - 4, // 22: mpi.v1.FileService.UpdateOverview:output_type -> mpi.v1.UpdateOverviewResponse - 9, // 23: mpi.v1.FileService.GetFile:output_type -> mpi.v1.GetFileResponse - 13, // 24: mpi.v1.FileService.UpdateFile:output_type -> mpi.v1.UpdateFileResponse - 21, // [21:25] is the sub-list for method output_type - 17, // [17:21] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 18, // 0: 
mpi.v1.GetOverviewRequest.message_meta:type_name -> mpi.v1.MessageMeta + 6, // 1: mpi.v1.GetOverviewRequest.config_version:type_name -> mpi.v1.ConfigVersion + 7, // 2: mpi.v1.GetOverviewResponse.overview:type_name -> mpi.v1.FileOverview + 18, // 3: mpi.v1.UpdateOverviewRequest.message_meta:type_name -> mpi.v1.MessageMeta + 7, // 4: mpi.v1.UpdateOverviewRequest.overview:type_name -> mpi.v1.FileOverview + 8, // 5: mpi.v1.FileOverview.files:type_name -> mpi.v1.File + 6, // 6: mpi.v1.FileOverview.config_version:type_name -> mpi.v1.ConfigVersion + 12, // 7: mpi.v1.File.file_meta:type_name -> mpi.v1.FileMeta + 1, // 8: mpi.v1.File.action:type_name -> mpi.v1.File.FileAction + 18, // 9: mpi.v1.GetFileRequest.message_meta:type_name -> mpi.v1.MessageMeta + 12, // 10: mpi.v1.GetFileRequest.file_meta:type_name -> mpi.v1.FileMeta + 11, // 11: mpi.v1.GetFileResponse.contents:type_name -> mpi.v1.FileContents + 19, // 12: mpi.v1.FileMeta.modified_time:type_name -> google.protobuf.Timestamp + 15, // 13: mpi.v1.FileMeta.certificate_meta:type_name -> mpi.v1.CertificateMeta + 8, // 14: mpi.v1.UpdateFileRequest.file:type_name -> mpi.v1.File + 11, // 15: mpi.v1.UpdateFileRequest.contents:type_name -> mpi.v1.FileContents + 18, // 16: mpi.v1.UpdateFileRequest.message_meta:type_name -> mpi.v1.MessageMeta + 12, // 17: mpi.v1.UpdateFileResponse.file_meta:type_name -> mpi.v1.FileMeta + 17, // 18: mpi.v1.CertificateMeta.sans:type_name -> mpi.v1.SubjectAlternativeNames + 16, // 19: mpi.v1.CertificateMeta.dates:type_name -> mpi.v1.CertificateDates + 0, // 20: mpi.v1.CertificateMeta.signature_algorithm:type_name -> mpi.v1.SignatureAlgorithm + 2, // 21: mpi.v1.FileService.GetOverview:input_type -> mpi.v1.GetOverviewRequest + 4, // 22: mpi.v1.FileService.UpdateOverview:input_type -> mpi.v1.UpdateOverviewRequest + 9, // 23: mpi.v1.FileService.GetFile:input_type -> mpi.v1.GetFileRequest + 13, // 24: mpi.v1.FileService.UpdateFile:input_type -> mpi.v1.UpdateFileRequest + 3, // 25: mpi.v1.FileService.GetOverview:output_type -> mpi.v1.GetOverviewResponse + 5, // 26: mpi.v1.FileService.UpdateOverview:output_type -> mpi.v1.UpdateOverviewResponse + 10, // 27: mpi.v1.FileService.GetFile:output_type -> mpi.v1.GetFileResponse + 14, // 28: mpi.v1.FileService.UpdateFile:output_type -> mpi.v1.UpdateFileResponse + 25, // [25:29] is the sub-list for method output_type + 21, // [21:25] is the sub-list for method input_type + 21, // [21:21] is the sub-list for extension type_name + 21, // [21:21] is the sub-list for extension extendee + 0, // [0:21] is the sub-list for field type_name } func init() { file_mpi_v1_files_proto_init() } @@ -996,13 +1420,16 @@ func file_mpi_v1_files_proto_init() { } file_mpi_v1_common_proto_init() file_mpi_v1_files_proto_msgTypes[6].OneofWrappers = []any{} + file_mpi_v1_files_proto_msgTypes[10].OneofWrappers = []any{ + (*FileMeta_CertificateMeta)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_mpi_v1_files_proto_rawDesc, - NumEnums: 1, - NumMessages: 13, + NumEnums: 2, + NumMessages: 16, NumExtensions: 0, NumServices: 1, }, diff --git a/api/grpc/mpi/v1/files.pb.validate.go b/api/grpc/mpi/v1/files.pb.validate.go index 1408956917..38ee78b14c 100644 --- a/api/grpc/mpi/v1/files.pb.validate.go +++ b/api/grpc/mpi/v1/files.pb.validate.go @@ -1432,6 +1432,52 @@ func (m *FileMeta) validate(all bool) error { // no validation rules for Size + switch v := m.FileType.(type) { + case *FileMeta_CertificateMeta: + if v == nil { 
+ err := FileMetaValidationError{ + field: "FileType", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetCertificateMeta()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FileMetaValidationError{ + field: "CertificateMeta", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FileMetaValidationError{ + field: "CertificateMeta", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCertificateMeta()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FileMetaValidationError{ + field: "CertificateMeta", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + if len(errors) > 0 { return FileMetaMultiError(errors) } @@ -1828,3 +1874,377 @@ var _ interface { Cause() error ErrorName() string } = UpdateFileResponseValidationError{} + +// Validate checks the field values on CertificateMeta with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CertificateMeta) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateMeta with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CertificateMetaMultiError, or nil if none found. +func (m *CertificateMeta) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateMeta) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for SerialNumber + + // no validation rules for Issuer + + // no validation rules for Subject + + if all { + switch v := interface{}(m.GetSans()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateMetaValidationError{ + field: "Sans", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateMetaValidationError{ + field: "Sans", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSans()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateMetaValidationError{ + field: "Sans", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetDates()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CertificateMetaValidationError{ + field: "Dates", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CertificateMetaValidationError{ + field: "Dates", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDates()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CertificateMetaValidationError{ + field: 
"Dates", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for SignatureAlgorithm + + // no validation rules for PublicKeyAlgorithm + + if len(errors) > 0 { + return CertificateMetaMultiError(errors) + } + + return nil +} + +// CertificateMetaMultiError is an error wrapping multiple validation errors +// returned by CertificateMeta.ValidateAll() if the designated constraints +// aren't met. +type CertificateMetaMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CertificateMetaMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateMetaMultiError) AllErrors() []error { return m } + +// CertificateMetaValidationError is the validation error returned by +// CertificateMeta.Validate if the designated constraints aren't met. +type CertificateMetaValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateMetaValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateMetaValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateMetaValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateMetaValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateMetaValidationError) ErrorName() string { return "CertificateMetaValidationError" } + +// Error satisfies the builtin error interface +func (e CertificateMetaValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateMeta.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateMetaValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateMetaValidationError{} + +// Validate checks the field values on CertificateDates with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CertificateDates) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CertificateDates with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CertificateDatesMultiError, or nil if none found. +func (m *CertificateDates) ValidateAll() error { + return m.validate(true) +} + +func (m *CertificateDates) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NotBefore + + // no validation rules for NotAfter + + if len(errors) > 0 { + return CertificateDatesMultiError(errors) + } + + return nil +} + +// CertificateDatesMultiError is an error wrapping multiple validation errors +// returned by CertificateDates.ValidateAll() if the designated constraints +// aren't met. +type CertificateDatesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m CertificateDatesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CertificateDatesMultiError) AllErrors() []error { return m } + +// CertificateDatesValidationError is the validation error returned by +// CertificateDates.Validate if the designated constraints aren't met. +type CertificateDatesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CertificateDatesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CertificateDatesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CertificateDatesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CertificateDatesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CertificateDatesValidationError) ErrorName() string { return "CertificateDatesValidationError" } + +// Error satisfies the builtin error interface +func (e CertificateDatesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCertificateDates.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CertificateDatesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CertificateDatesValidationError{} + +// Validate checks the field values on SubjectAlternativeNames with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *SubjectAlternativeNames) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on SubjectAlternativeNames with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// SubjectAlternativeNamesMultiError, or nil if none found. +func (m *SubjectAlternativeNames) ValidateAll() error { + return m.validate(true) +} + +func (m *SubjectAlternativeNames) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return SubjectAlternativeNamesMultiError(errors) + } + + return nil +} + +// SubjectAlternativeNamesMultiError is an error wrapping multiple validation +// errors returned by SubjectAlternativeNames.ValidateAll() if the designated +// constraints aren't met. +type SubjectAlternativeNamesMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m SubjectAlternativeNamesMultiError) Error() string { + var msgs []string + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m SubjectAlternativeNamesMultiError) AllErrors() []error { return m } + +// SubjectAlternativeNamesValidationError is the validation error returned by +// SubjectAlternativeNames.Validate if the designated constraints aren't met. 
+type SubjectAlternativeNamesValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SubjectAlternativeNamesValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SubjectAlternativeNamesValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SubjectAlternativeNamesValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SubjectAlternativeNamesValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SubjectAlternativeNamesValidationError) ErrorName() string { + return "SubjectAlternativeNamesValidationError" +} + +// Error satisfies the builtin error interface +func (e SubjectAlternativeNamesValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSubjectAlternativeNames.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SubjectAlternativeNamesValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SubjectAlternativeNamesValidationError{} diff --git a/api/grpc/mpi/v1/files.proto b/api/grpc/mpi/v1/files.proto index 909d5dcd3a..75c889b5c0 100644 --- a/api/grpc/mpi/v1/files.proto +++ b/api/grpc/mpi/v1/files.proto @@ -123,6 +123,10 @@ message FileMeta { string permissions = 4 [(buf.validate.field).string.pattern = "0[0-7]{3}"]; // The size of the file in bytes int64 size = 5; + // additional file information + oneof file_type { + CertificateMeta certificate_meta = 6; + } } // Represents the update file request @@ -140,3 +144,99 @@ message UpdateFileResponse { // Meta-information associated with the updated file FileMeta file_meta = 1; } + +// Define the certificate message based on https://pkg.go.dev/crypto/x509#Certificate +// and https://github.com/googleapis/googleapis/blob/005df4681b89bd204a90b76168a6dc9d9e7bf4fe/google/cloud/iot/v1/resources.proto#L341 +message CertificateMeta { + // Serial number of the certificate, usually a unique identifier, RFC5280 states the upper limit for serial number is 20 octets + bytes serial_number = 1 [(buf.validate.field).bytes.min_len = 1, (buf.validate.field).bytes.max_len = 21]; + + // Issuer details (who issued the certificate) + string issuer = 2 [(buf.validate.field).string.min_len = 1]; + + // Subject details (to whom the certificate is issued) + string subject = 3 [(buf.validate.field).string.min_len = 1]; + + // Subject Alternative Names (SAN) including DNS names and IP addresses + SubjectAlternativeNames sans = 4; + + // Timestamps representing the start of certificate validity (Not Before, Not After) + CertificateDates dates = 5; + + // The algorithm used to sign the certificate (e.g., SHA256-RSA) + SignatureAlgorithm signature_algorithm = 6; + + // The type of public key in the certificate. 
+ string public_key_algorithm = 7;
+}
+
+// Represents the dates for which a certificate is valid
+message CertificateDates {
+ // The start date for when the certificate is valid
+ int64 not_before = 1;
+ // The end date for when the certificate is valid
+ int64 not_after = 2;
+}
+
+// Represents the Subject Alternative Names for a certificate
+message SubjectAlternativeNames {
+ // List of DNS names in the Subject Alternative Name (SAN) extension
+ repeated string dns_names = 1;
+
+ // List of ip addresses in the SAN extension
+ repeated string ip_addresses = 2;
+}
+
+// Enum to represent the possible signature algorithms used for certificates
+enum SignatureAlgorithm {
+ // Default, unknown or unsupported algorithm
+ SIGNATURE_ALGORITHM_UNKNOWN = 0;
+
+ // MD2 with RSA (Unsupported)
+ MD2_WITH_RSA = 1;
+
+ // MD5 with RSA (Only supported for signing, not verification)
+ MD5_WITH_RSA = 2;
+
+ // SHA-1 with RSA (Only supported for signing and for verification of CRLs, CSRs, and OCSP responses)
+ SHA1_WITH_RSA = 3;
+
+ // SHA-256 with RSA
+ SHA256_WITH_RSA = 4;
+
+ // SHA-384 with RSA
+ SHA384_WITH_RSA = 5;
+
+ // SHA-512 with RSA
+ SHA512_WITH_RSA = 6;
+
+ // DSA with SHA-1 (Unsupported)
+ DSA_WITH_SHA1 = 7;
+
+ // DSA with SHA-256 (Unsupported)
+ DSA_WITH_SHA256 = 8;
+
+ // ECDSA with SHA-1 (Only supported for signing and for verification of CRLs, CSRs, and OCSP responses)
+ ECDSA_WITH_SHA1 = 9;
+
+ // ECDSA with SHA-256
+ ECDSA_WITH_SHA256 = 10;
+
+ // ECDSA with SHA-384
+ ECDSA_WITH_SHA384 = 11;
+
+ // ECDSA with SHA-512
+ ECDSA_WITH_SHA512 = 12;
+
+ // SHA-256 with RSA-PSS
+ SHA256_WITH_RSA_PSS = 13;
+
+ // SHA-384 with RSA-PSS
+ SHA384_WITH_RSA_PSS = 14;
+
+ // SHA-512 with RSA-PSS
+ SHA512_WITH_RSA_PSS = 15;
+
+ // Pure Ed25519
+ PURE_ED25519 = 16;
+}
\ No newline at end of file
diff --git a/docs/proto/protos.md b/docs/proto/protos.md
index 46a4aa7467..398288d9a1 100644
--- a/docs/proto/protos.md
+++ b/docs/proto/protos.md
@@ -10,6 +10,8 @@
 - [CommandResponse.CommandStatus](#mpi-v1-CommandResponse-CommandStatus)
 - [mpi/v1/files.proto](#mpi_v1_files-proto)
+ - [CertificateDates](#mpi-v1-CertificateDates)
+ - [CertificateMeta](#mpi-v1-CertificateMeta)
 - [ConfigVersion](#mpi-v1-ConfigVersion)
 - [File](#mpi-v1-File)
 - [FileContents](#mpi-v1-FileContents)
@@ -19,12 +21,14 @@
 - [GetFileResponse](#mpi-v1-GetFileResponse)
 - [GetOverviewRequest](#mpi-v1-GetOverviewRequest)
 - [GetOverviewResponse](#mpi-v1-GetOverviewResponse)
+ - [SubjectAlternativeNames](#mpi-v1-SubjectAlternativeNames)
 - [UpdateFileRequest](#mpi-v1-UpdateFileRequest)
 - [UpdateFileResponse](#mpi-v1-UpdateFileResponse)
 - [UpdateOverviewRequest](#mpi-v1-UpdateOverviewRequest)
 - [UpdateOverviewResponse](#mpi-v1-UpdateOverviewResponse)
 - [File.FileAction](#mpi-v1-File-FileAction)
+ - [SignatureAlgorithm](#mpi-v1-SignatureAlgorithm)
 - [FileService](#mpi-v1-FileService)
@@ -145,6 +149,44 @@ Command status enum
+
+
+### CertificateDates
+Represents the dates for which a certificate is valid
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| not_before | [int64](#int64) | | The start date for when the certificate is valid |
+| not_after | [int64](#int64) | | The end date for when the certificate is valid |
+
+
+
+
+
+
+
+### CertificateMeta
+Define the certificate message based on https://pkg.go.dev/crypto/x509#Certificate
+and https://github.com/googleapis/googleapis/blob/005df4681b89bd204a90b76168a6dc9d9e7bf4fe/google/cloud/iot/v1/resources.proto#L341
+
+
+| Field | Type |
Label | Description | +| ----- | ---- | ----- | ----------- | +| serial_number | [bytes](#bytes) | | Serial number of the certificate, usually a unique identifier, RFC5280 states the upper limit for serial number is 20 octets | +| issuer | [string](#string) | | Issuer details (who issued the certificate) | +| subject | [string](#string) | | Subject details (to whom the certificate is issued) | +| sans | [SubjectAlternativeNames](#mpi-v1-SubjectAlternativeNames) | | Subject Alternative Names (SAN) including DNS names and IP addresses | +| dates | [CertificateDates](#mpi-v1-CertificateDates) | | Timestamps representing the start of certificate validity (Not Before, Not After) | +| signature_algorithm | [SignatureAlgorithm](#mpi-v1-SignatureAlgorithm) | | The algorithm used to sign the certificate (e.g., SHA256-RSA) | +| public_key_algorithm | [string](#string) | | The type of public key in the certificate. | + + + + + + ### ConfigVersion @@ -205,6 +247,7 @@ Meta information about the file, the name (including path) and hash | modified_time | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | Last modified time of the file (created time if never modified) | | permissions | [string](#string) | | The permission set associated with a particular file | | size | [int64](#int64) | | The size of the file in bytes | +| certificate_meta | [CertificateMeta](#mpi-v1-CertificateMeta) | | | @@ -289,6 +332,22 @@ Represents the response payload to a GetOverviewRequest, requesting a list of lo + + +### SubjectAlternativeNames +Represents the Subject Alternative Names for a certificate + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| dns_names | [string](#string) | repeated | List of DNS names in the Subject Alternative Name (SAN) extension | +| ip_addresses | [string](#string) | repeated | List of ip addresses in the SAN extension | + + + + + + ### UpdateFileRequest @@ -363,6 +422,33 @@ Action enumeration | FILE_ACTION_DELETE | 4 | File deleted | + + + +### SignatureAlgorithm +Enum to represent the possible signature algorithms used for certificates + +| Name | Number | Description | +| ---- | ------ | ----------- | +| SIGNATURE_ALGORITHM_UNKNOWN | 0 | Default, unknown or unsupported algorithm | +| MD2_WITH_RSA | 1 | MD2 with RSA (Unsupported) | +| MD5_WITH_RSA | 2 | MD5 with RSA (Only supported for signing, not verification) | +| SHA1_WITH_RSA | 3 | SHA-1 with RSA (Only supported for signing and for verification of CRLs, CSRs, and OCSP responses) | +| SHA256_WITH_RSA | 4 | SHA-256 with RSA | +| SHA384_WITH_RSA | 5 | SHA-384 with RSA | +| SHA512_WITH_RSA | 6 | SHA-512 with RSA | +| DSA_WITH_SHA1 | 7 | DSA with SHA-1 (Unsupported) | +| DSA_WITH_SHA256 | 8 | DSA with SHA-256 (Unsupported) | +| ECDSA_WITH_SHA1 | 9 | ECDSA with SHA-1 (Only supported for signing and for verification of CRLs, CSRs, and OCSP responses) | +| ECDSA_WITH_SHA256 | 10 | ECDSA with SHA-256 | +| ECDSA_WITH_SHA384 | 11 | ECDSA with SHA-384 | +| ECDSA_WITH_SHA512 | 12 | ECDSA with SHA-512 | +| SHA256_WITH_RSA_PSS | 13 | SHA-256 with RSA-PSS | +| SHA384_WITH_RSA_PSS | 14 | SHA-384 with RSA-PSS | +| SHA512_WITH_RSA_PSS | 15 | SHA-512 with RSA-PSS | +| PURE_ED25519 | 16 | Pure Ed25519 | + + diff --git a/test/helpers/os_utils.go b/test/helpers/os_utils.go index 1a0a7ae1a7..5ff3dc9462 100644 --- a/test/helpers/os_utils.go +++ b/test/helpers/os_utils.go @@ -6,14 +6,9 @@ package helpers import ( - "encoding/json" "os" - "path" - "path/filepath" "testing" - "github.com/nginx/agent/v3/api/grpc/mpi/v1" - 
"github.com/stretchr/testify/require" ) @@ -45,19 +40,3 @@ func RemoveFileWithErrorCheck(t testing.TB, fileName string) { require.NoError(t, err) } - -func CreateCacheFiles(t testing.TB, cachePath string, cacheData map[string]*v1.FileMeta) { - t.Helper() - cache, err := json.MarshalIndent(cacheData, "", " ") - require.NoError(t, err) - - err = os.MkdirAll(path.Dir(cachePath), filePermission) - require.NoError(t, err) - - for _, file := range cacheData { - CreateFileWithErrorCheck(t, filepath.Dir(file.GetName()), filepath.Base(file.GetName())) - } - - err = os.WriteFile(cachePath, cache, filePermission) - require.NoError(t, err) -} From 9a2d9780c2c59a756ce0b7fc32c0e128b3df4602 Mon Sep 17 00:00:00 2001 From: Sean Breen <101327931+sean-breen@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:28:22 +0100 Subject: [PATCH 11/12] OpenTelemetry: Add nginx instance id and instance type (#908) * add attribute processor to config, handle resource update to get id * write configuration before reloading * update template file handling of attributes processor * add attribute processor to mock config * add more nil checks for attribute processor * fix linting errors * commit generated files * fix debug exporter in template * Use slice of Actions in Attributes processor * fix lint: tag alignment * remove docker login, use prod container repo for OSS nginx * add unit test for processing ResourceUpdateTopic * Update mdatagen install instructions in README.md (#887) * Update go version to 1.23 (#865) * Agent Config Changes: rename & update config_dirs & update exclude_logs (#882) * max api version (#880) * add log to inetgration test (#893) * add lock for restarting OTel collector * fix struct alignment * new proto defs * resource-id -> resource.id * fix lint: remove trailing newline * fix unit test * add unit test for negative case * address pr feedback * add updateResourceAttributes function + unit tests * add nolint for cognitive-complexity, pending review * fix nolint * more feedback * fix test condition * add resource for each scraper * adding instance id to otel metadata config * PR feedback * add instance id and type to nginxplus receiver otel resource * add instance id and type to otel resource - nginx oss log scraper - nginx oss stub status scraper - nginx plus api scraper * fix lint errors * run gofumpt * update go.mod * remove comment * Fix unit test expected files * increase timeout by 1 sec * format * nginx.instance.* to instance.* * PR feedback * increase timeout values for collector plugin test * Update internal/collector/nginxplusreceiver/scraper.go Co-authored-by: Donal Hurley * Update internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go Co-authored-by: Donal Hurley * Update internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go Co-authored-by: Donal Hurley --------- Co-authored-by: Donal Hurley Co-authored-by: oliveromahony Co-authored-by: aphralG <108004222+aphralG@users.noreply.github.com> --- go.mod | 2 +- .../nginxossreceiver/documentation.md | 7 ++ .../internal/config/config.go | 4 +- .../internal/metadata/generated_config.go | 50 +++++++++++++- .../metadata/generated_config_test.go | 56 +++++++++++++++- .../internal/metadata/generated_metrics.go | 66 ++++++++++++++----- .../metadata/generated_metrics_test.go | 14 +++- .../internal/metadata/generated_resource.go | 43 ++++++++++++ .../metadata/generated_resource_test.go | 46 +++++++++++++ .../internal/metadata/testdata/config.yaml | 30 +++++++++ 
.../scraper/accesslog/nginx_log_scraper.go | 9 ++- .../accesslog/nginx_log_scraper_test.go | 3 +- .../scraper/accesslog/testdata/expected.yaml | 6 +- .../scraper/stubstatus/stub_status_scraper.go | 12 +++- .../stubstatus/stub_status_scraper_test.go | 3 +- .../scraper/stubstatus/testdata/expected.yaml | 6 +- .../expected_with_connections_as_gauge.yaml | 6 +- .../collector/nginxossreceiver/metadata.yaml | 12 +++- .../testdata/integration/expected.yaml | 3 +- .../collector/nginxplusreceiver/config.go | 2 +- .../nginxplusreceiver/documentation.md | 7 ++ .../internal/metadata/generated_config.go | 50 +++++++++++++- .../metadata/generated_config_test.go | 56 +++++++++++++++- .../internal/metadata/generated_metrics.go | 32 +++++++++ .../metadata/generated_metrics_test.go | 14 +++- .../internal/metadata/generated_resource.go | 43 ++++++++++++ .../metadata/generated_resource_test.go | 46 +++++++++++++ .../internal/metadata/testdata/config.yaml | 30 +++++++++ .../collector/nginxplusreceiver/metadata.yaml | 12 +++- .../collector/nginxplusreceiver/scraper.go | 14 +++- .../nginxplusreceiver/scraper_test.go | 2 +- .../nginxplusreceiver/testdata/expected.yaml | 6 +- internal/collector/otel_collector_plugin.go | 26 ++++---- .../collector/otel_collector_plugin_test.go | 53 ++++++++------- internal/collector/otelcol.tmpl | 22 ++++++- internal/config/types.go | 11 ++++ .../test-opentelemetry-collector-agent.yaml | 2 + test/mock/collector/otel-collector.yaml | 2 + 38 files changed, 726 insertions(+), 82 deletions(-) create mode 100644 internal/collector/nginxossreceiver/internal/metadata/generated_resource.go create mode 100644 internal/collector/nginxossreceiver/internal/metadata/generated_resource_test.go create mode 100644 internal/collector/nginxplusreceiver/internal/metadata/generated_resource.go create mode 100644 internal/collector/nginxplusreceiver/internal/metadata/generated_resource_test.go diff --git a/go.mod b/go.mod index 72ecff38fa..53c97d51bb 100644 --- a/go.mod +++ b/go.mod @@ -69,6 +69,7 @@ require ( go.opentelemetry.io/collector/exporter/otlpexporter v0.108.1 go.opentelemetry.io/collector/exporter/otlphttpexporter v0.108.1 go.opentelemetry.io/collector/extension v0.108.1 + go.opentelemetry.io/collector/filter v0.108.1 go.opentelemetry.io/collector/otelcol v0.108.1 go.opentelemetry.io/collector/pdata v1.14.1 go.opentelemetry.io/collector/processor v0.108.1 @@ -270,7 +271,6 @@ require ( go.opentelemetry.io/collector/extension/auth v0.108.1 // indirect go.opentelemetry.io/collector/extension/zpagesextension v0.108.1 // indirect go.opentelemetry.io/collector/featuregate v1.14.1 // indirect - go.opentelemetry.io/collector/filter v0.108.1 // indirect go.opentelemetry.io/collector/internal/globalgates v0.108.1 // indirect go.opentelemetry.io/collector/pdata/pprofile v0.108.1 // indirect go.opentelemetry.io/collector/pdata/testdata v0.108.1 // indirect diff --git a/internal/collector/nginxossreceiver/documentation.md b/internal/collector/nginxossreceiver/documentation.md index 9ea1efe73a..2a2aa19fe4 100644 --- a/internal/collector/nginxossreceiver/documentation.md +++ b/internal/collector/nginxossreceiver/documentation.md @@ -61,3 +61,10 @@ The number of responses, grouped by status code range. | Name | Description | Values | | ---- | ----------- | ------ | | nginx.status_range | A status code range or bucket for a HTTP response's status code. 
| Str: ``1xx``, ``2xx``, ``3xx``, ``4xx``, ``5xx`` | + +## Resource Attributes + +| Name | Description | Values | Enabled | +| ---- | ----------- | ------ | ------- | +| instance.id | The nginx instance id. | Any Str | true | +| instance.type | The nginx instance type (nginx, nginxplus). | Any Str | true | diff --git a/internal/collector/nginxossreceiver/internal/config/config.go b/internal/collector/nginxossreceiver/internal/config/config.go index adb267de6f..5d8db53dae 100644 --- a/internal/collector/nginxossreceiver/internal/config/config.go +++ b/internal/collector/nginxossreceiver/internal/config/config.go @@ -22,9 +22,9 @@ const ( type Config struct { confighttp.ClientConfig `mapstructure:",squash"` - AccessLogs []AccessLog `mapstructure:"access_logs"` - scraperhelper.ControllerConfig `mapstructure:",squash"` + AccessLogs []AccessLog `mapstructure:"access_logs"` MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"` + scraperhelper.ControllerConfig `mapstructure:",squash"` } type AccessLog struct { diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_config.go b/internal/collector/nginxossreceiver/internal/metadata/generated_config.go index 26347611f7..69ac3809b3 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_config.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_config.go @@ -4,6 +4,7 @@ package metadata import ( "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/filter" ) // MetricConfig provides common config for a particular metric. @@ -50,13 +51,58 @@ func DefaultMetricsConfig() MetricsConfig { } } +// ResourceAttributeConfig provides common config for a particular resource attribute. +type ResourceAttributeConfig struct { + Enabled bool `mapstructure:"enabled"` + // Experimental: MetricsInclude defines a list of filters for attribute values. + // If the list is not empty, only metrics with matching resource attribute values will be emitted. + MetricsInclude []filter.Config `mapstructure:"metrics_include"` + // Experimental: MetricsExclude defines a list of filters for attribute values. + // If the list is not empty, metrics with matching resource attribute values will not be emitted. + // MetricsInclude has higher priority than MetricsExclude. + MetricsExclude []filter.Config `mapstructure:"metrics_exclude"` + + enabledSetByUser bool +} + +func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(rac) + if err != nil { + return err + } + rac.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// ResourceAttributesConfig provides config for nginx resource attributes. +type ResourceAttributesConfig struct { + InstanceID ResourceAttributeConfig `mapstructure:"instance.id"` + InstanceType ResourceAttributeConfig `mapstructure:"instance.type"` +} + +func DefaultResourceAttributesConfig() ResourceAttributesConfig { + return ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{ + Enabled: true, + }, + InstanceType: ResourceAttributeConfig{ + Enabled: true, + }, + } +} + // MetricsBuilderConfig is a configuration for nginx metrics builder. 
type MetricsBuilderConfig struct { - Metrics MetricsConfig `mapstructure:"metrics"` + Metrics MetricsConfig `mapstructure:"metrics"` + ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` } func DefaultMetricsBuilderConfig() MetricsBuilderConfig { return MetricsBuilderConfig{ - Metrics: DefaultMetricsConfig(), + Metrics: DefaultMetricsConfig(), + ResourceAttributes: DefaultResourceAttributesConfig(), } } diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_config_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_config_test.go index 85d198d88e..2a603a104f 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_config_test.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_config_test.go @@ -30,6 +30,10 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxHTTPRequests: MetricConfig{Enabled: true}, NginxHTTPResponseStatus: MetricConfig{Enabled: true}, }, + ResourceAttributes: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: true}, + InstanceType: ResourceAttributeConfig{Enabled: true}, + }, }, }, { @@ -41,13 +45,17 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxHTTPRequests: MetricConfig{Enabled: false}, NginxHTTPResponseStatus: MetricConfig{Enabled: false}, }, + ResourceAttributes: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: false}, + InstanceType: ResourceAttributeConfig{Enabled: false}, + }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})); diff != "" { + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { t.Errorf("Config mismatch (-expected +actual):\n%s", diff) } }) @@ -63,3 +71,49 @@ func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { require.NoError(t, sub.Unmarshal(&cfg)) return cfg } + +func TestResourceAttributesConfig(t *testing.T) { + tests := []struct { + name string + want ResourceAttributesConfig + }{ + { + name: "default", + want: DefaultResourceAttributesConfig(), + }, + { + name: "all_set", + want: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: true}, + InstanceType: ResourceAttributeConfig{Enabled: true}, + }, + }, + { + name: "none_set", + want: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: false}, + InstanceType: ResourceAttributeConfig{Enabled: false}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + sub, err = sub.Sub("resource_attributes") + require.NoError(t, err) + cfg := DefaultResourceAttributesConfig() + require.NoError(t, sub.Unmarshal(&cfg)) + return cfg +} diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go index e74642bc94..dfe01f264b 100644 --- 
a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/filter" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" @@ -302,15 +303,17 @@ func newMetricNginxHTTPResponseStatus(cfg MetricConfig) metricNginxHTTPResponseS // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user config. type MetricsBuilder struct { - config MetricsBuilderConfig // config of the metrics builder. - startTime pcommon.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. - buildInfo component.BuildInfo // contains version information. - metricNginxHTTPConn metricNginxHTTPConn - metricNginxHTTPConnCount metricNginxHTTPConnCount - metricNginxHTTPRequests metricNginxHTTPRequests - metricNginxHTTPResponseStatus metricNginxHTTPResponseStatus + config MetricsBuilderConfig // config of the metrics builder. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. + buildInfo component.BuildInfo // contains version information. + resourceAttributeIncludeFilter map[string]filter.Filter + resourceAttributeExcludeFilter map[string]filter.Filter + metricNginxHTTPConn metricNginxHTTPConn + metricNginxHTTPConnCount metricNginxHTTPConnCount + metricNginxHTTPRequests metricNginxHTTPRequests + metricNginxHTTPResponseStatus metricNginxHTTPResponseStatus } // metricBuilderOption applies changes to default metrics builder. 
@@ -325,14 +328,28 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - config: mbc, - startTime: pcommon.NewTimestampFromTime(time.Now()), - metricsBuffer: pmetric.NewMetrics(), - buildInfo: settings.BuildInfo, - metricNginxHTTPConn: newMetricNginxHTTPConn(mbc.Metrics.NginxHTTPConn), - metricNginxHTTPConnCount: newMetricNginxHTTPConnCount(mbc.Metrics.NginxHTTPConnCount), - metricNginxHTTPRequests: newMetricNginxHTTPRequests(mbc.Metrics.NginxHTTPRequests), - metricNginxHTTPResponseStatus: newMetricNginxHTTPResponseStatus(mbc.Metrics.NginxHTTPResponseStatus), + config: mbc, + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: settings.BuildInfo, + metricNginxHTTPConn: newMetricNginxHTTPConn(mbc.Metrics.NginxHTTPConn), + metricNginxHTTPConnCount: newMetricNginxHTTPConnCount(mbc.Metrics.NginxHTTPConnCount), + metricNginxHTTPRequests: newMetricNginxHTTPRequests(mbc.Metrics.NginxHTTPRequests), + metricNginxHTTPResponseStatus: newMetricNginxHTTPResponseStatus(mbc.Metrics.NginxHTTPResponseStatus), + resourceAttributeIncludeFilter: make(map[string]filter.Filter), + resourceAttributeExcludeFilter: make(map[string]filter.Filter), + } + if mbc.ResourceAttributes.InstanceID.MetricsInclude != nil { + mb.resourceAttributeIncludeFilter["instance.id"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceID.MetricsInclude) + } + if mbc.ResourceAttributes.InstanceID.MetricsExclude != nil { + mb.resourceAttributeExcludeFilter["instance.id"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceID.MetricsExclude) + } + if mbc.ResourceAttributes.InstanceType.MetricsInclude != nil { + mb.resourceAttributeIncludeFilter["instance.type"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceType.MetricsInclude) + } + if mbc.ResourceAttributes.InstanceType.MetricsExclude != nil { + mb.resourceAttributeExcludeFilter["instance.type"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceType.MetricsExclude) } for _, op := range options { @@ -341,6 +358,11 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt return mb } +// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics. +func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder { + return NewResourceBuilder(mb.config.ResourceAttributes) +} + // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. 
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { @@ -398,6 +420,16 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { for _, op := range rmo { op(rm) } + for attr, filter := range mb.resourceAttributeIncludeFilter { + if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) { + return + } + } + for attr, filter := range mb.resourceAttributeExcludeFilter { + if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) { + return + } + } if ils.Metrics().Len() > 0 { mb.updateCapacity(rm) diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go index 261655b70a..ee4f069719 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_metrics_test.go @@ -42,6 +42,15 @@ func TestMetricsBuilder(t *testing.T) { resAttrsSet: testDataSetNone, expectEmpty: true, }, + { + name: "filter_set_include", + resAttrsSet: testDataSetAll, + }, + { + name: "filter_set_exclude", + resAttrsSet: testDataSetAll, + expectEmpty: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -75,7 +84,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxHTTPResponseStatusDataPoint(ts, 1, AttributeNginxStatusRange1xx) - res := pcommon.NewResource() + rb := mb.NewResourceBuilder() + rb.SetInstanceID("instance.id-val") + rb.SetInstanceType("instance.type-val") + res := rb.Emit() metrics := mb.Emit(WithResource(res)) if test.expectEmpty { diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_resource.go b/internal/collector/nginxossreceiver/internal/metadata/generated_resource.go new file mode 100644 index 0000000000..d5c655c954 --- /dev/null +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_resource.go @@ -0,0 +1,43 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. +// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. +type ResourceBuilder struct { + config ResourceAttributesConfig + res pcommon.Resource +} + +// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. +func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { + return &ResourceBuilder{ + config: rac, + res: pcommon.NewResource(), + } +} + +// SetInstanceID sets provided value as "instance.id" attribute. +func (rb *ResourceBuilder) SetInstanceID(val string) { + if rb.config.InstanceID.Enabled { + rb.res.Attributes().PutStr("instance.id", val) + } +} + +// SetInstanceType sets provided value as "instance.type" attribute. +func (rb *ResourceBuilder) SetInstanceType(val string) { + if rb.config.InstanceType.Enabled { + rb.res.Attributes().PutStr("instance.type", val) + } +} + +// Emit returns the built resource and resets the internal builder state. 
+func (rb *ResourceBuilder) Emit() pcommon.Resource { + r := rb.res + rb.res = pcommon.NewResource() + return r +} diff --git a/internal/collector/nginxossreceiver/internal/metadata/generated_resource_test.go b/internal/collector/nginxossreceiver/internal/metadata/generated_resource_test.go new file mode 100644 index 0000000000..f861ef1011 --- /dev/null +++ b/internal/collector/nginxossreceiver/internal/metadata/generated_resource_test.go @@ -0,0 +1,46 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResourceBuilder(t *testing.T) { + for _, tt := range []string{"default", "all_set", "none_set"} { + t.Run(tt, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt) + rb := NewResourceBuilder(cfg) + rb.SetInstanceID("instance.id-val") + rb.SetInstanceType("instance.type-val") + + res := rb.Emit() + assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource + + switch tt { + case "default": + assert.Equal(t, 2, res.Attributes().Len()) + case "all_set": + assert.Equal(t, 2, res.Attributes().Len()) + case "none_set": + assert.Equal(t, 0, res.Attributes().Len()) + return + default: + assert.Failf(t, "unexpected test case: %s", tt) + } + + val, ok := res.Attributes().Get("instance.id") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "instance.id-val", val.Str()) + } + val, ok = res.Attributes().Get("instance.type") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "instance.type-val", val.Str()) + } + }) + } +} diff --git a/internal/collector/nginxossreceiver/internal/metadata/testdata/config.yaml b/internal/collector/nginxossreceiver/internal/metadata/testdata/config.yaml index 6f3036383a..8829a931e8 100644 --- a/internal/collector/nginxossreceiver/internal/metadata/testdata/config.yaml +++ b/internal/collector/nginxossreceiver/internal/metadata/testdata/config.yaml @@ -9,6 +9,11 @@ all_set: enabled: true nginx.http.response.status: enabled: true + resource_attributes: + instance.id: + enabled: true + instance.type: + enabled: true none_set: metrics: nginx.http.conn: @@ -19,3 +24,28 @@ none_set: enabled: false nginx.http.response.status: enabled: false + resource_attributes: + instance.id: + enabled: false + instance.type: + enabled: false +filter_set_include: + resource_attributes: + instance.id: + enabled: true + metrics_include: + - regexp: ".*" + instance.type: + enabled: true + metrics_include: + - regexp: ".*" +filter_set_exclude: + resource_attributes: + instance.id: + enabled: true + metrics_exclude: + - strict: "instance.id-val" + instance.type: + enabled: true + metrics_exclude: + - strict: "instance.type-val" diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go index 1868fa0e68..342e2b6eaf 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper.go @@ -42,6 +42,7 @@ type ( cfg *config.Config logger *zap.Logger mb *metadata.MetricsBuilder + rb *metadata.ResourceBuilder pipe *pipeline.DirectedPipeline wg *sync.WaitGroup cancel context.CancelFunc @@ -70,7 +71,12 @@ func NewScraper( ) (*NginxLogScraper, error) { logger := settings.Logger logger.Info("Creating NGINX access log scraper") + mb := metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings) + rb := 
mb.NewResourceBuilder() + rb.SetInstanceID(settings.ID.Name()) + rb.SetInstanceType("nginx") + logger.Debug("NGINX OSS resource info", zap.Any("resource", rb)) operators := make([]operator.Config, 0) @@ -93,6 +99,7 @@ func NewScraper( cfg: cfg, logger: logger, mb: mb, + rb: rb, mut: sync.Mutex{}, outChan: outChan, pipe: stanzaPipeline, @@ -184,7 +191,7 @@ func (nls *NginxLogScraper) Scrape(_ context.Context) (pmetric.Metrics, error) { metadata.AttributeNginxStatusRange5xx, ) - return nls.mb.Emit(), nil + return nls.mb.Emit(metadata.WithResource(nls.rb.Emit())), nil } func (nls *NginxLogScraper) Shutdown(_ context.Context) error { diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go index 986faf55d7..44ff91c648 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/nginx_log_scraper_test.go @@ -77,7 +77,8 @@ func TestAccessLogScraper(t *testing.T) { pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreTimestamp(), - pmetrictest.IgnoreMetricsOrder())) + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceAttributeValue("instance.id"))) } func TestAccessLogScraperError(t *testing.T) { diff --git a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml index edc97f287a..6b639b8e54 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml +++ b/internal/collector/nginxossreceiver/internal/scraper/accesslog/testdata/expected.yaml @@ -1,5 +1,9 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: instance.type + value: + stringValue: nginx scopeMetrics: - metrics: - description: The number of responses, grouped by status code range. 
diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go index 30a3f82f87..08e93ddc2c 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go +++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper.go @@ -29,6 +29,7 @@ type NginxStubStatusScraper struct { settings component.TelemetrySettings cfg *config.Config mb *metadata.MetricsBuilder + rb *metadata.ResourceBuilder } var _ scraperhelper.Scraper = (*NginxStubStatusScraper)(nil) @@ -37,11 +38,20 @@ func NewScraper( settings receiver.Settings, cfg *config.Config, ) *NginxStubStatusScraper { + logger := settings.Logger + logger.Info("Creating NGINX stub status scraper") + mb := metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings) + rb := mb.NewResourceBuilder() + rb.SetInstanceID(settings.ID.Name()) + rb.SetInstanceType("nginx") + logger.Debug("NGINX OSS resource info", zap.Any("resource", rb)) + return &NginxStubStatusScraper{ settings: settings.TelemetrySettings, cfg: cfg, mb: mb, + rb: rb, } } @@ -111,5 +121,5 @@ func (s *NginxStubStatusScraper) Scrape(context.Context) (pmetric.Metrics, error metadata.AttributeNginxConnOutcomeWAITING, ) - return s.mb.Emit(), nil + return s.mb.Emit(metadata.WithResource(s.rb.Emit())), nil } diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper_test.go b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper_test.go index 8ef8dfd74f..6c34c86092 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper_test.go +++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/stub_status_scraper_test.go @@ -52,7 +52,8 @@ func TestStubStatusScraper(t *testing.T) { pmetrictest.IgnoreStartTimestamp(), pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreTimestamp(), - pmetrictest.IgnoreMetricsOrder())) + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreResourceAttributeValue("instance.id"))) } func TestStubStatusScraperError(t *testing.T) { diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml index fc4ff5bc25..5d1cd6d165 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml +++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected.yaml @@ -1,5 +1,9 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: instance.type + value: + stringValue: nginx scopeMetrics: - metrics: - description: The total number of connections. 
diff --git a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml index 458e7e7df4..3f17e3ac98 100644 --- a/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml +++ b/internal/collector/nginxossreceiver/internal/scraper/stubstatus/testdata/expected_with_connections_as_gauge.yaml @@ -1,5 +1,9 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: instance.type + value: + stringValue: nginx scopeMetrics: - metrics: - description: The total number of accepted client connections diff --git a/internal/collector/nginxossreceiver/metadata.yaml b/internal/collector/nginxossreceiver/metadata.yaml index 04dc43a616..ff49eab4cd 100644 --- a/internal/collector/nginxossreceiver/metadata.yaml +++ b/internal/collector/nginxossreceiver/metadata.yaml @@ -7,7 +7,17 @@ status: beta: [metrics] distributions: [contrib] codeowners: - active: [olli-holmala, oliveromahony, apgralG, dhurley, craigell] + active: [olli-holmala, oliveromahony, apgralG, dhurley, craigell, sean-breen] + +resource_attributes: + instance.id: + description: The nginx instance id. + type: string + enabled: true + instance.type: + description: The nginx instance type (nginx, nginxplus). + type: string + enabled: true attributes: nginx.conn.outcome: diff --git a/internal/collector/nginxossreceiver/testdata/integration/expected.yaml b/internal/collector/nginxossreceiver/testdata/integration/expected.yaml index 9c5bc3463b..40813436db 100644 --- a/internal/collector/nginxossreceiver/testdata/integration/expected.yaml +++ b/internal/collector/nginxossreceiver/testdata/integration/expected.yaml @@ -1,5 +1,6 @@ resourceMetrics: - - resource: {} + - resource: + instance.type: nginx scopeMetrics: - metrics: - description: The total number of accepted client connections diff --git a/internal/collector/nginxplusreceiver/config.go b/internal/collector/nginxplusreceiver/config.go index c19be570e0..2297f5534e 100644 --- a/internal/collector/nginxplusreceiver/config.go +++ b/internal/collector/nginxplusreceiver/config.go @@ -18,8 +18,8 @@ const defaultCollectInterval = 10 * time.Second type Config struct { confighttp.ClientConfig `mapstructure:",squash"` - scraperhelper.ControllerConfig `mapstructure:",squash"` MetricsBuilderConfig metadata.MetricsBuilderConfig `mapstructure:",squash"` + scraperhelper.ControllerConfig `mapstructure:",squash"` } // Validate checks if the receiver configuration is valid diff --git a/internal/collector/nginxplusreceiver/documentation.md b/internal/collector/nginxplusreceiver/documentation.md index 6c52660272..7dce5a8621 100644 --- a/internal/collector/nginxplusreceiver/documentation.md +++ b/internal/collector/nginxplusreceiver/documentation.md @@ -906,3 +906,10 @@ The current number of peers removed from the group but still processing active c | ---- | ----------- | ------ | | nginx.zone.name | The name of the shared memory zone. | Any Str | | nginx.upstream.name | The name of the upstream block. | Any Str | + +## Resource Attributes + +| Name | Description | Values | Enabled | +| ---- | ----------- | ------ | ------- | +| instance.id | The nginx instance id. 
| Any Str | true | +| instance.type | The nginx instance type (nginx, nginxplus) | Any Str | true | diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_config.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_config.go index eb83eb63c0..2059f6cd64 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_config.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_config.go @@ -4,6 +4,7 @@ package metadata import ( "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/filter" ) // MetricConfig provides common config for a particular metric. @@ -266,13 +267,58 @@ func DefaultMetricsConfig() MetricsConfig { } } +// ResourceAttributeConfig provides common config for a particular resource attribute. +type ResourceAttributeConfig struct { + Enabled bool `mapstructure:"enabled"` + // Experimental: MetricsInclude defines a list of filters for attribute values. + // If the list is not empty, only metrics with matching resource attribute values will be emitted. + MetricsInclude []filter.Config `mapstructure:"metrics_include"` + // Experimental: MetricsExclude defines a list of filters for attribute values. + // If the list is not empty, metrics with matching resource attribute values will not be emitted. + // MetricsInclude has higher priority than MetricsExclude. + MetricsExclude []filter.Config `mapstructure:"metrics_exclude"` + + enabledSetByUser bool +} + +func (rac *ResourceAttributeConfig) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(rac) + if err != nil { + return err + } + rac.enabledSetByUser = parser.IsSet("enabled") + return nil +} + +// ResourceAttributesConfig provides config for nginxplus resource attributes. +type ResourceAttributesConfig struct { + InstanceID ResourceAttributeConfig `mapstructure:"instance.id"` + InstanceType ResourceAttributeConfig `mapstructure:"instance.type"` +} + +func DefaultResourceAttributesConfig() ResourceAttributesConfig { + return ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{ + Enabled: true, + }, + InstanceType: ResourceAttributeConfig{ + Enabled: true, + }, + } +} + // MetricsBuilderConfig is a configuration for nginxplus metrics builder. 
type MetricsBuilderConfig struct { - Metrics MetricsConfig `mapstructure:"metrics"` + Metrics MetricsConfig `mapstructure:"metrics"` + ResourceAttributes ResourceAttributesConfig `mapstructure:"resource_attributes"` } func DefaultMetricsBuilderConfig() MetricsBuilderConfig { return MetricsBuilderConfig{ - Metrics: DefaultMetricsConfig(), + Metrics: DefaultMetricsConfig(), + ResourceAttributes: DefaultResourceAttributesConfig(), } } diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_config_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_config_test.go index a5b204deeb..4f4503192c 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_config_test.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_config_test.go @@ -84,6 +84,10 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxStreamUpstreamPeerUnavailable: MetricConfig{Enabled: true}, NginxStreamUpstreamZombieCount: MetricConfig{Enabled: true}, }, + ResourceAttributes: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: true}, + InstanceType: ResourceAttributeConfig{Enabled: true}, + }, }, }, { @@ -149,13 +153,17 @@ func TestMetricsBuilderConfig(t *testing.T) { NginxStreamUpstreamPeerUnavailable: MetricConfig{Enabled: false}, NginxStreamUpstreamZombieCount: MetricConfig{Enabled: false}, }, + ResourceAttributes: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: false}, + InstanceType: ResourceAttributeConfig{Enabled: false}, + }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := loadMetricsBuilderConfig(t, tt.name) - if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{})); diff != "" { + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(MetricConfig{}, ResourceAttributeConfig{})); diff != "" { t.Errorf("Config mismatch (-expected +actual):\n%s", diff) } }) @@ -171,3 +179,49 @@ func loadMetricsBuilderConfig(t *testing.T, name string) MetricsBuilderConfig { require.NoError(t, sub.Unmarshal(&cfg)) return cfg } + +func TestResourceAttributesConfig(t *testing.T) { + tests := []struct { + name string + want ResourceAttributesConfig + }{ + { + name: "default", + want: DefaultResourceAttributesConfig(), + }, + { + name: "all_set", + want: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: true}, + InstanceType: ResourceAttributeConfig{Enabled: true}, + }, + }, + { + name: "none_set", + want: ResourceAttributesConfig{ + InstanceID: ResourceAttributeConfig{Enabled: false}, + InstanceType: ResourceAttributeConfig{Enabled: false}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt.name) + if diff := cmp.Diff(tt.want, cfg, cmpopts.IgnoreUnexported(ResourceAttributeConfig{})); diff != "" { + t.Errorf("Config mismatch (-expected +actual):\n%s", diff) + } + }) + } +} + +func loadResourceAttributesConfig(t *testing.T, name string) ResourceAttributesConfig { + cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config.yaml")) + require.NoError(t, err) + sub, err := cm.Sub(name) + require.NoError(t, err) + sub, err = sub.Sub("resource_attributes") + require.NoError(t, err) + cfg := DefaultResourceAttributesConfig() + require.NoError(t, sub.Unmarshal(&cfg)) + return cfg +} diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go index 
866fb5a735..21768959cd 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics.go @@ -6,6 +6,7 @@ import ( "time" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/filter" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver" @@ -3560,6 +3561,8 @@ type MetricsBuilder struct { metricsCapacity int // maximum observed number of metrics per resource. metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. buildInfo component.BuildInfo // contains version information. + resourceAttributeIncludeFilter map[string]filter.Filter + resourceAttributeExcludeFilter map[string]filter.Filter metricNginxCacheBytes metricNginxCacheBytes metricNginxCacheMemoryLimit metricNginxCacheMemoryLimit metricNginxCacheMemoryUsage metricNginxCacheMemoryUsage @@ -3694,6 +3697,20 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt metricNginxStreamUpstreamPeerTtfbTime: newMetricNginxStreamUpstreamPeerTtfbTime(mbc.Metrics.NginxStreamUpstreamPeerTtfbTime), metricNginxStreamUpstreamPeerUnavailable: newMetricNginxStreamUpstreamPeerUnavailable(mbc.Metrics.NginxStreamUpstreamPeerUnavailable), metricNginxStreamUpstreamZombieCount: newMetricNginxStreamUpstreamZombieCount(mbc.Metrics.NginxStreamUpstreamZombieCount), + resourceAttributeIncludeFilter: make(map[string]filter.Filter), + resourceAttributeExcludeFilter: make(map[string]filter.Filter), + } + if mbc.ResourceAttributes.InstanceID.MetricsInclude != nil { + mb.resourceAttributeIncludeFilter["instance.id"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceID.MetricsInclude) + } + if mbc.ResourceAttributes.InstanceID.MetricsExclude != nil { + mb.resourceAttributeExcludeFilter["instance.id"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceID.MetricsExclude) + } + if mbc.ResourceAttributes.InstanceType.MetricsInclude != nil { + mb.resourceAttributeIncludeFilter["instance.type"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceType.MetricsInclude) + } + if mbc.ResourceAttributes.InstanceType.MetricsExclude != nil { + mb.resourceAttributeExcludeFilter["instance.type"] = filter.CreateFilter(mbc.ResourceAttributes.InstanceType.MetricsExclude) } for _, op := range options { @@ -3702,6 +3719,11 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, opt return mb } +// NewResourceBuilder returns a new resource builder that should be used to build a resource associated with for the emitted metrics. +func (mb *MetricsBuilder) NewResourceBuilder() *ResourceBuilder { + return NewResourceBuilder(mb.config.ResourceAttributes) +} + // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. 
func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { @@ -3813,6 +3835,16 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { for _, op := range rmo { op(rm) } + for attr, filter := range mb.resourceAttributeIncludeFilter { + if val, ok := rm.Resource().Attributes().Get(attr); ok && !filter.Matches(val.AsString()) { + return + } + } + for attr, filter := range mb.resourceAttributeExcludeFilter { + if val, ok := rm.Resource().Attributes().Get(attr); ok && filter.Matches(val.AsString()) { + return + } + } if ils.Metrics().Len() > 0 { mb.updateCapacity(rm) diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go index 8e8f64cbb9..f12924e8e0 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_metrics_test.go @@ -42,6 +42,15 @@ func TestMetricsBuilder(t *testing.T) { resAttrsSet: testDataSetNone, expectEmpty: true, }, + { + name: "filter_set_include", + resAttrsSet: testDataSetAll, + }, + { + name: "filter_set_exclude", + resAttrsSet: testDataSetAll, + expectEmpty: true, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -291,7 +300,10 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordNginxStreamUpstreamZombieCountDataPoint(ts, 1, "nginx.zone.name-val", "nginx.upstream.name-val") - res := pcommon.NewResource() + rb := mb.NewResourceBuilder() + rb.SetInstanceID("instance.id-val") + rb.SetInstanceType("instance.type-val") + res := rb.Emit() metrics := mb.Emit(WithResource(res)) if test.expectEmpty { diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_resource.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_resource.go new file mode 100644 index 0000000000..d5c655c954 --- /dev/null +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_resource.go @@ -0,0 +1,43 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceBuilder is a helper struct to build resources predefined in metadata.yaml. +// The ResourceBuilder is not thread-safe and must not to be used in multiple goroutines. +type ResourceBuilder struct { + config ResourceAttributesConfig + res pcommon.Resource +} + +// NewResourceBuilder creates a new ResourceBuilder. This method should be called on the start of the application. +func NewResourceBuilder(rac ResourceAttributesConfig) *ResourceBuilder { + return &ResourceBuilder{ + config: rac, + res: pcommon.NewResource(), + } +} + +// SetInstanceID sets provided value as "instance.id" attribute. +func (rb *ResourceBuilder) SetInstanceID(val string) { + if rb.config.InstanceID.Enabled { + rb.res.Attributes().PutStr("instance.id", val) + } +} + +// SetInstanceType sets provided value as "instance.type" attribute. +func (rb *ResourceBuilder) SetInstanceType(val string) { + if rb.config.InstanceType.Enabled { + rb.res.Attributes().PutStr("instance.type", val) + } +} + +// Emit returns the built resource and resets the internal builder state. 
+func (rb *ResourceBuilder) Emit() pcommon.Resource { + r := rb.res + rb.res = pcommon.NewResource() + return r +} diff --git a/internal/collector/nginxplusreceiver/internal/metadata/generated_resource_test.go b/internal/collector/nginxplusreceiver/internal/metadata/generated_resource_test.go new file mode 100644 index 0000000000..f861ef1011 --- /dev/null +++ b/internal/collector/nginxplusreceiver/internal/metadata/generated_resource_test.go @@ -0,0 +1,46 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestResourceBuilder(t *testing.T) { + for _, tt := range []string{"default", "all_set", "none_set"} { + t.Run(tt, func(t *testing.T) { + cfg := loadResourceAttributesConfig(t, tt) + rb := NewResourceBuilder(cfg) + rb.SetInstanceID("instance.id-val") + rb.SetInstanceType("instance.type-val") + + res := rb.Emit() + assert.Equal(t, 0, rb.Emit().Attributes().Len()) // Second call should return empty Resource + + switch tt { + case "default": + assert.Equal(t, 2, res.Attributes().Len()) + case "all_set": + assert.Equal(t, 2, res.Attributes().Len()) + case "none_set": + assert.Equal(t, 0, res.Attributes().Len()) + return + default: + assert.Failf(t, "unexpected test case: %s", tt) + } + + val, ok := res.Attributes().Get("instance.id") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "instance.id-val", val.Str()) + } + val, ok = res.Attributes().Get("instance.type") + assert.True(t, ok) + if ok { + assert.EqualValues(t, "instance.type-val", val.Str()) + } + }) + } +} diff --git a/internal/collector/nginxplusreceiver/internal/metadata/testdata/config.yaml b/internal/collector/nginxplusreceiver/internal/metadata/testdata/config.yaml index 94ed619f0d..148884fdbc 100644 --- a/internal/collector/nginxplusreceiver/internal/metadata/testdata/config.yaml +++ b/internal/collector/nginxplusreceiver/internal/metadata/testdata/config.yaml @@ -117,6 +117,11 @@ all_set: enabled: true nginx.stream.upstream.zombie.count: enabled: true + resource_attributes: + instance.id: + enabled: true + instance.type: + enabled: true none_set: metrics: nginx.cache.bytes: @@ -235,3 +240,28 @@ none_set: enabled: false nginx.stream.upstream.zombie.count: enabled: false + resource_attributes: + instance.id: + enabled: false + instance.type: + enabled: false +filter_set_include: + resource_attributes: + instance.id: + enabled: true + metrics_include: + - regexp: ".*" + instance.type: + enabled: true + metrics_include: + - regexp: ".*" +filter_set_exclude: + resource_attributes: + instance.id: + enabled: true + metrics_exclude: + - strict: "instance.id-val" + instance.type: + enabled: true + metrics_exclude: + - strict: "instance.type-val" diff --git a/internal/collector/nginxplusreceiver/metadata.yaml b/internal/collector/nginxplusreceiver/metadata.yaml index 642cf2f92d..13d27868f3 100644 --- a/internal/collector/nginxplusreceiver/metadata.yaml +++ b/internal/collector/nginxplusreceiver/metadata.yaml @@ -7,7 +7,17 @@ status: beta: [metrics] distributions: [contrib] codeowners: - active: [olli-holmala, oliveromahony, apgralG, dhurley, craigell] + active: [olli-holmala, oliveromahony, apgralG, dhurley, craigell, sean-breen] + +resource_attributes: + instance.id: + description: The nginx instance id. 
+ type: string + enabled: true + instance.type: + description: The nginx instance type (nginx, nginxplus) + type: string + enabled: true attributes: nginx.cache.outcome: diff --git a/internal/collector/nginxplusreceiver/scraper.go b/internal/collector/nginxplusreceiver/scraper.go index c4d6d8fe3d..dd09ba6011 100644 --- a/internal/collector/nginxplusreceiver/scraper.go +++ b/internal/collector/nginxplusreceiver/scraper.go @@ -36,6 +36,7 @@ type nginxPlusScraper struct { settings component.TelemetrySettings cfg *Config mb *metadata.MetricsBuilder + rb *metadata.ResourceBuilder logger *zap.Logger } @@ -43,8 +44,10 @@ func newNginxPlusScraper( settings receiver.Settings, cfg *Config, ) (*nginxPlusScraper, error) { - mb := metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings) + logger := settings.Logger + logger.Info("Creating NGINX Plus scraper") + mb := metadata.NewMetricsBuilder(cfg.MetricsBuilderConfig, settings) plusClient, err := plusapi.NewNginxClient(cfg.Endpoint, plusapi.WithMaxAPIVersion(), ) @@ -52,11 +55,17 @@ func newNginxPlusScraper( return nil, err } + rb := mb.NewResourceBuilder() + rb.SetInstanceID(settings.ID.Name()) + rb.SetInstanceType("nginxplus") + logger.Debug("NGINX Plus resource info", zap.Any("resource", rb)) + return &nginxPlusScraper{ plusClient: plusClient, settings: settings.TelemetrySettings, cfg: cfg, mb: mb, + rb: rb, logger: settings.Logger, }, nil } @@ -68,10 +77,9 @@ func (nps *nginxPlusScraper) scrape(ctx context.Context) (pmetric.Metrics, error } nps.logger.Debug("NGINX Plus stats", zap.Any("stats", stats)) - nps.recordMetrics(stats) - return nps.mb.Emit(), nil + return nps.mb.Emit(metadata.WithResource(nps.rb.Emit())), nil } func (nps *nginxPlusScraper) recordMetrics(stats *plusapi.Stats) { diff --git a/internal/collector/nginxplusreceiver/scraper_test.go b/internal/collector/nginxplusreceiver/scraper_test.go index 046b7798a5..a10d1117af 100644 --- a/internal/collector/nginxplusreceiver/scraper_test.go +++ b/internal/collector/nginxplusreceiver/scraper_test.go @@ -44,6 +44,6 @@ func TestScraper(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreMetricsOrder(), - ), + pmetrictest.IgnoreResourceAttributeValue("instance.id")), ) } diff --git a/internal/collector/nginxplusreceiver/testdata/expected.yaml b/internal/collector/nginxplusreceiver/testdata/expected.yaml index e8458f3910..acb9089ae1 100644 --- a/internal/collector/nginxplusreceiver/testdata/expected.yaml +++ b/internal/collector/nginxplusreceiver/testdata/expected.yaml @@ -1,5 +1,9 @@ resourceMetrics: - - resource: {} + - resource: + attributes: + - key: instance.type + value: + stringValue: nginxplus scopeMetrics: - metrics: - description: The total number of NGINX config reloads. 
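The mdatagen output above adds two layers of control over these resource attributes: a per-attribute enabled flag, honoured by the ResourceBuilder itself, and the experimental metrics_include / metrics_exclude value filters, which are applied later in MetricsBuilder.EmitForResource (a matching exclude filter drops that resource's metrics entirely). The sketch below is a hypothetical example, not part of the generated code, showing the enabled flag in isolation using only the types defined in the generated metadata package above.

```go
package metadata

import "fmt"

// Example_resourceAttributeEnabledFlags shows how the enabled flags in
// ResourceAttributesConfig control what Emit produces. Value-based filtering
// (metrics_include/metrics_exclude) is not handled here; it is applied in
// MetricsBuilder.EmitForResource.
func Example_resourceAttributeEnabledFlags() {
	cfg := DefaultResourceAttributesConfig()                 // instance.id and instance.type both enabled
	cfg.InstanceID = ResourceAttributeConfig{Enabled: false} // disable instance.id only

	rb := NewResourceBuilder(cfg)
	rb.SetInstanceID("is-dropped-because-disabled") // no-op while the attribute is disabled
	rb.SetInstanceType("nginxplus")

	res := rb.Emit() // resets the builder; a second Emit returns an empty resource
	fmt.Println(res.Attributes().Len())

	// Output: 1
}
```

By contrast, a metrics_exclude filter that matches the emitted instance.id value suppresses the whole batch for that resource, which is what the filter_set_exclude case in the testdata config above exercises.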
diff --git a/internal/collector/otel_collector_plugin.go b/internal/collector/otel_collector_plugin.go index eb2179c670..0f4e3e7e9e 100644 --- a/internal/collector/otel_collector_plugin.go +++ b/internal/collector/otel_collector_plugin.go @@ -248,16 +248,16 @@ func (oc *Collector) handleResourceUpdate(ctx context.Context, msg *bus.Message) return } - if oc.config.Collector.Processors.Attribute == nil { - oc.config.Collector.Processors.Attribute = &config.Attribute{ - Actions: make([]config.Action, 0), + if oc.config.Collector.Processors.Resource == nil { + oc.config.Collector.Processors.Resource = &config.Resource{ + Attributes: make([]config.ResourceAttribute, 0), } } - if oc.config.Collector.Processors.Attribute != nil && + if oc.config.Collector.Processors.Resource != nil && resourceUpdateContext.GetResourceId() != "" { - reloadCollector = oc.updateAttributeActions( - []config.Action{ + reloadCollector = oc.updateResourceAttributes( + []config.ResourceAttribute{ { Key: "resource.id", Action: "insert", @@ -402,21 +402,21 @@ func (oc *Collector) updateExistingNginxOSSReceiver( } // nolint: revive -func (oc *Collector) updateAttributeActions( - actionsToAdd []config.Action, +func (oc *Collector) updateResourceAttributes( + attributesToAdd []config.ResourceAttribute, ) (reloadCollector bool) { reloadCollector = false - if oc.config.Collector.Processors.Attribute.Actions != nil { + if oc.config.Collector.Processors.Resource.Attributes != nil { OUTER: - for _, toAdd := range actionsToAdd { - for _, action := range oc.config.Collector.Processors.Attribute.Actions { + for _, toAdd := range attributesToAdd { + for _, action := range oc.config.Collector.Processors.Resource.Attributes { if action.Key == toAdd.Key { continue OUTER } } - oc.config.Collector.Processors.Attribute.Actions = append( - oc.config.Collector.Processors.Attribute.Actions, + oc.config.Collector.Processors.Resource.Attributes = append( + oc.config.Collector.Processors.Resource.Attributes, toAdd, ) reloadCollector = true diff --git a/internal/collector/otel_collector_plugin_test.go b/internal/collector/otel_collector_plugin_test.go index adac2094e5..f79f4252a9 100644 --- a/internal/collector/otel_collector_plugin_test.go +++ b/internal/collector/otel_collector_plugin_test.go @@ -231,6 +231,7 @@ func TestCollector_ProcessResourceUpdateTopic(t *testing.T) { conf.Collector.Log.Path = "" conf.Collector.Processors.Batch = nil conf.Collector.Processors.Attribute = nil + conf.Collector.Processors.Resource = nil tests := []struct { message *bus.Message @@ -238,14 +239,14 @@ func TestCollector_ProcessResourceUpdateTopic(t *testing.T) { name string }{ { - name: "Test 1: Resource update adds resource id action", + name: "Test 1: Resource update adds resource id attribute", message: &bus.Message{ Topic: bus.ResourceUpdateTopic, Data: protos.GetHostResource(), }, processors: config.Processors{ - Attribute: &config.Attribute{ - Actions: []config.Action{ + Resource: &config.Resource{ + Attributes: []config.ResourceAttribute{ { Key: "resource.id", Action: "insert", @@ -273,7 +274,7 @@ func TestCollector_ProcessResourceUpdateTopic(t *testing.T) { assert.Eventually( tt, func() bool { return collector.service.GetState() == otelcol.StateRunning }, - 5*time.Second, + 6*time.Second, 100*time.Millisecond, ) @@ -282,7 +283,7 @@ func TestCollector_ProcessResourceUpdateTopic(t *testing.T) { assert.Eventually( tt, func() bool { return collector.service.GetState() == otelcol.StateRunning }, - 5*time.Second, + 6*time.Second, 100*time.Millisecond, ) @@ 
-296,6 +297,7 @@ func TestCollector_ProcessResourceUpdateTopicFails(t *testing.T) { conf.Collector.Log.Path = "" conf.Collector.Processors.Batch = nil conf.Collector.Processors.Attribute = nil + conf.Collector.Processors.Resource = nil tests := []struct { message *bus.Message @@ -327,7 +329,7 @@ func TestCollector_ProcessResourceUpdateTopicFails(t *testing.T) { assert.Eventually( tt, func() bool { return collector.service.GetState() == otelcol.StateRunning }, - 5*time.Second, + 6*time.Second, 100*time.Millisecond, ) @@ -336,7 +338,7 @@ func TestCollector_ProcessResourceUpdateTopicFails(t *testing.T) { assert.Eventually( tt, func() bool { return collector.service.GetState() == otelcol.StateRunning }, - 5*time.Second, + 6*time.Second, 100*time.Millisecond, ) @@ -344,6 +346,7 @@ func TestCollector_ProcessResourceUpdateTopicFails(t *testing.T) { config.Processors{ Batch: nil, Attribute: nil, + Resource: nil, }, collector.config.Collector.Processors) }) @@ -510,34 +513,38 @@ func TestCollector_updateResourceAttributes(t *testing.T) { conf := types.OTelConfig(t) conf.Collector.Log.Path = "" conf.Collector.Processors.Batch = nil + conf.Collector.Processors.Attribute = nil + conf.Collector.Processors.Resource = nil tests := []struct { name string - setupActions []config.Action - actions []config.Action - expectedAttribs []config.Action + setup []config.ResourceAttribute + attributes []config.ResourceAttribute + expectedAttribs []config.ResourceAttribute expectedReloadRequired bool }{ { name: "Test 1: No Actions returns false", - setupActions: []config.Action{}, - actions: []config.Action{}, + setup: []config.ResourceAttribute{}, + attributes: []config.ResourceAttribute{}, expectedReloadRequired: false, - expectedAttribs: []config.Action{}, + expectedAttribs: []config.ResourceAttribute{}, }, { name: "Test 2: Adding an action returns true", - setupActions: []config.Action{}, - actions: []config.Action{{Key: "test", Action: "insert", Value: "test value"}}, + setup: []config.ResourceAttribute{}, + attributes: []config.ResourceAttribute{{Key: "test", Action: "insert", Value: "test value"}}, expectedReloadRequired: true, - expectedAttribs: []config.Action{{Key: "test", Action: "insert", Value: "test value"}}, + expectedAttribs: []config.ResourceAttribute{{Key: "test", Action: "insert", Value: "test value"}}, }, { - name: "Test 3: Adding a duplicate key doesn't append", - setupActions: []config.Action{{Key: "test", Action: "insert", Value: "test value 1"}}, - actions: []config.Action{{Key: "test", Action: "insert", Value: "updated value 2"}}, + name: "Test 3: Adding a duplicate key doesn't append", + setup: []config.ResourceAttribute{{Key: "test", Action: "insert", Value: "test value 1"}}, + attributes: []config.ResourceAttribute{ + {Key: "test", Action: "insert", Value: "updated value 2"}, + }, expectedReloadRequired: false, - expectedAttribs: []config.Action{{Key: "test", Action: "insert", Value: "test value 1"}}, + expectedAttribs: []config.ResourceAttribute{{Key: "test", Action: "insert", Value: "test value 1"}}, }, } @@ -547,12 +554,12 @@ func TestCollector_updateResourceAttributes(t *testing.T) { require.NoError(tt, err, "NewCollector should not return an error with valid config") // set up Actions - conf.Collector.Processors.Attribute = &config.Attribute{Actions: test.setupActions} + conf.Collector.Processors.Resource = &config.Resource{Attributes: test.setup} - reloadRequired := collector.updateAttributeActions(test.actions) + reloadRequired := collector.updateResourceAttributes(test.attributes) 
assert.Equal(tt, test.expectedAttribs, - conf.Collector.Processors.Attribute.Actions) + conf.Collector.Processors.Resource.Attributes) assert.Equal(tt, test.expectedReloadRequired, reloadRequired) }) } diff --git a/internal/collector/otelcol.tmpl b/internal/collector/otelcol.tmpl index b30e62181c..e6b3baca83 100644 --- a/internal/collector/otelcol.tmpl +++ b/internal/collector/otelcol.tmpl @@ -76,6 +76,17 @@ receivers: {{- end }} processors: +{{- if ne .Processors.Resource nil }} +{{- if .Processors.Resource.Attributes }} + resource: + attributes: +{{- range .Processors.Resource.Attributes }} + - key: {{ .Key }} + action: {{ .Action }} + value: {{ .Value }} +{{- end }} +{{- end }} +{{- end }} {{- if ne .Processors.Attribute nil }} {{- if .Processors.Attribute.Actions }} attributes: @@ -120,6 +131,8 @@ exporters: {{- if ne .Exporters.PrometheusExporter nil }} prometheus: endpoint: "{{ .Exporters.PrometheusExporter.Server.Host -}}:{{- .Exporters.PrometheusExporter.Server.Port }}" + resource_to_telemetry_conversion: + enabled: true {{- end }} {{- if ne .Exporters.Debug nil }} debug: @@ -197,14 +210,19 @@ service: - nginxplus/{{- .InstanceID -}} {{- end }} processors: - {{- if ne .Processors.Batch nil }} - - batch + {{- if ne .Processors.Resource nil }} + {{- if .Processors.Resource.Attributes }} + - resource + {{- end }} {{- end }} {{- if ne .Processors.Attribute nil }} {{- if .Processors.Attribute.Actions }} - attributes {{- end }} {{- end }} + {{- if ne .Processors.Batch nil }} + - batch + {{- end }} exporters: {{- range $index, $otlpExporter := .Exporters.OtlpExporters }} - otlp/{{$index}} diff --git a/internal/config/types.go b/internal/config/types.go index c44585d511..0e36ae0819 100644 --- a/internal/config/types.go +++ b/internal/config/types.go @@ -127,6 +127,7 @@ type ( // OTel Collector Processors configuration. 
Processors struct { Attribute *Attribute `yaml:"-" mapstructure:"attribute"` + Resource *Resource `yaml:"-" mapstructure:"resource"` Batch *Batch `yaml:"-" mapstructure:"batch"` } @@ -140,6 +141,16 @@ type ( Value string `yaml:"value" mapstructure:"value"` } + Resource struct { + Attributes []ResourceAttribute `yaml:"-" mapstructure:"attributes"` + } + + ResourceAttribute struct { + Key string `yaml:"key" mapstructure:"key"` + Action string `yaml:"action" mapstructure:"action"` + Value string `yaml:"value" mapstructure:"value"` + } + Batch struct { SendBatchSize uint32 `yaml:"-" mapstructure:"send_batch_size"` SendBatchMaxSize uint32 `yaml:"-" mapstructure:"send_batch_max_size"` diff --git a/test/config/collector/test-opentelemetry-collector-agent.yaml b/test/config/collector/test-opentelemetry-collector-agent.yaml index 337e14dae5..a4f5084a26 100644 --- a/test/config/collector/test-opentelemetry-collector-agent.yaml +++ b/test/config/collector/test-opentelemetry-collector-agent.yaml @@ -45,6 +45,8 @@ exporters: authenticator: headers_setter prometheus: endpoint: "localhost:9876" + resource_to_telemetry_conversion: + enabled: true debug: verbosity: detailed sampling_initial: 5 diff --git a/test/mock/collector/otel-collector.yaml b/test/mock/collector/otel-collector.yaml index 1372d21df1..4473d1f4e1 100644 --- a/test/mock/collector/otel-collector.yaml +++ b/test/mock/collector/otel-collector.yaml @@ -10,6 +10,8 @@ exporters: verbosity: detailed prometheus: endpoint: "0.0.0.0:9775" + resource_to_telemetry_conversion: + enabled: true add_metric_suffixes: false processors: From 0bdbf9711d2bb77189fe0dde9ea4504cad27d84d Mon Sep 17 00:00:00 2001 From: aphralG <108004222+aphralG@users.noreply.github.com> Date: Thu, 24 Oct 2024 16:34:35 +0100 Subject: [PATCH 12/12] Refactor Config Apply Data Plane Response (#904) --- internal/file/file_plugin.go | 23 +++++++----- internal/file/file_plugin_test.go | 44 +++++++++++++++++++++++ internal/resource/resource_plugin.go | 12 +++---- internal/resource/resource_plugin_test.go | 20 +++++++---- internal/watcher/watcher_plugin.go | 18 ++++++---- internal/watcher/watcher_plugin_test.go | 41 ++++++++++++++++++--- 6 files changed, 125 insertions(+), 33 deletions(-) diff --git a/internal/file/file_plugin.go b/internal/file/file_plugin.go index 4e83a0acbf..7aa962677b 100644 --- a/internal/file/file_plugin.go +++ b/internal/file/file_plugin.go @@ -69,7 +69,7 @@ func (fp *FilePlugin) Process(ctx context.Context, msg *bus.Message) { case bus.ConfigApplyRequestTopic: fp.handleConfigApplyRequest(ctx, msg) case bus.ConfigApplySuccessfulTopic, bus.RollbackCompleteTopic: - fp.clearCache() + fp.handleConfigApplyRollbackComplete(ctx, msg) case bus.ConfigApplyFailedTopic: fp.handleConfigApplyFailedRequest(ctx, msg) default: @@ -89,9 +89,16 @@ func (fp *FilePlugin) Subscriptions() []string { } } -func (fp *FilePlugin) clearCache() { - slog.Debug("Clearing cache after config apply") +func (fp *FilePlugin) handleConfigApplyRollbackComplete(ctx context.Context, msg *bus.Message) { + response, ok := msg.Data.(*mpi.DataPlaneResponse) + + if !ok { + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.DataPlaneResponse", "payload", msg.Data) + return + } + fp.fileManagerService.ClearCache() + fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) } func (fp *FilePlugin) handleConfigApplyFailedRequest(ctx context.Context, msg *bus.Message) { @@ -114,9 +121,9 @@ func (fp *FilePlugin) handleConfigApplyFailedRequest(ctx context.Context, msg 
*b mpi.CommandResponse_COMMAND_STATUS_FAILURE, "Config apply failed, rollback failed", data.InstanceID, data.Error.Error()) + fp.fileManagerService.ClearCache() fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: applyResponse}) - fp.fileManagerService.ClearCache() return } @@ -160,9 +167,9 @@ func (fp *FilePlugin) handleConfigApplyRequest(ctx context.Context, msg *bus.Mes "", ) + fp.fileManagerService.ClearCache() fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.ConfigApplySuccessfulTopic, Data: instanceID}) - fp.fileManagerService.ClearCache() return case model.Error: @@ -180,8 +187,8 @@ func (fp *FilePlugin) handleConfigApplyRequest(ctx context.Context, msg *bus.Mes err.Error(), ) - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) fp.fileManagerService.ClearCache() + fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) return case model.RollbackRequired: @@ -212,8 +219,8 @@ func (fp *FilePlugin) handleConfigApplyRequest(ctx context.Context, msg *bus.Mes "Config apply failed, rollback failed", configApplyRequest.GetOverview().GetConfigVersion().GetInstanceId(), rollbackErr.Error()) - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) fp.fileManagerService.ClearCache() + fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) return } @@ -224,8 +231,8 @@ func (fp *FilePlugin) handleConfigApplyRequest(ctx context.Context, msg *bus.Mes "Config apply failed, rollback successful", configApplyRequest.GetOverview().GetConfigVersion().GetInstanceId(), err.Error()) - fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) fp.fileManagerService.ClearCache() + fp.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) return case model.OK: diff --git a/internal/file/file_plugin_test.go b/internal/file/file_plugin_test.go index e1ca4b3d55..f111c3a820 100644 --- a/internal/file/file_plugin_test.go +++ b/internal/file/file_plugin_test.go @@ -12,6 +12,9 @@ import ( "testing" "time" + "github.com/google/uuid" + "google.golang.org/protobuf/types/known/timestamppb" + mpi "github.com/nginx/agent/v3/api/grpc/mpi/v1" "github.com/nginx/agent/v3/api/grpc/mpi/v1/v1fakes" "github.com/nginx/agent/v3/internal/bus" @@ -426,3 +429,44 @@ func TestFilePlugin_Process_ConfigApplyFailedTopic(t *testing.T) { }) } } + +func TestFilePlugin_Process_ConfigApllyRollbackCompleteTopic(t *testing.T) { + ctx := context.Background() + instance := protos.GetNginxOssInstance([]string{}) + mockFileManager := &filefakes.FakeFileManagerServiceInterface{} + + messagePipe := bus.NewFakeMessagePipe() + agentConfig := types.AgentConfig() + fakeGrpcConnection := &grpcfakes.FakeGrpcConnectionInterface{} + filePlugin := NewFilePlugin(agentConfig, fakeGrpcConnection) + + err := filePlugin.Init(ctx, messagePipe) + require.NoError(t, err) + filePlugin.fileManagerService = mockFileManager + + expectedResponse := &mpi.DataPlaneResponse{ + MessageMeta: &mpi.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: "dfsbhj6-bc92-30c1-a9c9-85591422068e", + Timestamp: timestamppb.Now(), + }, + CommandResponse: &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_OK, + 
Message: "Config apply successful", + Error: "", + }, + InstanceId: instance.GetInstanceMeta().GetInstanceId(), + } + + filePlugin.Process(ctx, &bus.Message{Topic: bus.ConfigApplySuccessfulTopic, Data: expectedResponse}) + + messages := messagePipe.GetMessages() + response, ok := messages[0].Data.(*mpi.DataPlaneResponse) + assert.True(t, ok) + + assert.Equal(t, expectedResponse.GetCommandResponse().GetStatus(), response.GetCommandResponse().GetStatus()) + assert.Equal(t, expectedResponse.GetCommandResponse().GetMessage(), response.GetCommandResponse().GetMessage()) + assert.Equal(t, expectedResponse.GetCommandResponse().GetError(), response.GetCommandResponse().GetError()) + assert.Equal(t, expectedResponse.GetMessageMeta().GetCorrelationId(), response.GetMessageMeta().GetCorrelationId()) + assert.Equal(t, expectedResponse.GetInstanceId(), response.GetInstanceId()) +} diff --git a/internal/resource/resource_plugin.go b/internal/resource/resource_plugin.go index a5779aa2f3..f916761f93 100644 --- a/internal/resource/resource_plugin.go +++ b/internal/resource/resource_plugin.go @@ -137,17 +137,15 @@ func (r *Resource) handleWriteConfigSuccessful(ctx context.Context, msg *bus.Mes return } + response := r.createDataPlaneResponse(data.CorrelationID, mpi.CommandResponse_COMMAND_STATUS_OK, "Config apply successful", data.InstanceID, "") - instance := r.resourceService.Instance(data.InstanceID) - - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: response}) r.messagePipe.Process( ctx, &bus.Message{ Topic: bus.ConfigApplySuccessfulTopic, - Data: instance.GetInstanceMeta().GetInstanceId(), + Data: response, }, ) } @@ -170,9 +168,8 @@ func (r *Resource) handleRollbackWrite(ctx context.Context, msg *bus.Message) { mpi.CommandResponse_COMMAND_STATUS_FAILURE, "Config apply failed, rollback failed", data.InstanceID, data.Error.Error()) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: applyResponse}) r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: rollbackResponse}) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.RollbackCompleteTopic, Data: data.InstanceID}) + r.messagePipe.Process(ctx, &bus.Message{Topic: bus.RollbackCompleteTopic, Data: applyResponse}) return } @@ -181,8 +178,7 @@ func (r *Resource) handleRollbackWrite(ctx context.Context, msg *bus.Message) { mpi.CommandResponse_COMMAND_STATUS_FAILURE, "Config apply failed, rollback successful", data.InstanceID, data.Error.Error()) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.DataPlaneResponseTopic, Data: applyResponse}) - r.messagePipe.Process(ctx, &bus.Message{Topic: bus.RollbackCompleteTopic, Data: data.InstanceID}) + r.messagePipe.Process(ctx, &bus.Message{Topic: bus.RollbackCompleteTopic, Data: applyResponse}) } func (*Resource) createDataPlaneResponse(correlationID string, status mpi.CommandResponse_CommandStatus, diff --git a/internal/resource/resource_plugin_test.go b/internal/resource/resource_plugin_test.go index cebeb61b3b..49f1bb0425 100644 --- a/internal/resource/resource_plugin_test.go +++ b/internal/resource/resource_plugin_test.go @@ -133,7 +133,7 @@ func TestResource_Process_Apply(t *testing.T) { }, }, applyErr: nil, - topic: []string{bus.DataPlaneResponseTopic, bus.ConfigApplySuccessfulTopic}, + topic: []string{bus.ConfigApplySuccessfulTopic}, }, { name: "Test 2: Write Config Successful Topic - Fail Status", @@ -167,7 +167,10 @@ func TestResource_Process_Apply(t *testing.T) { resourcePlugin.Process(ctx, test.message) 
assert.Equal(t, test.topic[0], messagePipe.GetMessages()[0].Topic) - assert.Equal(t, test.topic[1], messagePipe.GetMessages()[1].Topic) + + if len(test.topic) > 1 { + assert.Equal(t, test.topic[1], messagePipe.GetMessages()[1].Topic) + } if test.applyErr != nil { response, ok := messagePipe.GetMessages()[0].Data.(*mpi.DataPlaneResponse) @@ -198,7 +201,7 @@ func TestResource_Process_Rollback(t *testing.T) { }, }, rollbackErr: nil, - topic: []string{bus.RollbackCompleteTopic, bus.DataPlaneResponseTopic}, + topic: []string{bus.RollbackCompleteTopic}, }, { name: "Test 2: Rollback Write Topic - Fail Status", @@ -211,7 +214,7 @@ func TestResource_Process_Rollback(t *testing.T) { }, }, rollbackErr: errors.New("error reloading"), - topic: []string{bus.RollbackCompleteTopic, bus.DataPlaneResponseTopic, bus.DataPlaneResponseTopic}, + topic: []string{bus.RollbackCompleteTopic, bus.DataPlaneResponseTopic}, }, } @@ -238,12 +241,15 @@ func TestResource_Process_Rollback(t *testing.T) { assert.Equal(tt, len(test.topic), len(messagePipe.GetMessages())) assert.Equal(t, test.topic[0], messagePipe.GetMessages()[0].Topic) - assert.Equal(t, test.topic[1], messagePipe.GetMessages()[1].Topic) + + if len(test.topic) > 1 { + assert.Equal(t, test.topic[1], messagePipe.GetMessages()[1].Topic) + } if test.rollbackErr != nil { - rollbackResponse, ok := messagePipe.GetMessages()[2].Data.(*mpi.DataPlaneResponse) + rollbackResponse, ok := messagePipe.GetMessages()[1].Data.(*mpi.DataPlaneResponse) assert.True(tt, ok) - assert.Equal(t, test.topic[2], messagePipe.GetMessages()[2].Topic) + assert.Equal(t, test.topic[1], messagePipe.GetMessages()[1].Topic) assert.Equal(tt, test.rollbackErr.Error(), rollbackResponse.GetCommandResponse().GetError()) } }) diff --git a/internal/watcher/watcher_plugin.go b/internal/watcher/watcher_plugin.go index 255df0b115..f062d736ae 100644 --- a/internal/watcher/watcher_plugin.go +++ b/internal/watcher/watcher_plugin.go @@ -150,32 +150,38 @@ func (w *Watcher) handleConfigApplyRequest(ctx context.Context, msg *bus.Message } func (w *Watcher) handleConfigApplySuccess(ctx context.Context, msg *bus.Message) { - data, ok := msg.Data.(string) + response, ok := msg.Data.(*mpi.DataPlaneResponse) if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to string", "payload", msg.Data, "topic", msg.Topic) + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.DataPlaneResponse", "payload", + msg.Data, "topic", msg.Topic) return } + instanceID := response.GetInstanceId() + w.instancesWithConfigApplyInProgress = slices.DeleteFunc( w.instancesWithConfigApplyInProgress, func(element string) bool { - return element == data + return element == instanceID }, ) w.fileWatcherService.SetEnabled(true) - w.instanceWatcherService.ReparseConfig(ctx, data) + w.instanceWatcherService.ReparseConfig(ctx, instanceID) } func (w *Watcher) handleRollbackComplete(ctx context.Context, msg *bus.Message) { - instanceID, ok := msg.Data.(string) + response, ok := msg.Data.(*mpi.DataPlaneResponse) if !ok { - slog.ErrorContext(ctx, "Unable to cast message payload to string", "payload", msg.Data, "topic", msg.Topic) + slog.ErrorContext(ctx, "Unable to cast message payload to *mpi.DataPlaneResponse", "payload", + msg.Data, "topic", msg.Topic) return } + instanceID := response.GetInstanceId() + w.instancesWithConfigApplyInProgress = slices.DeleteFunc( w.instancesWithConfigApplyInProgress, func(element string) bool { diff --git a/internal/watcher/watcher_plugin_test.go b/internal/watcher/watcher_plugin_test.go index 
28dcc59616..d7c6830975 100644 --- a/internal/watcher/watcher_plugin_test.go +++ b/internal/watcher/watcher_plugin_test.go @@ -10,6 +10,9 @@ import ( "testing" "time" + "github.com/google/uuid" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/nginx/agent/v3/internal/watcher/health" "github.com/nginx/agent/v3/internal/watcher/instance" "github.com/nginx/agent/v3/internal/watcher/watcherfakes" @@ -130,9 +133,24 @@ func TestWatcher_Process_ConfigApplyRequestTopic(t *testing.T) { func TestWatcher_Process_ConfigApplySuccessfulTopic(t *testing.T) { ctx := context.Background() data := protos.GetNginxOssInstance([]string{}) + + response := &mpi.DataPlaneResponse{ + MessageMeta: &mpi.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: "dfsbhj6-bc92-30c1-a9c9-85591422068e", + Timestamp: timestamppb.Now(), + }, + CommandResponse: &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_OK, + Message: "Config apply successful", + Error: "", + }, + InstanceId: data.GetInstanceMeta().GetInstanceId(), + } + message := &bus.Message{ Topic: bus.ConfigApplySuccessfulTopic, - Data: data.GetInstanceMeta().GetInstanceId(), + Data: response, } fakeWatcherService := &watcherfakes.FakeInstanceWatcherServiceInterface{} @@ -148,14 +166,29 @@ func TestWatcher_Process_ConfigApplySuccessfulTopic(t *testing.T) { func TestWatcher_Process_RollbackCompleteTopic(t *testing.T) { ctx := context.Background() - instanceID := "123" + ossInstance := protos.GetNginxOssInstance([]string{}) + + response := &mpi.DataPlaneResponse{ + MessageMeta: &mpi.MessageMeta{ + MessageId: uuid.NewString(), + CorrelationId: "dfsbhj6-bc92-30c1-a9c9-85591422068e", + Timestamp: timestamppb.Now(), + }, + CommandResponse: &mpi.CommandResponse{ + Status: mpi.CommandResponse_COMMAND_STATUS_OK, + Message: "Config apply successful", + Error: "", + }, + InstanceId: ossInstance.GetInstanceMeta().GetInstanceId(), + } + message := &bus.Message{ Topic: bus.RollbackCompleteTopic, - Data: instanceID, + Data: response, } watcherPlugin := NewWatcher(types.AgentConfig()) - watcherPlugin.instancesWithConfigApplyInProgress = []string{instanceID} + watcherPlugin.instancesWithConfigApplyInProgress = []string{ossInstance.GetInstanceMeta().GetInstanceId()} watcherPlugin.Process(ctx, message)