diff --git a/charts/grafana-sampling/Chart.yaml b/charts/grafana-sampling/Chart.yaml
index 3b2c88ca6b..692b0a0fe7 100644
--- a/charts/grafana-sampling/Chart.yaml
+++ b/charts/grafana-sampling/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: grafana-sampling
 description: A Helm chart for a layered OTLP tail sampling and metrics generation pipeline.
 type: application
-version: 1.1.1
+version: 1.1.2
 appVersion: "v1.5.1"
 sources:
   - https://github.com/grafana/alloy
diff --git a/charts/grafana-sampling/README.md b/charts/grafana-sampling/README.md
index fede95c43a..35cb945785 100644
--- a/charts/grafana-sampling/README.md
+++ b/charts/grafana-sampling/README.md
@@ -1,6 +1,6 @@
 # grafana-sampling
 
-![Version: 1.1.1](https://img.shields.io/badge/Version-1.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.5.1](https://img.shields.io/badge/AppVersion-v1.5.1-informational?style=flat-square)
+![Version: 1.1.2](https://img.shields.io/badge/Version-1.1.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.5.1](https://img.shields.io/badge/AppVersion-v1.5.1-informational?style=flat-square)
 
 A Helm chart for a layered OTLP tail sampling and metrics generation pipeline.
 
@@ -143,6 +143,8 @@ A major chart version change indicates that there is an incompatible breaking ch
 | batch.statefulset.send_batch_max_size | int | `0` |  |
 | batch.statefulset.send_batch_size | int | `8192` |  |
 | batch.statefulset.timeout | string | `"200ms"` |  |
+| deployment.otlp.receiver | object | `{"grpc":{"max_recv_msg_size":"4MB"}}` | otlp receiver settings for deployment (loadbalancer) |
+| deployment.otlp.receiver.grpc.max_recv_msg_size | string | `"4MB"` | gRPC max message receive size. Default to 4MB |
 | metricsGeneration.dimensions | list | `["service.namespace","service.version","deployment.environment","k8s.cluster.name","k8s.pod.name"]` | Additional dimensions to add to generated metrics. |
 | metricsGeneration.enabled | bool | `true` | Toggle generation of spanmetrics and servicegraph metrics. |
 | metricsGeneration.legacy | bool | `true` | Use legacy metric names that match those used by the Tempo metrics generator. |
@@ -153,4 +155,6 @@ A major chart version change indicates that there is an incompatible breaking ch
 | sampling.failedRequests.sample | bool | `false` | Toggle sampling failed requests. |
 | sampling.successfulRequests.percentage | int | `10` | Percentage of successful requests to sample. |
 | sampling.successfulRequests.sample | bool | `true` | Toggle sampling successful requests. |
+| statefulset.otlp.receiver | object | `{"grpc":{"max_recv_msg_size":"4MB"}}` | otlp receiver settings for statefulset (sampler) |
+| statefulset.otlp.receiver.grpc.max_recv_msg_size | string | `"4MB"` | gRPC max message receive size. Default to 4MB |
 
diff --git a/charts/grafana-sampling/templates/_otelcol_receiver_otlp.alloy.txt b/charts/grafana-sampling/templates/_otelcol_receiver_otlp.alloy.txt
index 34c9428a21..b2ac0b962c 100644
--- a/charts/grafana-sampling/templates/_otelcol_receiver_otlp.alloy.txt
+++ b/charts/grafana-sampling/templates/_otelcol_receiver_otlp.alloy.txt
@@ -3,7 +3,10 @@ otelcol.receiver.otlp "default" {
   // https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.otlp/
 
   // configures the default grpc endpoint "0.0.0.0:4317"
-  grpc { }
+  grpc {
+    max_recv_msg_size = {{ .Values.deployment.otlp.receiver.grpc.max_recv_msg_size | quote }}
+  }
+
   // configures the default http/protobuf endpoint "0.0.0.0:4318"
   http { }
 
@@ -19,7 +22,9 @@ otelcol.receiver.otlp "default" {
   // https://grafana.com/docs/alloy/latest/reference/components/otelcol.receiver.otlp/
 
   // configures the default grpc endpoint "0.0.0.0:4317"
-  grpc { }
+  grpc {
+    max_recv_msg_size = {{ .Values.statefulset.otlp.receiver.grpc.max_recv_msg_size | quote }}
+  }
 
   output {
     traces = [
diff --git a/charts/grafana-sampling/values.yaml b/charts/grafana-sampling/values.yaml
index 23b0d0cac4..f5d6e07ba8 100644
--- a/charts/grafana-sampling/values.yaml
+++ b/charts/grafana-sampling/values.yaml
@@ -61,6 +61,23 @@ batch:
     send_batch_size: 8192
     send_batch_max_size: 0
 
+
+deployment:
+  otlp:
+    # -- otlp receiver settings for deployment (loadbalancer)
+    receiver:
+      grpc:
+        # -- gRPC max message receive size. Default to 4MB
+        max_recv_msg_size: 4MB
+
+statefulset:
+  otlp:
+    # -- otlp receiver settings for statefulset (sampler)
+    receiver:
+      grpc:
+        # -- gRPC max message receive size. Default to 4MB
+        max_recv_msg_size: 4MB
+
 # @ignored Ignore alloy deployment
 alloy-deployment:
   # -- Do not change this.
diff --git a/charts/grafana/Chart.yaml b/charts/grafana/Chart.yaml
index 0dce609ce2..9deccc3ff6 100644
--- a/charts/grafana/Chart.yaml
+++ b/charts/grafana/Chart.yaml
@@ -1,6 +1,6 @@
 apiVersion: v2
 name: grafana
-version: 8.8.1
+version: 8.8.2
 appVersion: 11.4.0
 kubeVersion: "^1.8.0-0"
 description: The leading tool for querying and visualizing time series and metrics.
diff --git a/charts/grafana/templates/servicemonitor.yaml b/charts/grafana/templates/servicemonitor.yaml
index 0359013520..e3d1520be9 100644
--- a/charts/grafana/templates/servicemonitor.yaml
+++ b/charts/grafana/templates/servicemonitor.yaml
@@ -38,6 +38,10 @@ spec:
       metricRelabelings:
         {{- toYaml . | nindent 6 }}
       {{- end }}
+      {{- with .Values.serviceMonitor.basicAuth }}
+      basicAuth:
+        {{- toYaml . | nindent 6 }}
+      {{- end }}
   jobLabel: "{{ .Release.Name }}"
   selector:
     matchLabels:
diff --git a/charts/grafana/values.yaml b/charts/grafana/values.yaml
index 78a0411cfe..5095c37441 100644
--- a/charts/grafana/values.yaml
+++ b/charts/grafana/values.yaml
@@ -260,6 +260,7 @@ serviceMonitor:
   scrapeTimeout: 30s
   relabelings: []
   metricRelabelings: []
+  basicAuth: {}
   targetLabels: []
 
 extraExposePorts: []
diff --git a/charts/rollout-operator/Chart.yaml b/charts/rollout-operator/Chart.yaml
index 871542067e..ce7f822f92 100644
--- a/charts/rollout-operator/Chart.yaml
+++ b/charts/rollout-operator/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: rollout-operator
 description: "Grafana rollout-operator"
 type: application
-version: 0.22.0
-appVersion: v0.22.0
+version: 0.23.0
+appVersion: v0.23.0
 home: https://github.com/grafana/rollout-operator
 kubeVersion: ^1.10.0-0
diff --git a/charts/rollout-operator/README.md b/charts/rollout-operator/README.md
index 6c580b0f74..1c82a507d6 100644
--- a/charts/rollout-operator/README.md
+++ b/charts/rollout-operator/README.md
@@ -4,7 +4,7 @@ Helm chart for deploying [Grafana rollout-operator](https://github.com/grafana/r
 
 # rollout-operator
 
-![Version: 0.22.0](https://img.shields.io/badge/Version-0.22.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.22.0](https://img.shields.io/badge/AppVersion-v0.22.0-informational?style=flat-square)
+![Version: 0.23.0](https://img.shields.io/badge/Version-0.23.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.23.0](https://img.shields.io/badge/AppVersion-v0.23.0-informational?style=flat-square)
 
 Grafana rollout-operator
 
diff --git a/charts/tempo-distributed/Chart.yaml b/charts/tempo-distributed/Chart.yaml
index bc14f9b5db..ed00ee8a96 100644
--- a/charts/tempo-distributed/Chart.yaml
+++ b/charts/tempo-distributed/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: tempo-distributed
 description: Grafana Tempo in MicroService mode
 type: application
-version: 1.26.3
+version: 1.28.2
 appVersion: 2.6.0
 engine: gotpl
 home: https://grafana.com/docs/tempo/latest/
diff --git a/charts/tempo-distributed/README.md b/charts/tempo-distributed/README.md
index 39b3a100f4..d850f84e52 100755
--- a/charts/tempo-distributed/README.md
+++ b/charts/tempo-distributed/README.md
@@ -1,6 +1,6 @@
 # tempo-distributed
 
-![Version: 1.26.3](https://img.shields.io/badge/Version-1.26.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.0](https://img.shields.io/badge/AppVersion-2.6.0-informational?style=flat-square)
+![Version: 1.28.2](https://img.shields.io/badge/Version-1.28.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.0](https://img.shields.io/badge/AppVersion-2.6.0-informational?style=flat-square)
 
 Grafana Tempo in MicroService mode
 
@@ -48,6 +48,10 @@ The command removes all the Kubernetes components associated with the chart and
 
 A major chart version change indicates that there is an incompatible breaking change needing manual actions.
 
+### From Chart versions < 1.23.0
+
+A default affinity has been defined in this version for the compactor following the standard used in other components.
+
 ### From Chart versions < 1.21.0
 
 Upgrading to chart 1.21.0 will set the memberlist cluster_label config option. During rollout your cluster will temporarilly be split into two memberlist clusters until all components are rolled out.
@@ -273,6 +277,7 @@ The memcached default args are removed and should be provided manually. The sett
 | cache.caches[0].roles[0] | string | `"parquet-footer"` |  |
 | cache.caches[0].roles[1] | string | `"bloom"` |  |
 | cache.caches[0].roles[2] | string | `"frontend-search"` |  |
+| compactor.affinity | string | Hard node and soft zone anti-affinity | Affinity for compactor pods. Passed through `tpl` and, thus, to be configured as string |
 | compactor.autoscaling | object | `{"enabled":false,"hpa":{"behavior":{},"enabled":false,"targetCPUUtilizationPercentage":100,"targetMemoryUtilizationPercentage":null},"keda":{"enabled":false,"triggers":[]},"maxReplicas":3,"minReplicas":1}` | Autoscaling configurations |
 | compactor.autoscaling.enabled | bool | `false` | Enable autoscaling for the compactor |
 | compactor.autoscaling.hpa | object | `{"behavior":{},"enabled":false,"targetCPUUtilizationPercentage":100,"targetMemoryUtilizationPercentage":null}` | Autoscaling via HPA object |
@@ -510,6 +515,7 @@ The memcached default args are removed and should be provided manually. The sett
 | global.image.pullSecrets | list | `[]` | Optional list of imagePullSecrets for all images, excluding enterprise. Names of existing secrets with private container registry credentials. Ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod Example: pullSecrets: [ my-dockerconfigjson-secret ] |
 | global.image.registry | string | `"docker.io"` | Overrides the Docker registry globally for all images, excluding enterprise. |
 | global.priorityClassName | string | `nil` | Overrides the priorityClassName for all pods |
+| global.storageClass | string | `nil` | Global storage class to be used for persisted components |
 | global_overrides | object | `{"per_tenant_override_config":"/runtime-config/overrides.yaml"}` | The standard overrides configuration section. This can include a `defaults` object for applying to all tenants (not to be confused with the `global` property of the same name, which overrides `max_byte_per_trace` for all tenants). For an example on how to enable the metrics generator using the `global_overrides` object, see the 'Activate metrics generator' section below. Refer to [Standard overrides](https://grafana.com/docs/tempo/latest/configuration/#standard-overrides) for more details. |
 | ingester.affinity | string | Soft node and soft zone anti-affinity | Affinity for ingester pods. Passed through `tpl` and, thus, to be configured as string |
 | ingester.annotations | object | `{}` | Annotations for the ingester StatefulSet |
@@ -540,12 +546,13 @@ The memcached default args are removed and should be provided manually. The sett
 | ingester.image.tag | string | `nil` | Docker image tag for the ingester image. Overrides `tempo.image.tag` |
 | ingester.initContainers | list | `[]` |  |
 | ingester.nodeSelector | object | `{}` | Node selector for ingester pods |
+| ingester.persistence | object | `{"annotations":{},"enabled":false,"inMemory":false,"size":"10Gi","storageClass":null}` | Persistence configuration for ingester |
 | ingester.persistence.annotations | object | `{}` | Annotations for ingester's persist volume claim |
 | ingester.persistence.enabled | bool | `false` | Enable creating PVCs which is required when using boltdb-shipper |
 | ingester.persistence.inMemory | bool | `false` | use emptyDir with ramdisk instead of PVC. **Please note that all data in ingester will be lost on pod restart** |
 | ingester.persistence.size | string | `"10Gi"` | Size of persistent or memory disk |
 | ingester.persistence.storageClass | string | `nil` | Storage class to be used. If defined, storageClassName: <storageClass>. If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). |
-| ingester.persistentVolumeClaimRetentionPolicy.enabled | bool | `false` | Enable Persistent volume retention policy for Statefulset |
+| ingester.persistentVolumeClaimRetentionPolicy.enabled | bool | `false` | Enable Persistent volume retention policy for StatefulSet |
 | ingester.persistentVolumeClaimRetentionPolicy.whenDeleted | string | `"Retain"` | Volume retention behavior that applies when the StatefulSet is deleted |
 | ingester.persistentVolumeClaimRetentionPolicy.whenScaled | string | `"Retain"` | Volume retention behavior when the replica count of the StatefulSet is reduced |
 | ingester.podAnnotations | object | `{}` | Annotations for ingester pods |
@@ -671,6 +678,9 @@ The memcached default args are removed and should be provided manually. The sett
 | metricsGenerator.persistence.annotations | object | `{}` | Annotations for metrics generator PVCs |
 | metricsGenerator.persistence.enabled | bool | `false` | Enable creating PVCs if you have kind set to StatefulSet. This disables using local disk or memory configured in walEmptyDir |
 | metricsGenerator.persistence.storageClass | string | `nil` | Storage class to be used. If defined, storageClassName: <storageClass>. If set to "-", storageClassName: "", which disables dynamic provisioning. If empty or set to null, no storageClassName spec is set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack). |
+| metricsGenerator.persistentVolumeClaimRetentionPolicy.enabled | bool | `false` | Enable Persistent volume retention policy for StatefulSet |
+| metricsGenerator.persistentVolumeClaimRetentionPolicy.whenDeleted | string | `"Retain"` | Volume retention behavior that applies when the StatefulSet is deleted |
+| metricsGenerator.persistentVolumeClaimRetentionPolicy.whenScaled | string | `"Retain"` | Volume retention behavior when the replica count of the StatefulSet is reduced |
 | metricsGenerator.podAnnotations | object | `{}` | Annotations for metrics-generator pods |
 | metricsGenerator.podLabels | object | `{}` | Labels for metrics-generator pods |
 | metricsGenerator.ports | list | `[{"name":"grpc","port":9095,"service":true},{"name":"http-memberlist","port":7946,"service":false},{"name":"http-metrics","port":3100,"service":true}]` | Default ports |
@@ -830,7 +840,7 @@ The memcached default args are removed and should be provided manually. The sett
 | server.http_server_read_timeout | string | `"30s"` | Read timeout for HTTP server |
 | server.http_server_write_timeout | string | `"30s"` | Write timeout for HTTP server |
 | server.logFormat | string | `"logfmt"` | Log format. Can be set to logfmt (default) or json. |
-| server.logLevel | string | `"info"` | Log level. Can be set to trace, debug, info (default), warn, error, fatal, panic |
+| server.logLevel | string | `"info"` | Log level. Can be set to debug, info (default), warn, error |
 | serviceAccount.annotations | object | `{}` | Annotations for the service account |
 | serviceAccount.automountServiceAccountToken | bool | `false` |  |
 | serviceAccount.create | bool | `true` | Specifies whether a ServiceAccount should be created |
diff --git a/charts/tempo-distributed/README.md.gotmpl b/charts/tempo-distributed/README.md.gotmpl
index 26332f669c..ae86c8b34a 100644
--- a/charts/tempo-distributed/README.md.gotmpl
+++ b/charts/tempo-distributed/README.md.gotmpl
@@ -41,6 +41,10 @@ The command removes all the Kubernetes components associated with the chart and
 
 A major chart version change indicates that there is an incompatible breaking change needing manual actions.
 
+### From Chart versions < 1.23.0
+
+A default affinity has been defined in this version for the compactor following the standard used in other components.
+
 ### From Chart versions < 1.21.0
 
 Upgrading to chart 1.21.0 will set the memberlist cluster_label config option. During rollout your cluster will temporarilly be split into two memberlist clusters until all components are rolled out.
diff --git a/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml b/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml
index 2671a23994..54c545f80b 100644
--- a/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml
+++ b/charts/tempo-distributed/templates/ingester/statefulset-ingester.yaml
@@ -1,5 +1,7 @@
 {{- $dict := dict "ctx" . "component" "ingester" "memberlist" true -}}
 {{- $zonesMap := include "ingester.zoneAwareReplicationMap" $dict | fromYaml -}}
+{{- $storageClass := .Values.ingester.persistence.storageClass | default .Values.global.storageClass }}
+{{- if eq $storageClass "-" }}{{- $storageClass = "" }}{{- end }}
 {{- range $zoneName, $rolloutZone := $zonesMap -}}
 {{- with $ -}}
 {{- $_ := set $dict "rolloutZoneName" $zoneName -}}
@@ -193,9 +195,7 @@ spec:
       spec:
         accessModes:
           - ReadWriteOnce
-        {{- with .Values.ingester.persistence.storageClass }}
-        storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }}
-        {{- end }}
+        storageClassName: {{ if $storageClass }}{{ $storageClass }}{{ else }}{{- "" }}{{ end }}
         resources:
           requests:
             storage: {{ .Values.ingester.persistence.size | quote }}
diff --git a/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml b/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml
index f057fba246..573e3e5eee 100644
--- a/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml
+++ b/charts/tempo-distributed/templates/metrics-generator/statefulset-metrics-generator.yaml
@@ -1,5 +1,7 @@
 {{- if and (.Values.metricsGenerator.enabled) (eq .Values.metricsGenerator.kind "StatefulSet") }}
 {{ $dict := dict "ctx" . "component" "metrics-generator" "memberlist" true }}
+{{- $storageClass := .Values.metricsGenerator.persistence.storageClass | default .Values.global.storageClass }}
+{{- if eq $storageClass "-" }}{{- $storageClass = "" }}{{- end }}
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
@@ -144,6 +146,11 @@ spec:
         - name: wal
           emptyDir: {{- toYaml .Values.metricsGenerator.walEmptyDir | nindent 12 }}
   {{- else }}
+  {{- if .Values.metricsGenerator.persistentVolumeClaimRetentionPolicy.enabled }}
+  persistentVolumeClaimRetentionPolicy:
+    whenDeleted: {{ .Values.metricsGenerator.persistentVolumeClaimRetentionPolicy.whenDeleted }}
+    whenScaled: {{ .Values.metricsGenerator.persistentVolumeClaimRetentionPolicy.whenScaled }}
+  {{- end }}
   volumeClaimTemplates:
     - apiVersion: v1
       kind: PersistentVolumeClaim
@@ -156,9 +163,7 @@ spec:
       spec:
        accessModes:
          - ReadWriteOnce
-        {{- with .Values.metricsGenerator.persistence.storageClass }}
-        storageClassName: {{ if (eq "-" .) }}""{{ else }}{{ . }}{{ end }}
-        {{- end }}
+        storageClassName: {{ if $storageClass }}{{ $storageClass }}{{ else }}{{- "" }}{{ end }}
        resources:
          requests:
            storage: {{ .Values.metricsGenerator.persistence.size | quote }}
diff --git a/charts/tempo-distributed/values.yaml b/charts/tempo-distributed/values.yaml
index 090ff5a0f3..ba66b3dbbd 100755
--- a/charts/tempo-distributed/values.yaml
+++ b/charts/tempo-distributed/values.yaml
@@ -19,6 +19,8 @@ global:
   # -- Common environment variables to add to all pods directly managed by this chart.
   # scope: admin-api, compactor, distributor, enterprise-federation-frontend, gateway, ingester, memcached, metrics-generator, querier, query-frontend, tokengen
   extraEnv: []
+  # -- Global storage class to be used for persisted components
+  storageClass: null
 
 fullnameOverride: ''
 # fullnameOverride: tempo
@@ -203,6 +205,7 @@ ingester:
   extraVolumeMounts: []
   # -- Extra volumes for ingester deployment
   extraVolumes: []
+  # -- Persistence configuration for ingester
   persistence:
     # -- Enable creating PVCs which is required when using boltdb-shipper
     enabled: false
@@ -219,7 +222,7 @@ ingester:
     # -- Annotations for ingester's persist volume claim
     annotations: {}
   persistentVolumeClaimRetentionPolicy:
-    # -- Enable Persistent volume retention policy for Statefulset
+    # -- Enable Persistent volume retention policy for StatefulSet
    enabled: false
    # -- Volume retention behavior when the replica count of the StatefulSet is reduced
    whenScaled: Retain
@@ -399,6 +402,13 @@ metricsGenerator:
   extraVolumeMounts: []
   # -- Extra volumes for metrics-generator deployment
   extraVolumes: []
+  persistentVolumeClaimRetentionPolicy:
+    # -- Enable Persistent volume retention policy for StatefulSet
+    enabled: false
+    # -- Volume retention behavior when the replica count of the StatefulSet is reduced
+    whenScaled: Retain
+    # -- Volume retention behavior that applies when the StatefulSet is deleted
+    whenDeleted: Retain
   # -- Default ports
   ports:
     - name: grpc
@@ -630,6 +640,22 @@ compactor:
   podLabels: {}
   # -- Annotations for compactor pods
   podAnnotations: {}
+  # -- Affinity for compactor pods. Passed through `tpl` and, thus, to be configured as string
+  # @default -- Hard node and soft zone anti-affinity
+  affinity: |
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchLabels:
+              {{- include "tempo.selectorLabels" (dict "ctx" . "component" "compactor") | nindent 10 }}
+          topologyKey: kubernetes.io/hostname
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 100
+          podAffinityTerm:
+            labelSelector:
+              matchLabels:
+                {{- include "tempo.selectorLabels" (dict "ctx" . "component" "compactor") | nindent 12 }}
+            topologyKey: topology.kubernetes.io/zone
   # -- Additional CLI args for the compactor
   extraArgs: []
   # -- Environment variables to add to the compactor pods
@@ -1479,7 +1505,7 @@ config: |
 server:
   # -- HTTP server listen host
   httpListenPort: 3100
-  # -- Log level. Can be set to trace, debug, info (default), warn, error, fatal, panic
+  # -- Log level. Can be set to debug, info (default), warn, error
   logLevel: info
   # -- Log format. Can be set to logfmt (default) or json.
   logFormat: logfmt
diff --git a/charts/tempo/Chart.yaml b/charts/tempo/Chart.yaml
index 6fe3bb9f57..681ebbe197 100644
--- a/charts/tempo/Chart.yaml
+++ b/charts/tempo/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v2
 name: tempo
 description: Grafana Tempo Single Binary Mode
 type: application
-version: 1.15.0
+version: 1.17.0
 appVersion: 2.6.1
 engine: gotpl
 home: https://grafana.net
diff --git a/charts/tempo/README.md b/charts/tempo/README.md
index 5c99822194..5ab8e69317 100644
--- a/charts/tempo/README.md
+++ b/charts/tempo/README.md
@@ -1,6 +1,6 @@
 # tempo
 
-![Version: 1.15.0](https://img.shields.io/badge/Version-1.15.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.1](https://img.shields.io/badge/AppVersion-2.6.1-informational?style=flat-square)
+![Version: 1.17.0](https://img.shields.io/badge/Version-1.17.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.6.1](https://img.shields.io/badge/AppVersion-2.6.1-informational?style=flat-square)
 
 Grafana Tempo Single Binary Mode
 
@@ -19,6 +19,7 @@ Grafana Tempo Single Binary Mode
 | extraVolumes | list | `[]` | Volumes to add |
 | fullnameOverride | string | `""` | Overrides the chart's computed fullname |
 | global.commonLabels | object | `{}` | Common labels for all object directly managed by this chart. |
+| hostAliases | list | `[]` | hostAliases to add |
 | labels | object | `{}` | labels for tempo |
 | nameOverride | string | `""` | Overrides the chart's name |
 | networkPolicy.allowExternal | bool | `true` |  |
@@ -62,6 +63,13 @@ Grafana Tempo Single Binary Mode
 | tempo.extraVolumeMounts | list | `[]` | Volume mounts to add |
 | tempo.global_overrides.per_tenant_override_config | string | `"/conf/overrides.yaml"` |  |
 | tempo.ingester | object | `{}` | Configuration options for the ingester |
+| tempo.livenessProbe.failureThreshold | int | `3` |  |
+| tempo.livenessProbe.httpGet.path | string | `"/ready"` |  |
+| tempo.livenessProbe.httpGet.port | int | `3100` |  |
+| tempo.livenessProbe.initialDelaySeconds | int | `30` |  |
+| tempo.livenessProbe.periodSeconds | int | `10` |  |
+| tempo.livenessProbe.successThreshold | int | `1` |  |
+| tempo.livenessProbe.timeoutSeconds | int | `5` |  |
 | tempo.memBallastSizeMbs | int | `1024` |  |
 | tempo.metricsGenerator.enabled | bool | `false` | If true, enables Tempo's metrics generator (https://grafana.com/docs/tempo/next/metrics-generator/) |
 | tempo.metricsGenerator.remoteWriteUrl | string | `"http://prometheus.monitoring:9090/api/v1/write"` |  |
@@ -70,6 +78,13 @@ Grafana Tempo Single Binary Mode
 | tempo.pullPolicy | string | `"IfNotPresent"` |  |
 | tempo.querier | object | `{}` | Configuration options for the querier |
 | tempo.queryFrontend | object | `{}` | Configuration options for the query-fronted |
+| tempo.readinessProbe.failureThreshold | int | `3` |  |
+| tempo.readinessProbe.httpGet.path | string | `"/ready"` |  |
+| tempo.readinessProbe.httpGet.port | int | `3100` |  |
+| tempo.readinessProbe.initialDelaySeconds | int | `20` |  |
+| tempo.readinessProbe.periodSeconds | int | `10` |  |
+| tempo.readinessProbe.successThreshold | int | `1` |  |
+| tempo.readinessProbe.timeoutSeconds | int | `5` |  |
 | tempo.receivers.jaeger.protocols.grpc.endpoint | string | `"0.0.0.0:14250"` |  |
 | tempo.receivers.jaeger.protocols.thrift_binary.endpoint | string | `"0.0.0.0:6832"` |  |
 | tempo.receivers.jaeger.protocols.thrift_compact.endpoint | string | `"0.0.0.0:6831"` |  |
diff --git a/charts/tempo/templates/statefulset.yaml b/charts/tempo/templates/statefulset.yaml
index 53c9605fad..36224e6e41 100644
--- a/charts/tempo/templates/statefulset.yaml
+++ b/charts/tempo/templates/statefulset.yaml
@@ -52,6 +52,10 @@ spec:
       {{- end }}
      {{- end }}
      {{- end }}
+      {{- with .Values.hostAliases }}
+      hostAliases:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
       containers:
       - args:
         - -config.file=/conf/tempo.yaml
@@ -87,6 +91,10 @@ spec:
           name: otlp-http
         - containerPort: 55678
           name: opencensus
+        livenessProbe:
+          {{- toYaml .Values.tempo.livenessProbe | nindent 12 }}
+        readinessProbe:
+          {{- toYaml .Values.tempo.readinessProbe | nindent 12 }}
         resources:
           {{- toYaml .Values.tempo.resources | nindent 10 }}
         {{- with .Values.tempo.securityContext }}
diff --git a/charts/tempo/values.yaml b/charts/tempo/values.yaml
index 310b8c85b5..77840070ad 100644
--- a/charts/tempo/values.yaml
+++ b/charts/tempo/values.yaml
@@ -65,6 +65,25 @@ tempo:
   server:
     # -- HTTP server listen port
     http_listen_port: 3100
+  # Readiness and Liveness Probe Configuration Options
+  livenessProbe:
+    httpGet:
+      path: /ready
+      port: 3100
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 3
+    successThreshold: 1
+  readinessProbe:
+    httpGet:
+      path: /ready
+      port: 3100
+    initialDelaySeconds: 20
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 3
+    successThreshold: 1
   storage:
     trace:
       # tempo storage backend
@@ -312,7 +331,11 @@ affinity: {}
 
 # -- The name of the PriorityClass
 priorityClassName: null
-
+# -- hostAliases to add
+hostAliases: []
+#  - ip: 1.2.3.4
+#    hostnames:
+#      - domain.tld
 networkPolicy:
   ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
   ##
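
A minimal usage sketch for the new tempo-distributed options introduced above. The storage class name "fast-ssd" and the Delete retention choices are illustrative placeholders rather than chart defaults; per-component persistence.storageClass, when set, still takes precedence over the global value via the `| default .Values.global.storageClass` lookup shown in the templates.

# Hypothetical user values override for the tempo-distributed chart.
global:
  # Fallback storage class applied to ingester and metrics-generator PVCs
  # whenever their own persistence.storageClass is left unset.
  storageClass: fast-ssd            # placeholder name

metricsGenerator:
  enabled: true
  kind: StatefulSet                 # the PVC settings only apply to the StatefulSet form
  persistence:
    enabled: true                   # creates PVCs instead of using walEmptyDir
  persistentVolumeClaimRetentionPolicy:
    enabled: true                   # renders the new persistentVolumeClaimRetentionPolicy block
    whenScaled: Delete              # Kubernetes accepts Retain (chart default) or Delete
    whenDeleted: Delete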