diff --git a/collector-dashboards/otel-collector-hikaricp-prom-dashboard/README.md b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/README.md new file mode 100644 index 0000000..6fd10be --- /dev/null +++ b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/README.md @@ -0,0 +1,68 @@ +--- +# Ingest metrics using the HikariCP integration + +The OTel Collector has a variety of [third party receivers](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/master/receiver) that provide integration with a wide variety of metric sources. + +Please note that not all metrics receivers available for the OpenTelemetry Collector have been tested by ServiceNow Cloud Observability, and there may be bugs or unexpected issues in using these contributed receivers with ServiceNow Cloud Observability metrics. File any issues with the appropriate OpenTelemetry community. +{: .callout} + +## Prerequisites for local installation + +You must have a ServiceNow Cloud Observability [access token](/docs/create-and-manage-access-tokens) for the project to report metrics to. + +## Running the Example + +### Run the example: + +#### 1. Run HikariCP & the collector + +Next, install the collector and point it to the HikariCP metrics endpoint, "host.docker.internal:8080" in this case. + +```bash +docker compose up -d +``` + +## Configuration + +Installation procedures for the OpenTelemetry Collector vary by deployment mode. Please refer to the [collector documentation](https://opentelemetry.io/docs/collector/) for more information. 
+ + +The following example configuration collects metrics from HikariCP and sends them into ServiceNow Cloud Observability: + +```yaml +receivers: + prometheus: + config: + scrape_configs: + - job_name: 'hikaricp' + scrape_interval: 20s + scrape_timeout: 20s + metrics_path: '/actuator/prometheus' + tls_config: + insecure_skip_verify: true + scheme: http + static_configs: + - targets: ['host.docker.internal:8080'] + +exporters: + logging: + loglevel: debug + otlp: + endpoint: ingest.lightstep.com:443 + headers: + "lightstep-access-token": "${LS_ACCESS_TOKEN}" + + +processors: + batch: + +service: + telemetry: + logs: + level: DEBUG + pipelines: + metrics: + receivers: [prometheus] + processors: [batch] + exporters: [logging, otlp] +``` \ No newline at end of file diff --git a/collector-dashboards/otel-collector-hikaricp-prom-dashboard/main.tf b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/main.tf new file mode 100644 index 0000000..d93222b --- /dev/null +++ b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/main.tf @@ -0,0 +1,329 @@ +terraform { + required_providers { + lightstep = { + source = "lightstep/lightstep" + version = "~> 1.70.10" + } + } + required_version = ">= 1.0.11" +} + +resource "lightstep_dashboard" "otel_collector_hikaricp_dashboard" { + project_name = var.lightstep_project + dashboard_name = "HikariCP and System Metrics Dashboard" + dashboard_description = "Monitor HikariCP and System Metrics." 
+ + chart { + name = "Total Connections" + rank = "0" + type = "timeseries" + query { + query_name = "a" + display = "line" + hidden = false + query_string = "metric hikaricp_connections | latest | group_by [], sum" + } + } + + chart { + name = "Active Connections" + rank = "1" + type = "timeseries" + query { + query_name = "b" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_active | latest | group_by [], sum" + } + } + + chart { + name = "Connection Creation Time" + rank = "2" + type = "timeseries" + query { + query_name = "c" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_creation_seconds | rate | group_by [], sum" + } + } + + chart { + name = "Idle Connections" + rank = "3" + type = "timeseries" + query { + query_name = "d" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_idle | latest | group_by [], sum" + } + } + + chart { + name = "Max Connections" + rank = "4" + type = "timeseries" + query { + query_name = "e" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_max | latest | group_by [], sum" + } + } + + chart { + name = "Pending Threads" + rank = "5" + type = "timeseries" + query { + query_name = "f" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_pending | latest | group_by [], sum" + } + } + + chart { + name = "Min Connections" + rank = "6" + type = "timeseries" + query { + query_name = "g" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_min | latest | group_by [], sum" + } + } + + chart { + name = "Connection Timeout Total Count" + rank = "7" + type = "timeseries" + query { + query_name = "h" + display = "line" + hidden = false + query_string = "metric hikaricp_connections_timeout_total | rate | group_by [], sum" + } + } + + chart { + name = "Connection Usage Time" + rank = "8" + type = "timeseries" + query { + query_name = "i" + display = "line" + hidden = false + 
query_string = "metric hikaricp_connections_usage_seconds | rate | group_by [], sum" + } + } + + chart { + name = "System CPU Usage" + rank = "9" + type = "timeseries" + query { + query_name = "j" + display = "line" + hidden = false + query_string = "metric system_cpu_usage | latest | group_by [], sum" + } + } + + chart { + name = "System CPU Count" + rank = "10" + type = "timeseries" + query { + query_name = "k" + display = "line" + hidden = false + query_string = "metric system_cpu_count | latest | group_by [], sum" + } + } + + chart { + name = "JVM Memory Used" + rank = "11" + type = "timeseries" + query { + query_name = "l" + display = "line" + hidden = false + query_string = "metric jvm_memory_used_bytes | latest | group_by [], sum" + } + } + + chart { + name = "JVM Threads Live" + rank = "12" + type = "timeseries" + query { + query_name = "m" + display = "line" + hidden = false + query_string = "metric jvm_threads_live_threads | latest | group_by [], sum" + } + } + + chart { + name = "JVM Buffer Count Buffers" + rank = "13" + type = "timeseries" + query { + query_name = "n" + display = "line" + hidden = false + query_string = "metric jvm_buffer_count_buffers | latest | group_by [], sum" + } + } + + chart { + name = "JVM Buffer Memory Used Bytes" + rank = "14" + type = "timeseries" + query { + query_name = "o" + display = "line" + hidden = false + query_string = "metric jvm_buffer_memory_used_bytes | latest | group_by [], sum" + } + } + + chart { + name = "JVM Buffer Total Capacity Bytes" + rank = "15" + type = "timeseries" + query { + query_name = "p" + display = "line" + hidden = false + query_string = "metric jvm_buffer_total_capacity_bytes | latest | group_by [], sum" + } + } + + chart { + name = "Process CPU Usage" + rank = "16" + type = "timeseries" + query { + query_name = "s" + display = "line" + hidden = false + query_string = "metric process_cpu_usage | latest | group_by [], sum" + } + } + + chart { + name = "Process Uptime Seconds" + rank = "17" + type = 
"timeseries" + query { + query_name = "t" + display = "line" + hidden = false + query_string = "metric process_uptime_seconds | latest | group_by [], sum" + } + } + + chart { + name = "System Load Average 1m" + rank = "18" + type = "timeseries" + query { + query_name = "u" + display = "line" + hidden = false + query_string = "metric system_load_average_1m | latest | group_by [], sum" + } + } + + chart { + name = "Tomcat Active Current Sessions" + rank = "21" + type = "timeseries" + query { + query_name = "v" + display = "line" + hidden = false + query_string = "metric tomcat_sessions_active_current_sessions | latest | group_by [], sum" + } + } + + chart { + name = "Tomcat Max Active Sessions" + rank = "22" + type = "timeseries" + query { + query_name = "w" + display = "line" + hidden = false + query_string = "metric tomcat_sessions_active_max_sessions | latest | group_by [], sum" + } + } + + chart { + name = "Tomcat Sessions Alive Max Seconds" + rank = "23" + type = "timeseries" + query { + query_name = "x" + display = "line" + hidden = false + query_string = "metric tomcat_sessions_alive_max_seconds | latest | group_by [], sum" + } + } + + chart { + name = "Tomcat Sessions Created Total" + rank = "24" + type = "timeseries" + query { + query_name = "y" + display = "line" + hidden = false + query_string = "metric tomcat_sessions_created_sessions_total | rate | group_by [], sum" + } + } + + chart { + name = "Tomcat Sessions Expired Total" + rank = "25" + type = "timeseries" + query { + query_name = "z" + display = "line" + hidden = false + query_string = "metric tomcat_sessions_expired_sessions_total | rate | group_by [], sum" + } + } + + chart { + name = "System CPU Count" + rank = "26" + type = "timeseries" + query { + query_name = "ab" + display = "line" + hidden = false + query_string = "metric system_cpu_count | latest | group_by [], sum" + } + } + + chart { + name = "System CPU Usage" + rank = "27" + type = "timeseries" + query { + query_name = "ac" + display = "line" + 
hidden = false + query_string = "metric system_cpu_usage | latest | group_by [], sum" + } + } + +} + diff --git a/collector-dashboards/otel-collector-hikaricp-prom-dashboard/outputs.tf b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/outputs.tf new file mode 100644 index 0000000..a8bf26d --- /dev/null +++ b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/outputs.tf @@ -0,0 +1,4 @@ +output "dashboard_url" { + value = "https://app.lightstep.com/${var.lightstep_project}/dashboard/${lightstep_dashboard.otel_collector_hikaricp_dashboard.id}" + description = "OpenTelemetry HikariCP Dashboard URL" +} diff --git a/collector-dashboards/otel-collector-hikaricp-prom-dashboard/variables.tf b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/variables.tf new file mode 100644 index 0000000..21ee69f --- /dev/null +++ b/collector-dashboards/otel-collector-hikaricp-prom-dashboard/variables.tf @@ -0,0 +1,4 @@ +variable "lightstep_project" { + description = "Name of Lightstep project" + type = string +}