From 6bcc3986a3bece1e231b5f0e567de8e2e163823a Mon Sep 17 00:00:00 2001
From: Naved Ansari
Date: Mon, 21 Oct 2024 15:47:33 -0400
Subject: [PATCH] This provides a way to pass in hours to be ignored via the command line.

---
 openshift_metrics/invoice.py |  9 +++++----
 openshift_metrics/merge.py   | 27 +++++++++++++++++++++++++--
 openshift_metrics/utils.py   |  9 +++++----
 3 files changed, 35 insertions(+), 10 deletions(-)

diff --git a/openshift_metrics/invoice.py b/openshift_metrics/invoice.py
index d1f194b..a39a37c 100644
--- a/openshift_metrics/invoice.py
+++ b/openshift_metrics/invoice.py
@@ -1,7 +1,7 @@
 import math
 from dataclasses import dataclass, field
 from collections import namedtuple
-from typing import List, Tuple
+from typing import List, Tuple, Optional
 from decimal import Decimal, ROUND_HALF_UP
 import datetime
 
@@ -138,7 +138,7 @@ def get_runtime(
     def end_time(self) -> int:
         return self.start_time + self.duration
 
-    def generate_pod_row(self):
+    def generate_pod_row(self, ignore_times):
         """
         This returns a row to represent pod data.
         It converts the epoch_time stamps to datetime timestamps so it's more readable.
@@ -154,7 +154,7 @@ def generate_pod_row(self):
         memory_request = self.memory_request.quantize(
             Decimal(".0001"), rounding=ROUND_HALF_UP
         )
-        runtime = self.get_runtime().quantize(Decimal(".0001"), rounding=ROUND_HALF_UP)
+        runtime = self.get_runtime(ignore_times).quantize(Decimal(".0001"), rounding=ROUND_HALF_UP)
         return [
             self.namespace,
             start_time,
@@ -195,6 +195,7 @@ class ProjectInvoce:
     intitution: str
     institution_specific_code: str
     rates: Rates
+    ignore_hours: Optional[List[Tuple[datetime.datetime, datetime.datetime]]] = None
     su_hours: dict = field(
         default_factory=lambda: {
             SU_CPU: 0,
@@ -210,7 +211,7 @@ class ProjectInvoce:
     def add_pod(self, pod: Pod) -> None:
         """Aggregate a pods data"""
         su_type, su_count, _ = pod.get_service_unit()
-        duration_in_hours = pod.get_runtime()
+        duration_in_hours = pod.get_runtime(self.ignore_hours)
         self.su_hours[su_type] += su_count * duration_in_hours
 
     def get_rate(self, su_type) -> Decimal:
diff --git a/openshift_metrics/merge.py b/openshift_metrics/merge.py
index 0954015..cd41bbd 100644
--- a/openshift_metrics/merge.py
+++ b/openshift_metrics/merge.py
@@ -5,6 +5,7 @@
 import argparse
 from datetime import datetime
 import json
+from typing import Tuple
 
 import utils
 
@@ -17,6 +18,19 @@ def compare_dates(date_str1, date_str2):
     return date1 < date2
 
 
+def parse_timestamp_range(timestamp_range: str) -> Tuple[datetime, datetime]:
+    try:
+        start_str, end_str = timestamp_range.split(",")
+        start_dt = datetime.fromisoformat(start_str)
+        end_dt = datetime.fromisoformat(end_str)
+        if start_dt > end_dt:
+            raise argparse.ArgumentTypeError("Ignore start time is after ignore end time")
+        return start_dt, end_dt
+    except ValueError:
+        raise argparse.ArgumentTypeError(
+            "Timestamp range must be in the format 'YYYY-MM-DDTHH:MM:SS,YYYY-MM-DDTHH:MM:SS'"
+        )
+
 def main():
     """Reads the metrics from files and generates the reports"""
     parser = argparse.ArgumentParser()
@@ -26,6 +40,13 @@ def main():
         "--upload-to-s3",
         action="store_true"
     )
+    parser.add_argument(
+        "--ignore-hours",
+        type=parse_timestamp_range,
+        nargs="*",
+        help="List of timestamp ranges to ignore in the format 'YYYY-MM-DDTHH:MM:SS,YYYY-MM-DDTHH:MM:SS'"
+    )
+
     args = parser.parse_args()
     files = args.files
 
@@ -33,6 +54,7 @@ def main():
         output_file = args.output_file
     else:
         output_file = f"{datetime.today().strftime('%Y-%m-%d')}.csv"
+    ignore_hours = args.ignore_hours
 
     report_start_date = None
     report_end_date = None
@@ -77,9 +99,10 @@ def main():
     utils.write_metrics_by_namespace(
         condensed_metrics_dict,
         output_file,
-        report_month
+        report_month,
+        ignore_hours,
     )
-    utils.write_metrics_by_pod(condensed_metrics_dict, "pod-" + output_file)
+    utils.write_metrics_by_pod(condensed_metrics_dict, "pod-" + output_file, ignore_hours)
 
     if args.upload_to_s3:
         primary_location = (
diff --git a/openshift_metrics/utils.py b/openshift_metrics/utils.py
index 1ea9012..6a10c8d 100755
--- a/openshift_metrics/utils.py
+++ b/openshift_metrics/utils.py
@@ -110,7 +110,7 @@ def csv_writer(rows, file_name):
         csvwriter.writerows(rows)
 
 
-def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month):
+def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month, ignore_hours=None):
     """
     Process metrics dictionary to aggregate usage by namespace and then write that to a file
     """
@@ -157,7 +157,8 @@ def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month):
             invoice_address="",
             intitution="",
             institution_specific_code=cf_institution_code,
-            rates=rates
+            rates=rates,
+            ignore_hours=ignore_hours,
         )
         invoices[namespace] = project_invoice
 
@@ -186,7 +187,7 @@ def write_metrics_by_namespace(condensed_metrics_dict, file_name, report_month):
     csv_writer(rows, file_name)
 
 
-def write_metrics_by_pod(condensed_metrics_dict, file_name):
+def write_metrics_by_pod(condensed_metrics_dict, file_name, ignore_hours=None):
     """
     Generates metrics report by pod.
     """
@@ -227,6 +228,6 @@ def write_metrics_by_pod(condensed_metrics_dict, file_name):
             node_hostname=pod_metric_dict.get("node", "Unknown Node"),
             node_model=pod_metric_dict.get("node_model", "Unknown Model"),
         )
-        rows.append(pod_obj.generate_pod_row())
+        rows.append(pod_obj.generate_pod_row(ignore_hours))
 
     csv_writer(rows, file_name)
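
Example invocation with the new --ignore-hours flag. The positional metrics-file arguments are assumed from the args.files usage above, and the file name here is a placeholder:

    python merge.py metrics_2024_10.json \
        --ignore-hours 2024-10-09T00:00:00,2024-10-09T12:00:00 2024-10-15T08:00:00,2024-10-15T10:00:00

Because the option uses nargs="*", several space-separated ranges may be passed; each one is validated and converted to a (datetime, datetime) pair by parse_timestamp_range before the reports are written.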
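
This diff passes the ignore ranges into Pod.get_runtime() but does not include get_runtime() itself. A minimal sketch of the overlap subtraction the callers above appear to rely on; the function name, epoch-second fields, and clamping logic are illustrative assumptions, not the repository's actual implementation:

    from decimal import Decimal
    from typing import List, Optional, Tuple
    import datetime

    def runtime_excluding_ignored(
        start_time: int,   # pod start in epoch seconds, like Pod.start_time
        duration: int,     # pod runtime in seconds, like Pod.duration
        ignore_times: Optional[List[Tuple[datetime.datetime, datetime.datetime]]] = None,
    ) -> Decimal:
        """Return the runtime in hours, minus any overlap with the ignored windows."""
        end_time = start_time + duration
        remaining_seconds = duration
        for ignore_start, ignore_end in ignore_times or []:
            # Naive datetimes from parse_timestamp_range are interpreted in local time here.
            # Clamp each ignored window to the pod's lifetime and subtract the overlap.
            overlap_start = max(start_time, int(ignore_start.timestamp()))
            overlap_end = min(end_time, int(ignore_end.timestamp()))
            if overlap_end > overlap_start:
                remaining_seconds -= overlap_end - overlap_start
        return Decimal(remaining_seconds) / 3600

This simple version does not merge ignore windows first, so ranges that overlap each other would be subtracted twice; a real implementation would want to coalesce them.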