From 4bb3a02d02e7a15ffe65a91bc3dd80774f5dc0ff Mon Sep 17 00:00:00 2001
From: Aaron Gokaslan
Date: Mon, 27 Nov 2023 17:38:08 +0000
Subject: [PATCH] [BE]: Enable Ruff + Flake8 G201,G202 logging format rule. (#114474)

Standardizes logging calls to always use logging.exception instead of
logging.error where appropriate and enforces it with a lint.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/114474
Approved by: https://github.com/jansel, https://github.com/malfet
---
 .flake8                                                 | 2 +-
 pyproject.toml                                          | 2 +-
 torch/_dynamo/guards.py                                 | 3 +--
 torch/_dynamo/utils.py                                  | 2 +-
 torch/distributed/elastic/multiprocessing/api.py        | 3 +--
 torch/distributed/elastic/timer/api.py                  | 9 ++++-----
 .../distributed/elastic/timer/file_based_local_timer.py | 8 ++++----
 torch/distributed/elastic/timer/local_timer.py          | 4 ++--
 8 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/.flake8 b/.flake8
index bca578ce563ee..1e61b459df949 100644
--- a/.flake8
+++ b/.flake8
@@ -18,7 +18,7 @@ ignore =
     # these ignores are from flake8-comprehensions; please fix!
     C407,
     # these ignores are from flake8-logging-format; please fix!
-    G100,G101,G200,G201,G202
+    G100,G101,G200
     # these ignores are from flake8-simplify. please fix or ignore with commented reason
     SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
     # flake8-simplify code styles
diff --git a/pyproject.toml b/pyproject.toml
index 279bd6fa058b8..71157c4f3cf32 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,7 +43,7 @@ ignore = [
     "F821",
     "F841",
     # these ignores are from flake8-logging-format; please fix!
-    "G101", "G201", "G202",
+    "G101",
     # these ignores are from RUFF perf; please fix!
     "PERF203", "PERF4",
     # these ignores are from PYI; please fix!
diff --git a/torch/_dynamo/guards.py b/torch/_dynamo/guards.py
index 1b068402019b0..0ef173155e2fb 100644
--- a/torch/_dynamo/guards.py
+++ b/torch/_dynamo/guards.py
@@ -1315,9 +1315,8 @@ def get_guard_fail_reason(
                 GuardFail(reason_str or "unknown reason", orig_code_map[code])
             )
     except Exception as e:
-        log.error(
+        log.exception(
             "Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval",
-            exc_info=True,
         )

     return reason_str
diff --git a/torch/_dynamo/utils.py b/torch/_dynamo/utils.py
index ba876a0fbb822..47275ea041855 100644
--- a/torch/_dynamo/utils.py
+++ b/torch/_dynamo/utils.py
@@ -400,7 +400,7 @@ def write_record_to_file(filename, exec_record):
         with open(filename, "wb") as f:
             exec_record.dump(f)
     except Exception:
-        log.error("Unable to write execution record %s", filename, exc_info=True)
+        log.exception("Unable to write execution record %s", filename)


 def count_calls(g: fx.Graph):
diff --git a/torch/distributed/elastic/multiprocessing/api.py b/torch/distributed/elastic/multiprocessing/api.py
index 32426be080107..c7c870bdb0733 100644
--- a/torch/distributed/elastic/multiprocessing/api.py
+++ b/torch/distributed/elastic/multiprocessing/api.py
@@ -477,14 +477,13 @@ def _poll(self) -> Optional[RunProcsResult]:
                 failed_proc = self._pc.processes[failed_local_rank]
                 error_filepath = self.error_files[failed_local_rank]
-                log.error(
+                log.exception(
                     "failed (exitcode: %s)"
                     " local_rank: %s (pid: %s)"
                     " of fn: %s (start_method: %s)",
                     failed_proc.exitcode,
                     failed_local_rank,
                     e.pid,
                     fn_name,
                     self.start_method,
-                    exc_info=True,
                 )
                 self.close()
diff --git a/torch/distributed/elastic/timer/api.py b/torch/distributed/elastic/timer/api.py
index 6dd3088919883..566a3d4acbc78 100644
--- a/torch/distributed/elastic/timer/api.py
+++ b/torch/distributed/elastic/timer/api.py
@@ -169,11 +169,10 @@ def _reap_worker_no_throw(self, worker_id: Any) -> bool:
         """
         try:
             return self._reap_worker(worker_id)
-        except Exception as e:
-            log.error(
+        except Exception:
+            log.exception(
                 "Uncaught exception thrown from _reap_worker(), "
                 "check that the implementation correctly catches exceptions",
-                exc_info=e,
             )
             return True

@@ -181,8 +180,8 @@ def _watchdog_loop(self):
         while not self._stop_signaled:
             try:
                 self._run_watchdog()
-            except Exception as e:
-                log.error("Error running watchdog", exc_info=e)
+            except Exception:
+                log.exception("Error running watchdog")

     def _run_watchdog(self):
         batch_size = max(1, self._request_queue.size())
diff --git a/torch/distributed/elastic/timer/file_based_local_timer.py b/torch/distributed/elastic/timer/file_based_local_timer.py
index 597000c6d20d2..26ebce33dcb5b 100644
--- a/torch/distributed/elastic/timer/file_based_local_timer.py
+++ b/torch/distributed/elastic/timer/file_based_local_timer.py
@@ -225,8 +225,8 @@ def _watchdog_loop(self) -> None:
                 self._run_watchdog(fd)
                 if run_once:
                     break
-            except Exception as e:
-                log.error("Error running watchdog", exc_info=e)
+            except Exception:
+                log.exception("Error running watchdog")

     def _run_watchdog(self, fd: io.TextIOWrapper) -> None:
         timer_requests = self._get_requests(fd, self._max_interval)
@@ -328,6 +328,6 @@ def _reap_worker(self, worker_pid: int, signal: int) -> bool:
         except ProcessLookupError:
             log.info("Process with pid=%s does not exist. Skipping", worker_pid)
             return True
-        except Exception as e:
-            log.error("Error terminating pid=%s", worker_pid, exc_info=e)
+        except Exception:
+            log.exception("Error terminating pid=%s", worker_pid)
         return False
diff --git a/torch/distributed/elastic/timer/local_timer.py b/torch/distributed/elastic/timer/local_timer.py
index 240163f1bf6c0..05f467c807a5b 100644
--- a/torch/distributed/elastic/timer/local_timer.py
+++ b/torch/distributed/elastic/timer/local_timer.py
@@ -120,6 +120,6 @@ def _reap_worker(self, worker_id: int) -> bool:
         except ProcessLookupError:
             log.info("Process with pid=%s does not exist. Skipping", worker_id)
             return True
-        except Exception as e:
-            log.error("Error terminating pid=%s", worker_id, exc_info=e)
+        except Exception:
+            log.exception("Error terminating pid=%s", worker_id)
         return False
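
Note (context, not part of the patch): flake8-logging-format's G201 flags
log.error(..., exc_info=True) and suggests log.exception(...), which logs at
ERROR level and attaches the active traceback automatically; G202 flags a
redundant exc_info argument passed to a logging call. Below is a minimal,
self-contained sketch of the before/after pattern the lint enforces. The
read_config helper and the file path are hypothetical, chosen only for
illustration.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)


def read_config(path: str) -> str:
    """Hypothetical helper: return the file's contents, logging failures."""
    try:
        with open(path) as f:
            return f.read()
    except OSError:
        # Before (flagged by G201):
        #     log.error("Unable to read config %s", path, exc_info=True)
        # After: log.exception logs at ERROR level and appends the traceback
        # of the exception currently being handled, so it is only meaningful
        # inside an except block.
        log.exception("Unable to read config %s", path)
        return ""


if __name__ == "__main__":
    read_config("does_not_exist.ini")  # logs the OSError with its traceback

One practical reason for the rule beyond brevity: passing exc_info=e (as some
of the replaced call sites did) keeps an extra reference to the exception
object, while log.exception picks up the active exception implicitly and
cannot silently stop recording tracebacks if the keyword argument is dropped
in a refactor.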