Skip to content

Commit

Permalink
[BE]: Enable Ruff + Flake8 G201,G202 logging format rule. (pytorch#114474)
Browse files Browse the repository at this point in the history

Standardizes logging calls to always use logging.exception instead of logging.error where appropriate and enforces it with a lint.

Pull Request resolved: pytorch#114474
Approved by: https://github.com/jansel, https://github.com/malfet
  • Loading branch information
Skylion007 authored and pytorchmergebot committed Nov 27, 2023
1 parent 3a4dea9 commit 4bb3a02
Show file tree
Hide file tree
Showing 8 changed files with 15 additions and 18 deletions.
2 changes: 1 addition & 1 deletion .flake8
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ ignore =
# these ignores are from flake8-comprehensions; please fix!
C407,
# these ignores are from flake8-logging-format; please fix!
G100,G101,G200,G201,G202
G100,G101,G200
# these ignores are from flake8-simplify. please fix or ignore with commented reason
SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
# flake8-simplify code styles
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ ignore = [
"F821",
"F841",
# these ignores are from flake8-logging-format; please fix!
"G101", "G201", "G202",
"G101",
# these ignores are from RUFF perf; please fix!
"PERF203", "PERF4",
# these ignores are from PYI; please fix!
Expand Down
3 changes: 1 addition & 2 deletions torch/_dynamo/guards.py
Original file line number Diff line number Diff line change
Expand Up @@ -1315,9 +1315,8 @@ def get_guard_fail_reason(
GuardFail(reason_str or "unknown reason", orig_code_map[code])
)
except Exception as e:
log.error(
log.exception(
"Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval",
exc_info=True,
)

return reason_str
Expand Down
2 changes: 1 addition & 1 deletion torch/_dynamo/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,7 +400,7 @@ def write_record_to_file(filename, exec_record):
with open(filename, "wb") as f:
exec_record.dump(f)
except Exception:
log.error("Unable to write execution record %s", filename, exc_info=True)
log.exception("Unable to write execution record %s", filename)


def count_calls(g: fx.Graph):
Expand Down
3 changes: 1 addition & 2 deletions torch/distributed/elastic/multiprocessing/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -477,14 +477,13 @@ def _poll(self) -> Optional[RunProcsResult]:
failed_proc = self._pc.processes[failed_local_rank]
error_filepath = self.error_files[failed_local_rank]

log.error(
log.exception(
"failed (exitcode: %s)"
" local_rank: %s (pid: %s)"
" of fn: %s (start_method: %s)",
failed_proc.exitcode,
failed_local_rank, e.pid,
fn_name, self.start_method,
exc_info=True,
)

self.close()
Expand Down
9 changes: 4 additions & 5 deletions torch/distributed/elastic/timer/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,20 +169,19 @@ def _reap_worker_no_throw(self, worker_id: Any) -> bool:
"""
try:
return self._reap_worker(worker_id)
except Exception as e:
log.error(
except Exception:
log.exception(
"Uncaught exception thrown from _reap_worker(), "
"check that the implementation correctly catches exceptions",
exc_info=e,
)
return True

def _watchdog_loop(self):
while not self._stop_signaled:
try:
self._run_watchdog()
except Exception as e:
log.error("Error running watchdog", exc_info=e)
except Exception:
log.exception("Error running watchdog")

def _run_watchdog(self):
batch_size = max(1, self._request_queue.size())
Expand Down
8 changes: 4 additions & 4 deletions torch/distributed/elastic/timer/file_based_local_timer.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,8 +225,8 @@ def _watchdog_loop(self) -> None:
self._run_watchdog(fd)
if run_once:
break
except Exception as e:
log.error("Error running watchdog", exc_info=e)
except Exception:
log.exception("Error running watchdog")

def _run_watchdog(self, fd: io.TextIOWrapper) -> None:
timer_requests = self._get_requests(fd, self._max_interval)
Expand Down Expand Up @@ -328,6 +328,6 @@ def _reap_worker(self, worker_pid: int, signal: int) -> bool:
except ProcessLookupError:
log.info("Process with pid=%s does not exist. Skipping", worker_pid)
return True
except Exception as e:
log.error("Error terminating pid=%s", worker_pid, exc_info=e)
except Exception:
log.exception("Error terminating pid=%s", worker_pid)
return False
4 changes: 2 additions & 2 deletions torch/distributed/elastic/timer/local_timer.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,6 @@ def _reap_worker(self, worker_id: int) -> bool:
except ProcessLookupError:
log.info("Process with pid=%s does not exist. Skipping", worker_id)
return True
except Exception as e:
log.error("Error terminating pid=%s", worker_id, exc_info=e)
except Exception:
log.exception("Error terminating pid=%s", worker_id)
return False

0 comments on commit 4bb3a02

Please sign in to comment.