Skip to content

Commit

Permalink
Revert "[BE]: Enable Ruff + Flake8 G201,G202 logging format rule. (pytorch#114474)"
Browse files Browse the repository at this point in the history

This reverts commit d30497f.

Reverted pytorch#114474 on behalf of https://github.com/huydhn due to Sorry for reverting your change, but I see a bunch of inductor failures after the commit https://hud.pytorch.org/pytorch/pytorch/commit/d30497f6b62007c9d1e3c38179528e9d25ac1292, trying to revert to see if it helps fix the issues ([comment](pytorch#114474 (comment)))
  • Loading branch information
pytorchmergebot committed Nov 27, 2023
1 parent 150aaf4 commit 8232d4d
Show file tree
Hide file tree
Showing 8 changed files with 18 additions and 15 deletions.
2 changes: 1 addition & 1 deletion .flake8
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ ignore =
# these ignores are from flake8-comprehensions; please fix!
C407,
# these ignores are from flake8-logging-format; please fix!
G100,G101,G200
G100,G101,G200,G201,G202
# these ignores are from flake8-simplify. please fix or ignore with commented reason
SIM105,SIM108,SIM110,SIM111,SIM113,SIM114,SIM115,SIM116,SIM117,SIM118,SIM119,SIM12,
# flake8-simplify code styles
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ ignore = [
"F821",
"F841",
# these ignores are from flake8-logging-format; please fix!
"G101",
"G101", "G201", "G202",
# these ignores are from RUFF perf; please fix!
"PERF203", "PERF4",
# these ignores are from PYI; please fix!
Expand Down
3 changes: 2 additions & 1 deletion torch/_dynamo/guards.py
Original file line number Diff line number Diff line change
Expand Up @@ -1315,8 +1315,9 @@ def get_guard_fail_reason(
GuardFail(reason_str or "unknown reason", orig_code_map[code])
)
except Exception as e:
log.exception(
log.error(
"Failure in guard_fail_fn callback - raising here will cause a NULL Error on guard eval",
exc_info=True,
)

return reason_str
Expand Down
2 changes: 1 addition & 1 deletion torch/_dynamo/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,7 +400,7 @@ def write_record_to_file(filename, exec_record):
with open(filename, "wb") as f:
exec_record.dump(f)
except Exception:
log.exception("Unable to write execution record %s", filename)
log.error("Unable to write execution record %s", filename, exc_info=True)


def count_calls(g: fx.Graph):
Expand Down
3 changes: 2 additions & 1 deletion torch/distributed/elastic/multiprocessing/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -477,13 +477,14 @@ def _poll(self) -> Optional[RunProcsResult]:
failed_proc = self._pc.processes[failed_local_rank]
error_filepath = self.error_files[failed_local_rank]

log.exception(
log.error(
"failed (exitcode: %s)"
" local_rank: %s (pid: %s)"
" of fn: %s (start_method: %s)",
failed_proc.exitcode,
failed_local_rank, e.pid,
fn_name, self.start_method,
exc_info=True,
)

self.close()
Expand Down
9 changes: 5 additions & 4 deletions torch/distributed/elastic/timer/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,19 +169,20 @@ def _reap_worker_no_throw(self, worker_id: Any) -> bool:
"""
try:
return self._reap_worker(worker_id)
except Exception:
log.exception(
except Exception as e:
log.error(
"Uncaught exception thrown from _reap_worker(), "
"check that the implementation correctly catches exceptions",
exc_info=e,
)
return True

def _watchdog_loop(self):
while not self._stop_signaled:
try:
self._run_watchdog()
except Exception:
log.exception("Error running watchdog")
except Exception as e:
log.error("Error running watchdog", exc_info=e)

def _run_watchdog(self):
batch_size = max(1, self._request_queue.size())
Expand Down
8 changes: 4 additions & 4 deletions torch/distributed/elastic/timer/file_based_local_timer.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,8 +225,8 @@ def _watchdog_loop(self) -> None:
self._run_watchdog(fd)
if run_once:
break
except Exception:
log.exception("Error running watchdog")
except Exception as e:
log.error("Error running watchdog", exc_info=e)

def _run_watchdog(self, fd: io.TextIOWrapper) -> None:
timer_requests = self._get_requests(fd, self._max_interval)
Expand Down Expand Up @@ -328,6 +328,6 @@ def _reap_worker(self, worker_pid: int, signal: int) -> bool:
except ProcessLookupError:
log.info("Process with pid=%s does not exist. Skipping", worker_pid)
return True
except Exception:
log.exception("Error terminating pid=%s", worker_pid)
except Exception as e:
log.error("Error terminating pid=%s", worker_pid, exc_info=e)
return False
4 changes: 2 additions & 2 deletions torch/distributed/elastic/timer/local_timer.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,6 @@ def _reap_worker(self, worker_id: int) -> bool:
except ProcessLookupError:
log.info("Process with pid=%s does not exist. Skipping", worker_id)
return True
except Exception:
log.exception("Error terminating pid=%s", worker_id)
except Exception as e:
log.error("Error terminating pid=%s", worker_id, exc_info=e)
return False

0 comments on commit 8232d4d

Please sign in to comment.