Skip to content

Commit

Permalink
sync torchx .pyre_configuration.internal with external config and upgrade Pyre release (
Browse files Browse the repository at this point in the history
#986)

Summary:
There are some issues with Pyre versions in github CI, and at the moment we aren't getting type errors.

Making a PR to
(a) bump to Ubuntu 24.04, because the root cause is a glibc issue
(b) upgrade to the 2024-11-25 release
(c) suppress errors

I'm making a fresh PR because the original one doesn't seem to trigger GitHub CI (possibly because it was exported from a diff).


Test Plan: Ran github CI, the problem is fixed.

Reviewed By: jesszzzz

Differential Revision: D66994745

Pulled By: stroxler
  • Loading branch information
stroxler authored Dec 10, 2024
1 parent 5c2db0e commit c8a633a
Show file tree
Hide file tree
Showing 7 changed files with 14 additions and 8 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pyre.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ on:

jobs:
pyre:
runs-on: ubuntu-20.04
runs-on: ubuntu-24.04
steps:
- name: Setup Python
uses: actions/setup-python@v2
Expand Down
6 changes: 3 additions & 3 deletions torchx/examples/apps/lightning/data.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,17 +64,17 @@ def __len__(self) -> int:
# our trainer and other components that need to load data.


# pyre-fixme[13]: Attribute `test_ds` is never initialized.
# pyre-fixme[13]: Attribute `train_ds` is never initialized.
# pyre-fixme[13]: Attribute `val_ds` is never initialized.
class TinyImageNetDataModule(pl.LightningDataModule):
"""
TinyImageNetDataModule is a pytorch LightningDataModule for the tiny
imagenet dataset.
"""

# pyre-fixme[13]: Attribute `test_ds` is never initialized.
train_ds: ImageFolderSamplesDataset
# pyre-fixme[13]: Attribute `train_ds` is never initialized.
val_ds: ImageFolderSamplesDataset
# pyre-fixme[13]: Attribute `val_ds` is never initialized.
test_ds: ImageFolderSamplesDataset

def __init__(
Expand Down
2 changes: 2 additions & 0 deletions torchx/examples/apps/tracker/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,8 @@ def test(
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# pyre-fixme[58]: `+` is not supported for operand types `int` and
# `Union[bool, float, int]`.
test_loss += F.nll_loss(
output, target, reduction="sum"
).item() # sum up batch loss
Expand Down
4 changes: 3 additions & 1 deletion torchx/pipelines/kfp/adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,9 @@ def component_spec_from_app(app: api.AppDef) -> Tuple[str, api.Role]:

role = app.roles[0]
assert (
role.num_replicas == 1
role.num_replicas
== 1
# pyre-fixme[16]: `AppDef` has no attribute `num_replicas`.
), f"KFP adapter only supports one replica, got {app.num_replicas}"

command = [role.entrypoint, *role.args]
Expand Down
2 changes: 2 additions & 0 deletions torchx/schedulers/aws_batch_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -809,6 +809,8 @@ def _stream_events(
startFromHead=True,
**args,
)
# pyre-fixme[66]: Exception handler type annotation `unknown` must
# extend BaseException.
except self._log_client.exceptions.ResourceNotFoundException:
return [] # noqa: B901
if response["nextForwardToken"] == next_token:
Expand Down
4 changes: 2 additions & 2 deletions torchx/schedulers/aws_sagemaker_scheduler.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,9 +267,9 @@ def _submit_dryrun(
raise ValueError(
f"{key} is controlled by aws_sagemaker_scheduler and is set to {job_def[key]}"
)
value = cfg.get(key) # pyre-ignore[26]
value = cfg.get(key) # type: ignore
if value is not None:
job_def[key] = value
job_def[key] = value # type: ignore

req = AWSSageMakerJob(
job_name=job_name,
Expand Down
2 changes: 1 addition & 1 deletion torchx/schedulers/ray/ray_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def load_actor_json(filename: str) -> List[RayActor]:
return actors


def create_placement_group_async(replicas: List[RayActor]) -> PlacementGroup:
def create_placement_group_async(replicas: List[RayActor]) -> PlacementGroup: # type: ignore
"""return a placement group reference, the corresponding placement group could be scheduled or pending"""
bundles = []
for replica in replicas:
Expand Down

0 comments on commit c8a633a

Please sign in to comment.