
build(deps): update ruff requirement from <0.9 to <0.10 #372

Merged · 2 commits · Jan 13, 2025
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.0 # keep in rough sync with pyproject.toml
+    rev: v0.9.1 # keep in rough sync with pyproject.toml
     hooks:
       - name: Ruff formatting
         id: ruff-format
2 changes: 1 addition & 1 deletion cumulus_etl/loaders/fhir/bulk_export.py
@@ -397,7 +397,7 @@ async def _download_all_ndjson_files(self, resource_json: dict, item_type: str)
         for file in files:
             count = resource_counts.get(file["type"], -1) + 1
             resource_counts[file["type"]] = count
-            filename = f'{file["type"]}.{count:03}.ndjson'
+            filename = f"{file['type']}.{count:03}.ndjson"
             coroutines.append(
                 self._download_ndjson_file(
                     file["url"],
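The f-string rewrites throughout this PR come from ruff 0.9's formatter, which now formats inside f-strings: the outer quotes follow the configured double-quote style, and string literals inside replacement fields are requoted where the syntax allows. A minimal sketch of the before/after shapes (the `file` and `count` values here are invented for illustration):

# Hypothetical values, just to show the transformation.
file = {"type": "Patient"}
count = 7

# ruff < 0.9: the outer quotes were flipped to single quotes so the
# double-quoted subscript inside the braces would not clash.
old_style = f'{file["type"]}.{count:03}.ndjson'

# ruff >= 0.9: double quotes outside, with the inner literal
# requoted to single quotes instead.
new_style = f"{file['type']}.{count:03}.ndjson"

assert old_style == new_style == "Patient.007.ndjson"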
4 changes: 2 additions & 2 deletions cumulus_etl/loaders/i2b2/oracle/query.py
@@ -39,7 +39,7 @@ def sql_visit() -> str:
     import_date = format_date("IMPORT_DATE")

     cols_dates = f"{start_date}, {end_date}, {import_date}, LENGTH_OF_STAY"
-    cols = "ENCOUNTER_NUM, PATIENT_NUM, LOCATION_CD, INOUT_CD, " f"{cols_dates}"
+    cols = f"ENCOUNTER_NUM, PATIENT_NUM, LOCATION_CD, INOUT_CD, {cols_dates}"
     return f"select {cols} \n from {Table.visit.value}" # noqa: S608

@@ -97,7 +97,7 @@ def sql_observation_fact(categories: list[str]) -> str:
         f"O.CONCEPT_CD, O.INSTANCE_NUM, {import_date}, O.TVAL_CHAR, "
         f"O.VALTYPE_CD, O.VALUEFLAG_CD, O.NVAL_NUM, O.UNITS_CD, O.OBSERVATION_BLOB"
     )
-    cols = f"{cols_patient_dim}, {cols_provider_dim}, {cols_visit_dim}, " f"{cols_obs_fact}"
+    cols = f"{cols_patient_dim}, {cols_provider_dim}, {cols_visit_dim}, {cols_obs_fact}"

     matchers = [f"(concept_cd like '{category}:%')" for category in categories]
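The two query-builder changes above are a different ruff 0.9 formatter update: implicitly concatenated string literals that fit on one line are now joined into a single literal. A small sketch of the same transformation (variable names invented for the example):

first_cols = "START_DATE, END_DATE"

# ruff < 0.9 left this leftover implicit concatenation alone.
old_style = "ENCOUNTER_NUM, PATIENT_NUM, " f"{first_cols}"

# ruff >= 0.9 joins the adjacent literals into one f-string.
new_style = f"ENCOUNTER_NUM, PATIENT_NUM, {first_cols}"

assert old_style == new_style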
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -53,7 +53,7 @@ dev = [
     "pre-commit",
     # Ruff is using minor versions for breaking changes until their 1.0 release.
     # See https://docs.astral.sh/ruff/versioning/
-    "ruff < 0.9", # keep in rough sync with pre-commit-config.yaml
+    "ruff < 0.10", # keep in rough sync with pre-commit-config.yaml
 ]

 [project.urls]
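Since ruff treats each pre-1.0 minor release as potentially breaking, the dev extra caps the version rather than pinning it exactly. To verify that a local environment satisfies the new cap, something like this works (a sketch assuming the `packaging` library is available):

from importlib.metadata import version
from packaging.specifiers import SpecifierSet

# The dev extra now allows any ruff release below 0.10.
assert version("ruff") in SpecifierSet("< 0.10")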
4 changes: 2 additions & 2 deletions tests/etl/base.py
@@ -81,9 +81,9 @@ async def run_etl(
         if batch_size:
             args.append(f"--batch-size={batch_size}")
         if tasks:
-            args.append(f'--task={",".join(tasks)}')
+            args.append(f"--task={','.join(tasks)}")
         if tags:
-            args.append(f'--task-filter={",".join(tags)}')
+            args.append(f"--task-filter={','.join(tags)}")
         if philter:
             args.append("--philter")
         if export_to:
4 changes: 2 additions & 2 deletions tests/etl/test_tasks.py
@@ -389,7 +389,7 @@ async def test_contained_medications(self):
         self.assertEqual(1, med_req_format.write_records.call_count)
         batch = med_req_format.write_records.call_args[0][0]
         self.assertEqual(
-            f'#{self.codebook.db.resource_hash("123")}',
+            f"#{self.codebook.db.resource_hash('123')}",
             batch.rows[0]["medicationReference"]["reference"],
         )

@@ -440,7 +440,7 @@ async def test_external_medications(self, mock_download):
         batch = med_req_format.write_records.call_args[0][0]
         self.assertEqual([self.codebook.db.resource_hash("A")], [row["id"] for row in batch.rows])
         self.assertEqual(
-            f'Medication/{self.codebook.db.resource_hash("123")}',
+            f"Medication/{self.codebook.db.resource_hash('123')}",
             batch.rows[0]["medicationReference"]["reference"],
         )
2 changes: 1 addition & 1 deletion tests/loaders/i2b2/test_i2b2_loader.py
@@ -34,7 +34,7 @@ async def test_duplicate_ids(self):
         common.write_text(
             f"{tmpdir}/patient_dimension.csv",
-            "PATIENT_NUM,BIRTH_DATE\n" "123,1982-10-16\n" "123,1983-11-17\n" "456,2000-01-13\n",
+            "PATIENT_NUM,BIRTH_DATE\n123,1982-10-16\n123,1983-11-17\n456,2000-01-13\n",
         )

         results = await i2b2_loader.load_resources({"Patient"})
2 changes: 1 addition & 1 deletion tests/loaders/i2b2/test_i2b2_oracle_query.py
@@ -28,7 +28,7 @@ def shorten(sql: str) -> str:
 def count_by_date_group(table: schema.Table, column_date="import_date") -> str:
     return shorten(
         f"""
-        select {count_by_date(column_date, f'{column_date}_cnt')}
+        select {count_by_date(column_date, f"{column_date}_cnt")}
         from {table.value}
         group by {query.cast_date(column_date)}
         order by {query.cast_date(column_date)} desc