Commit b25bfc6
small fixes
EMCarrami committed Oct 21, 2023
1 parent 6e40f52
Showing 3 changed files with 138 additions and 5 deletions.
.gitignore (new file): 129 additions, 0 deletions
@@ -0,0 +1,129 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
cprt/data/cprt_datamodule.py: 5 additions, 3 deletions
@@ -85,9 +85,8 @@ def __init__(
         super().__init__()
         self.batch_size = batch_size
 
-        self.protein_tokenizer = AutoTokenizer.from_pretrained(
-            f"facebook/{esm_model}", model_max_length=max_protein_length
-        )
+        self.protein_tokenizer = AutoTokenizer.from_pretrained(f"facebook/{esm_model}")
+        self.protein_tokenizer.model_max_length = max_protein_length
         self.text_tokenizer = GPT2Tokenizer.from_pretrained(language_model)
         self.text_tokenizer.pad_token = self.text_tokenizer.eos_token
         self.placeholder_length = len(
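
The hunk above loads the ESM tokenizer first and then sets its maximum length as an attribute, rather than passing model_max_length through from_pretrained. A minimal sketch of the same pattern, assuming transformers is installed; the checkpoint name and length here are hypothetical stand-ins for the module's esm_model and max_protein_length arguments:

    from transformers import AutoTokenizer

    esm_model = "esm2_t12_35M_UR50D"  # hypothetical value
    max_protein_length = 1024         # hypothetical value

    # Load first, then set the limit directly on the tokenizer; calls with
    # truncation=True will now cut inputs to max_protein_length tokens.
    protein_tokenizer = AutoTokenizer.from_pretrained(f"facebook/{esm_model}")
    protein_tokenizer.model_max_length = max_protein_length

    tokens = protein_tokenizer("MKTAYIAKQR", truncation=True)
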
@@ -114,6 +113,7 @@ def train_dataloader(self) -> DataLoader:  # type: ignore[type-arg]
             batch_size=self.batch_size,
             shuffle=True,
             collate_fn=self.collate_fn,
+            num_workers=4,
         )
 
     def val_dataloader(self) -> DataLoader:  # type: ignore[type-arg]
@@ -123,6 +123,7 @@ def val_dataloader(self) -> DataLoader:  # type: ignore[type-arg]
             batch_size=self.batch_size,
             shuffle=False,
             collate_fn=self.collate_fn,
+            num_workers=4,
         )
 
     def test_dataloader(self) -> DataLoader:  # type: ignore[type-arg]
@@ -132,6 +133,7 @@ def test_dataloader(self) -> DataLoader:  # type: ignore[type-arg]
             batch_size=self.batch_size,
             shuffle=False,
             collate_fn=self.collate_fn,
+            num_workers=4,
         )
 
     def collate_fn(self, batch: List[Tuple[str, str]]) -> CprtData:
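
All three dataloaders now pass num_workers=4, so batches are prepared by four background worker processes instead of the main process. A minimal PyTorch sketch, with a hypothetical toy dataset standing in for the module's own dataset and collate_fn:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(100, dtype=torch.float32))  # hypothetical toy data
    loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)

    for (batch,) in loader:  # batches are prefetched by the worker processes
        pass

Note that with num_workers > 0, platforms that spawn subprocesses (e.g. Windows and macOS) need the iteration wrapped in an if __name__ == "__main__": guard.
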
cprt/data/data_utils.py: 4 additions, 2 deletions
@@ -190,9 +190,11 @@ def get_uniref_cluster_data(
         gzip_info = len(gzip.compress(text_data.encode()))
 
         # keep all fields for final data_dict (set to remove duplications)
-        data_dic = {k: list(set(info_dict[k])) for k in info_fields}
+        data_dic: Dict[str, List[str] | str] = {
+            k: list(set(info_dict[k])) for k in info_fields
+        }
         data_dic |= {k: info_dict[k] for k in BASE_FIELDS}
-        data_dic["sequence"] = info_dict["sequence"]
+        data_dic["sequence"] = info_dict["sequence"][0]
 
         data_rows.append(
             (
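
The widened annotation and the [0] index reflect that most fields now hold deduplicated lists of strings while "sequence" is reduced to a single string, the first entry of the record's sequence list. A minimal sketch with a hypothetical info_dict (the X | Y annotation syntax needs Python 3.10+, and |= on dicts needs 3.9+):

    from typing import Dict, List

    # Hypothetical stand-ins for the parsed cluster record and field names.
    info_dict = {
        "function": ["kinase", "kinase", "transferase"],
        "organism": ["E. coli"],
        "sequence": ["MKTAYIAKQR", "MKTAYIAKQR"],
    }
    info_fields = ["function"]
    BASE_FIELDS = ["organism"]

    # Deduplicated lists for the info fields, base fields kept as-is,
    # and "sequence" stored as one string rather than a list.
    data_dic: Dict[str, List[str] | str] = {
        k: list(set(info_dict[k])) for k in info_fields
    }
    data_dic |= {k: info_dict[k] for k in BASE_FIELDS}
    data_dic["sequence"] = info_dict["sequence"][0]
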
