From 395cc7363ca566ee5341761064e04fd9188a2867 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 22 Oct 2024 04:50:45 -0400
Subject: [PATCH] ci: pre-commit autoupdate [pre-commit.ci] (#1100)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 4 ++--
 README.md               | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index b0e43801a..41b2ab10c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,7 +8,7 @@ default_language_version:
   python: python3.11 # NOTE: sync with .python-version-default
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.6.9"
+    rev: "v0.7.0"
     hooks:
       - id: ruff
         alias: r
@@ -20,7 +20,7 @@ repos:
         verbose: true
         types_or: [python, pyi, jupyter]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.11.2"
+    rev: "v1.12.1"
     hooks:
       - id: mypy
         args: [--strict]
diff --git a/README.md b/README.md
index f3ab82d26..f80e6d351 100644
--- a/README.md
+++ b/README.md
@@ -52,7 +52,7 @@ To start an LLM server locally, use the `openllm serve` command and specify the
 > OpenLLM does not store model weights. A Hugging Face token (HF_TOKEN) is required for gated models.
 > 1. Create your Hugging Face token [here](https://huggingface.co/settings/tokens).
 > 2. Request access to the gated model, such as [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B).
-> 3. Set your token as an environment variable by running: 
+> 3. Set your token as an environment variable by running:
 > ```bash
 > export HF_TOKEN=
 > ```