diff --git a/gptme/cli.py b/gptme/cli.py
index 79d08bb55..542382296 100644
--- a/gptme/cli.py
+++ b/gptme/cli.py
@@ -51,7 +51,7 @@
 from .prompts import get_prompt
 from .tabcomplete import register_tabcomplete
 from .tools import execute_msg, init_tools
-from .util import epoch_to_age, generate_unique_name
+from .util import epoch_to_age, generate_name
 
 logger = logging.getLogger(__name__)
 print_builtin = __builtins__["print"]  # type: ignore
diff --git a/gptme/util.py b/gptme/util.py
index 36e445b93..13d5a751c 100644
--- a/gptme/util.py
+++ b/gptme/util.py
@@ -36,20 +36,6 @@ def get_tokenizer(model: str):
     return tiktoken.get_encoding("cl100k_base")
 
 
-def len_tokens_approx(content: str | list[Message]) -> int:
-    """Approximate the number of tokens in a string by assuming tokens have len 3 (lol)."""
-    if isinstance(content, list):
-        return sum(len_tokens_approx(msg.content) for msg in content)
-    return len(content) // 3
-
-
-def msgs2text(msgs: list[Message]) -> str:
-    output = ""
-    for msg in msgs:
-        output += f"{msg.role.capitalize()}: {msg.content}\n\n"
-    return output
-
-
 def msgs2dicts(msgs: list[Message]) -> list[dict]:
     """Convert a list of Message objects to a list of dicts ready to pass to an LLM."""
     return [msg.to_dict(keys=["role", "content"]) for msg in msgs]
@@ -66,10 +52,13 @@
     "crawling",
     "sneaking",
     "sprinting",
+    "flying",
+    "dancing",
+    "singing",
+    "laughing",
 ]
 adjectives = [
     "funny",
-    "red",
     "happy",
     "sad",
     "angry",
@@ -78,34 +67,55 @@
     "sneaky",
     "sleepy",
     "hungry",
+    # colors
+    "red",
+    "blue",
+    "green",
+    "pink",
+    "purple",
+    "yellow",
+    "orange",
 ]
 nouns = [
-    "walrus",
-    "pelican",
     "cat",
     "dog",
-    "elephant",
     "rat",
     "mouse",
-    "bird",
     "fish",
+    "elephant",
+    "dinosaur",
+    # birds
+    "bird",
+    "pelican",
+    # fictional
     "dragon",
     "unicorn",
-    "dinosaur",
+    "mermaid",
+    "monster",
+    "alien",
+    "robot",
+    # sea creatures
+    "whale",
+    "shark",
+    "walrus",
+    "octopus",
+    "squid",
+    "jellyfish",
+    "starfish",
+    "penguin",
+    "seal",
 ]
 
 
-def generate_unique_name():
+def generate_name():
     action = random.choice(actions)
     adjective = random.choice(adjectives)
     noun = random.choice(nouns)
-    unique_name = f"{action}-{adjective}-{noun}"
-    assert is_generated_name(unique_name)
-    return unique_name
+    return f"{action}-{adjective}-{noun}"
 
 
 def is_generated_name(name: str) -> bool:
-    """if name is a name generated by generate_unique_name"""
+    """if name is a name generated by generate_name"""
     all_words = actions + adjectives + nouns
     return name.count("-") == 2 and all(word in all_words for word in name.split("-"))
 
@@ -125,14 +135,14 @@
     return f"{age.days} days ago ({datetime.fromtimestamp(epoch).strftime('%Y-%m-%d')})"
 
 
-def print_preview(code: str, lang: str):
+def print_preview(code: str, lang: str):  # pragma: no cover
     print()
     print("[bold white]Preview[/bold white]")
     print(Syntax(code.strip(), lang))
     print()
 
 
-def ask_execute(question="Execute code?", default=True) -> bool:
+def ask_execute(question="Execute code?", default=True) -> bool:  # pragma: no cover
     # TODO: add a way to outsource ask_execute decision to another agent/LLM
     console = Console()
     choicestr = f"({'Y' if default else 'y'}/{'n' if default else 'N'})"
diff --git a/tests/test_util.py b/tests/test_util.py
index 2fa9c6ae0..aedffa603 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -1,4 +1,19 @@
+from datetime import datetime
+
 from gptme.tools import is_supported_codeblock
+from gptme.util import epoch_to_age, generate_name, is_generated_name
+
+
+def test_generate_name():
+    name = generate_name()
+    assert is_generated_name(name)
+
+
+def test_epoch_to_age():
+    epoch_today = datetime.now().timestamp()
+    assert epoch_to_age(epoch_today) == "just now"
+    epoch_yesterday = epoch_today - 24 * 60 * 60
+    assert epoch_to_age(epoch_yesterday) == "yesterday"
 
 
 def test_is_supported_codeblock():