diff --git a/autogen/agentchat/contrib/agent_eval/agent_eval.py b/autogen/agentchat/contrib/agent_eval/agent_eval.py index 4ad81b72d..58abe0ed1 100644 --- a/autogen/agentchat/contrib/agent_eval/agent_eval.py +++ b/autogen/agentchat/contrib/agent_eval/agent_eval.py @@ -6,12 +6,12 @@ # SPDX-License-Identifier: MIT from typing import Literal, Optional, Union -import autogen -from autogen.agentchat.contrib.agent_eval.criterion import Criterion -from autogen.agentchat.contrib.agent_eval.critic_agent import CriticAgent -from autogen.agentchat.contrib.agent_eval.quantifier_agent import QuantifierAgent -from autogen.agentchat.contrib.agent_eval.subcritic_agent import SubCriticAgent -from autogen.agentchat.contrib.agent_eval.task import Task +from .... import GroupChat, GroupChatManager, UserProxyAgent +from .criterion import Criterion +from .critic_agent import CriticAgent +from .quantifier_agent import QuantifierAgent +from .subcritic_agent import SubCriticAgent +from .task import Task def generate_criteria( @@ -38,7 +38,7 @@ def generate_criteria( llm_config=llm_config, ) - critic_user = autogen.UserProxyAgent( + critic_user = UserProxyAgent( name="critic_user", max_consecutive_auto_reply=0, # terminate without auto-reply human_input_mode="NEVER", @@ -53,10 +53,8 @@ def generate_criteria( ) agents.append(subcritic) - groupchat = autogen.GroupChat( - agents=agents, messages=[], max_round=max_round, speaker_selection_method="round_robin" - ) - critic_manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config) + groupchat = GroupChat(agents=agents, messages=[], max_round=max_round, speaker_selection_method="round_robin") + critic_manager = GroupChatManager(groupchat=groupchat, llm_config=llm_config) critic_user.initiate_chat(critic_manager, message=task.get_sys_message()) criteria = critic_user.last_message() @@ -90,7 +88,7 @@ def quantify_criteria( llm_config=llm_config, ) - quantifier_user = autogen.UserProxyAgent( + quantifier_user = UserProxyAgent( name="quantifier_user", max_consecutive_auto_reply=0, # terminate without auto-reply human_input_mode="NEVER", diff --git a/autogen/agentchat/contrib/agent_eval/critic_agent.py b/autogen/agentchat/contrib/agent_eval/critic_agent.py index 0ac62b9bd..bf7bd8139 100644 --- a/autogen/agentchat/contrib/agent_eval/critic_agent.py +++ b/autogen/agentchat/contrib/agent_eval/critic_agent.py @@ -6,7 +6,7 @@ # SPDX-License-Identifier: MIT from typing import Optional -from autogen.agentchat.conversable_agent import ConversableAgent +from ...conversable_agent import ConversableAgent class CriticAgent(ConversableAgent): diff --git a/autogen/agentchat/contrib/agent_eval/quantifier_agent.py b/autogen/agentchat/contrib/agent_eval/quantifier_agent.py index b933afc23..c475bc9c3 100644 --- a/autogen/agentchat/contrib/agent_eval/quantifier_agent.py +++ b/autogen/agentchat/contrib/agent_eval/quantifier_agent.py @@ -6,7 +6,7 @@ # SPDX-License-Identifier: MIT from typing import Optional -from autogen.agentchat.conversable_agent import ConversableAgent +from ...conversable_agent import ConversableAgent class QuantifierAgent(ConversableAgent): diff --git a/autogen/agentchat/contrib/agent_eval/subcritic_agent.py b/autogen/agentchat/contrib/agent_eval/subcritic_agent.py index 7c0261107..3b163dbd3 100755 --- a/autogen/agentchat/contrib/agent_eval/subcritic_agent.py +++ b/autogen/agentchat/contrib/agent_eval/subcritic_agent.py @@ -6,7 +6,7 @@ # SPDX-License-Identifier: MIT from typing import Optional -from autogen.agentchat.conversable_agent import 
ConversableAgent +from ...conversable_agent import ConversableAgent class SubCriticAgent(ConversableAgent): diff --git a/autogen/agentchat/contrib/agent_optimizer.py b/autogen/agentchat/contrib/agent_optimizer.py index efc2448b0..d297f075a 100644 --- a/autogen/agentchat/contrib/agent_optimizer.py +++ b/autogen/agentchat/contrib/agent_optimizer.py @@ -8,8 +8,8 @@ import json from typing import Optional -import autogen -from autogen.code_utils import execute_code +from ... import OpenAIWrapper, filter_config +from ...code_utils import execute_code ADD_FUNC = { "type": "function", @@ -209,10 +209,8 @@ def __init__( raise ValueError( "When using OpenAI or Azure OpenAI endpoints, specify a non-empty 'model' either in 'llm_config' or in each config of 'config_list'." ) - self.llm_config["config_list"] = autogen.filter_config( - llm_config["config_list"], {"model": [self.optimizer_model]} - ) - self._client = autogen.OpenAIWrapper(**self.llm_config) + self.llm_config["config_list"] = filter_config(llm_config["config_list"], {"model": [self.optimizer_model]}) + self._client = OpenAIWrapper(**self.llm_config) def record_one_conversation(self, conversation_history: list[dict], is_satisfied: bool = None): """Record one conversation history. diff --git a/autogen/agentchat/contrib/capabilities/agent_capability.py b/autogen/agentchat/contrib/capabilities/agent_capability.py index 4987119b7..e6dda25f2 100644 --- a/autogen/agentchat/contrib/capabilities/agent_capability.py +++ b/autogen/agentchat/contrib/capabilities/agent_capability.py @@ -4,7 +4,7 @@ # # Portions derived from https://github.com/microsoft/autogen are under the MIT License. # SPDX-License-Identifier: MIT -from autogen.agentchat.assistant_agent import ConversableAgent +from ...assistant_agent import ConversableAgent class AgentCapability: diff --git a/autogen/agentchat/contrib/capabilities/teachability.py b/autogen/agentchat/contrib/capabilities/teachability.py index 420d5a02d..acb79779d 100644 --- a/autogen/agentchat/contrib/capabilities/teachability.py +++ b/autogen/agentchat/contrib/capabilities/teachability.py @@ -8,14 +8,15 @@ import pickle from typing import Optional, Union -import chromadb -from chromadb.config import Settings - -from autogen.agentchat.assistant_agent import ConversableAgent -from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability -from autogen.agentchat.contrib.text_analyzer_agent import TextAnalyzerAgent - from ....formatting_utils import colored +from ....import_utils import optional_import_block, require_optional_import +from ...assistant_agent import ConversableAgent +from ..text_analyzer_agent import TextAnalyzerAgent +from .agent_capability import AgentCapability + +with optional_import_block(): + import chromadb + from chromadb.config import Settings class Teachability(AgentCapability): @@ -238,6 +239,7 @@ def _analyze(self, text_to_analyze: Union[dict, str], analysis_instructions: Uni return self.teachable_agent.last_message(self.analyzer)["content"] +@require_optional_import("chromadb", "teachable") class MemoStore: """Provides memory storage and retrieval for a teachable agent, using a vector database. Each DB entry (called a memo) is a pair of strings: an input text and an output text. 
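The teachability hunk above establishes the pattern this patch applies across the tree: third-party imports move under optional_import_block(), which suppresses the ImportError so the module itself always imports, and each consumer is wrapped in @require_optional_import, which defers the failure to the point of use and names the extra to install. A minimal sketch of the mechanics, assuming the helper signatures shown in this diff (the Store class is a hypothetical stand-in for MemoStore):

from autogen.import_utils import optional_import_block, require_optional_import

with optional_import_block():
    import chromadb  # ImportError is suppressed; this module still imports cleanly


@require_optional_import("chromadb", "teachable")
class Store:  # hypothetical consumer
    def __init__(self) -> None:
        # Reached only if chromadb resolved; otherwise the decorator is
        # expected to raise first, pointing at the "teachable" extra.
        self.client = chromadb.Client()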
diff --git a/autogen/agentchat/contrib/capabilities/tools_capability.py b/autogen/agentchat/contrib/capabilities/tools_capability.py index b3bf13b13..068afff8b 100644 --- a/autogen/agentchat/contrib/capabilities/tools_capability.py +++ b/autogen/agentchat/contrib/capabilities/tools_capability.py @@ -2,8 +2,8 @@ # # SPDX-License-Identifier: Apache-2.0 -from autogen.agentchat import ConversableAgent -from autogen.tools import Tool +from ....agentchat import ConversableAgent +from ....tools import Tool class ToolsCapability: diff --git a/autogen/agentchat/contrib/capabilities/transforms.py b/autogen/agentchat/contrib/capabilities/transforms.py index 330bf2f3a..88ad4d701 100644 --- a/autogen/agentchat/contrib/capabilities/transforms.py +++ b/autogen/agentchat/contrib/capabilities/transforms.py @@ -11,10 +11,9 @@ import tiktoken from termcolor import colored -from autogen import token_count_utils -from autogen.cache import AbstractCache, Cache -from autogen.types import MessageContentType - +from .... import token_count_utils +from ....cache import AbstractCache, Cache +from ....types import MessageContentType from . import transforms_util from .text_compressors import LLMLingua, TextCompressor diff --git a/autogen/agentchat/contrib/capabilities/transforms_util.py b/autogen/agentchat/contrib/capabilities/transforms_util.py index a3c2524cd..ab323b8d4 100644 --- a/autogen/agentchat/contrib/capabilities/transforms_util.py +++ b/autogen/agentchat/contrib/capabilities/transforms_util.py @@ -7,10 +7,10 @@ from collections.abc import Hashable from typing import Any, Optional -from autogen import token_count_utils -from autogen.cache.abstract_cache_base import AbstractCache -from autogen.oai.openai_utils import filter_config -from autogen.types import MessageContentType +from .... import token_count_utils +from ....cache.abstract_cache_base import AbstractCache +from ....oai.openai_utils import filter_config +from ....types import MessageContentType def cache_key(content: MessageContentType, *args: Hashable) -> str: diff --git a/autogen/agentchat/contrib/capabilities/vision_capability.py b/autogen/agentchat/contrib/capabilities/vision_capability.py index bad988f33..8c69ccab2 100644 --- a/autogen/agentchat/contrib/capabilities/vision_capability.py +++ b/autogen/agentchat/contrib/capabilities/vision_capability.py @@ -7,16 +7,16 @@ import copy from typing import Callable, Optional, Union -from autogen.agentchat.assistant_agent import ConversableAgent -from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability -from autogen.agentchat.contrib.img_utils import ( +from ....code_utils import content_str +from ....oai.client import OpenAIWrapper +from ...assistant_agent import ConversableAgent +from ..img_utils import ( convert_base64_to_data_uri, get_image_data, get_pil_image, gpt4v_formatter, ) -from autogen.code_utils import content_str -from autogen.oai.client import OpenAIWrapper +from .agent_capability import AgentCapability DEFAULT_DESCRIPTION_PROMPT = ( "Write a detailed caption for this image. " diff --git a/autogen/agentchat/contrib/captainagent/agent_builder.py b/autogen/agentchat/contrib/captainagent/agent_builder.py index 56c50f6e2..26ac24746 100644 --- a/autogen/agentchat/contrib/captainagent/agent_builder.py +++ b/autogen/agentchat/contrib/captainagent/agent_builder.py @@ -15,7 +15,8 @@ from termcolor import colored -import autogen +from .... 
import AssistantAgent, ConversableAgent, OpenAIWrapper, UserProxyAgent, config_list_from_json +from ....code_utils import CODE_BLOCK_PATTERN __all__ = ["AgentBuilder"] @@ -37,7 +38,7 @@ def _config_check(config: dict): def _retrieve_json(text): - match = re.findall(autogen.code_utils.CODE_BLOCK_PATTERN, text, flags=re.DOTALL) + match = re.findall(CODE_BLOCK_PATTERN, text, flags=re.DOTALL) if not match: return text code_blocks = [] @@ -203,7 +204,7 @@ def __init__( builder_filter_dict.update({"model": builder_model}) if len(builder_model_tags) != 0: builder_filter_dict.update({"tags": builder_model_tags}) - builder_config_list = autogen.config_list_from_json( + builder_config_list = config_list_from_json( config_file_or_env, file_location=config_file_location, filter_dict=builder_filter_dict ) if len(builder_config_list) == 0: @@ -211,7 +212,7 @@ def __init__( f"Fail to initialize build manager: {builder_model}{builder_model_tags} does not exist in {config_file_or_env}. " f'If you want to change this model, please specify the "builder_model" in the constructor.' ) - self.builder_model = autogen.OpenAIWrapper(config_list=builder_config_list) + self.builder_model = OpenAIWrapper(config_list=builder_config_list) self.agent_model = agent_model if isinstance(agent_model, list) else [agent_model] self.agent_model_tags = agent_model_tags @@ -222,7 +223,7 @@ def __init__( self.agent_configs: list[dict] = [] self.open_ports: list[str] = [] self.agent_procs: dict[str, tuple[sp.Popen, str]] = {} - self.agent_procs_assign: dict[str, tuple[autogen.ConversableAgent, str]] = {} + self.agent_procs_assign: dict[str, tuple[ConversableAgent, str]] = {} self.cached_configs: dict = {} self.max_agents = max_agents @@ -239,7 +240,7 @@ def _create_agent( member_name: list[str], llm_config: dict, use_oai_assistant: Optional[bool] = False, - ) -> autogen.AssistantAgent: + ) -> AssistantAgent: """Create a group chat participant agent. If the agent rely on an open-source model, this function will automatically set up an endpoint for that agent. @@ -274,7 +275,7 @@ def _create_agent( filter_dict.update({"model": model_name_or_hf_repo}) if len(model_tags) > 0: filter_dict.update({"tags": model_tags}) - config_list = autogen.config_list_from_json( + config_list = config_list_from_json( self.config_file_or_env, file_location=self.config_file_location, filter_dict=filter_dict ) if len(config_list) == 0: @@ -287,7 +288,7 @@ def _create_agent( current_config = llm_config.copy() current_config.update({"config_list": config_list}) if use_oai_assistant: - from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent + from ..gpt_assistant_agent import GPTAssistantAgent agent = GPTAssistantAgent( name=agent_name, @@ -302,14 +303,14 @@ def _create_agent( "\nThe group also include a Computer_terminal to help you run the python and shell code." ) - model_class = autogen.AssistantAgent + model_class = AssistantAgent if agent_path: module_path, model_class_name = agent_path.replace("/", ".").rsplit(".", 1) module = importlib.import_module(module_path) model_class = getattr(module, model_class_name) - if not issubclass(model_class, autogen.ConversableAgent): + if not issubclass(model_class, ConversableAgent): logger.error(f"{model_class} is not a ConversableAgent. 
Use AssistantAgent as default") - model_class = autogen.AssistantAgent + model_class = AssistantAgent additional_config = { k: v @@ -365,10 +366,10 @@ def build( coding: Optional[bool] = None, code_execution_config: Optional[dict] = None, use_oai_assistant: Optional[bool] = False, - user_proxy: Optional[autogen.ConversableAgent] = None, + user_proxy: Optional[ConversableAgent] = None, max_agents: Optional[int] = None, **kwargs, - ) -> tuple[list[autogen.ConversableAgent], dict]: + ) -> tuple[list[ConversableAgent], dict]: """Auto build agents based on the building task. Args: @@ -496,9 +497,9 @@ def build_from_library( code_execution_config: Optional[dict] = None, use_oai_assistant: Optional[bool] = False, embedding_model: Optional[str] = "all-mpnet-base-v2", - user_proxy: Optional[autogen.ConversableAgent] = None, + user_proxy: Optional[ConversableAgent] = None, **kwargs, - ) -> tuple[list[autogen.ConversableAgent], dict]: + ) -> tuple[list[ConversableAgent], dict]: """Build agents from a library. The library is a list of agent configs, which contains the name and system_message for each agent. We use a build manager to decide what agent in that library should be involved to the task. @@ -655,8 +656,8 @@ def build_from_library( return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs) def _build_agents( - self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[autogen.ConversableAgent] = None, **kwargs - ) -> tuple[list[autogen.ConversableAgent], dict]: + self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[ConversableAgent] = None, **kwargs + ) -> tuple[list[ConversableAgent], dict]: """Build agents with generated configs. Args: @@ -687,7 +688,7 @@ def _build_agents( if coding is True: print("Adding user console proxy...", flush=True) if user_proxy is None: - user_proxy = autogen.UserProxyAgent( + user_proxy = UserProxyAgent( name="Computer_terminal", is_termination_msg=lambda x: x == "TERMINATE" or x == "TERMINATE.", code_execution_config=code_execution_config, @@ -722,7 +723,7 @@ def load( config_json: Optional[str] = None, use_oai_assistant: Optional[bool] = False, **kwargs, - ) -> tuple[list[autogen.ConversableAgent], dict]: + ) -> tuple[list[ConversableAgent], dict]: """Load building configs and call the build function to complete building without calling online LLMs' api. Args: diff --git a/autogen/agentchat/contrib/captainagent/captainagent.py b/autogen/agentchat/contrib/captainagent/captainagent.py index f71f6de03..f496333f6 100644 --- a/autogen/agentchat/contrib/captainagent/captainagent.py +++ b/autogen/agentchat/contrib/captainagent/captainagent.py @@ -8,10 +8,8 @@ from termcolor import colored -import autogen -from autogen import UserProxyAgent -from autogen.agentchat.conversable_agent import ConversableAgent - +from .... 
import GroupChat, GroupChatManager, UserProxyAgent +from ...conversable_agent import ConversableAgent from .agent_builder import AgentBuilder from .tool_retriever import ToolBuilder, format_ag2_tool, get_full_tool_description @@ -465,13 +463,13 @@ def _run_autobuild(self, group_name: str, execution_task: str, building_task: st self.build_times += 1 # start nested chat - nested_group_chat = autogen.GroupChat( + nested_group_chat = GroupChat( agents=agent_list, messages=[], allow_repeat_speaker=agent_list[:-1] if agent_configs["coding"] is True else agent_list, **self._nested_config["group_chat_config"], ) - manager = autogen.GroupChatManager( + manager = GroupChatManager( groupchat=nested_group_chat, llm_config=self._nested_config["group_chat_llm_config"], ) diff --git a/autogen/agentchat/contrib/captainagent/tool_retriever.py b/autogen/agentchat/contrib/captainagent/tool_retriever.py index a37b171b4..87612c2bc 100644 --- a/autogen/agentchat/contrib/captainagent/tool_retriever.py +++ b/autogen/agentchat/contrib/captainagent/tool_retriever.py @@ -16,15 +16,18 @@ from textwrap import dedent, indent from typing import Optional, Union -import pandas as pd -from sentence_transformers import SentenceTransformer, util - from .... import AssistantAgent, UserProxyAgent from ....coding import CodeExecutor, CodeExtractor, LocalCommandLineCodeExecutor, MarkdownCodeExtractor from ....coding.base import CodeBlock, CodeResult +from ....import_utils import optional_import_block, require_optional_import from ....tools import Tool, get_function_schema, load_basemodels_if_needed +with optional_import_block(): + import pandas as pd + from sentence_transformers import SentenceTransformer, util + +@require_optional_import(["pandas", "sentence_transformers"], "retrievechat") class ToolBuilder: TOOL_PROMPT_DEFAULT = """\n## Functions You have access to the following functions. They can be accessed from the module called 'functions' by their function names. 
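The dot counts in these rewritten imports encode package depth: a single dot names the importing module's own package, and each additional dot climbs one level toward the package root. The captainagent modules above sit four levels below autogen; the tool scripts that follow sit six. An illustrative mapping, excerpting lines from the hunks (these statements resolve only inside the named modules, not as a standalone script):

# In autogen/agentchat/contrib/captainagent/tool_retriever.py:
#   .    -> autogen.agentchat.contrib.captainagent  (current package)
#   ..   -> autogen.agentchat.contrib
#   ...  -> autogen.agentchat
#   .... -> autogen  (package root)
from .... import AssistantAgent, UserProxyAgent  # same as: from autogen import ...
from ....tools import Tool                       # same as: from autogen.tools import Tool

# In autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py,
# two more levels (tools, math) lie below captainagent, hence six dots:
from ......coding.func_with_reqs import with_requirements  # autogen.coding.func_with_reqs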
diff --git a/autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py b/autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py index 18f77c47a..feced5c47 100644 --- a/autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +++ b/autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["pandas", "scipy"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py index 53e9e45c1..d7a2fed0b 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import arxiv -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["arxiv"], ["arxiv"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py index 9cbc68b91..41df6fd92 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import arxiv -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["arxiv"], ["arxiv"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py index ce2a7d111..e609d5f68 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import os -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["PyMuPDF"], ["os"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py index 689b3f419..dc2642d9f 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["PyMuPDF"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py index 4d68b4090..5b9eb9fd0 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +++ 
b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py @@ -5,7 +5,7 @@ from PIL import Image -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["transformers", "torch"], ["transformers", "torch", "PIL", "os"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py index e7d78cb96..7efba600c 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 import os -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["easyocr"], ["os"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py index c96257d69..2e1dd81eb 100644 --- a/autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +++ b/autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["openai-whisper"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py b/autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py index 161a9fb42..b7d4e5809 100644 --- a/autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +++ b/autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["sympy"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py b/autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py index b13cc42d9..697dfed2a 100644 --- a/autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +++ b/autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["sympy"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py b/autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py index 5c79a8b91..c826da557 100644 --- a/autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +++ b/autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from 
autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["sympy"]) diff --git a/autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py b/autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py index b88e48b73..45f99b81a 100644 --- a/autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +++ b/autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py @@ -1,7 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 -from autogen.coding.func_with_reqs import with_requirements +from ......coding.func_with_reqs import with_requirements @with_requirements(["sympy"]) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index d0a2f6f82..fcd6e55e1 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -11,11 +11,11 @@ from collections import defaultdict from typing import Any, Optional, Union -from autogen import OpenAIWrapper -from autogen.agentchat.agent import Agent -from autogen.agentchat.assistant_agent import AssistantAgent, ConversableAgent -from autogen.oai.openai_utils import create_gpt_assistant, retrieve_assistants_by_name, update_gpt_assistant -from autogen.runtime_logging import log_new_agent, logging_enabled +from ... import OpenAIWrapper +from ...oai.openai_utils import create_gpt_assistant, retrieve_assistants_by_name, update_gpt_assistant +from ...runtime_logging import log_new_agent, logging_enabled +from ..agent import Agent +from ..assistant_agent import AssistantAgent, ConversableAgent logger = logging.getLogger(__name__) diff --git a/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py b/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py index 37eca45c0..d9ecb9330 100644 --- a/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +++ b/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py @@ -4,18 +4,22 @@ import os import warnings +from typing import Optional -from falkordb import FalkorDB, Graph -from graphrag_sdk import KnowledgeGraph, Source -from graphrag_sdk.model_config import KnowledgeGraphModelConfig -from graphrag_sdk.models import GenerativeModel -from graphrag_sdk.models.openai import OpenAiGenerativeModel -from graphrag_sdk.ontology import Ontology - +from ....import_utils import optional_import_block, require_optional_import from .document import Document from .graph_query_engine import GraphStoreQueryResult +with optional_import_block(): + from falkordb import FalkorDB, Graph + from graphrag_sdk import KnowledgeGraph, Source + from graphrag_sdk.model_config import KnowledgeGraphModelConfig + from graphrag_sdk.models import GenerativeModel + from graphrag_sdk.models.openai import OpenAiGenerativeModel + from graphrag_sdk.ontology import Ontology + +@require_optional_import(["falkordb", "graphrag_sdk"], "graph-rag-falkor-db") class FalkorGraphQueryEngine: """This is a wrapper for FalkorDB KnowledgeGraph.""" @@ -26,8 +30,8 @@ def __init__( port: int = 6379, username: str | None = None, password: str | None = None, - model: GenerativeModel = OpenAiGenerativeModel("gpt-4o"), - ontology: Ontology | None = None, + model: Optional["GenerativeModel"] = None, + ontology: Optional["Ontology"] = None, ): """Initialize a FalkorDB knowledge graph. 
Please also refer to https://github.com/FalkorDB/GraphRAG-SDK/blob/main/graphrag_sdk/kg.py @@ -50,8 +54,8 @@ def __init__( self.port = port self.username = username self.password = password - self.model = model - self.model_config = KnowledgeGraphModelConfig.with_model(model) + self.model = model or OpenAiGenerativeModel("gpt-4o") + self.model_config = KnowledgeGraphModelConfig.with_model(self.model) self.ontology = ontology self.knowledge_graph = None @@ -149,17 +153,17 @@ def delete(self) -> bool: self.falkordb.select_graph(self.ontology_table_name).delete() return True - def __get_ontology_storage_graph(self) -> Graph: + def __get_ontology_storage_graph(self) -> "Graph": return self.falkordb.select_graph(self.ontology_table_name) - def _save_ontology_to_db(self, ontology: Ontology): + def _save_ontology_to_db(self, ontology: "Ontology"): """Save graph ontology to a separate table with {graph_name}_ontology""" if self.ontology_table_name in self.falkordb.list_graphs(): raise ValueError(f"Knowledge graph {self.name} is already created.") graph = self.__get_ontology_storage_graph() ontology.save_to_graph(graph) - def _load_ontology_from_db(self) -> Ontology: + def _load_ontology_from_db(self) -> "Ontology": if self.ontology_table_name not in self.falkordb.list_graphs(): raise ValueError(f"Knowledge graph {self.name} has not been created.") graph = self.__get_ontology_storage_graph() diff --git a/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py b/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py index 4b09a2a58..2c7cb3b79 100644 --- a/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +++ b/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py @@ -4,8 +4,7 @@ from typing import Any, Optional, Union -from autogen import Agent, ConversableAgent, UserProxyAgent - +from .... import Agent, ConversableAgent, UserProxyAgent from .falkor_graph_query_engine import FalkorGraphQueryEngine from .graph_query_engine import GraphStoreQueryResult from .graph_rag_capability import GraphRagCapability diff --git a/autogen/agentchat/contrib/graph_rag/graph_rag_capability.py b/autogen/agentchat/contrib/graph_rag/graph_rag_capability.py index 70823b78b..129a0417a 100644 --- a/autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +++ b/autogen/agentchat/contrib/graph_rag/graph_rag_capability.py @@ -4,9 +4,8 @@ # # Portions derived from https://github.com/microsoft/autogen are under the MIT License. 
# SPDX-License-Identifier: MIT -from autogen.agentchat.contrib.capabilities.agent_capability import AgentCapability -from autogen.agentchat.conversable_agent import ConversableAgent - +from ...conversable_agent import ConversableAgent +from ..capabilities.agent_capability import AgentCapability from .graph_query_engine import GraphQueryEngine diff --git a/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py b/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py index afba87847..b86dbf287 100644 --- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py @@ -2,26 +2,35 @@ # # SPDX-License-Identifier: Apache-2.0 import os -from typing import Optional, TypeAlias, Union - -from llama_index.core import PropertyGraphIndex, SimpleDirectoryReader -from llama_index.core.base.embeddings.base import BaseEmbedding -from llama_index.core.indices.property_graph import ( - DynamicLLMPathExtractor, - SchemaLLMPathExtractor, -) -from llama_index.core.indices.property_graph.transformations.schema_llm import Triple -from llama_index.core.llms import LLM -from llama_index.core.readers.json import JSONReader -from llama_index.core.schema import Document as LlamaDocument -from llama_index.embeddings.openai import OpenAIEmbedding -from llama_index.graph_stores.neo4j import Neo4jPropertyGraphStore -from llama_index.llms.openai import OpenAI +import sys +from typing import Optional, Union +if sys.version_info >= (3, 10): + from typing import TypeAlias +else: + from typing_extensions import TypeAlias + +from ....import_utils import optional_import_block, require_optional_import from .document import Document, DocumentType from .graph_query_engine import GraphQueryEngine, GraphStoreQueryResult - +with optional_import_block(): + from llama_index.core import PropertyGraphIndex, SimpleDirectoryReader + from llama_index.core.base.embeddings.base import BaseEmbedding + from llama_index.core.indices.property_graph import ( + DynamicLLMPathExtractor, + SchemaLLMPathExtractor, + ) + from llama_index.core.indices.property_graph.transformations.schema_llm import Triple + from llama_index.core.llms import LLM + from llama_index.core.readers.json import JSONReader + from llama_index.core.schema import Document as LlamaDocument + from llama_index.embeddings.openai import OpenAIEmbedding + from llama_index.graph_stores.neo4j import Neo4jPropertyGraphStore + from llama_index.llms.openai import OpenAI + + +@require_optional_import("llama_index", "neo4j") class Neo4jGraphQueryEngine(GraphQueryEngine): """This class serves as a wrapper for a property graph query engine backed by LlamaIndex and Neo4j, facilitating the creating, connecting, updating, and querying of LlamaIndex property graphs. @@ -49,11 +58,11 @@ def __init__( database: str = "neo4j", username: str = "neo4j", password: str = "neo4j", - llm: LLM = OpenAI(model="gpt-4o", temperature=0.0), - embedding: BaseEmbedding = OpenAIEmbedding(model_name="text-embedding-3-small"), - entities: Optional[TypeAlias] = None, - relations: Optional[TypeAlias] = None, - schema: Optional[Union[dict[str, str], list[Triple]]] = None, + llm: Optional["LLM"] = None, + embedding: Optional["BaseEmbedding"] = None, + entities: Optional["TypeAlias"] = None, + relations: Optional["TypeAlias"] = None, + schema: Optional[Union[dict[str, str], list["Triple"]]] = None, strict: Optional[bool] = False, ): """Initialize a Neo4j Property graph. 
@@ -78,8 +87,8 @@ def __init__( self.database = database self.username = username self.password = password - self.llm = llm - self.embedding = embedding + self.llm = llm or OpenAI(model="gpt-4o", temperature=0.0) + self.embedding = embedding or OpenAIEmbedding(model_name="text-embedding-3-small") self.entities = entities self.relations = relations self.schema = schema @@ -188,7 +197,7 @@ def _clear(self) -> None: with self.graph_store._driver.session() as session: session.run("MATCH (n) DETACH DELETE n;") - def _load_doc(self, input_doc: list[Document]) -> list[LlamaDocument]: + def _load_doc(self, input_doc: list[Document]) -> list["LlamaDocument"]: """Load documents from the input files. Currently support the following file types: .csv - comma-separated values .docx - Microsoft Word diff --git a/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py b/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py index 9362cb8fe..50a379b17 100644 --- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py @@ -4,8 +4,7 @@ from typing import Any, Optional, Union -from autogen import Agent, ConversableAgent, UserProxyAgent - +from .... import Agent, ConversableAgent, UserProxyAgent from .graph_query_engine import GraphStoreQueryResult from .graph_rag_capability import GraphRagCapability from .neo4j_graph_query_engine import Neo4jGraphQueryEngine diff --git a/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py b/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py index 7b85b0c2d..8f5a6921c 100644 --- a/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +++ b/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py @@ -6,23 +6,26 @@ import logging from typing import List, Optional, Union -from neo4j import GraphDatabase -from neo4j_graphrag.embeddings import Embedder, OpenAIEmbeddings -from neo4j_graphrag.experimental.pipeline.kg_builder import SimpleKGPipeline -from neo4j_graphrag.generation import GraphRAG -from neo4j_graphrag.indexes import create_vector_index -from neo4j_graphrag.llm.openai_llm import LLMInterface, OpenAILLM -from neo4j_graphrag.retrievers import VectorRetriever - +from ....import_utils import optional_import_block, require_optional_import from .document import Document, DocumentType from .graph_query_engine import GraphQueryEngine, GraphStoreQueryResult +with optional_import_block(): + from neo4j import GraphDatabase + from neo4j_graphrag.embeddings import Embedder, OpenAIEmbeddings + from neo4j_graphrag.experimental.pipeline.kg_builder import SimpleKGPipeline + from neo4j_graphrag.generation import GraphRAG + from neo4j_graphrag.indexes import create_vector_index + from neo4j_graphrag.llm.openai_llm import LLMInterface, OpenAILLM + from neo4j_graphrag.retrievers import VectorRetriever + # Set up logging logging.basicConfig(level=logging.INFO) logging.getLogger("httpx").setLevel(logging.WARNING) logger = logging.getLogger(__name__) +@require_optional_import(["neo4j", "neo4j_graphrag"], "neo4j") class Neo4jNativeGraphQueryEngine(GraphQueryEngine): """A graph query engine implemented using the Neo4j GraphRAG SDK. 
Provides functionality to initialize a knowledge graph, @@ -35,13 +38,10 @@ def __init__( port: int = 7687, username: str = "neo4j", password: str = "password", - embeddings: Optional[Embedder] = OpenAIEmbeddings(model="text-embedding-3-large"), + embeddings: Optional["Embedder"] = None, embedding_dimension: Optional[int] = 3072, - llm: Optional[LLMInterface] = OpenAILLM( - model_name="gpt-4o", - model_params={"response_format": {"type": "json_object"}, "temperature": 0}, - ), - query_llm: Optional[LLMInterface] = OpenAILLM(model_name="gpt-4o", model_params={"temperature": 0}), + llm: Optional["LLMInterface"] = None, + query_llm: Optional["LLMInterface"] = None, entities: Optional[List[str]] = None, relations: Optional[List[str]] = None, potential_schema: Optional[List[tuple[str, str, str]]] = None, @@ -64,10 +64,13 @@ def __init__( """ self.uri = f"{host}:{port}" self.driver = GraphDatabase.driver(self.uri, auth=(username, password)) - self.embeddings = embeddings + self.embeddings = embeddings or OpenAIEmbeddings(model="text-embedding-3-large") self.embedding_dimension = embedding_dimension - self.llm = llm - self.query_llm = query_llm + self.llm = llm or OpenAILLM( + model_name="gpt-4o", + model_params={"response_format": {"type": "json_object"}, "temperature": 0}, + ) + self.query_llm = query_llm or OpenAILLM(model_name="gpt-4o", model_params={"temperature": 0}) self.entities = entities self.relations = relations self.potential_schema = potential_schema diff --git a/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py b/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py index 31175f0a6..a91f14f69 100644 --- a/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +++ b/autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py @@ -4,8 +4,7 @@ from typing import Any, Optional, Union -from autogen import Agent, ConversableAgent - +from .... import Agent, ConversableAgent from .graph_query_engine import GraphStoreQueryResult from .graph_rag_capability import GraphRagCapability from .neo4j_native_graph_query_engine import Neo4jNativeGraphQueryEngine diff --git a/autogen/agentchat/contrib/img_utils.py b/autogen/agentchat/contrib/img_utils.py index e1ca180b8..5f2fd1f47 100644 --- a/autogen/agentchat/contrib/img_utils.py +++ b/autogen/agentchat/contrib/img_utils.py @@ -15,11 +15,11 @@ import requests from ...import_utils import optional_import_block, require_optional_import +from .. import utils with optional_import_block(): from PIL import Image -from autogen.agentchat import utils # Parameters for token counting for images for different models MODEL_PARAMS = { diff --git a/autogen/agentchat/contrib/llamaindex_conversable_agent.py b/autogen/agentchat/contrib/llamaindex_conversable_agent.py index 8e90e51c3..b6751e07a 100644 --- a/autogen/agentchat/contrib/llamaindex_conversable_agent.py +++ b/autogen/agentchat/contrib/llamaindex_conversable_agent.py @@ -6,11 +6,10 @@ # SPDX-License-Identifier: MIT from typing import Optional, Union -from autogen import OpenAIWrapper -from autogen.agentchat import Agent, ConversableAgent -from autogen.agentchat.contrib.vectordb.utils import get_logger - +from ... import OpenAIWrapper from ...import_utils import optional_import_block, require_optional_import +from .. 
import Agent, ConversableAgent +from .vectordb.utils import get_logger logger = get_logger(__name__) @@ -44,7 +43,7 @@ class LLamaIndexConversableAgent(ConversableAgent): def __init__( self, name: str, - llama_index_agent: AgentRunner, + llama_index_agent: "AgentRunner", description: Optional[str] = None, **kwargs, ): @@ -85,7 +84,7 @@ def _generate_oai_reply( """Generate a reply using autogen.oai.""" user_message, history = self._extract_message_and_history(messages=messages, sender=sender) - chat_response: AgentChatResponse = self._llama_index_agent.chat(message=user_message, chat_history=history) + chat_response: "AgentChatResponse" = self._llama_index_agent.chat(message=user_message, chat_history=history) extracted_response = chat_response.response @@ -100,7 +99,7 @@ async def _a_generate_oai_reply( """Generate a reply using autogen.oai.""" user_message, history = self._extract_message_and_history(messages=messages, sender=sender) - chat_response: AgentChatResponse = await self._llama_index_agent.achat( + chat_response: "AgentChatResponse" = await self._llama_index_agent.achat( message=user_message, chat_history=history ) @@ -110,7 +109,7 @@ async def _a_generate_oai_reply( def _extract_message_and_history( self, messages: Optional[list[dict]] = None, sender: Optional[Agent] = None - ) -> tuple[str, list[ChatMessage]]: + ) -> tuple[str, list["ChatMessage"]]: """Extract the message and history from the messages.""" if not messages: messages = self._oai_messages[sender] @@ -121,7 +120,7 @@ def _extract_message_and_history( message = messages[-1].get("content", "") history = messages[:-1] - history_messages: list[ChatMessage] = [] + history_messages: list["ChatMessage"] = [] for history_message in history: content = history_message.get("content", "") role = history_message.get("role", "user") diff --git a/autogen/agentchat/contrib/llava_agent.py b/autogen/agentchat/contrib/llava_agent.py index 5f1dec12e..35cf38194 100644 --- a/autogen/agentchat/contrib/llava_agent.py +++ b/autogen/agentchat/contrib/llava_agent.py @@ -8,15 +8,17 @@ import logging from typing import Optional -import replicate import requests -from autogen.agentchat.agent import Agent -from autogen.agentchat.contrib.img_utils import get_image_data, llava_formatter -from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent -from autogen.code_utils import content_str - +from ...code_utils import content_str from ...formatting_utils import colored +from ...import_utils import optional_import_block, require_optional_import +from ..agent import Agent +from .img_utils import get_image_data, llava_formatter +from .multimodal_conversable_agent import MultimodalConversableAgent + +with optional_import_block(): + import replicate logger = logging.getLogger(__name__) @@ -95,6 +97,7 @@ def _image_reply(self, messages=None, sender=None, config=None): return True, out +@require_optional_import("replicate", "lmm") def _llava_call_binary_with_config( prompt: str, images: list, config: dict, max_new_tokens: int = 1000, temperature: float = 0.5, seed: int = 1 ): @@ -140,6 +143,7 @@ def _llava_call_binary_with_config( return output +@require_optional_import("replicate", "lmm") def llava_call_binary( prompt: str, images: list, config_list: list, max_new_tokens: int = 1000, temperature: float = 0.5, seed: int = 1 ): diff --git a/autogen/agentchat/contrib/math_user_proxy_agent.py b/autogen/agentchat/contrib/math_user_proxy_agent.py index 089053040..193827477 100644 --- 
a/autogen/agentchat/contrib/math_user_proxy_agent.py +++ b/autogen/agentchat/contrib/math_user_proxy_agent.py @@ -11,12 +11,11 @@ from pydantic import BaseModel, Extra, root_validator -from autogen._pydantic import PYDANTIC_V1 -from autogen.agentchat import Agent, UserProxyAgent -from autogen.code_utils import UNKNOWN, execute_code, extract_code, infer_lang -from autogen.math_utils import get_answer - +from ..._pydantic import PYDANTIC_V1 +from ...code_utils import UNKNOWN, execute_code, extract_code, infer_lang from ...import_utils import optional_import_block, require_optional_import +from ...math_utils import get_answer +from .. import Agent, UserProxyAgent with optional_import_block() as result: import wolframalpha diff --git a/autogen/agentchat/contrib/multimodal_conversable_agent.py b/autogen/agentchat/contrib/multimodal_conversable_agent.py index c41019f8e..9af171e39 100644 --- a/autogen/agentchat/contrib/multimodal_conversable_agent.py +++ b/autogen/agentchat/contrib/multimodal_conversable_agent.py @@ -7,15 +7,14 @@ import copy from typing import Optional, Union -from autogen import OpenAIWrapper -from autogen.agentchat import Agent, ConversableAgent -from autogen.agentchat.contrib.img_utils import ( +from ... import OpenAIWrapper +from ..._pydantic import model_dump +from ...code_utils import content_str +from .. import Agent, ConversableAgent +from ..contrib.img_utils import ( gpt4v_formatter, message_formatter_pil_to_b64, ) -from autogen.code_utils import content_str - -from ..._pydantic import model_dump DEFAULT_LMM_SYS_MSG = """You are a helpful AI assistant.""" DEFAULT_MODEL = "gpt-4-vision-preview" diff --git a/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py index cf04c4a17..9ec3979b4 100644 --- a/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +++ b/autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py @@ -7,15 +7,14 @@ import warnings from typing import Callable, Literal, Optional -from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent -from autogen.agentchat.contrib.vectordb.utils import ( +from ...import_utils import optional_import_block, require_optional_import +from ...retrieve_utils import TEXT_FORMATS, get_files_from_dir, split_files_to_chunks +from ..contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent +from ..contrib.vectordb.utils import ( chroma_results_to_query_results, filter_results_by_distance, get_logger, ) -from autogen.retrieve_utils import TEXT_FORMATS, get_files_from_dir, split_files_to_chunks - -from ...import_utils import optional_import_block, require_optional_import logger = get_logger(__name__) @@ -162,7 +161,7 @@ def retrieve_docs(self, problem: str, n_results: int = 20, search_string: str = def create_qdrant_from_dir( dir_path: str, max_tokens: int = 4000, - client: QdrantClient = None, + client: "QdrantClient" = None, collection_name: str = "all-my-documents", chunk_mode: str = "multi_lines", must_break_at_empty_line: bool = True, @@ -173,8 +172,8 @@ def create_qdrant_from_dir( extra_docs: bool = False, parallel: int = 0, on_disk: bool = False, - quantization_config: Optional[models.QuantizationConfig] = None, - hnsw_config: Optional[models.HnswConfigDiff] = None, + quantization_config: Optional["models.QuantizationConfig"] = None, + hnsw_config: Optional["models.HnswConfigDiff"] = None, payload_indexing: bool = False, qdrant_client_options: Optional[dict] = {}, ): @@ -268,12 +267,12 @@ def 
create_qdrant_from_dir( def query_qdrant( query_texts: list[str], n_results: int = 10, - client: QdrantClient = None, + client: "QdrantClient" = None, collection_name: str = "all-my-documents", search_string: str = "", embedding_model: str = "BAAI/bge-small-en-v1.5", qdrant_client_options: Optional[dict] = {}, -) -> list[list[QueryResponse]]: +) -> list[list["QueryResponse"]]: """Perform a similarity search with filters on a Qdrant collection Args: diff --git a/autogen/agentchat/contrib/retrieve_assistant_agent.py b/autogen/agentchat/contrib/retrieve_assistant_agent.py index 75a5c9dff..cc7966762 100644 --- a/autogen/agentchat/contrib/retrieve_assistant_agent.py +++ b/autogen/agentchat/contrib/retrieve_assistant_agent.py @@ -7,8 +7,8 @@ import warnings from typing import Any, Optional, Union -from autogen.agentchat.agent import Agent -from autogen.agentchat.assistant_agent import AssistantAgent +from ..agent import Agent +from ..assistant_agent import AssistantAgent class RetrieveAssistantAgent(AssistantAgent): diff --git a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py index 35aad52c4..73eb88562 100644 --- a/autogen/agentchat/contrib/retrieve_user_proxy_agent.py +++ b/autogen/agentchat/contrib/retrieve_user_proxy_agent.py @@ -10,31 +10,29 @@ import uuid from typing import Any, Callable, Literal, Optional, Union -from IPython import get_ipython - -from autogen.agentchat import UserProxyAgent -from autogen.agentchat.agent import Agent -from autogen.agentchat.contrib.vectordb.base import Document, QueryResults, VectorDB, VectorDBFactory -from autogen.agentchat.contrib.vectordb.utils import ( - chroma_results_to_query_results, - filter_results_by_distance, - get_logger, -) -from autogen.code_utils import extract_code -from autogen.retrieve_utils import ( +from ...code_utils import extract_code +from ...formatting_utils import colored +from ...import_utils import optional_import_block, require_optional_import +from ...retrieve_utils import ( TEXT_FORMATS, create_vector_db_from_dir, get_files_from_dir, query_vector_db, split_files_to_chunks, ) -from autogen.token_count_utils import count_token - -from ...formatting_utils import colored -from ...import_utils import optional_import_block, require_optional_import +from ...token_count_utils import count_token +from .. 
import UserProxyAgent +from ..agent import Agent +from ..contrib.vectordb.base import Document, QueryResults, VectorDB, VectorDBFactory +from ..contrib.vectordb.utils import ( + chroma_results_to_query_results, + filter_results_by_distance, + get_logger, +) with optional_import_block(): import chromadb + from IPython import get_ipython logger = get_logger(__name__) @@ -91,7 +89,7 @@ UPDATE_CONTEXT_IN_PROMPT = "you should reply exactly `UPDATE CONTEXT`" -@require_optional_import("chromadb", "retrievechat") +@require_optional_import(["chromadb", "IPython"], "retrievechat") class RetrieveUserProxyAgent(UserProxyAgent): """(In preview) The Retrieval-Augmented User Proxy retrieves document chunks based on the embedding similarity, and sends them along with the question to the Retrieval-Augmented Assistant diff --git a/autogen/agentchat/contrib/society_of_mind_agent.py b/autogen/agentchat/contrib/society_of_mind_agent.py index 6533974ce..0f409ceff 100644 --- a/autogen/agentchat/contrib/society_of_mind_agent.py +++ b/autogen/agentchat/contrib/society_of_mind_agent.py @@ -9,7 +9,7 @@ import traceback from typing import Callable, Literal, Optional, Union -from autogen import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper +from ... import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper class SocietyOfMindAgent(ConversableAgent): diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py index 416a99ab8..7139367bc 100644 --- a/autogen/agentchat/contrib/swarm_agent.py +++ b/autogen/agentchat/contrib/swarm_agent.py @@ -12,9 +12,8 @@ from pydantic import BaseModel -from autogen.oai import OpenAIWrapper -from autogen.tools import get_function_schema - +from ...oai import OpenAIWrapper +from ...tools import get_function_schema from ..agent import Agent from ..chat import ChatResult from ..conversable_agent import ConversableAgent diff --git a/autogen/agentchat/contrib/text_analyzer_agent.py b/autogen/agentchat/contrib/text_analyzer_agent.py index 289d5eb2e..708740d1b 100644 --- a/autogen/agentchat/contrib/text_analyzer_agent.py +++ b/autogen/agentchat/contrib/text_analyzer_agent.py @@ -6,8 +6,8 @@ # SPDX-License-Identifier: MIT from typing import Any, Literal, Optional, Union -from autogen.agentchat.agent import Agent -from autogen.agentchat.assistant_agent import ConversableAgent +from ..agent import Agent +from ..assistant_agent import ConversableAgent system_message = """You are an expert in text analysis. The user will give you TEXT to analyze. diff --git a/autogen/agentchat/contrib/vectordb/chromadb.py b/autogen/agentchat/contrib/vectordb/chromadb.py index 29feab636..03e32764d 100644 --- a/autogen/agentchat/contrib/vectordb/chromadb.py +++ b/autogen/agentchat/contrib/vectordb/chromadb.py @@ -69,7 +69,7 @@ def __init__( def create_collection( self, collection_name: str, overwrite: bool = False, get_or_create: bool = True - ) -> Collection: + ) -> "Collection": """Create a collection in the vector database. Case 1. if the collection does not exist, create the collection. Case 2. the collection exists, if overwrite is True, it will overwrite the collection. @@ -111,7 +111,7 @@ def create_collection( else: raise ValueError(f"Collection {collection_name} already exists.") - def get_collection(self, collection_name: str = None) -> Collection: + def get_collection(self, collection_name: str = None) -> "Collection": """Get the collection from the vector database. 
Args: @@ -149,7 +149,7 @@ def delete_collection(self, collection_name: str) -> None: self.active_collection = None def _batch_insert( - self, collection: Collection, embeddings=None, ids=None, metadatas=None, documents=None, upsert=False + self, collection: "Collection", embeddings=None, ids=None, metadatas=None, documents=None, upsert=False ) -> None: batch_size = int(CHROMADB_MAX_BATCH_SIZE) for i in range(0, len(documents), min(batch_size, len(documents))): diff --git a/autogen/agentchat/contrib/vectordb/mongodb.py b/autogen/agentchat/contrib/vectordb/mongodb.py index 0065273cb..9e1d1ec3f 100644 --- a/autogen/agentchat/contrib/vectordb/mongodb.py +++ b/autogen/agentchat/contrib/vectordb/mongodb.py @@ -7,18 +7,21 @@ from collections.abc import Iterable, Mapping from copy import deepcopy from time import monotonic, sleep -from typing import Any, Callable, Literal, Union +from typing import Any, Callable, Literal, Optional, Union import numpy as np -from pymongo import MongoClient, UpdateOne, errors -from pymongo.collection import Collection -from pymongo.driver_info import DriverInfo -from pymongo.operations import SearchIndexModel -from sentence_transformers import SentenceTransformer +from ....import_utils import optional_import_block, require_optional_import from .base import Document, ItemID, QueryResults, VectorDB from .utils import get_logger +with optional_import_block(): + from pymongo import MongoClient, UpdateOne, errors + from pymongo.collection import Collection + from pymongo.driver_info import DriverInfo + from pymongo.operations import SearchIndexModel + from sentence_transformers import SentenceTransformer + logger = get_logger(__name__) DEFAULT_INSERT_BATCH_SIZE = 100_000 @@ -31,6 +34,7 @@ def with_id_rename(docs: Iterable) -> list[dict[str, Any]]: return [{**{k: v for k, v in d.items() if k != "_id"}, "id": d["_id"]} for d in docs] +@require_optional_import(["pymongo", "sentence_transformers"], "retrievechat-mongodb") class MongoDBAtlasVectorDB(VectorDB): """A Collection object for MongoDB.""" @@ -38,7 +42,7 @@ def __init__( self, connection_string: str = "", database_name: str = "vector_db", - embedding_function: Callable = SentenceTransformer("all-MiniLM-L6-v2").encode, + embedding_function: Optional[Callable] = None, collection_name: str = None, index_name: str = "vector_index", overwrite: bool = False, @@ -60,7 +64,7 @@ def __init__( wait_until_document_ready: float | None | Blocking call to wait until the database indexes are ready. None, the default, means no wait. """ - self.embedding_function = embedding_function + self.embedding_function = embedding_function or SentenceTransformer("all-MiniLM-L6-v2").encode self.index_name = index_name self._wait_until_index_ready = wait_until_index_ready self._wait_until_document_ready = wait_until_document_ready @@ -82,7 +86,7 @@ def __init__( else: self.active_collection = None - def _is_index_ready(self, collection: Collection, index_name: str): + def _is_index_ready(self, collection: "Collection", index_name: str): """Check for the index name in the list of available search indexes to see if the specified index is of status READY @@ -98,7 +102,7 @@ def _is_index_ready(self, collection: Collection, index_name: str): return True return False - def _wait_for_index(self, collection: Collection, index_name: str, action: str = "create"): + def _wait_for_index(self, collection: "Collection", index_name: str, action: str = "create"): """Waits for the index action to be completed. Otherwise throws a TimeoutError. 
         Timeout set on instantiation.
@@ -115,7 +119,7 @@ def _wait_for_index(self, collection: Collection, index_name: str, action: str =
         raise TimeoutError(f"Index {self.index_name} is not ready!")

-    def _wait_for_document(self, collection: Collection, index_name: str, doc: Document):
+    def _wait_for_document(self, collection: "Collection", index_name: str, doc: Document):
         start = monotonic()
         while monotonic() - start < self._wait_until_document_ready:
             query_result = _vector_search(
@@ -146,7 +150,7 @@ def create_collection(
         collection_name: str,
         overwrite: bool = False,
         get_or_create: bool = True,
-    ) -> Collection:
+    ) -> "Collection":
         """Create a collection in the vector database and create a vector search index in the collection.

         Args:
@@ -172,7 +176,7 @@
             # get_or_create is False and the collection already exists, raise an error.
             raise ValueError(f"Collection {collection_name} already exists.")

-    def create_index_if_not_exists(self, index_name: str = "vector_index", collection: Collection = None) -> None:
+    def create_index_if_not_exists(self, index_name: str = "vector_index", collection: "Collection" = None) -> None:
         """Creates a vector search index on the specified collection in MongoDB.

         Args:
@@ -182,7 +186,7 @@ def create_index_if_not_exists(self, index_name: str = "vector_index", collectio
         if not self._is_index_ready(collection, index_name):
             self.create_vector_search_index(collection, index_name)

-    def get_collection(self, collection_name: str = None) -> Collection:
+    def get_collection(self, collection_name: str = None) -> "Collection":
         """Get the collection from the vector database.

         Args:
@@ -218,7 +222,7 @@ def delete_collection(self, collection_name: str) -> None:

     def create_vector_search_index(
         self,
-        collection: Collection,
+        collection: "Collection",
         index_name: Union[str, None] = "vector_index",
         similarity: Literal["euclidean", "cosine", "dotProduct"] = "cosine",
     ) -> None:
@@ -334,7 +338,7 @@ def insert_docs(
         self._wait_for_document(collection, self.index_name, docs[-1])

     def _insert_batch(
-        self, collection: Collection, texts: list[str], metadatas: list[Mapping[str, Any]], ids: list[ItemID]
+        self, collection: "Collection", texts: list[str], metadatas: list[Mapping[str, Any]], ids: list[ItemID]
     ) -> set[ItemID]:
         """Compute embeddings for and insert a batch of Documents into the Collection.
@@ -500,7 +504,7 @@ def retrieve_docs(
 def _vector_search(
     embedding_vector: list[float],
     n_results: int,
-    collection: Collection,
+    collection: "Collection",
     index_name: str,
     distance_threshold: float = -1.0,
     oversampling_factor=10,
diff --git a/autogen/agentchat/contrib/vectordb/pgvectordb.py b/autogen/agentchat/contrib/vectordb/pgvectordb.py
index 3ca0f8235..0b87b38e6 100644
--- a/autogen/agentchat/contrib/vectordb/pgvectordb.py
+++ b/autogen/agentchat/contrib/vectordb/pgvectordb.py
@@ -546,7 +546,7 @@ class PGVectorDB(VectorDB):
     def __init__(
         self,
         *,
-        conn: Optional[psycopg.Connection] = None,
+        conn: Optional["psycopg.Connection"] = None,
         connection_string: Optional[str] = None,
         host: Optional[str] = None,
         port: Optional[Union[int, str]] = None,
@@ -603,7 +603,7 @@ def __init__(

     def establish_connection(
         self,
-        conn: Optional[psycopg.Connection] = None,
+        conn: Optional["psycopg.Connection"] = None,
         connection_string: Optional[str] = None,
         host: Optional[str] = None,
         port: Optional[Union[int, str]] = None,
@@ -611,7 +611,7 @@
         username: Optional[str] = None,
         password: Optional[str] = None,
         connect_timeout: Optional[int] = 10,
-    ) -> psycopg.Connection:
+    ) -> "psycopg.Connection":
         """Establishes a connection to a PostgreSQL database using psycopg.

         Args:
diff --git a/autogen/agentchat/contrib/vectordb/qdrant.py b/autogen/agentchat/contrib/vectordb/qdrant.py
index 46be5d7b3..d930f9209 100644
--- a/autogen/agentchat/contrib/vectordb/qdrant.py
+++ b/autogen/agentchat/contrib/vectordb/qdrant.py
@@ -271,7 +271,7 @@ def _point_to_document(self, point) -> Document:
     def _points_to_documents(self, points) -> list[Document]:
         return [self._point_to_document(point) for point in points]

-    def _scored_point_to_document(self, scored_point: models.ScoredPoint) -> tuple[Document, float]:
+    def _scored_point_to_document(self, scored_point: "models.ScoredPoint") -> tuple[Document, float]:
         return self._point_to_document(scored_point), scored_point.score

     def _documents_to_points(self, documents: list[Document]):
@@ -290,7 +290,7 @@ def _documents_to_points(self, documents: list[Document]):
         ]
         return points

-    def _scored_points_to_documents(self, scored_points: list[models.ScoredPoint]) -> list[tuple[Document, float]]:
+    def _scored_points_to_documents(self, scored_points: list["models.ScoredPoint"]) -> list[tuple[Document, float]]:
         return [self._scored_point_to_document(scored_point) for scored_point in scored_points]

     def _validate_update_ids(self, collection_name: str, ids: list[str]) -> bool:
diff --git a/autogen/logger/file_logger.py b/autogen/logger/file_logger.py
index de351d57d..f30a7a520 100644
--- a/autogen/logger/file_logger.py
+++ b/autogen/logger/file_logger.py
@@ -143,7 +143,7 @@ def log_new_agent(self, agent: ConversableAgent, init_args: dict[str, Any] = {})

     def log_event(self, source: str | Agent, name: str, **kwargs: dict[str, Any]) -> None:
         """Log an event from an agent or a string source."""
-        from autogen import Agent
+        from .. import Agent

         # This takes an object o as input and returns a string. If the object o cannot be serialized, instead of raising an error,
         # it returns a string indicating that the object is non-serializable, along with its type's qualified name obtained using __qualname__.
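
# NOTE (reviewer sketch, not part of the patch): the vectordb hunks above all
# apply one optional-dependency pattern -- third-party imports move into an
# optional_import_block(), classes needing them are guarded with
# require_optional_import, type hints on optionally-imported names become quoted
# forward references, and import-time defaults (the SentenceTransformer(...)
# default argument) are resolved lazily. A minimal illustration, assuming only
# the import_utils API shown in these hunks; ExampleVectorDB is made up:
#
#     from typing import Callable, Optional
#
#     from autogen.import_utils import optional_import_block, require_optional_import
#
#     with optional_import_block():
#         # Import errors are recorded, not raised; the name may be undefined below.
#         from sentence_transformers import SentenceTransformer
#
#     @require_optional_import(["sentence_transformers"], "retrievechat-mongodb")
#     class ExampleVectorDB:
#         def __init__(self, embedding_function: Optional[Callable] = None):
#             # The heavy default is built lazily here instead of in the signature,
#             # so importing the module no longer instantiates a model.
#             self.embedding_function = embedding_function or SentenceTransformer("all-MiniLM-L6-v2").encode
#
#         def _load_model(self, name: str) -> "SentenceTransformer":
#             # Quoted annotation: never evaluated at import time, so it stays
#             # valid even when sentence_transformers is not installed.
#             return SentenceTransformer(name)
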
diff --git a/autogen/logger/sqlite_logger.py b/autogen/logger/sqlite_logger.py
index 9c250ae74..2771f2f3c 100644
--- a/autogen/logger/sqlite_logger.py
+++ b/autogen/logger/sqlite_logger.py
@@ -307,7 +307,7 @@ def log_new_agent(self, agent: ConversableAgent, init_args: dict[str, Any]) -> N
             agent (ConversableAgent): Agent to log.
             init_args (dict[str, Any]): Initialization arguments of the agent
         """
-        from autogen import Agent
+        from .. import Agent

         if self.con is None:
             return
diff --git a/autogen/oai/client.py b/autogen/oai/client.py
index a8b01ab9d..1e6fdd1a1 100644
--- a/autogen/oai/client.py
+++ b/autogen/oai/client.py
@@ -60,7 +60,7 @@
         RateLimitError as cerebras_RateLimitError,
     )

-    from autogen.oai.cerebras import CerebrasClient
+    from .cerebras import CerebrasClient

 if cerebras_result.is_successful:
     cerebras_import_exception: Optional[ImportError] = None
@@ -74,7 +74,7 @@
         ResourceExhausted as gemini_ResourceExhausted,
     )

-    from autogen.oai.gemini import GeminiClient
+    from .gemini import GeminiClient

 if gemini_result.is_successful:
     gemini_import_exception: Optional[ImportError] = None
@@ -88,7 +88,7 @@
         RateLimitError as anthorpic_RateLimitError,
     )

-    from autogen.oai.anthropic import AnthropicClient
+    from .anthropic import AnthropicClient

 if anthropic_result.is_successful:
     anthropic_import_exception: Optional[ImportError] = None
@@ -102,7 +102,7 @@
         SDKError as mistral_SDKError,
     )

-    from autogen.oai.mistral import MistralAIClient
+    from .mistral import MistralAIClient

 if mistral_result.is_successful:
     mistral_import_exception: Optional[ImportError] = None
@@ -113,7 +113,7 @@
 with optional_import_block() as together_result:
     from together.error import TogetherException as together_TogetherException

-    from autogen.oai.together import TogetherClient
+    from .together import TogetherClient

 if together_result.is_successful:
     together_import_exception: Optional[ImportError] = None
@@ -128,7 +128,7 @@
         RateLimitError as groq_RateLimitError,
     )

-    from autogen.oai.groq import GroqClient
+    from .groq import GroqClient

 if groq_result.is_successful:
     groq_import_exception: Optional[ImportError] = None
@@ -143,7 +143,7 @@
         TooManyRequestsError as cohere_TooManyRequestsError,
     )

-    from autogen.oai.cohere import CohereClient
+    from .cohere import CohereClient

 if cohere_result.is_successful:
     cohere_import_exception: Optional[ImportError] = None
@@ -157,7 +157,7 @@
         ResponseError as ollama_ResponseError,
     )

-    from autogen.oai.ollama import OllamaClient
+    from .ollama import OllamaClient

 if ollama_result.is_successful:
     ollama_import_exception: Optional[ImportError] = None
@@ -171,7 +171,7 @@
         ClientError as bedrock_ClientError,
     )

-    from autogen.oai.bedrock import BedrockClient
+    from .bedrock import BedrockClient

 if bedrock_result.is_successful:
     bedrock_import_exception: Optional[ImportError] = None
diff --git a/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py b/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py
index 030153164..1c7a17fc1 100644
--- a/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py
+++ b/test/agentchat/contrib/graph_rag/test_falkor_graph_rag.py
@@ -7,16 +7,17 @@
 import sys

 import pytest
-from graphrag_sdk import Attribute, AttributeType, Entity, Ontology, Relation

+from autogen.agentchat.contrib.graph_rag.document import Document, DocumentType
+from autogen.agentchat.contrib.graph_rag.falkor_graph_query_engine import (
+    FalkorGraphQueryEngine,
+    GraphStoreQueryResult,
+)
 from autogen.import_utils import optional_import_block

 with optional_import_block() as result:
-    from autogen.agentchat.contrib.graph_rag.document import Document, DocumentType
-    from autogen.agentchat.contrib.graph_rag.falkor_graph_query_engine import (
-        FalkorGraphQueryEngine,
-        GraphStoreQueryResult,
-    )
+    import falkordb  # noqa: F401
+    from graphrag_sdk import Attribute, AttributeType, Entity, Ontology, Relation

 skip = not result.is_successful
diff --git a/test/agentchat/contrib/retrievechat/test_retrievechat.py b/test/agentchat/contrib/retrievechat/test_retrievechat.py
index e40e97d16..e9eb32701 100755
--- a/test/agentchat/contrib/retrievechat/test_retrievechat.py
+++ b/test/agentchat/contrib/retrievechat/test_retrievechat.py
@@ -10,6 +10,10 @@

 import pytest

+from autogen import AssistantAgent
+from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
+    RetrieveUserProxyAgent,
+)
 from autogen.import_utils import optional_import_block

 from ....conftest import Credentials, reason
@@ -17,12 +21,9 @@
 with optional_import_block() as result:
     import chromadb
     import openai  # noqa: F401
+    from IPython import get_ipython  # noqa: F401
     from chromadb.utils import embedding_functions as ef

-    from autogen import AssistantAgent
-    from autogen.agentchat.contrib.retrieve_user_proxy_agent import (
-        RetrieveUserProxyAgent,
-    )

 skip = not result.is_successful
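
# NOTE (reviewer sketch, not part of the patch): the two test diffs above apply
# the same convention from the other side. Because the modules under test now
# defer their optional imports internally, they are imported unconditionally at
# the top of the test file, and optional_import_block() probes only the
# third-party packages so the tests can be skipped when they are missing. A
# minimal illustration; the skipif wiring is an assumption, not shown in this
# patch:
#
#     import pytest
#
#     from autogen.import_utils import optional_import_block
#
#     with optional_import_block() as result:
#         import chromadb  # noqa: F401  # probe only; failure is recorded, not raised
#
#     skip = not result.is_successful
#
#     @pytest.mark.skipif(skip, reason="chromadb is not installed")
#     def test_example() -> None:
#         ...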