From 846c16c761a38d61cc583eb3b48d193c13c19163 Mon Sep 17 00:00:00 2001
From: Daniel Zaharia
Date: Wed, 11 Dec 2024 12:33:26 +0200
Subject: [PATCH 1/2] Added extra_options parameter to OpenAI and Anthropic plugins

This should have been supported from the beginning: we want access to all
the parameters of the underlying LLM APIs so that we can customize them.

One use case is observability and monitoring. I tried to integrate LiveKit
with Langfuse and could not pass the additional parameters needed for the
completions call. This PR fixes that and lets other developers customize
the LLM settings as they want, instead of waiting for each parameter to be
added by the LK team (like parallel_tool_calls or tool_choice). A short
usage sketch is appended after the patches.
---
 .../livekit/plugins/anthropic/llm.py |  4 +++
 .../livekit/plugins/openai/llm.py    | 30 +++++++++++++++++++
 2 files changed, 34 insertions(+)

diff --git a/livekit-plugins/livekit-plugins-anthropic/livekit/plugins/anthropic/llm.py b/livekit-plugins/livekit-plugins-anthropic/livekit/plugins/anthropic/llm.py
index d9e1b2a0d..5d41e7554 100644
--- a/livekit-plugins/livekit-plugins-anthropic/livekit/plugins/anthropic/llm.py
+++ b/livekit-plugins/livekit-plugins-anthropic/livekit/plugins/anthropic/llm.py
@@ -57,6 +57,7 @@ class LLMOptions:
     temperature: float | None
     parallel_tool_calls: bool | None
     tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] | None
+    extra_options: dict[str, Any] | None = None


 class LLM(llm.LLM):
@@ -71,6 +72,7 @@ def __init__(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> None:
         """
         Create a new instance of Anthropic LLM.
@@ -91,6 +93,7 @@ def __init__(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )
         self._client = client or anthropic.AsyncClient(
             api_key=api_key,
@@ -160,6 +163,7 @@ def chat(
             top_k=n or anthropic.NOT_GIVEN,
             stream=True,
             **opts,
+            **(self._opts.extra_options or {}),
         )

         return LLMStream(
diff --git a/livekit-plugins/livekit-plugins-openai/livekit/plugins/openai/llm.py b/livekit-plugins/livekit-plugins-openai/livekit/plugins/openai/llm.py
index 63d7a7981..6e9867bb0 100644
--- a/livekit-plugins/livekit-plugins-openai/livekit/plugins/openai/llm.py
+++ b/livekit-plugins/livekit-plugins-openai/livekit/plugins/openai/llm.py
@@ -63,6 +63,7 @@ class LLMOptions:
     temperature: float | None
     parallel_tool_calls: bool | None
     tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto"
+    extra_options: dict[str, Any] | None = None


 class LLM(llm.LLM):
@@ -77,6 +78,7 @@ def __init__(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> None:
         """
         Create a new instance of OpenAI LLM.
@@ -93,6 +95,7 @@ def __init__(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )
         self._client = client or openai.AsyncClient(
             api_key=api_key,
@@ -127,6 +130,7 @@ def with_azure(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         This automatically infers the following arguments from their corresponding environment variables if they are not provided:
@@ -158,6 +162,7 @@ def with_azure(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -171,6 +176,7 @@ def with_cerebras(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of Cerebras LLM.
@@ -194,6 +200,7 @@ def with_cerebras(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -206,6 +213,7 @@ def with_vertex(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of VertexAI LLM.
@@ -276,6 +284,7 @@ async def _refresh_credentials(self) -> None:
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )
         vertex_llm._capabilities = llm.LLMCapabilities(supports_choices_on_int=False)
         return vertex_llm
@@ -291,6 +300,7 @@ def with_fireworks(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of Fireworks LLM.
@@ -314,6 +324,7 @@ def with_fireworks(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -327,6 +338,7 @@ def with_x_ai(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ):
         """
         Create a new instance of XAI LLM.
@@ -349,6 +361,7 @@ def with_x_ai(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -362,6 +375,7 @@ def with_groq(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of Groq LLM.
@@ -385,6 +399,7 @@ def with_groq(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -398,6 +413,7 @@ def with_deepseek(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of DeepSeek LLM.
@@ -421,6 +437,7 @@ def with_deepseek(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -434,6 +451,7 @@ def with_octo(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of OctoAI LLM.
@@ -457,6 +475,7 @@ def with_octo(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -468,6 +487,7 @@ def with_ollama(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of Ollama LLM.
@@ -481,6 +501,7 @@ def with_ollama(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -494,6 +515,7 @@ def with_perplexity(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of PerplexityAI LLM.
@@ -517,6 +539,7 @@ def with_perplexity(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -530,6 +553,7 @@ def with_together(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of TogetherAI LLM.
@@ -553,6 +577,7 @@ def with_together(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -566,6 +591,7 @@ def with_telnyx(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         """
         Create a new instance of Telnyx LLM.
@@ -589,6 +615,7 @@ def with_telnyx(
             temperature=temperature,
             parallel_tool_calls=parallel_tool_calls,
             tool_choice=tool_choice,
+            extra_options=extra_options,
         )

     @staticmethod
@@ -608,6 +635,7 @@ def create_azure_client(
         temperature: float | None = None,
         parallel_tool_calls: bool | None = None,
         tool_choice: Union[ToolChoice, Literal["auto", "required", "none"]] = "auto",
+        extra_options: dict[str, Any] | None = None,
     ) -> LLM:
         logger.warning("This alias is deprecated. Use LLM.with_azure() instead")
         return LLM.with_azure(
Use LLM.with_azure() instead") return LLM.with_azure( @@ -624,6 +652,7 @@ def create_azure_client( temperature=temperature, parallel_tool_calls=parallel_tool_calls, tool_choice=tool_choice, + extra_options=extra_options, ) def chat( @@ -676,6 +705,7 @@ def chat( stream=True, user=user, **opts, + **self._opts.extra_options, ) return LLMStream( From 149597021bef65ddc475e2ecc9f8f07a3f9d1743 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Th=C3=A9o=20Monnom?= Date: Wed, 11 Dec 2024 16:41:49 +0100 Subject: [PATCH 2/2] Create green-phones-lick.md --- .changeset/green-phones-lick.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changeset/green-phones-lick.md diff --git a/.changeset/green-phones-lick.md b/.changeset/green-phones-lick.md new file mode 100644 index 000000000..655bc81f9 --- /dev/null +++ b/.changeset/green-phones-lick.md @@ -0,0 +1,6 @@ +--- +"livekit-plugins-anthropic": patch +"livekit-plugins-openai": patch +--- + +Added extra_options parameter to OpenAI and Anthropic plugins