From 0a51416813509424f9cbd9991abf9379b98410d6 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Tue, 21 Jan 2025 22:59:20 +0100
Subject: [PATCH] fix failing tests with optional imports

---
 test/cache/test_cache.py  |  12 +-
 test/oai/test_together.py | 433 +++++++++++++++++++-------------------
 2 files changed, 218 insertions(+), 227 deletions(-)

diff --git a/test/cache/test_cache.py b/test/cache/test_cache.py
index 4ecf0fa56..762e7755b 100755
--- a/test/cache/test_cache.py
+++ b/test/cache/test_cache.py
@@ -15,7 +15,7 @@
 with optional_import_block() as result:
     from azure.cosmos import CosmosClient
 
-skip_azure = not result.is_successful
+skip_azure_cosmos = not result.is_successful
 
 
 class TestCache(unittest.TestCase):
@@ -31,7 +31,7 @@ def setUp(self):
                 "database_id": "autogen_cache",
                 "container_id": "TestContainer",
                 "cache_seed": "42",
-                "client": MagicMock(spec=CosmosClient),
+                "client": MagicMock(spec=CosmosClient) if not skip_azure_cosmos else MagicMock(),
             }
         }
 
@@ -42,7 +42,7 @@ def test_redis_cache_initialization(self, mock_cache_factory):
         mock_cache_factory.assert_called()
 
     @patch("autogen.cache.cache_factory.CacheFactory.cache_factory", return_value=MagicMock())
-    @unittest.skipIf(skip_azure, "requires azure.cosmos")
+    @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos")
     def test_cosmosdb_cache_initialization(self, mock_cache_factory):
         cache = Cache(self.cosmos_config)
         self.assertIsInstance(cache.cache, MagicMock)
@@ -71,7 +71,7 @@ def context_manager_common(self, config):
     def test_redis_context_manager(self):
         self.context_manager_common(self.redis_config)
 
-    @unittest.skipIf(skip_azure, "requires azure.cosmos")
+    @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos")
     def test_cosmos_context_manager(self):
         self.context_manager_common(self.cosmos_config)
 
@@ -90,7 +90,7 @@ def get_set_common(self, config):
     def test_redis_get_set(self):
         self.get_set_common(self.redis_config)
 
-    @unittest.skipIf(skip_azure, "requires azure.cosmos")
+    @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos")
     def test_cosmos_get_set(self):
         self.get_set_common(self.cosmos_config)
 
@@ -104,7 +104,7 @@ def close_common(self, config):
     def test_redis_close(self):
         self.close_common(self.redis_config)
 
-    @unittest.skipIf(skip_azure, "requires azure.cosmos")
+    @unittest.skipIf(skip_azure_cosmos, "requires azure.cosmos")
     def test_cosmos_close(self):
         self.close_common(self.cosmos_config)
 
diff --git a/test/oai/test_together.py b/test/oai/test_together.py
index 876b2c2e1..705b509cc 100644
--- a/test/oai/test_together.py
+++ b/test/oai/test_together.py
@@ -8,14 +8,8 @@
 import pytest
 
-from autogen.import_utils import optional_import_block
-
-with optional_import_block() as result:
-    from openai.types.chat.chat_completion import ChatCompletionMessage, Choice  # noqa: F401
-
-    from autogen.oai.together import TogetherClient, calculate_together_cost
-
-skip = not result.is_successful
+from autogen.import_utils import skip_on_missing_imports
+from autogen.oai.together import TogetherClient, calculate_together_cost
 
 
 # Fixtures for mock data
@@ -37,230 +31,227 @@ def together_client():
     return TogetherClient(api_key="fake_api_key")
 
 
-# Test initialization and configuration
-@pytest.mark.skipif(skip, reason="Together.AI dependency is not installed")
-def test_initialization():
-    # Missing any api_key
-    with pytest.raises(AssertionError) as assertinfo:
-        TogetherClient()  # Should raise an AssertionError due to missing api_key
-
-    assert (
-        "Please include the api_key in your config list entry for Together.AI or set the TOGETHER_API_KEY env variable."
-        in str(assertinfo.value)
-    )
-
-    # Creation works
-    TogetherClient(api_key="fake_api_key")  # Should create okay now.
-
-
-# Test standard initialization
-@pytest.mark.skipif(skip, reason="Together.AI dependency is not installed")
-def test_valid_initialization(together_client):
-    assert together_client.api_key == "fake_api_key", "Config api_key should be correctly set"
-
+@skip_on_missing_imports("together", "together")
+class TestTogether:
+    # Test initialization and configuration
+    def test_initialization(self) -> None:
+        # Missing any api_key
+        with pytest.raises(AssertionError) as assertinfo:
+            TogetherClient()  # Should raise an AssertionError due to missing api_key
 
-# Test parameters
-@pytest.mark.skipif(skip, reason="Together.AI dependency is not installed")
-def test_parsing_params(together_client):
-    # All parameters
-    params = {
-        "model": "Qwen/Qwen2-72B-Instruct",
-        "max_tokens": 1000,
-        "stream": False,
-        "temperature": 1,
-        "top_p": 0.8,
-        "top_k": 50,
-        "repetition_penalty": 0.5,
-        "presence_penalty": 1.5,
-        "frequency_penalty": 1.5,
-        "min_p": 0.2,
-        "safety_model": "Meta-Llama/Llama-Guard-7b",
-    }
-    expected_params = {
-        "model": "Qwen/Qwen2-72B-Instruct",
-        "max_tokens": 1000,
-        "stream": False,
-        "temperature": 1,
-        "top_p": 0.8,
-        "top_k": 50,
-        "repetition_penalty": 0.5,
-        "presence_penalty": 1.5,
-        "frequency_penalty": 1.5,
-        "min_p": 0.2,
-        "safety_model": "Meta-Llama/Llama-Guard-7b",
-    }
-    result = together_client.parse_params(params)
-    assert result == expected_params
+        assert (
+            "Please include the api_key in your config list entry for Together.AI or set the TOGETHER_API_KEY env variable."
+            in str(assertinfo.value)
+        )
 
-    # Only model, others set as defaults
-    params = {
-        "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    }
-    expected_params = {
-        "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "max_tokens": 512,
-        "stream": False,
-        "temperature": None,
-        "top_p": None,
-        "top_k": None,
-        "repetition_penalty": None,
-        "presence_penalty": None,
-        "frequency_penalty": None,
-        "min_p": None,
-        "safety_model": None,
-    }
-    result = together_client.parse_params(params)
-    assert result == expected_params
+        # Creation works
+        TogetherClient(api_key="fake_api_key")  # Should create okay now.
 
-    # Incorrect types, defaults should be set, will show warnings but not trigger assertions
-    params = {
-        "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "max_tokens": "512",
-        "stream": "Yes",
-        "temperature": "0.5",
-        "top_p": "0.8",
-        "top_k": "50",
-        "repetition_penalty": "0.5",
-        "presence_penalty": "1.5",
-        "frequency_penalty": "1.5",
-        "min_p": "0.2",
-        "safety_model": False,
-    }
-    result = together_client.parse_params(params)
-    assert result == expected_params
-
-    # Values outside bounds, should warn and set to defaults
-    params = {
-        "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "max_tokens": -200,
-        "presence_penalty": -5,
-        "frequency_penalty": 5,
-        "min_p": -0.5,
-    }
-    result = together_client.parse_params(params)
-    assert result == expected_params
-
-
-# Test cost calculation
-@pytest.mark.skipif(skip, reason="Together.AI dependency is not installed")
-def test_cost_calculation(mock_response):
-    response = mock_response(
-        text="Example response",
-        choices=[{"message": "Test message 1"}],
-        usage={"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
-        cost=None,
-        model="mistralai/Mixtral-8x22B-Instruct-v0.1",
-    )
-    assert (
-        calculate_together_cost(response.usage["prompt_tokens"], response.usage["completion_tokens"], response.model)
-        == 0.000018
-    ), "Cost for this should be $0.000018"
-
-
-# Test text generation
-@pytest.mark.skipif(skip, reason="Together.AI dependency is not installed")
-@patch("autogen.oai.together.TogetherClient.create")
-def test_create_response(mock_create, together_client):
-    # Mock TogetherClient.chat response
-    mock_together_response = MagicMock()
-    mock_together_response.choices = [
-        MagicMock(finish_reason="stop", message=MagicMock(content="Example Llama response", tool_calls=None))
-    ]
-    mock_together_response.id = "mock_together_response_id"
-    mock_together_response.model = "meta-llama/Llama-3-8b-chat-hf"
-    mock_together_response.usage = MagicMock(prompt_tokens=10, completion_tokens=20)  # Example token usage
-
-    mock_create.return_value = mock_together_response
+    # Test standard initialization
+    def test_valid_initialization(self, together_client: TogetherClient) -> None:
+        assert together_client.api_key == "fake_api_key", "Config api_key should be correctly set"
 
     # Test parameters
-    params = {
-        "messages": [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "World"}],
-        "model": "meta-llama/Llama-3-8b-chat-hf",
-    }
+    def test_parsing_params(self, together_client: TogetherClient) -> None:
+        # All parameters
+        params = {
+            "model": "Qwen/Qwen2-72B-Instruct",
+            "max_tokens": 1000,
+            "stream": False,
+            "temperature": 1,
+            "top_p": 0.8,
+            "top_k": 50,
+            "repetition_penalty": 0.5,
+            "presence_penalty": 1.5,
+            "frequency_penalty": 1.5,
+            "min_p": 0.2,
+            "safety_model": "Meta-Llama/Llama-Guard-7b",
+        }
+        expected_params = {
+            "model": "Qwen/Qwen2-72B-Instruct",
+            "max_tokens": 1000,
+            "stream": False,
+            "temperature": 1,
+            "top_p": 0.8,
+            "top_k": 50,
+            "repetition_penalty": 0.5,
+            "presence_penalty": 1.5,
+            "frequency_penalty": 1.5,
+            "min_p": 0.2,
+            "safety_model": "Meta-Llama/Llama-Guard-7b",
+        }
+        result = together_client.parse_params(params)
+        assert result == expected_params
 
-    # Call the create method
-    response = together_client.create(params)
+        # Only model, others set as defaults
+        params = {
+            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        }
+        expected_params = {
+            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "max_tokens": 512,
+            "stream": False,
+            "temperature": None,
+            "top_p": None,
+            "top_k": None,
+            "repetition_penalty": None,
+            "presence_penalty": None,
+            "frequency_penalty": None,
+            "min_p": None,
+            "safety_model": None,
+        }
+        result = together_client.parse_params(params)
+        assert result == expected_params
 
-    # Assertions to check if response is structured as expected
-    assert response.choices[0].message.content == "Example Llama response", (
-        "Response content should match expected output"
-    )
-    assert response.id == "mock_together_response_id", "Response ID should match the mocked response ID"
-    assert response.model == "meta-llama/Llama-3-8b-chat-hf", "Response model should match the mocked response model"
-    assert response.usage.prompt_tokens == 10, "Response prompt tokens should match the mocked response usage"
-    assert response.usage.completion_tokens == 20, "Response completion tokens should match the mocked response usage"
-
-
-# Test functions/tools
-@pytest.mark.skipif(skip, reason="Together.AI dependency is not installed")
-@patch("autogen.oai.together.TogetherClient.create")
-def test_create_response_with_tool_call(mock_create, together_client):
-    # Define the mock response directly within the patch
-    mock_function = MagicMock(name="currency_calculator")
-    mock_function.name = "currency_calculator"
-    mock_function.arguments = '{"base_currency": "EUR", "quote_currency": "USD", "base_amount": 123.45}'
-
-    # Define the mock response directly within the patch
-    mock_create.return_value = MagicMock(
-        choices=[
-            MagicMock(
-                finish_reason="tool_calls",
-                message=MagicMock(
-                    content="",  # Message is empty for tool responses
-                    tool_calls=[MagicMock(id="gdRdrvnHh", function=mock_function)],
-                ),
-            )
-        ],
-        id="mock_together_response_id",
-        model="meta-llama/Llama-3-8b-chat-hf",
-        usage=MagicMock(prompt_tokens=10, completion_tokens=20),
-    )
-
-    # Test parameters
-    converted_functions = [
-        {
-            "type": "function",
-            "function": {
-                "description": "Currency exchange calculator.",
-                "name": "currency_calculator",
-                "parameters": {
-                    "type": "object",
-                    "properties": {
-                        "base_amount": {"type": "number", "description": "Amount of currency in base_currency"},
-                        "base_currency": {
-                            "enum": ["USD", "EUR"],
-                            "type": "string",
-                            "default": "USD",
-                            "description": "Base currency",
-                        },
-                        "quote_currency": {
-                            "enum": ["USD", "EUR"],
-                            "type": "string",
-                            "default": "EUR",
-                            "description": "Quote currency",
-                        },
-                    },
-                    "required": ["base_amount"],
-                },
-            },
-        }
-    ]
-
-    together_messages = [
-        {
-            "role": "user",
-            "content": "How much is 123.45 EUR in USD?",
-            "name": None,
-            "tool_calls": None,
-            "tool_call_id": None,
-        },
-    ]
-
-    # Call the create method (which is now mocked)
-    response = together_client.create(
-        {"messages": together_messages, "tools": converted_functions, "model": "meta-llama/Llama-3-8b-chat-hf"}
-    )
-
-    # Assertions to check if response is structured as expected
-    assert response.choices[0].message.content == ""
-    assert response.choices[0].message.tool_calls[0].function.name == "currency_calculator"
+        # Incorrect types, defaults should be set, will show warnings but not trigger assertions
+        params = {
+            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "max_tokens": "512",
+            "stream": "Yes",
+            "temperature": "0.5",
+            "top_p": "0.8",
+            "top_k": "50",
+            "repetition_penalty": "0.5",
+            "presence_penalty": "1.5",
+            "frequency_penalty": "1.5",
+            "min_p": "0.2",
+            "safety_model": False,
+        }
+        result = together_client.parse_params(params)
+        assert result == expected_params
+
+        # Values outside bounds, should warn and set to defaults
+        params = {
+            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+            "max_tokens": -200,
+            "presence_penalty": -5,
+            "frequency_penalty": 5,
+            "min_p": -0.5,
+        }
+        result = together_client.parse_params(params)
+        assert result == expected_params
+
+    # Test cost calculation
+    def test_cost_calculation(self, mock_response: MagicMock) -> None:
+        response = mock_response(
+            text="Example response",
+            choices=[{"message": "Test message 1"}],
+            usage={"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15},
+            cost=None,
+            model="mistralai/Mixtral-8x22B-Instruct-v0.1",
+        )
+        assert (
+            calculate_together_cost(
+                response.usage["prompt_tokens"], response.usage["completion_tokens"], response.model
+            )
+            == 0.000018
+        ), "Cost for this should be $0.000018"
+
+    # Test text generation
+    @patch("autogen.oai.together.TogetherClient.create")
+    def test_create_response(self, mock_create, together_client):
+        # Mock TogetherClient.chat response
+        mock_together_response = MagicMock()
+        mock_together_response.choices = [
+            MagicMock(finish_reason="stop", message=MagicMock(content="Example Llama response", tool_calls=None))
+        ]
+        mock_together_response.id = "mock_together_response_id"
+        mock_together_response.model = "meta-llama/Llama-3-8b-chat-hf"
+        mock_together_response.usage = MagicMock(prompt_tokens=10, completion_tokens=20)  # Example token usage
+
+        mock_create.return_value = mock_together_response
+
+        # Test parameters
+        params = {
+            "messages": [{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "World"}],
+            "model": "meta-llama/Llama-3-8b-chat-hf",
+        }
+
+        # Call the create method
+        response = together_client.create(params)
+
+        # Assertions to check if response is structured as expected
+        assert response.choices[0].message.content == "Example Llama response", (
+            "Response content should match expected output"
+        )
+        assert response.id == "mock_together_response_id", "Response ID should match the mocked response ID"
+        assert response.model == "meta-llama/Llama-3-8b-chat-hf", (
+            "Response model should match the mocked response model"
+        )
+        assert response.usage.prompt_tokens == 10, "Response prompt tokens should match the mocked response usage"
+        assert response.usage.completion_tokens == 20, (
+            "Response completion tokens should match the mocked response usage"
+        )
+
+    # Test functions/tools
+    @patch("autogen.oai.together.TogetherClient.create")
+    def test_create_response_with_tool_call(self, mock_create, together_client):
+        # Define the mock response directly within the patch
+        mock_function = MagicMock(name="currency_calculator")
+        mock_function.name = "currency_calculator"
+        mock_function.arguments = '{"base_currency": "EUR", "quote_currency": "USD", "base_amount": 123.45}'
+
+        # Define the mock response directly within the patch
+        mock_create.return_value = MagicMock(
+            choices=[
+                MagicMock(
+                    finish_reason="tool_calls",
+                    message=MagicMock(
+                        content="",  # Message is empty for tool responses
+                        tool_calls=[MagicMock(id="gdRdrvnHh", function=mock_function)],
+                    ),
+                )
+            ],
+            id="mock_together_response_id",
+            model="meta-llama/Llama-3-8b-chat-hf",
+            usage=MagicMock(prompt_tokens=10, completion_tokens=20),
+        )
+
+        # Test parameters
+        converted_functions = [
+            {
+                "type": "function",
+                "function": {
+                    "description": "Currency exchange calculator.",
+                    "name": "currency_calculator",
+                    "parameters": {
+                        "type": "object",
+                        "properties": {
+                            "base_amount": {"type": "number", "description": "Amount of currency in base_currency"},
+                            "base_currency": {
+                                "enum": ["USD", "EUR"],
+                                "type": "string",
+                                "default": "USD",
+                                "description": "Base currency",
+                            },
+                            "quote_currency": {
+                                "enum": ["USD", "EUR"],
+                                "type": "string",
+                                "default": "EUR",
+                                "description": "Quote currency",
+                            },
+                        },
+                        "required": ["base_amount"],
+                    },
+                },
+            }
+        ]
+
+        together_messages = [
+            {
+                "role": "user",
+                "content": "How much is 123.45 EUR in USD?",
+                "name": None,
+                "tool_calls": None,
+                "tool_call_id": None,
+            },
+        ]
+
+        # Call the create method (which is now mocked)
+        response = together_client.create(
+            {"messages": together_messages, "tools": converted_functions, "model": "meta-llama/Llama-3-8b-chat-hf"}
+        )
+
+        # Assertions to check if response is structured as expected
+        assert response.choices[0].message.content == ""
+        assert response.choices[0].message.tool_calls[0].function.name == "currency_calculator"