commit 60b98e9288
Author: claytonbrown
Date:   2026-04-21 18:41:48 +10:00 (committed via GitHub)
6 changed files with 95 additions and 4 deletions

pyproject.toml

@@ -40,3 +40,15 @@ include = ["tradingagents*", "cli*"]
 [tool.setuptools.package-data]
 cli = ["static/*"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
+addopts = "-ra --strict-markers"
+markers = [
+    "unit: fast isolated unit tests",
+    "integration: tests requiring external services",
+    "smoke: quick sanity-check tests",
+]
+filterwarnings = [
+    "ignore::DeprecationWarning",
+]
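For orientation, here is a hypothetical illustration (not part of this commit) of how the markers registered above are consumed; the test names are made up. Because addopts includes --strict-markers, any marker not declared in pyproject.toml fails collection instead of being silently accepted.

import pytest

@pytest.mark.unit
def test_example_unit():
    assert 1 + 1 == 2

@pytest.mark.integration
def test_example_integration():
    pytest.skip("requires external services")

# Typical invocations:
#   pytest -m unit                  # run only unit-marked tests
#   pytest -m "not integration"     # skip tests that need external services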

tests/conftest.py (new file, 67 lines)

@@ -0,0 +1,67 @@
"""Shared pytest fixtures that prevent CI hangs when API keys are absent."""
import os
from unittest.mock import MagicMock, patch
import pytest
# ---------------------------------------------------------------------------
# Custom markers
# ---------------------------------------------------------------------------
def pytest_configure(config):
for marker in ("unit", "integration", "smoke"):
config.addinivalue_line("markers", f"{marker}: {marker}-level tests")
# ---------------------------------------------------------------------------
# Auto-use: placeholder API keys so LLM client init never blocks
# ---------------------------------------------------------------------------
_API_KEY_ENV_VARS = [
"OPENAI",
"GOOGLE",
"ANTHROPIC",
"XAI",
"ALPHA_VANTAGE",
]
@pytest.fixture(autouse=True)
def _dummy_api_keys(monkeypatch):
for provider in _API_KEY_ENV_VARS:
env_var = f"{provider}_API_KEY"
monkeypatch.setenv(env_var, os.environ.get(env_var, "placeholder"))
# ---------------------------------------------------------------------------
# Auto-use: safe DEFAULT_CONFIG override (no real API calls)
# ---------------------------------------------------------------------------
_SAFE_CONFIG = {
"llm_provider": "openai",
"deep_think_llm": "gpt-5.4-mini",
"quick_think_llm": "gpt-5.4-mini",
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,
}
@pytest.fixture(autouse=True)
def _safe_default_config():
with patch.dict("tradingagents.default_config.DEFAULT_CONFIG", _SAFE_CONFIG):
yield
# ---------------------------------------------------------------------------
# Reusable mock LLM client
# ---------------------------------------------------------------------------
@pytest.fixture()
def mock_llm_client():
client = MagicMock()
client.get_llm.return_value = MagicMock()
with patch("tradingagents.llm_clients.create_llm_client", return_value=client):
yield client
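As a usage sketch (hypothetical, not part of this commit), a test can lean on the autouse fixtures plus the reusable mock client like this; the assertions rely only on what the fixtures above set up.

import pytest

@pytest.mark.unit
def test_factory_returns_patched_client(mock_llm_client):
    # create_llm_client is patched by the fixture, so no vendor SDK is
    # initialized and no real API key is needed.
    from tradingagents.llm_clients import create_llm_client

    client = create_llm_client("openai", "gpt-5.4-mini")
    assert client is mock_llm_client
    assert client.get_llm() is not None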


@@ -1,9 +1,12 @@
 import unittest
 from unittest.mock import patch
+import pytest
+
 from tradingagents.llm_clients.google_client import GoogleClient


+@pytest.mark.unit
 class TestGoogleApiKeyStandardization(unittest.TestCase):
     """Verify GoogleClient accepts unified api_key parameter."""


@@ -1,6 +1,8 @@
 import unittest
 import warnings
+import pytest
+
 from tradingagents.llm_clients.base_client import BaseLLMClient
 from tradingagents.llm_clients.model_catalog import get_known_models
 from tradingagents.llm_clients.validators import validate_model
@@ -19,6 +21,7 @@ class DummyLLMClient(BaseLLMClient):
         return validate_model(self.provider, self.model)


+@pytest.mark.unit
 class ModelValidationTests(unittest.TestCase):
     def test_cli_catalog_models_are_all_validator_approved(self):
         for provider, models in get_known_models().items():


@@ -1,9 +1,12 @@
 import unittest
+import pytest
+
 from cli.utils import normalize_ticker_symbol
 from tradingagents.agents.utils.agent_utils import build_instrument_context


+@pytest.mark.unit
 class TickerSymbolHandlingTests(unittest.TestCase):
     def test_normalize_ticker_symbol_preserves_exchange_suffix(self):
         self.assertEqual(normalize_ticker_symbol(" cnc.to "), "CNC.TO")
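For context, a minimal behavior sketch consistent with this assertion; the real cli.utils implementation is not shown in this diff, so the body below is an assumption.

def normalize_ticker_symbol(symbol: str) -> str:
    # Assumed behavior, inferred from the test: trim whitespace and
    # upper-case while preserving exchange suffixes such as ".TO".
    return symbol.strip().upper()

assert normalize_ticker_symbol(" cnc.to ") == "CNC.TO"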


@@ -1,10 +1,6 @@
 from typing import Optional

 from .base_client import BaseLLMClient
-from .openai_client import OpenAIClient
-from .anthropic_client import AnthropicClient
-from .google_client import GoogleClient
-from .azure_client import AzureOpenAIClient

 # Providers that use the OpenAI-compatible chat completions API
 _OPENAI_COMPATIBLE = (
@@ -20,6 +16,9 @@ def create_llm_client(
 ) -> BaseLLMClient:
     """Create an LLM client for the specified provider.

+    Client modules are imported lazily so that collecting tests or importing
+    the package does not trigger heavy LLM SDK initialization.
+
     Args:
         provider: LLM provider name
         model: Model name/identifier
@@ -35,15 +34,19 @@
     provider_lower = provider.lower()

     if provider_lower in _OPENAI_COMPATIBLE:
+        from .openai_client import OpenAIClient
         return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)

     if provider_lower == "anthropic":
+        from .anthropic_client import AnthropicClient
         return AnthropicClient(model, base_url, **kwargs)

     if provider_lower == "google":
+        from .google_client import GoogleClient
         return GoogleClient(model, base_url, **kwargs)

     if provider_lower == "azure":
+        from .azure_client import AzureOpenAIClient
         return AzureOpenAIClient(model, base_url, **kwargs)

     raise ValueError(f"Unsupported LLM provider: {provider}")
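A brief usage sketch, under the assumption that create_llm_client is re-exported from the tradingagents.llm_clients package (the conftest patch target above points there); the provider and model strings are illustrative, and the positional argument order follows the docstring.

from tradingagents.llm_clients import create_llm_client

# Only the branch actually taken imports a vendor client module, so merely
# importing the package (or collecting tests) stays lightweight.
client = create_llm_client("openai", "gpt-5.4-mini", base_url=None)  # imports .openai_client only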