From 244a986e83a90c0534dadaa842e755a8e7917290 Mon Sep 17 00:00:00 2001
From: Maytekin
Date: Tue, 24 Mar 2026 21:54:46 +0000
Subject: [PATCH] feat(llm): add LM Studio provider support and refactor
 client factory

- Add LM Studio as a new provider option in config.json
- Introduce LLM_PROVIDER_TYPES configuration for provider-to-client mapping
- Refactor factory.py to use the centralized provider type configuration
- Add results and reports directories to .gitignore

The refactor centralizes the provider-to-client mapping, so new providers
can be added in the future without modifying the factory logic. LM Studio
support enables integration with locally hosted models.
---
 .gitignore                           |  4 +++
 config.json                          | 17 ++++++++++
 tradingagents/llm_clients/factory.py | 51 ++++++++++++++++++++++++----
 3 files changed, 66 insertions(+), 6 deletions(-)

diff --git a/.gitignore b/.gitignore
index 9a2904a9..3fa25451 100644
--- a/.gitignore
+++ b/.gitignore
@@ -82,6 +82,10 @@ target/
 profile_default/
 ipython_config.py
 
+# Results
+results
+reports
+
 # pyenv
 # For a library or package, you might want to ignore these files since the code is
 # intended to run in multiple environments; otherwise, check them in:
diff --git a/config.json b/config.json
index 518223ea..34f62964 100644
--- a/config.json
+++ b/config.json
@@ -39,6 +39,10 @@
       ["Qwen3:latest (8B, local)", "qwen3:latest"],
       ["GPT-OSS:latest (20B, local)", "gpt-oss:latest"],
       ["GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"]
+    ],
+    "lmstudio": [
+      ["qwen3.5-35b-a3b", "qwen/qwen3.5-35b-a3b"],
+      ["openai/gpt-oss-20b", "openai/gpt-oss-20b"]
     ]
   },
   "DEEP_AGENT_OPTIONS": {
@@ -74,11 +78,24 @@
       ["GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"],
       ["GPT-OSS:latest (20B, local)", "gpt-oss:latest"],
       ["Qwen3:latest (8B, local)", "qwen3:latest"]
+    ],
+    "lmstudio": [
+      ["qwen3.5-35b-a3b", "qwen/qwen3.5-35b-a3b"],
+      ["openai/gpt-oss-20b", "openai/gpt-oss-20b"]
     ]
   },
   "DEFAULT_LLM_SETTINGS": {
     "llm_provider": "openai",
     "deep_think_llm": "gpt-5.2",
     "quick_think_llm": "gpt-5-mini"
+  },
+  "LLM_PROVIDER_TYPES": {
+    "openai": "openai",
+    "anthropic": "anthropic",
+    "google": "google",
+    "xai": "openai",
+    "openrouter": "openai",
+    "ollama": "openai",
+    "lmstudio": "openai"
   }
 }
diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py
index 93c2a7d3..fda6f024 100644
--- a/tradingagents/llm_clients/factory.py
+++ b/tradingagents/llm_clients/factory.py
@@ -1,3 +1,5 @@
+import json
+from pathlib import Path
 from typing import Optional
 
 from .base_client import BaseLLMClient
@@ -5,6 +7,45 @@ from .openai_client import OpenAIClient
 from .anthropic_client import AnthropicClient
 from .google_client import GoogleClient
 
+CONFIG_PATH = Path(__file__).resolve().parents[2] / "config.json"
+
+
+def _load_config() -> dict:
+    try:
+        with CONFIG_PATH.open("r", encoding="utf-8") as config_file:
+            config = json.load(config_file)
+    except FileNotFoundError as exc:
+        raise RuntimeError(f"Config file not found: {CONFIG_PATH}") from exc
+    except json.JSONDecodeError as exc:
+        raise RuntimeError(f"Invalid JSON in config file: {CONFIG_PATH}") from exc
+    except OSError as exc:
+        raise RuntimeError(f"Unable to read config file: {CONFIG_PATH}") from exc
+    if not isinstance(config, dict):
+        raise RuntimeError(f"Invalid config format in file: {CONFIG_PATH}")
+    return config
+
+
+def _load_provider_types() -> dict[str, str]:
+    provider_types = _load_config().get("LLM_PROVIDER_TYPES")
+    if not isinstance(provider_types, dict):
+        raise RuntimeError(
+            f"Invalid or missing 'LLM_PROVIDER_TYPES' in config file: {CONFIG_PATH}"
+        )
+    return {
+        str(name).lower(): str(client_type).lower()
+        for name, client_type in provider_types.items()
+    }
+
+
+_PROVIDER_TYPES: dict[str, str] | None = None
+
+
+def _get_provider_types() -> dict[str, str]:
+    global _PROVIDER_TYPES
+    if _PROVIDER_TYPES is None:
+        _PROVIDER_TYPES = _load_provider_types()
+    return _PROVIDER_TYPES
+
 
 def create_llm_client(
     provider: str,
@@ -33,17 +74,15 @@ def create_llm_client(
         ValueError: If provider is not supported
     """
     provider_lower = provider.lower()
+    provider_type = _get_provider_types().get(provider_lower)
 
-    if provider_lower in ("openai", "ollama", "openrouter"):
+    if provider_type == "openai":
         return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
 
-    if provider_lower == "xai":
-        return OpenAIClient(model, base_url, provider="xai", **kwargs)
-
-    if provider_lower == "anthropic":
+    if provider_type == "anthropic":
         return AnthropicClient(model, base_url, **kwargs)
 
-    if provider_lower == "google":
+    if provider_type == "google":
         return GoogleClient(model, base_url, **kwargs)
 
     raise ValueError(f"Unsupported LLM provider: {provider}")
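
---

Reviewer note (below the patch, ignored by git am): a minimal usage sketch
of the refactored factory with the new provider. The create_llm_client
signature and the "lmstudio" -> "openai" mapping come from this diff; the
base_url assumes LM Studio's usual local server endpoint
(http://localhost:1234/v1), and the model id is one of the entries this
patch adds to config.json.

    from tradingagents.llm_clients.factory import create_llm_client

    # "lmstudio" resolves to client type "openai" via LLM_PROVIDER_TYPES,
    # so this returns an OpenAIClient pointed at the local LM Studio server.
    client = create_llm_client(
        provider="lmstudio",
        model="qwen/qwen3.5-35b-a3b",
        base_url="http://localhost:1234/v1",  # assumed LM Studio default
    )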
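Reviewer note: per the commit message, adding another OpenAI-compatible
provider should now take only config edits, with no factory changes.
Continuing the sketch above under that assumption, with a hypothetical
"vllm" provider (the name, model id, and endpoint are illustrative, not
part of this patch):

    # config.json would gain one mapping entry:
    #   "LLM_PROVIDER_TYPES": { ..., "vllm": "openai" }
    # after which the factory resolves it without code changes:
    client = create_llm_client(
        provider="vllm",
        model="my-model",                     # hypothetical model id
        base_url="http://localhost:8000/v1",  # hypothetical endpoint
    )

One caveat worth noting: _PROVIDER_TYPES is cached at module level after
the first lookup, so edits to config.json take effect only on the next
process start.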