From 24925d41bec3e3065a465d56ba902185b506db01 Mon Sep 17 00:00:00 2001
From: liuzhuoya
Date: Sun, 12 Apr 2026 11:22:23 +0800
Subject: [PATCH] feat: add SiliconFlow provider support

---
 .gitignore | 3 +++
 cli/utils.py | 1 +
 main.py | 4 ++--
 tradingagents/llm_clients/factory.py | 2 +-
 tradingagents/llm_clients/model_catalog.py | 12 ++++++++++++
 tradingagents/llm_clients/openai_client.py | 1 +
 6 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 9a2904a9..0eb63b91 100644
--- a/.gitignore
+++ b/.gitignore
@@ -217,3 +217,6 @@ __marimo__/
 
 # Cache
 **/data_cache/
+
+results/*
+reports/*
\ No newline at end of file
diff --git a/cli/utils.py b/cli/utils.py
index e071ce06..297d22ea 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -242,6 +242,7 @@ def select_llm_provider() -> tuple[str, str | None]:
         ("xAI", "https://api.x.ai/v1"),
         ("Openrouter", "https://openrouter.ai/api/v1"),
         ("Ollama", "http://localhost:11434/v1"),
+        ("SiliconFlow", "https://api.siliconflow.cn/v1"),
     ]
 
     choice = questionary.select(
diff --git a/main.py b/main.py
index c94fde32..9b3a123e 100644
--- a/main.py
+++ b/main.py
@@ -8,8 +8,8 @@ load_dotenv()
 
 # Create a custom config
 config = DEFAULT_CONFIG.copy()
-config["deep_think_llm"] = "gpt-5.4-mini"  # Use a different model
-config["quick_think_llm"] = "gpt-5.4-mini"  # Use a different model
+config["deep_think_llm"] = "Qwen/Qwen3.5-122B-A10B"  # Use a different model
+config["quick_think_llm"] = "Qwen/Qwen3.5-122B-A10B"  # Use a different model
 config["max_debate_rounds"] = 1  # Increase debate rounds
 
 # Configure data vendors (default uses yfinance, no extra API keys needed)
diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py
index 93c2a7d3..9198ba81 100644
--- a/tradingagents/llm_clients/factory.py
+++ b/tradingagents/llm_clients/factory.py
@@ -34,7 +34,7 @@ def create_llm_client(
     """
     provider_lower = provider.lower()
 
-    if provider_lower in ("openai", "ollama", "openrouter"):
+    if provider_lower in ("openai", "ollama", "openrouter", "siliconflow"):
         return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
 
     if provider_lower == "xai":
diff --git a/tradingagents/llm_clients/model_catalog.py b/tradingagents/llm_clients/model_catalog.py
index fd91c66d..3f364ffb 100644
--- a/tradingagents/llm_clients/model_catalog.py
+++ b/tradingagents/llm_clients/model_catalog.py
@@ -77,6 +77,18 @@ MODEL_OPTIONS: ProviderModeOptions = {
             ("Qwen3:latest (8B, local)", "qwen3:latest"),
         ],
     },
+    "siliconflow": {
+        "quick": [
+            ("Qwen/Qwen3.5-35B-A3B", "Qwen/Qwen3.5-35B-A3B"),
+            ("Qwen/Qwen3.5-27B", "Qwen/Qwen3.5-27B"),
+            ("Qwen/Qwen3.5-122B-A10B", "Qwen/Qwen3.5-122B-A10B"),
+        ],
+        "deep": [
+            ("Qwen/Qwen3.5-122B-A10B", "Qwen/Qwen3.5-122B-A10B"),
+            ("Pro/zai-org/GLM-5.1", "Pro/zai-org/GLM-5.1"),
+            ("Pro/MiniMaxAI/MiniMax-M2.5", "Pro/MiniMaxAI/MiniMax-M2.5"),
+        ],
+    },
 }
 
 
diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py
index 4f2e1b32..9165d3e7 100644
--- a/tradingagents/llm_clients/openai_client.py
+++ b/tradingagents/llm_clients/openai_client.py
@@ -29,6 +29,7 @@ _PROVIDER_CONFIG = {
     "xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
    "openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
     "ollama": ("http://localhost:11434/v1", None),
+    "siliconflow": ("https://api.siliconflow.cn/v1", "SILICONFLOW_API_KEY"),
 }