diff --git a/.env.example b/.env.example index 1328b838..bc3cb3f8 100644 --- a/.env.example +++ b/.env.example @@ -3,4 +3,5 @@ OPENAI_API_KEY= GOOGLE_API_KEY= ANTHROPIC_API_KEY= XAI_API_KEY= +MINIMAX_API_KEY= OPENROUTER_API_KEY= diff --git a/README.md b/README.md index 8cf085e8..e204994b 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ ## News - [2026-03] **TradingAgents v0.2.1** released with GPT-5.4, Gemini 3.1, Claude 4.6 model coverage and improved system stability. -- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture. +- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x, MiniMax-M2.7) and improved system architecture. - [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon.
@@ -126,6 +126,7 @@ export OPENAI_API_KEY=... # OpenAI (GPT) export GOOGLE_API_KEY=... # Google (Gemini) export ANTHROPIC_API_KEY=... # Anthropic (Claude) export XAI_API_KEY=... # xAI (Grok) +export MINIMAX_API_KEY=... # MiniMax export OPENROUTER_API_KEY=... # OpenRouter export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage ``` @@ -163,7 +164,7 @@ An interface will appear showing results as they load, letting you track the age ### Implementation Details -We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama. +We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, MiniMax, OpenRouter, and Ollama. ### Python Usage @@ -187,7 +188,7 @@ from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.default_config import DEFAULT_CONFIG config = DEFAULT_CONFIG.copy() -config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama +config["llm_provider"] = "openai" # openai, google, anthropic, xai, minimax, openrouter, ollama config["deep_think_llm"] = "gpt-5.2" # Model for complex reasoning config["quick_think_llm"] = "gpt-5-mini" # Model for quick tasks config["max_debate_rounds"] = 2 diff --git a/cli/utils.py b/cli/utils.py index 5a8ec16c..2b5163ff 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -155,6 +155,12 @@ def select_shallow_thinking_agent(provider) -> str: ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ], + "minimax": [ + ("MiniMax-M2.7-highspeed - High-speed M2.7 for low-latency", "MiniMax-M2.7-highspeed"), + ("MiniMax-M2.7 - Latest flagship, enhanced reasoning", "MiniMax-M2.7"), + ("MiniMax-M2.5-highspeed - Ultra-fast, 204K context", "MiniMax-M2.5-highspeed"), + ("MiniMax-M2.5 - Flagship, 204K context", "MiniMax-M2.5"),
+ ], "openrouter": [ ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), @@ -222,6 +228,12 @@ def select_deep_thinking_agent(provider) -> str: ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ], + "minimax": [ + ("MiniMax-M2.7 - Latest flagship, enhanced reasoning", "MiniMax-M2.7"), + ("MiniMax-M2.7-highspeed - High-speed M2.7 for low-latency", "MiniMax-M2.7-highspeed"), + ("MiniMax-M2.5 - Flagship, 204K context", "MiniMax-M2.5"), + ("MiniMax-M2.5-highspeed - Ultra-fast, 204K context", "MiniMax-M2.5-highspeed"), + ], "openrouter": [ ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), @@ -263,6 +275,7 @@ def select_llm_provider() -> tuple[str, str]: ("Google", "https://generativelanguage.googleapis.com/v1"), ("Anthropic", "https://api.anthropic.com/"), ("xAI", "https://api.x.ai/v1"), + ("MiniMax", "https://api.minimax.io/v1"), ("Openrouter", "https://openrouter.ai/api/v1"), ("Ollama", "http://localhost:11434/v1"), ] diff --git a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py index 93c2a7d3..65193300 100644 --- a/tradingagents/llm_clients/factory.py +++ b/tradingagents/llm_clients/factory.py @@ -15,7 +15,7 @@ def create_llm_client( """Create an LLM client for the specified provider. 
Args: - provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter) + provider: LLM provider (openai, anthropic, google, xai, minimax, ollama, openrouter) model: Model name/identifier base_url: Optional base URL for API endpoint **kwargs: Additional provider-specific arguments @@ -40,6 +40,9 @@ def create_llm_client( if provider_lower == "xai": return OpenAIClient(model, base_url, provider="xai", **kwargs) + if provider_lower == "minimax": + return OpenAIClient(model, base_url, provider="minimax", **kwargs) + if provider_lower == "anthropic": return AnthropicClient(model, base_url, **kwargs) diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py index 4605c1f9..67cdb7b5 100644 --- a/tradingagents/llm_clients/openai_client.py +++ b/tradingagents/llm_clients/openai_client.py @@ -27,7 +27,7 @@ class UnifiedChatOpenAI(ChatOpenAI): class OpenAIClient(BaseLLMClient): - """Client for OpenAI, Ollama, OpenRouter, and xAI providers.""" + """Client for OpenAI, Ollama, OpenRouter, xAI, and MiniMax providers.""" def __init__( self, @@ -53,6 +53,11 @@ class OpenAIClient(BaseLLMClient): api_key = os.environ.get("OPENROUTER_API_KEY") if api_key: llm_kwargs["api_key"] = api_key + elif self.provider == "minimax": + llm_kwargs["base_url"] = "https://api.minimax.io/v1" + api_key = os.environ.get("MINIMAX_API_KEY") + if api_key: + llm_kwargs["api_key"] = api_key elif self.provider == "ollama": llm_kwargs["base_url"] = "http://localhost:11434/v1" llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth diff --git a/tradingagents/llm_clients/validators.py b/tradingagents/llm_clients/validators.py index 1e2388b3..b9233eb3 100644 --- a/tradingagents/llm_clients/validators.py +++ b/tradingagents/llm_clients/validators.py @@ -48,6 +48,14 @@ VALID_MODELS = { "grok-4-fast-reasoning", "grok-4-fast-non-reasoning", ], + "minimax": [ + # MiniMax M2.7 series (latest flagship, OpenAI-compatible) + "MiniMax-M2.7",
+ "MiniMax-M2.7-highspeed", + # MiniMax M2.5 series (204K context, OpenAI-compatible) + "MiniMax-M2.5", + "MiniMax-M2.5-highspeed", + ], }