feat: add MiniMax as LLM provider
Add MiniMax (MiniMax-M2.5 and MiniMax-M2.5-highspeed) as a supported LLM provider. MiniMax offers an OpenAI-compatible API with a 204K context window.

Changes:
- Add MiniMax provider routing in the factory (via the OpenAI-compatible client)
- Add MiniMax API endpoint and key handling in OpenAIClient
- Add MiniMax model validation in validators
- Add MiniMax models to the CLI quick/deep thinking selection
- Add MiniMax to provider selection in the CLI
- Update .env.example with MINIMAX_API_KEY
- Update README with MiniMax documentation
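For context, a minimal usage sketch with the new provider, mirroring the README's Python example changed below; the `TradingAgentsGraph` constructor and `propagate` call are assumed from the project's existing examples rather than shown in this diff:

```python
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG

# Route both thinking tiers through MiniMax's OpenAI-compatible API.
# Requires MINIMAX_API_KEY in the environment (see the .env.example change below).
config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "minimax"
config["deep_think_llm"] = "MiniMax-M2.5"             # flagship, 204K context
config["quick_think_llm"] = "MiniMax-M2.5-highspeed"  # faster variant

ta = TradingAgentsGraph(debug=True, config=config)    # constructor args assumed from upstream examples
_, decision = ta.propagate("NVDA", "2026-02-14")      # ticker and date are illustrative
print(decision)
```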
parent f047f26df0
commit 46bd80347a
@@ -3,4 +3,5 @@ OPENAI_API_KEY=
 GOOGLE_API_KEY=
 ANTHROPIC_API_KEY=
 XAI_API_KEY=
+MINIMAX_API_KEY=
 OPENROUTER_API_KEY=
@@ -28,7 +28,7 @@
 # TradingAgents: Multi-Agents LLM Financial Trading Framework
 
 ## News
-- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x) and improved system architecture.
+- [2026-02] **TradingAgents v0.2.0** released with multi-provider LLM support (GPT-5.x, Gemini 3.x, Claude 4.x, Grok 4.x, MiniMax-M2.5) and improved system architecture.
 - [2026-01] **Trading-R1** [Technical Report](https://arxiv.org/abs/2509.11420) released, with [Terminal](https://github.com/TauricResearch/Trading-R1) expected to land soon.
 
 <div align="center">
@@ -125,6 +125,7 @@ export OPENAI_API_KEY=... # OpenAI (GPT)
 export GOOGLE_API_KEY=... # Google (Gemini)
 export ANTHROPIC_API_KEY=... # Anthropic (Claude)
 export XAI_API_KEY=... # xAI (Grok)
+export MINIMAX_API_KEY=... # MiniMax
 export OPENROUTER_API_KEY=... # OpenRouter
 export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage
 ```
@@ -162,7 +163,7 @@ An interface will appear showing results as they load, letting you track the age
 
 ### Implementation Details
 
-We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama.
+We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, MiniMax, OpenRouter, and Ollama.
 
 ### Python Usage
 
@@ -186,7 +187,7 @@ from tradingagents.graph.trading_graph import TradingAgentsGraph
 from tradingagents.default_config import DEFAULT_CONFIG
 
 config = DEFAULT_CONFIG.copy()
-config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama
+config["llm_provider"] = "openai" # openai, google, anthropic, xai, minimax, openrouter, ollama
 config["deep_think_llm"] = "gpt-5.2" # Model for complex reasoning
 config["quick_think_llm"] = "gpt-5-mini" # Model for quick tasks
 config["max_debate_rounds"] = 2
@@ -151,6 +151,10 @@ def select_shallow_thinking_agent(provider) -> str:
             ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
             ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
         ],
+        "minimax": [
+            ("MiniMax-M2.5-highspeed - Ultra-fast, 204K context", "MiniMax-M2.5-highspeed"),
+            ("MiniMax-M2.5 - Flagship, 204K context", "MiniMax-M2.5"),
+        ],
         "openrouter": [
             ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
             ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
@@ -219,6 +223,10 @@ def select_deep_thinking_agent(provider) -> str:
             ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
             ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
         ],
+        "minimax": [
+            ("MiniMax-M2.5 - Flagship, 204K context", "MiniMax-M2.5"),
+            ("MiniMax-M2.5-highspeed - Ultra-fast, 204K context", "MiniMax-M2.5-highspeed"),
+        ],
         "openrouter": [
             ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
             ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
@@ -260,6 +268,7 @@ def select_llm_provider() -> tuple[str, str]:
         ("Google", "https://generativelanguage.googleapis.com/v1"),
         ("Anthropic", "https://api.anthropic.com/"),
         ("xAI", "https://api.x.ai/v1"),
+        ("MiniMax", "https://api.minimax.io/v1"),
         ("Openrouter", "https://openrouter.ai/api/v1"),
         ("Ollama", "http://localhost:11434/v1"),
     ]
@@ -15,7 +15,7 @@ def create_llm_client(
     """Create an LLM client for the specified provider.
 
     Args:
-        provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
+        provider: LLM provider (openai, anthropic, google, xai, minimax, ollama, openrouter)
         model: Model name/identifier
         base_url: Optional base URL for API endpoint
         **kwargs: Additional provider-specific arguments
@@ -34,6 +34,9 @@ def create_llm_client(
     if provider_lower == "xai":
         return OpenAIClient(model, base_url, provider="xai", **kwargs)
 
+    if provider_lower == "minimax":
+        return OpenAIClient(model, base_url, provider="minimax", **kwargs)
+
     if provider_lower == "anthropic":
         return AnthropicClient(model, base_url, **kwargs)
 
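An illustrative call into the new factory branch; the import path below is an assumption, since the diff does not show the factory module's file name:

```python
# Hypothetical module path for the factory shown in the hunk above.
from tradingagents.llm.factory import create_llm_client

# Per the added branch, "minimax" is routed through OpenAIClient with
# provider="minimax"; MINIMAX_API_KEY is read from the environment.
client = create_llm_client("minimax", "MiniMax-M2.5")

# base_url is optional in the signature; the client falls back to MiniMax's endpoint.
fast = create_llm_client("minimax", "MiniMax-M2.5-highspeed",
                         base_url="https://api.minimax.io/v1")
```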
@@ -29,7 +29,7 @@ class UnifiedChatOpenAI(ChatOpenAI):
 
 
 class OpenAIClient(BaseLLMClient):
-    """Client for OpenAI, Ollama, OpenRouter, and xAI providers."""
+    """Client for OpenAI, Ollama, OpenRouter, xAI, and MiniMax providers."""
 
     def __init__(
         self,
@@ -55,6 +55,11 @@ class OpenAIClient(BaseLLMClient):
             api_key = os.environ.get("OPENROUTER_API_KEY")
             if api_key:
                 llm_kwargs["api_key"] = api_key
+        elif self.provider == "minimax":
+            llm_kwargs["base_url"] = "https://api.minimax.io/v1"
+            api_key = os.environ.get("MINIMAX_API_KEY")
+            if api_key:
+                llm_kwargs["api_key"] = api_key
         elif self.provider == "ollama":
             llm_kwargs["base_url"] = "http://localhost:11434/v1"
             llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth
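Since this branch only points the underlying LangChain OpenAI client at MiniMax's endpoint with MINIMAX_API_KEY, the same wiring can be reproduced directly; a minimal sketch, assuming `langchain-openai` (the dependency behind `UnifiedChatOpenAI`) is installed:

```python
import os

from langchain_openai import ChatOpenAI

# Same kwargs the minimax branch above assembles into llm_kwargs.
llm = ChatOpenAI(
    model="MiniMax-M2.5",
    base_url="https://api.minimax.io/v1",
    api_key=os.environ["MINIMAX_API_KEY"],
)

# The prompt is illustrative; any chat message works against the endpoint.
print(llm.invoke("One-sentence summary of today's market sentiment.").content)
```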
@@ -63,6 +63,11 @@ VALID_MODELS = {
         "grok-4-fast-reasoning",
         "grok-4-fast-non-reasoning",
     ],
+    "minimax": [
+        # MiniMax M2.5 series (204K context, OpenAI-compatible)
+        "MiniMax-M2.5",
+        "MiniMax-M2.5-highspeed",
+    ],
 }
 
 