diff --git a/.env.example b/.env.example index 1328b838..c4b768d5 100644 --- a/.env.example +++ b/.env.example @@ -4,3 +4,4 @@ GOOGLE_API_KEY= ANTHROPIC_API_KEY= XAI_API_KEY= OPENROUTER_API_KEY= +ZAI_API_KEY= diff --git a/README.md b/README.md index 8cf085e8..704f0b26 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,7 @@ export GOOGLE_API_KEY=... # Google (Gemini) export ANTHROPIC_API_KEY=... # Anthropic (Claude) export XAI_API_KEY=... # xAI (Grok) export OPENROUTER_API_KEY=... # OpenRouter +export ZAI_API_KEY=... # Z.AI (GLM) export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage ``` diff --git a/cli/utils.py b/cli/utils.py index 5a8ec16c..41877609 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -155,6 +155,9 @@ def select_shallow_thinking_agent(provider) -> str: ("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"), ("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"), ], + "zai": [ + ("GLM-5 - Balanced fast research model", "glm-5"), + ], "openrouter": [ ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), @@ -222,6 +225,9 @@ def select_deep_thinking_agent(provider) -> str: ("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ], + "zai": [ + ("GLM-5 - Flagship research model", "glm-5"), + ], "openrouter": [ ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"), ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"), @@ -263,6 +269,7 @@ def select_llm_provider() -> tuple[str, str]: ("Google", "https://generativelanguage.googleapis.com/v1"), ("Anthropic", "https://api.anthropic.com/"), ("xAI", "https://api.x.ai/v1"), + ("Z.AI", "https://api.z.ai/api/coding/paas/v4"), ("Openrouter", "https://openrouter.ai/api/v1"), ("Ollama", "http://localhost:11434/v1"), ] diff --git 
a/tradingagents/llm_clients/factory.py b/tradingagents/llm_clients/factory.py index 93c2a7d3..71b3f6b2 100644 --- a/tradingagents/llm_clients/factory.py +++ b/tradingagents/llm_clients/factory.py @@ -4,6 +4,7 @@ from .base_client import BaseLLMClient from .openai_client import OpenAIClient from .anthropic_client import AnthropicClient from .google_client import GoogleClient +from .zai_client import ZAIClient def create_llm_client( @@ -15,7 +16,7 @@ def create_llm_client( """Create an LLM client for the specified provider. Args: - provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter) + provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter, zai) model: Model name/identifier base_url: Optional base URL for API endpoint **kwargs: Additional provider-specific arguments @@ -46,4 +47,7 @@ def create_llm_client( if provider_lower == "google": return GoogleClient(model, base_url, **kwargs) + if provider_lower == "zai": + return ZAIClient(model, base_url, **kwargs) + raise ValueError(f"Unsupported LLM provider: {provider}") diff --git a/tradingagents/llm_clients/validators.py b/tradingagents/llm_clients/validators.py index 1e2388b3..33c7bf1d 100644 --- a/tradingagents/llm_clients/validators.py +++ b/tradingagents/llm_clients/validators.py @@ -48,6 +48,9 @@ VALID_MODELS = { "grok-4-fast-reasoning", "grok-4-fast-non-reasoning", ], + "zai": [ + "glm-5", + ], } diff --git a/tradingagents/llm_clients/zai_client.py b/tradingagents/llm_clients/zai_client.py new file mode 100644 index 00000000..40355bfd --- /dev/null +++ b/tradingagents/llm_clients/zai_client.py @@ -0,0 +1,47 @@ +import os +from typing import Any, Optional + +from langchain_openai import ChatOpenAI + +from .base_client import BaseLLMClient +from .validators import validate_model + + +class ZAIClient(BaseLLMClient): + """Client for Z.AI GLM models over the OpenAI-compatible API.""" + + DEFAULT_BASE_URL = "https://api.z.ai/api/coding/paas/v4" + + def __init__(self, model: 
str, base_url: Optional[str] = None, **kwargs): + super().__init__(model, base_url, **kwargs) + + def get_llm(self) -> Any: + """Return configured ChatOpenAI instance for Z.AI.""" + llm_kwargs = { + "model": self.model, + "base_url": self.base_url or self.DEFAULT_BASE_URL, + } + + api_key = os.environ.get("ZAI_API_KEY") + if api_key: + llm_kwargs["api_key"] = api_key + + for key in ( + "timeout", + "max_retries", + "api_key", + "callbacks", + "http_client", + "http_async_client", + "temperature", + "top_p", + "extra_body", + ): + if key in self.kwargs: + llm_kwargs[key] = self.kwargs[key] + + return ChatOpenAI(**llm_kwargs) + + def validate_model(self) -> bool: + """Validate model for Z.AI.""" + return validate_model("zai", self.model)