TradingAgents/tradingagents/llm_clients/google_client.py

import os
from typing import Any, Optional
from langchain_google_genai import ChatGoogleGenerativeAI
from .base_client import BaseLLMClient
from .validators import validate_model


class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
    """ChatGoogleGenerativeAI with normalized content output.

    Gemini 3 models return content as a list of parts, e.g.
    [{'type': 'text', 'text': '...'}]. This subclass flattens that list
    into a plain string for consistent downstream handling.
    """

    def _normalize_content(self, response):
        content = response.content
        if isinstance(content, list):
            texts = [
                item.get("text", "")
                if isinstance(item, dict) and item.get("type") == "text"
                else item if isinstance(item, str) else ""
                for item in content
            ]
            response.content = "\n".join(t for t in texts if t)
        return response

    def invoke(self, input, config=None, **kwargs):
        return self._normalize_content(super().invoke(input, config, **kwargs))
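

# Sketch of the normalization in effect (hypothetical payload; non-text part
# dicts are dropped, bare strings are kept as-is):
#
#     from langchain_core.messages import AIMessage
#     msg = AIMessage(content=[{"type": "text", "text": "Hello"},
#                              {"type": "text", "text": "world"}])
#     # after _normalize_content: msg.content == "Hello\nworld"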


class GoogleClient(BaseLLMClient):
    """Client for Google Gemini models."""

    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        super().__init__(model, base_url, **kwargs)

    def get_llm(self) -> Any:
        """Return a configured ChatGoogleGenerativeAI instance."""
        import certifi

        # Work around an SSL certificate path issue on Windows with conda:
        # conda can set SSL_CERT_FILE to a non-existent path, so when the
        # path is invalid we point it at certifi's bundle instead.
        ssl_cert_file = os.environ.get("SSL_CERT_FILE", "")
        if ssl_cert_file and not os.path.exists(ssl_cert_file):
            os.environ["SSL_CERT_FILE"] = certifi.where()

        llm_kwargs = {"model": self.model}

        # Get the Google API key from kwargs or the environment
        if "google_api_key" in self.kwargs:
            llm_kwargs["google_api_key"] = self.kwargs["google_api_key"]
        else:
            api_key = os.environ.get("GOOGLE_API_KEY")
            if api_key:
                llm_kwargs["google_api_key"] = api_key

        # Pass through common LangChain options when provided
        for key in ("timeout", "max_retries", "callbacks"):
            if key in self.kwargs:
                llm_kwargs[key] = self.kwargs[key]
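
        # At this point llm_kwargs might look like (values illustrative):
        #   {"model": "gemini-2.5-flash", "google_api_key": "...", "max_retries": 2}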

        # Map thinking_level to the appropriate API param for the model:
        #   Gemini 3 Pro:   low, high
        #   Gemini 3 Flash: minimal, low, medium, high
        #   Gemini 2.5:     thinking_budget (0 = disable, -1 = dynamic)
        thinking_level = self.kwargs.get("thinking_level")
        if thinking_level:
            model_lower = self.model.lower()
            if "gemini-3" in model_lower:
                # Gemini 3 Pro doesn't support "minimal"; use "low" instead
                if "pro" in model_lower and thinking_level == "minimal":
                    thinking_level = "low"
                llm_kwargs["thinking_level"] = thinking_level
            else:
                # Gemini 2.5: map to thinking_budget
                llm_kwargs["thinking_budget"] = -1 if thinking_level == "high" else 0

        return NormalizedChatGoogleGenerativeAI(**llm_kwargs)

    def validate_model(self) -> bool:
        """Validate model for Google."""
        return validate_model("google", self.model)
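

if __name__ == "__main__":
    # Minimal usage sketch, assuming GOOGLE_API_KEY is set in the environment
    # and that the model name below passes this project's validate_model check
    # (both are assumptions, not guarantees).
    client = GoogleClient("gemini-2.5-flash", thinking_level="high")
    if client.validate_model():
        llm = client.get_llm()
        # invoke() returns a message whose .content is normalized to a str
        print(llm.invoke("Say hello in one short sentence.").content)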