This commit is contained in:
Ruicheng Geng 2026-03-31 22:26:45 +08:00
parent 589b351f2a
commit bc17acc998
6 changed files with 37 additions and 1313 deletions

5
.gitignore vendored
View File

@@ -217,3 +217,8 @@ __marimo__/
# Cache # Cache
**/data_cache/ **/data_cache/
.vscode/
**.json

View File

@@ -8,14 +8,14 @@ DEFAULT_CONFIG = {
"dataflows/data_cache", "dataflows/data_cache",
), ),
# LLM settings # LLM settings
"llm_provider": "openai", "llm_provider": "google",
"deep_think_llm": "gpt-5.2", "deep_think_llm": "gemini-3-pro-preview",
"quick_think_llm": "gpt-5-mini", "quick_think_llm": "gemini-2.5-flash",
"backend_url": "https://api.openai.com/v1", "backend_url": "https://generativelanguage.googleapis.com/v1beta",
# Provider-specific thinking configuration # Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc. "google_thinking_level": "high", # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low" "openai_reasoning_effort": "high", # "medium", "high", "low"
"anthropic_effort": None, # "high", "medium", "low" "anthropic_effort": "high", # "high", "medium", "low"
# Debate and discussion settings # Debate and discussion settings
"max_debate_rounds": 1, "max_debate_rounds": 1,
"max_risk_discuss_rounds": 1, "max_risk_discuss_rounds": 1,

View File

@@ -7,6 +7,7 @@ from datetime import date
from typing import Dict, Any, Tuple, List, Optional from typing import Dict, Any, Tuple, List, Optional
from langgraph.prebuilt import ToolNode from langgraph.prebuilt import ToolNode
from dotenv import load_dotenv
from tradingagents.llm_clients import create_llm_client from tradingagents.llm_clients import create_llm_client
@@ -290,3 +291,12 @@ class TradingAgentsGraph:
def process_signal(self, full_signal): def process_signal(self, full_signal):
"""Process a signal to extract the core decision.""" """Process a signal to extract the core decision."""
return self.signal_processor.process_signal(full_signal) return self.signal_processor.process_signal(full_signal)
if __name__ == "__main__":
    # Demo entry point: pull API keys from .env, build the graph in debug
    # mode, and run one forward pass for a sample ticker/date.
    load_dotenv()
    graph = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy())
    # Forward propagate a single trading decision and show the result.
    _, decision = graph.propagate("NVDA", "2026-01-15")
    print(decision)

View File

@@ -1,4 +1,8 @@
from typing import Optional from typing import Optional
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
from .base_client import BaseLLMClient from .base_client import BaseLLMClient
from .openai_client import OpenAIClient from .openai_client import OpenAIClient

View File

@@ -1,3 +1,4 @@
import os
from typing import Any, Optional from typing import Any, Optional
from langchain_google_genai import ChatGoogleGenerativeAI from langchain_google_genai import ChatGoogleGenerativeAI
@@ -31,6 +32,12 @@ class GoogleClient(BaseLLMClient):
if key in self.kwargs: if key in self.kwargs:
llm_kwargs[key] = self.kwargs[key] llm_kwargs[key] = self.kwargs[key]
# Explicitly fallback to environment variable if google_api_key is missing
if "google_api_key" not in llm_kwargs:
env_key = os.getenv("GOOGLE_API_KEY")
if env_key:
llm_kwargs["google_api_key"] = env_key
# Map thinking_level to appropriate API param based on model # Map thinking_level to appropriate API param based on model
# Gemini 3 Pro: low, high # Gemini 3 Pro: low, high
# Gemini 3 Flash: minimal, low, medium, high # Gemini 3 Flash: minimal, low, medium, high
@@ -39,10 +46,9 @@ if thinking_level:
if thinking_level: if thinking_level:
model_lower = self.model.lower() model_lower = self.model.lower()
if "gemini-3" in model_lower: if "gemini-3" in model_lower:
# Gemini 3 Pro doesn't support "minimal", use "low" instead # Use thinking_budget as Gemini 3 Pro/Flash SDK expects it
if "pro" in model_lower and thinking_level == "minimal": # Mapping: low/minimal -> small budget, high -> large budget
thinking_level = "low" llm_kwargs["thinking_budget"] = 2000 if thinking_level == "high" else 1000
llm_kwargs["thinking_level"] = thinking_level
else: else:
# Gemini 2.5: map to thinking_budget # Gemini 2.5: map to thinking_budget
llm_kwargs["thinking_budget"] = -1 if thinking_level == "high" else 0 llm_kwargs["thinking_budget"] = -1 if thinking_level == "high" else 0

1303
uv.lock

File diff suppressed because it is too large Load Diff