From 4218f5f209928dbb95fdd4811908e0cd010330bd Mon Sep 17 00:00:00 2001 From: sdk451 Date: Thu, 26 Jun 2025 08:02:03 +1000 Subject: [PATCH] Updates for variable LLM types --- .env.txt | 10 ++++++++++ tradingagents/graph/reflection.py | 11 ++++++++--- tradingagents/graph/setup.py | 11 ++++++++--- tradingagents/graph/signal_processing.py | 10 ++++++++-- tradingagents/graph/trading_graph.py | 6 +++--- 5 files changed, 37 insertions(+), 11 deletions(-) create mode 100644 .env.txt diff --git a/.env.txt b/.env.txt new file mode 100644 index 00000000..b40ce0a4 --- /dev/null +++ b/.env.txt @@ -0,0 +1,10 @@ +FINN_HUB_API_KEY = REDACTED_ROTATE_THIS_KEY + + + + + + + + + diff --git a/tradingagents/graph/reflection.py b/tradingagents/graph/reflection.py index 33303231..6158bd0c 100644 --- a/tradingagents/graph/reflection.py +++ b/tradingagents/graph/reflection.py @@ -1,13 +1,18 @@ # TradingAgents/graph/reflection.py -from typing import Dict, Any +from typing import Dict, Any, Union from langchain_openai import ChatOpenAI +from langchain_anthropic import ChatAnthropic +from langchain_google_genai import ChatGoogleGenerativeAI + +# Type alias for supported LLM types +LLMType = Union[ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI] class Reflector: """Handles reflection on decisions and updating memory.""" - def __init__(self, quick_thinking_llm: ChatOpenAI): + def __init__(self, quick_thinking_llm: LLMType): """Initialize the reflector with an LLM.""" self.quick_thinking_llm = quick_thinking_llm self.reflection_system_prompt = self._get_reflection_prompt() @@ -68,7 +73,7 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur ] result = self.quick_thinking_llm.invoke(messages).content - return result + return str(result) def reflect_bull_researcher(self, current_state, returns_losses, bull_memory): """Reflect on bull researcher's analysis and update memory.""" diff --git a/tradingagents/graph/setup.py b/tradingagents/graph/setup.py 
index 847c429f..b1f29a34 100644 --- a/tradingagents/graph/setup.py +++ b/tradingagents/graph/setup.py @@ -1,7 +1,9 @@ # TradingAgents/graph/setup.py -from typing import Dict, Any +from typing import Dict, Any, Union from langchain_openai import ChatOpenAI +from langchain_anthropic import ChatAnthropic +from langchain_google_genai import ChatGoogleGenerativeAI from langgraph.graph import END, StateGraph, START from langgraph.prebuilt import ToolNode @@ -11,14 +13,17 @@ from tradingagents.agents.utils.agent_utils import Toolkit from .conditional_logic import ConditionalLogic +# Type alias for supported LLM types +LLMType = Union[ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI] + class GraphSetup: """Handles the setup and configuration of the agent graph.""" def __init__( self, - quick_thinking_llm: ChatOpenAI, - deep_thinking_llm: ChatOpenAI, + quick_thinking_llm: LLMType, + deep_thinking_llm: LLMType, toolkit: Toolkit, tool_nodes: Dict[str, ToolNode], bull_memory, diff --git a/tradingagents/graph/signal_processing.py b/tradingagents/graph/signal_processing.py index 903e8529..7fafecbd 100644 --- a/tradingagents/graph/signal_processing.py +++ b/tradingagents/graph/signal_processing.py @@ -1,12 +1,18 @@ # TradingAgents/graph/signal_processing.py +from typing import Union from langchain_openai import ChatOpenAI +from langchain_anthropic import ChatAnthropic +from langchain_google_genai import ChatGoogleGenerativeAI + +# Type alias for supported LLM types +LLMType = Union[ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI] class SignalProcessor: """Processes trading signals to extract actionable decisions.""" - def __init__(self, quick_thinking_llm: ChatOpenAI): + def __init__(self, quick_thinking_llm: LLMType): """Initialize with an LLM for processing.""" self.quick_thinking_llm = quick_thinking_llm @@ -28,4 +34,4 @@ class SignalProcessor: ("human", full_signal), ] - return self.quick_thinking_llm.invoke(messages).content + return 
str(self.quick_thinking_llm.invoke(messages).content) diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index eb06cf43..e22c3b11 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -36,7 +36,7 @@ class TradingAgentsGraph: self, selected_analysts=["market", "social", "news", "fundamentals"], debug=False, - config: Dict[str, Any] = None, + config: Optional[Dict[str, Any]] = None, ): """Initialize the trading agents graph and components. @@ -62,8 +62,8 @@ class TradingAgentsGraph: self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"]) self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"]) elif self.config["llm_provider"].lower() == "anthropic": - self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"]) - self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"]) + self.deep_thinking_llm = ChatAnthropic(model_name=self.config["deep_think_llm"], base_url=self.config["backend_url"], timeout=120, stop=None) + self.quick_thinking_llm = ChatAnthropic(model_name=self.config["quick_think_llm"], base_url=self.config["backend_url"], timeout=120, stop=None) elif self.config["llm_provider"].lower() == "google": self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"]) self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])