Updates for variable LLM types
This commit is contained in:
parent
7abff0f354
commit
4218f5f209
@@ -0,0 +1,10 @@
+FINN_HUB_API_KEY = cea18mqad3i831op604gcea18mqad3i831op6050
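The hunk above adds a Finnhub API key as a plain KEY = value entry. A minimal sketch of how such an entry is typically consumed at runtime, assuming the file is an env-style file whose contents are exported into the process environment before any data-fetching code runs (the loading mechanism itself is not shown in this commit):

import os

# Assumption: FINN_HUB_API_KEY has been loaded into the environment,
# e.g. by a dotenv-style loader, before this code executes.
finnhub_key = os.getenv("FINN_HUB_API_KEY")
if finnhub_key is None:
    raise RuntimeError("FINN_HUB_API_KEY is not set")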
@@ -1,13 +1,18 @@
 # TradingAgents/graph/reflection.py

-from typing import Dict, Any
+from typing import Dict, Any, Union
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+# Type alias for supported LLM types
+LLMType = Union[ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI]


 class Reflector:
     """Handles reflection on decisions and updating memory."""

-    def __init__(self, quick_thinking_llm: ChatOpenAI):
+    def __init__(self, quick_thinking_llm: LLMType):
         """Initialize the reflector with an LLM."""
         self.quick_thinking_llm = quick_thinking_llm
         self.reflection_system_prompt = self._get_reflection_prompt()
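A minimal sketch of what the new LLMType alias permits: Reflector can now be constructed with any of the three chat model classes, and a static type checker accepts all of them. The model names below are placeholders, not values taken from this commit.

from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic

# Either instance satisfies the LLMType annotation on Reflector.__init__.
openai_reflector = Reflector(ChatOpenAI(model="gpt-4o-mini"))
anthropic_reflector = Reflector(
    ChatAnthropic(model_name="claude-3-5-haiku-latest", timeout=120, stop=None)
)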
@@ -68,7 +73,7 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur
         ]

         result = self.quick_thinking_llm.invoke(messages).content
-        return result
+        return str(result)

     def reflect_bull_researcher(self, current_state, returns_losses, bull_memory):
         """Reflect on bull researcher's analysis and update memory."""
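The str() wrapper reflects that .content is only guaranteed to be a plain string for some providers; Anthropic and Gemini responses can carry a list of content blocks instead. A hypothetical helper illustrating the behaviour the changed return statement relies on (not code from this commit):

def _as_text(content) -> str:
    # AIMessage.content is a str for typical OpenAI replies, but can be a list of
    # content blocks for Anthropic/Gemini; str() collapses both cases to a string,
    # keeping the reflection text handed to memory consistently typed.
    return content if isinstance(content, str) else str(content)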
@@ -1,7 +1,9 @@
 # TradingAgents/graph/setup.py

-from typing import Dict, Any
+from typing import Dict, Any, Union
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI
 from langgraph.graph import END, StateGraph, START
 from langgraph.prebuilt import ToolNode

@@ -11,14 +13,17 @@ from tradingagents.agents.utils.agent_utils import Toolkit

 from .conditional_logic import ConditionalLogic

+# Type alias for supported LLM types
+LLMType = Union[ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI]
+

 class GraphSetup:
     """Handles the setup and configuration of the agent graph."""

     def __init__(
         self,
-        quick_thinking_llm: ChatOpenAI,
-        deep_thinking_llm: ChatOpenAI,
+        quick_thinking_llm: LLMType,
+        deep_thinking_llm: LLMType,
         toolkit: Toolkit,
         tool_nodes: Dict[str, ToolNode],
         bull_memory,
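With both constructor parameters widened to LLMType, the quick and deep thinkers no longer have to come from the same provider. A hypothetical wiring sketch; the remaining GraphSetup arguments, elided below, are unchanged by this commit, and the model names are placeholders:

quick = ChatOpenAI(model="gpt-4o-mini")
deep = ChatGoogleGenerativeAI(model="gemini-1.5-pro")

# setup = GraphSetup(
#     quick_thinking_llm=quick,
#     deep_thinking_llm=deep,
#     toolkit=toolkit,
#     tool_nodes=tool_nodes,
#     bull_memory=bull_memory,
#     ...,  # remaining memories/arguments omitted
# )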
@@ -1,12 +1,18 @@
 # TradingAgents/graph/signal_processing.py

+from typing import Union
 from langchain_openai import ChatOpenAI
+from langchain_anthropic import ChatAnthropic
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+# Type alias for supported LLM types
+LLMType = Union[ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI]


 class SignalProcessor:
     """Processes trading signals to extract actionable decisions."""

-    def __init__(self, quick_thinking_llm: ChatOpenAI):
+    def __init__(self, quick_thinking_llm: LLMType):
         """Initialize with an LLM for processing."""
         self.quick_thinking_llm = quick_thinking_llm

@@ -28,4 +34,4 @@ class SignalProcessor:
             ("human", full_signal),
         ]

-        return self.quick_thinking_llm.invoke(messages).content
+        return str(self.quick_thinking_llm.invoke(messages).content)
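Usage is unchanged apart from the wider annotation; a brief sketch, assuming the extraction method containing the return statement above is exposed as something like process_signal (neither the method name nor the model name is taken from this hunk):

processor = SignalProcessor(ChatGoogleGenerativeAI(model="gemini-1.5-flash"))
# decision = processor.process_signal(full_signal)  # hypothetical method name; returns a plain str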
@@ -36,7 +36,7 @@ class TradingAgentsGraph:
         self,
         selected_analysts=["market", "social", "news", "fundamentals"],
         debug=False,
-        config: Dict[str, Any] = None,
+        config: Optional[Dict[str, Any]] = None,
     ):
         """Initialize the trading agents graph and components.

@@ -62,8 +62,8 @@ class TradingAgentsGraph:
             self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
             self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
         elif self.config["llm_provider"].lower() == "anthropic":
-            self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
-            self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+            self.deep_thinking_llm = ChatAnthropic(model_name=self.config["deep_think_llm"], base_url=self.config["backend_url"], timeout=120, stop=None)
+            self.quick_thinking_llm = ChatAnthropic(model_name=self.config["quick_think_llm"], base_url=self.config["backend_url"], timeout=120, stop=None)
         elif self.config["llm_provider"].lower() == "google":
             self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
             self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
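The provider branch above is driven entirely by config; the Anthropic branch now uses the model_name keyword plus explicit timeout and stop values. A hedged example of the keys this branch reads; model names and URL are placeholders, and any other keys TradingAgentsGraph requires are omitted:

config = {
    "llm_provider": "anthropic",
    "deep_think_llm": "claude-3-5-sonnet-latest",   # placeholder model name
    "quick_think_llm": "claude-3-5-haiku-latest",   # placeholder model name
    "backend_url": "https://api.anthropic.com",     # passed through as base_url
}
# graph = TradingAgentsGraph(debug=False, config=config)  # other required config keys omitted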