added X, Reuters, EODHD and more support

This commit is contained in:
Manu Bhardwaj 2025-09-20 19:55:17 -04:00
parent a438acdbbd
commit e8d824d4ed
No known key found for this signature in database
GPG Key ID: 7E6C7C148E63578F
26 changed files with 794 additions and 198 deletions

38
.env.example Normal file
View File

@ -0,0 +1,38 @@
# OpenAI Configuration
OPENAI_API_KEY=your_openai_api_key_here
# Anthropic Configuration
ANTHROPIC_API_KEY=your_anthropic_api_key_here
# X (Twitter) API Configuration
X_BEARER_TOKEN=your_x_bearer_token_here
X_API_KEY=your_x_api_key_here
X_API_SECRET=your_x_api_secret_here
X_ACCESS_TOKEN=your_x_access_token_here
X_ACCESS_SECRET=your_x_access_secret_here
# Bloomberg API Configuration
BLOOMBERG_API_KEY=your_bloomberg_api_key_here
# Reuters API Configuration
REUTERS_API_KEY=your_reuters_api_key_here
# Reddit Configuration
REDDIT_CLIENT_ID=your_reddit_client_id_here
REDDIT_CLIENT_SECRET=your_reddit_client_secret_here
REDDIT_USER_AGENT=TradingAgents/1.0
# FinnHub Configuration
FINNHUB_API_KEY=your_finnhub_api_key_here
# Alpha Vantage Configuration
ALPHA_VANTAGE_API_KEY=your_alpha_vantage_api_key_here
# Polygon.io Configuration
POLYGON_API_KEY=your_polygon_api_key_here
# ChromaDB Configuration
CHROMA_DB_PATH=./chroma_data
# Trading Configuration
TRADINGAGENTS_RESULTS_DIR=./results

View File

@ -1,26 +1,34 @@
typing-extensions
langchain-openai
langchain-experimental
pandas
yfinance
praw
feedparser
stockstats
eodhd
langgraph
chromadb
setuptools
backtrader
akshare
tushare
finnhub-python
parsel
requests
tqdm
pytz
redis
chainlit
rich
questionary
langchain_anthropic
langchain-google-genai
# Essential dependencies with compatible versions
pandas>=2.0.0
numpy>=1.24.0
requests>=2.28.0
# LangChain ecosystem - compatible versions
langchain-core>=0.3.75
langchain-openai>=0.3.0
langchain-experimental>=0.3.0
langchain_anthropic>=0.3.0
langgraph>=0.6.0
langgraph-checkpoint>=2.1.0
# Financial data - minimal set
yfinance>=0.2.28
stockstats>=0.6.2
finnhub-python>=2.4.20
# Data storage
chromadb>=0.5.0
# Web scraping - essential only
feedparser>=6.0.11
python-dotenv>=1.0.1
# Text processing
textblob>=0.18.0
# Utilities
tqdm>=4.66.0
setuptools>=70.0.0
# UI
rich>=13.7.0

View File

@ -1,13 +1,10 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
def create_fundamentals_analyst(llm, toolkit):
def fundamentals_analyst_node(state):
current_date = state["trade_date"]
ticker = state["company_of_interest"]
company_name = state["company_of_interest"]
if toolkit.config["online_tools"]:
tools = [toolkit.get_fundamentals_openai]
@ -21,29 +18,44 @@ def create_fundamentals_analyst(llm, toolkit):
]
system_message = (
"You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
+ " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.",
"You are a researcher tasked with analyzing fundamental "
"information over the past week about a company. Please write "
"a comprehensive report of the company's fundamental information "
"such as financial documents, company profile, basic company "
"financials, company financial history, insider sentiment and "
"insider transactions to gain a full view of the company's "
"fundamental information to inform traders. Make sure to include "
"as much detail as possible. Do not simply state the trends are "
"mixed, provide detailed and finegrained analysis and insights "
"that may help traders make decisions. Make sure to append a "
"Markdown table at the end of the report to organize key points "
"in the report, organized and easy to read."
)
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are a helpful AI assistant, collaborating with other assistants."
" Use the provided tools to progress towards answering the question."
" If you are unable to fully answer, that's OK; another assistant with different tools"
" will help where you left off. Execute what you can to make progress."
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}",
"You are a helpful AI assistant, collaborating with other "
"assistants. Use the provided tools to progress towards "
"answering the question. If you are unable to fully answer, "
"that's OK; another assistant with different tools will help "
"where you left off. Execute what you can to make progress. "
"If you or any other assistant has the FINAL TRANSACTION "
"PROPOSAL: **BUY/HOLD/SELL** or deliverable, prefix your "
"response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** "
"so the team knows to stop. You have access to the following "
"tools: {tool_names}.\n{system_message}For your reference, "
"the current date is {current_date}. The company we want to "
"look at is {ticker}",
),
MessagesPlaceholder(variable_name="messages"),
]
)
prompt = prompt.partial(system_message=system_message)
prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
tool_names = ", ".join([tool.name for tool in tools])
prompt = prompt.partial(tool_names=tool_names)
prompt = prompt.partial(current_date=current_date)
prompt = prompt.partial(ticker=ticker)
@ -51,10 +63,7 @@ def create_fundamentals_analyst(llm, toolkit):
result = chain.invoke(state["messages"])
report = ""
if len(result.tool_calls) == 0:
report = result.content
report = result.content if result.content else ""
return {
"messages": [result],

View File

@ -1,6 +1,4 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
def create_market_analyst(llm, toolkit):
@ -8,7 +6,6 @@ def create_market_analyst(llm, toolkit):
def market_analyst_node(state):
current_date = state["trade_date"]
ticker = state["company_of_interest"]
company_name = state["company_of_interest"]
if toolkit.config["online_tools"]:
tools = [
@ -76,11 +73,8 @@ Volume-Based Indicators:
result = chain.invoke(state["messages"])
report = ""
report = result.content if result.content else ""
if len(result.tool_calls) == 0:
report = result.content
return {
"messages": [result],
"market_report": report,

View File

@ -1,23 +1,25 @@
import time
import json
def create_research_manager(llm, memory):
def research_manager_node(state) -> dict:
history = state["investment_debate_state"].get("history", "")
market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
if not state or not isinstance(state, dict):
raise ValueError("Invalid state provided to research_manager")
investment_debate_state = state["investment_debate_state"]
investment_debate_state = state.get("investment_debate_state", {})
history = investment_debate_state.get("history", "")
market_research_report = state.get("market_report", "")
sentiment_report = state.get("sentiment_report", "")
news_report = state.get("news_report", "")
fundamentals_report = state.get("fundamentals_report", "")
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
curr_situation = f"Market: {market_research_report}\nSentiment: {sentiment_report}\nNews: {news_report}\nFundamentals: {fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=3, min_similarity=0.8)
past_memory_str = ""
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
if past_memories:
for i, rec in enumerate(past_memories, 1):
similarity = rec.get("similarity_score", 0)
past_memory_str += f"Research Memory {i} (similarity: {similarity:.3f}): {rec['recommendation']}\n\n"
else:
past_memory_str = "No statistically significant research memories found (similarity < 80%)."
prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.

View File

@ -1,5 +1,3 @@
import time
import json
def create_risk_manager(llm, memory):
@ -11,16 +9,20 @@ def create_risk_manager(llm, memory):
risk_debate_state = state["risk_debate_state"]
market_research_report = state["market_report"]
news_report = state["news_report"]
fundamentals_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
sentiment_report = state["sentiment_report"]
trader_plan = state["investment_plan"]
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
curr_situation = f"Market: {market_research_report}\nSentiment: {sentiment_report}\nNews: {news_report}\nFundamentals: {fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=3, min_similarity=0.8)
past_memory_str = ""
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
if past_memories:
for i, rec in enumerate(past_memories, 1):
similarity = rec.get("similarity_score", 0)
past_memory_str += f"Risk Memory {i} (similarity: {similarity:.3f}): {rec['recommendation']}\n\n"
else:
past_memory_str = "No statistically significant risk memories found (similarity < 80%)."
prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Risky, Neutral, and Safe/Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.

View File

@ -1,26 +1,31 @@
from langchain_core.messages import AIMessage
import time
import json
import functools
def create_bear_researcher(llm, memory):
def bear_node(state) -> dict:
investment_debate_state = state["investment_debate_state"]
if not state or not isinstance(state, dict):
raise ValueError("Invalid state provided to bear_researcher")
investment_debate_state = state.get("investment_debate_state", {})
history = investment_debate_state.get("history", "")
bear_history = investment_debate_state.get("bear_history", "")
current_response = investment_debate_state.get("current_response", "")
market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
market_research_report = state.get("market_report", "")
sentiment_report = state.get("sentiment_report", "")
news_report = state.get("news_report", "")
fundamentals_report = state.get("fundamentals_report", "")
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
curr_situation = f"Market: {market_research_report}\nSentiment: {sentiment_report}\nNews: {news_report}\nFundamentals: {fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=3, min_similarity=0.8)
past_memory_str = ""
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
if past_memories:
for i, rec in enumerate(past_memories, 1):
similarity = rec.get("similarity_score", 0)
past_memory_str += f"Bear Memory {i} (similarity: {similarity:.3f}): {rec['recommendation']}\n\n"
else:
past_memory_str = "No statistically significant bear memories found (similarity < 80%)."
prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.

View File

@ -1,26 +1,31 @@
from langchain_core.messages import AIMessage
import time
import json
import functools
def create_bull_researcher(llm, memory):
def bull_node(state) -> dict:
investment_debate_state = state["investment_debate_state"]
if not state or not isinstance(state, dict):
raise ValueError("Invalid state provided to bull_researcher")
investment_debate_state = state.get("investment_debate_state", {})
history = investment_debate_state.get("history", "")
bull_history = investment_debate_state.get("bull_history", "")
current_response = investment_debate_state.get("current_response", "")
market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
market_research_report = state.get("market_report", "")
sentiment_report = state.get("sentiment_report", "")
news_report = state.get("news_report", "")
fundamentals_report = state.get("fundamentals_report", "")
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
curr_situation = f"Market: {market_research_report}\nSentiment: {sentiment_report}\nNews: {news_report}\nFundamentals: {fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=3, min_similarity=0.8)
past_memory_str = ""
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
if past_memories:
for i, rec in enumerate(past_memories, 1):
similarity = rec.get("similarity_score", 0)
past_memory_str += f"Bull Memory {i} (similarity: {similarity:.3f}): {rec['recommendation']}\n\n"
else:
past_memory_str = "No statistically significant bull memories found (similarity < 80%)."
prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.

View File

@ -1,15 +1,15 @@
import time
import json
def create_risky_debator(llm):
def risky_node(state) -> dict:
risk_debate_state = state["risk_debate_state"]
history = risk_debate_state.get("history", "")
risky_history = risk_debate_state.get("risky_history", "")
current_safe_response = risk_debate_state.get("current_safe_response", "")
current_neutral_response = risk_debate_state.get("current_neutral_response", "")
current_safe_response = risk_debate_state.get(
"current_safe_response", ""
)
current_neutral_response = risk_debate_state.get(
"current_neutral_response", ""
)
market_research_report = state["market_report"]
sentiment_report = state["sentiment_report"]
@ -18,19 +18,34 @@ def create_risky_debator(llm):
trader_decision = state["trader_investment_plan"]
prompt = f"""As the Risky Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision:
prompt = f"""As the Risky Risk Analyst, your role is to actively champion
high-reward, high-risk opportunities, emphasizing bold strategies and competitive
advantages. When evaluating the trader's decision or plan, focus intently on the
potential upside, growth potential, and innovative benefitseven when these come
with elevated risk. Use the provided market data and sentiment analysis to
strengthen your arguments and challenge the opposing views.
{trader_decision}
Here is the trader's decision: {trader_decision}
Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments:
Your task is to create a compelling case for the trader's decision by questioning
and critiquing the conservative and neutral stances to demonstrate why your
high-reward perspective offers the best path forward. Incorporate insights from
the following sources into your arguments:
Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""
Current conversation history: {history}
Last arguments from conservative analyst: {current_safe_response}
Last arguments from neutral analyst: {current_neutral_response}
If there are no responses from the other viewpoints, do not halluncinate and
just present your point. Engage actively by addressing any specific concerns
raised, refuting the weaknesses in their logic, and asserting the benefits of
risk-taking to outpace market norms. Output conversationally as if you are
speaking without any special formatting."""
response = llm.invoke(prompt)

View File

@ -1,6 +1,3 @@
from langchain_core.messages import AIMessage
import time
import json
def create_safe_debator(llm):

View File

@ -1,14 +1,12 @@
import time
import json
def create_neutral_debator(llm):
def neutral_node(state) -> dict:
risk_debate_state = state["risk_debate_state"]
history = risk_debate_state.get("history", "")
neutral_history = risk_debate_state.get("neutral_history", "")
current_risky_response = risk_debate_state.get("current_risky_response", "")
current_risky_response = risk_debate_state.get(
"current_risky_response", ""
)
current_safe_response = risk_debate_state.get("current_safe_response", "")
market_research_report = state["market_report"]
@ -18,19 +16,33 @@ def create_neutral_debator(llm):
trader_decision = state["trader_investment_plan"]
prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies.Here is the trader's decision:
prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced
perspective, weighing both the potential benefits and risks of the trader's
decision or plan. You prioritize a well-rounded approach, evaluating the upsides
and downsides while factoring in broader market trends, potential economic
shifts, and diversification strategies.
{trader_decision}
Here is the trader's decision: {trader_decision}
Your task is to challenge both the Risky and Safe Analysts, pointing out where each perspective may be overly optimistic or overly cautious. Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision:
Your task is to challenge both the Risky and Safe Analysts, pointing out where
each perspective may be overly optimistic or overly cautious. Use insights from
the following data sources to support a moderate, sustainable strategy to
adjust the trader's decision:
Market Research Report: {market_research_report}
Social Media Sentiment Report: {sentiment_report}
Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""
Current conversation history: {history}
Last response from risky analyst: {current_risky_response}
Last response from safe analyst: {current_safe_response}
If there are no responses from the other viewpoints, do not halluncinate and
just present your point. Engage actively by analyzing both sides critically,
addressing weaknesses in the risky and conservative arguments to advocate for a
more balanced approach. Focus on debating rather than simply presenting data.
Output conversationally as if you are speaking without any special formatting."""
response = llm.invoke(prompt)
@ -45,7 +57,9 @@ Engage actively by analyzing both sides critically, addressing weaknesses in the
"current_risky_response": risk_debate_state.get(
"current_risky_response", ""
),
"current_safe_response": risk_debate_state.get("current_safe_response", ""),
"current_safe_response": risk_debate_state.get(
"current_safe_response", ""
),
"current_neutral_response": argument,
"count": risk_debate_state["count"] + 1,
}

View File

@ -1,6 +1,4 @@
import functools
import time
import json
def create_trader(llm, memory):
@ -12,15 +10,16 @@ def create_trader(llm, memory):
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
curr_situation = f"Market: {market_research_report}\nSentiment: {sentiment_report}\nNews: {news_report}\nFundamentals: {fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=3, min_similarity=0.8)
past_memory_str = ""
if past_memories:
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
similarity = rec.get("similarity_score", 0)
past_memory_str += f"Memory {i} (similarity: {similarity:.3f}): {rec['recommendation']}\n\n"
else:
past_memory_str = "No past memories found."
past_memory_str = "No statistically significant past memories found (similarity < 80%)."
context = {
"role": "user",

View File

@ -417,3 +417,57 @@ class Toolkit:
)
return openai_fundamentals_results
@staticmethod
@tool
def get_x_stock_sentiment(
    ticker: Annotated[str, "Ticker of a company. e.g. AAPL, TSM"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve X sentiment analysis for a stock ticker.

    Thin tool wrapper: all fetching and formatting is delegated to
    ``tradingagents.dataflows.x_utils.get_x_stock_sentiment``.

    Args:
        ticker (str): Ticker of a company. e.g. AAPL, TSM
        curr_date (str): Current date in yyyy-mm-dd format
    Returns:
        str: Formatted sentiment analysis from X posts about the stock
    """
    # Imported lazily inside the tool body — NOTE(review): presumably so the
    # Toolkit class imports even when x_utils' dependencies/credentials are
    # absent; confirm against the other dataflow wrappers.
    from tradingagents.dataflows.x_utils import get_x_stock_sentiment
    return get_x_stock_sentiment(ticker, curr_date)
@staticmethod
@tool
def get_bloomberg_news(
    ticker: Annotated[str, "Ticker of a company. e.g. AAPL, TSM"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve Bloomberg news for a stock ticker.

    Thin tool wrapper: all fetching and formatting is delegated to
    ``tradingagents.dataflows.bloomberg_utils.get_bloomberg_news``.

    Args:
        ticker (str): Ticker of a company. e.g. AAPL, TSM
        curr_date (str): Current date in yyyy-mm-dd format
    Returns:
        str: Formatted Bloomberg news about the stock
    """
    # Lazy import keeps the heavy/optional dataflow module off the class
    # import path — NOTE(review): confirm this matches the project's pattern.
    from tradingagents.dataflows.bloomberg_utils import get_bloomberg_news
    return get_bloomberg_news(ticker, curr_date)
@staticmethod
@tool
def get_reuters_news(
    ticker: Annotated[str, "Ticker of a company. e.g. AAPL, TSM"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
) -> str:
    """
    Retrieve Reuters news for a stock ticker.

    Thin tool wrapper: all fetching and formatting is delegated to
    ``tradingagents.dataflows.reuters_utils.get_reuters_news``.

    Args:
        ticker (str): Ticker of a company. e.g. AAPL, TSM
        curr_date (str): Current date in yyyy-mm-dd format
    Returns:
        str: Formatted Reuters news about the stock
    """
    # Lazy import keeps the heavy/optional dataflow module off the class
    # import path — NOTE(review): confirm this matches the project's pattern.
    from tradingagents.dataflows.reuters_utils import get_reuters_news
    return get_reuters_news(ticker, curr_date)

View File

@ -44,27 +44,39 @@ class FinancialSituationMemory:
ids=ids,
)
def get_memories(self, current_situation, n_matches=1):
"""Find matching recommendations using OpenAI embeddings"""
def get_memories(self, current_situation, n_matches=5, min_similarity=0.75):
"""Find statistically significant matching recommendations"""
if not current_situation or len(current_situation.strip()) < 50:
return []
query_embedding = self.get_embedding(current_situation)
max_results = max(n_matches, 10)
results = self.situation_collection.query(
query_embeddings=[query_embedding],
n_results=n_matches,
n_results=max_results,
include=["metadatas", "documents", "distances"],
)
if not results["documents"] or not results["documents"][0]:
return []
matched_results = []
for i in range(len(results["documents"][0])):
matched_results.append(
{
"matched_situation": results["documents"][0][i],
"recommendation": results["metadatas"][0][i]["recommendation"],
"similarity_score": 1 - results["distances"][0][i],
}
)
similarity_score = 1 - results["distances"][0][i]
return matched_results
if similarity_score >= min_similarity:
matched_results.append(
{
"matched_situation": results["documents"][0][i],
"recommendation": results["metadatas"][0][i]["recommendation"],
"similarity_score": similarity_score,
"distance": results["distances"][0][i]
}
)
matched_results.sort(key=lambda x: x["similarity_score"], reverse=True)
return matched_results[:n_matches]
if __name__ == "__main__":

View File

@ -0,0 +1,72 @@
import os
import requests
from typing import Annotated
from dotenv import load_dotenv
load_dotenv()
def get_bloomberg_news(
    ticker: Annotated[str, "Ticker of a company. e.g. AAPL, TSM"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: int = 7
) -> str:
    """Fetch recent Bloomberg headlines for *ticker* and format them as text.

    Args:
        ticker: Ticker of a company, e.g. AAPL, TSM.
        curr_date: Current date in yyyy-mm-dd format. NOTE(review): not used
            by the search call below — the API is simply queried for the
            latest articles; kept for interface symmetry with other providers.
        look_back_days: Nominal look-back window; also unused by the search
            call — see note on curr_date.

    Returns:
        A human-readable string: up to 10 "- headline: summary" lines on
        success, or a short diagnostic message when credentials are missing,
        no news is found, or the service is unavailable.
    """
    try:
        api_key = os.getenv('BLOOMBERG_API_KEY')
        if not api_key:
            # No f-prefix: the message has no placeholders.
            return "Bloomberg News: API credentials not configured"
        company_name = get_company_name(ticker)
        # Query by ticker and by company name; dict.fromkeys dedupes in order
        # so an unmapped ticker (company_name == ticker) is queried only once.
        search_terms = list(dict.fromkeys([ticker, company_name]))
        news_items = []
        for term in search_terms:
            try:
                headers = {"Authorization": f"Bearer {api_key}"}
                url = "https://api.bloomberg.com/v1/news/search"
                params = {
                    'q': term,
                    'limit': 10,
                    'sort': 'publishedAt:desc'
                }
                # timeout added: without it a stalled connection would hang
                # the calling agent indefinitely.
                response = requests.get(
                    url, headers=headers, params=params, timeout=10
                )
                if response.status_code == 200:
                    data = response.json()
                    if data.get('articles'):
                        for article in data['articles'][:5]:
                            headline = article.get('headline', '')
                            summary = article.get('summary', '')[:200]
                            news_items.append(f"- {headline}: {summary}")
            except (requests.RequestException, ValueError):
                # Best-effort per term: network errors and malformed JSON
                # (json() raises a ValueError subclass) skip to the next term.
                continue
        if news_items:
            return f"Bloomberg News for {ticker}:\n" + "\n".join(news_items[:10])
        else:
            return f"Bloomberg News: No recent news found for {ticker}"
    except Exception as e:
        # Top-level boundary: never propagate — degrade to a diagnostic string.
        return f"Bloomberg News: Service unavailable - {str(e)[:50]}"
def get_company_name(ticker: str) -> str:
    """Resolve a ticker symbol to a company name for better search results.

    Falls back to returning the ticker unchanged when it is not one of the
    known large-cap symbols below.
    """
    known_companies = {
        "AAPL": "Apple Inc",
        "MSFT": "Microsoft Corporation",
        "GOOGL": "Alphabet Google",
        "AMZN": "Amazon.com Inc",
        "TSLA": "Tesla Inc",
        "NVDA": "NVIDIA Corporation",
        "META": "Meta Facebook",
        "JPM": "JPMorgan Chase",
        "JNJ": "Johnson & Johnson",
        "V": "Visa Inc",
        "TSM": "Taiwan Semiconductor",
    }
    if ticker in known_companies:
        return known_companies[ticker]
    return ticker

View File

@ -1,4 +1,3 @@
import json
import requests
from bs4 import BeautifulSoup
from datetime import datetime
@ -8,7 +7,6 @@ from tenacity import (
retry,
stop_after_attempt,
wait_exponential,
retry_if_exception_type,
retry_if_result,
)
@ -87,7 +85,7 @@ def getNewsData(query, start_date, end_date):
"source": source,
}
)
except Exception as e:
except (AttributeError, KeyError, TypeError) as e:
print(f"Error processing result: {e}")
# If one of the fields is not found, skip this result
continue
@ -101,7 +99,7 @@ def getNewsData(query, start_date, end_date):
page += 1
except Exception as e:
except (requests.RequestException, ValueError, AttributeError) as e:
print(f"Failed after multiple retries: {e}")
break

View File

@ -36,7 +36,14 @@ def get_finnhub_news(
"""
start_date = datetime.strptime(curr_date, "%Y-%m-%d")
try:
start_date = datetime.strptime(curr_date, "%Y-%m-%d")
except ValueError as e:
return f"Error: Invalid date format '{curr_date}'. Expected YYYY-MM-DD."
if look_back_days < 0 or look_back_days > 365:
return f"Error: look_back_days must be 0-365, got {look_back_days}"
before = start_date - relativedelta(days=look_back_days)
before = before.strftime("%Y-%m-%d")

View File

@ -0,0 +1,90 @@
import os
import requests
from typing import Annotated
from dotenv import load_dotenv
load_dotenv()
def get_reuters_news(
    ticker: Annotated[str, "Ticker of a company. e.g. AAPL, TSM"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: int = 7
) -> str:
    """Fetch recent Reuters articles for *ticker* and format them as text.

    Args:
        ticker: Ticker of a company, e.g. AAPL, TSM.
        curr_date: Current date in yyyy-mm-dd format. NOTE(review): not used
            by the search calls below; kept for interface symmetry with the
            other news providers.
        look_back_days: Nominal look-back window; also unused by the search
            calls — see note on curr_date.

    Returns:
        A human-readable string: up to 8 "- [category] title: summary" lines
        on success, a reuterspy fallback snippet when the search API yields
        nothing, or a short diagnostic message when credentials are missing,
        no news is found, or the service is unavailable.
    """
    try:
        api_key = os.getenv('REUTERS_API_KEY')
        if not api_key:
            # No f-prefix: the message has no placeholders.
            return "Reuters News: API credentials not configured"
        company_name = get_company_name(ticker)
        search_queries = [
            f"{ticker} stock",
            f"{company_name} earnings",
            f"{company_name} news",
        ]
        news_items = []
        for query in search_queries:
            try:
                headers = {
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json"
                }
                url = "https://api.reuters.com/v2/articles/search"
                params = {
                    'query': query,
                    'limit': 5,
                    'sortBy': 'publishedDateTime',
                    'sortOrder': 'desc'
                }
                # timeout added: without it a stalled connection would hang
                # the calling agent indefinitely.
                response = requests.get(
                    url, headers=headers, params=params, timeout=10
                )
                if response.status_code == 200:
                    data = response.json()
                    if data.get('data', {}).get('articles'):
                        for article in data['data']['articles'][:3]:
                            title = article.get('title', '')
                            summary = article.get('description', '')[:150]
                            category = article.get('category', {}).get('name', '')
                            if title and summary:
                                news_items.append(f"- [{category}] {title}: {summary}")
            except (requests.RequestException, ValueError):
                # Best-effort per query: network errors and malformed JSON
                # (json() raises a ValueError subclass) skip to the next query.
                continue
        if news_items:
            return f"Reuters News for {ticker}:\n" + "\n".join(news_items[:8])
        else:
            # Fallback: try the optional reuterspy client for fundamental data
            # when the search API returned nothing.
            try:
                from reuterspy import ReutersPy
                reuters = ReutersPy()
                company_data = reuters.get_company_info(ticker)
                if company_data:
                    return f"Reuters: {ticker} fundamental data available - {company_data[:200]}"
                else:
                    return f"Reuters News: No recent news found for {ticker}"
            except ImportError:
                return f"Reuters News: No recent news found for {ticker}"
    except Exception as e:
        # Top-level boundary: never propagate — degrade to a diagnostic string.
        return f"Reuters News: Service unavailable - {str(e)[:50]}"
def get_company_name(ticker: str) -> str:
    """Return the Reuters-searchable company name for a known *ticker*.

    Unknown tickers fall back to the ticker string itself, so the result
    is always usable as a search term.
    """
    # All keys happen to be valid identifiers, so dict() keyword form works.
    known_companies = dict(
        AAPL="Apple Inc",
        MSFT="Microsoft Corp",
        GOOGL="Alphabet Inc",
        AMZN="Amazon.com",
        TSLA="Tesla Inc",
        NVDA="NVIDIA Corp",
        META="Meta Platforms",
        JPM="JPMorgan Chase",
        JNJ="Johnson & Johnson",
        V="Visa Inc",
        TSM="Taiwan Semiconductor",
    )
    return known_companies.get(ticker, ticker)

View File

@ -11,7 +11,8 @@ class StockstatsUtils:
def get_stock_stats(
symbol: Annotated[str, "ticker symbol for the company"],
indicator: Annotated[
str, "quantitative indicators based off of the stock data for the company"
str,
"quantitative indicators based off of the stock data for the company"
],
curr_date: Annotated[
str, "curr date for retrieving stock price data, YYYY-mm-dd"
@ -22,7 +23,8 @@ class StockstatsUtils:
],
online: Annotated[
bool,
"whether to use online tools to fetch data or offline tools. If True, will use online tools.",
"whether to use online tools to fetch data or offline tools. "
"If True, will use online tools.",
] = False,
):
df = None
@ -38,7 +40,9 @@ class StockstatsUtils:
)
df = wrap(data)
except FileNotFoundError:
raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!")
raise Exception(
"Stockstats fail: Yahoo Finance data not fetched yet!"
)
else:
# Get today's date as YYYY-mm-dd to add to cache
today_date = pd.Timestamp.today()
@ -77,11 +81,21 @@ class StockstatsUtils:
df["Date"] = df["Date"].dt.strftime("%Y-%m-%d")
curr_date = curr_date.strftime("%Y-%m-%d")
df[indicator] # trigger stockstats to calculate the indicator
matching_rows = df[df["Date"].str.startswith(curr_date)]
try:
df[indicator] # trigger stockstats to calculate the indicator
except (KeyError, ValueError) as e:
raise ValueError(f"Invalid indicator '{indicator}': {e}")
try:
matching_rows = df[df["Date"].str.startswith(curr_date)]
except (AttributeError, KeyError) as e:
raise ValueError(f"Date column formatting error: {e}")
if not matching_rows.empty:
indicator_value = matching_rows[indicator].values[0]
return indicator_value
indicator_value = matching_rows[indicator].iloc[0]
# Validate numerical result
if pd.isna(indicator_value):
return f"N/A: Indicator {indicator} not available for {curr_date}"
return float(indicator_value)
else:
return "N/A: Not a trading day (weekend or holiday)"
return f"N/A: No data for {curr_date} (weekend/holiday/non-trading day)"

View File

@ -0,0 +1,126 @@
import requests
import os
import re
import numpy as np
from typing import Annotated
from datetime import datetime, timedelta
from textblob import TextBlob
from dotenv import load_dotenv
load_dotenv()
def get_x_stock_sentiment(
    ticker: Annotated[str, "Ticker of a company. e.g. AAPL, TSM"],
    curr_date: Annotated[str, "Current date in yyyy-mm-dd format"],
    look_back_days: int = 3
) -> str:
    """Summarize recent X (Twitter) sentiment for *ticker* as a one-line report.

    Searches the v2 recent-search endpoint for cashtag posts, scores each
    post with TextBlob (via ``get_sentiment_score``), weights scores by
    engagement (likes + retweets + 1), and classifies the weighted average
    as BULLISH/BEARISH/NEUTRAL.  All failures are reported as strings.

    Returns:
        A summary string with label, weighted mean +/- weighted std,
        confidence (post count / 50, capped at 1.0), and bull/bear counts,
        or an "X Analysis: ..." status/error string.

    NOTE(review): ``curr_date`` and ``look_back_days`` are not used; the
    recent-search endpoint's default window applies.  Confirm intent.
    """
    try:
        bearer_token = os.getenv('X_BEARER_TOKEN')
        if not bearer_token:
            return "X Analysis: API credentials not configured"
        headers = {"Authorization": f"Bearer {bearer_token}"}
        url = "https://api.twitter.com/2/tweets/search/recent"
        params = {
            'query': f"${ticker} -is:retweet lang:en",
            'max_results': 100,
            'tweet.fields': 'created_at,public_metrics,author_id'
        }
        response = requests.get(url, headers=headers, params=params)
        if response.status_code != 200:
            return f"X Analysis: API error {response.status_code}"
        data = response.json()
        if not data.get('data'):
            return f"X Analysis: No recent posts found for ${ticker}"

        sentiments = []
        weights = []
        bullish_count = 0
        bearish_count = 0
        for post in data['data']:
            text = clean_post_text(post['text'])
            # Filter near-empty posts BEFORE scoring them (previously each
            # discarded post still paid for a TextBlob sentiment pass).
            if len(text.strip()) < 10:
                continue
            sentiment = get_sentiment_score(text)
            metrics = post.get('public_metrics', {})
            engagement = metrics.get('like_count', 0) + metrics.get('retweet_count', 0)
            # engagement >= 0, so engagement + 1 >= 1; the old max(1, ...) was redundant.
            weight = engagement + 1
            sentiments.append(sentiment)
            weights.append(weight)
            if sentiment > 0.15:
                bullish_count += 1
            elif sentiment < -0.15:
                bearish_count += 1

        if len(sentiments) < 10:
            return f"X Analysis: Insufficient data (only {len(sentiments)} valid posts)"

        sentiments_array = np.array(sentiments)
        weights_array = np.array(weights)
        avg_sentiment = np.average(sentiments_array, weights=weights_array)
        # Weighted standard deviation around the weighted mean.
        std_sentiment = np.sqrt(np.average((sentiments_array - avg_sentiment)**2, weights=weights_array))
        confidence = min(len(sentiments) / 50.0, 1.0)

        sentiment_label = "NEUTRAL"
        if avg_sentiment > 0.15 and confidence > 0.3:
            sentiment_label = "BULLISH"
        elif avg_sentiment < -0.15 and confidence > 0.3:
            sentiment_label = "BEARISH"

        # Trend flag when the bull/bear split is lopsided enough (>20%).
        trend_strength = abs(bullish_count - bearish_count) / max(len(sentiments), 1)
        trend_direction = ""
        if trend_strength > 0.2:
            if bullish_count > bearish_count:
                trend_direction = " TRENDING_UP"
            else:
                trend_direction = " TRENDING_DOWN"
        return f"X Sentiment: {sentiment_label}{trend_direction} (Score: {avg_sentiment:.3f}±{std_sentiment:.3f}, Confidence: {confidence:.2f}, Posts: {len(sentiments)}, Bullish: {bullish_count}, Bearish: {bearish_count})"
    except Exception as e:
        return f"X Analysis: Error - {str(e)[:50]}"
def clean_post_text(text: str) -> str:
    """Strip URLs, @mentions/#hashtags and punctuation from an X post.

    Punctuation is replaced with spaces (not deleted), so interior runs of
    whitespace can remain; only leading/trailing whitespace is stripped.
    """
    without_links = re.sub(r'http\S+|www\S+|https\S+', '', text, flags=re.MULTILINE)
    without_tags = re.sub(r'@\w+|#\w+', '', without_links)
    words_only = re.sub(r'[^\w\s]', ' ', without_tags)
    return words_only.strip()
def get_sentiment_score(text: str) -> float:
    """Return TextBlob polarity for *text*, clamped to [-1.0, 1.0].

    Empty input, input shorter than 3 characters after stripping, or any
    input TextBlob fails to analyze scores 0.0 (neutral).
    """
    if not text or len(text.strip()) < 3:
        return 0.0
    try:
        polarity = TextBlob(text).sentiment.polarity
    except (ValueError, TypeError, AttributeError):
        return 0.0
    return min(max(polarity, -1.0), 1.0)

View File

@ -6,14 +6,16 @@ from pandas import DataFrame
import pandas as pd
from functools import wraps
from .utils import save_output, SavePathType, decorate_all_methods
from .utils import SavePathType, decorate_all_methods
def init_ticker(func: Callable) -> Callable:
"""Decorator to initialize yf.Ticker and pass it to the function."""
@wraps(func)
def wrapper(symbol: Annotated[str, "ticker symbol"], *args, **kwargs) -> Any:
def wrapper(
symbol: Annotated[str, "ticker symbol"], *args, **kwargs
) -> Any:
ticker = yf.Ticker(symbol)
return func(ticker, *args, **kwargs)
@ -83,35 +85,59 @@ class YFinanceUtils:
return dividends
def get_income_stmt(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
"""Fetches and returns the latest income statement of the company as a DataFrame."""
"""Fetches and returns the latest income statement of the company."""
ticker = symbol
income_stmt = ticker.financials
return income_stmt
def get_balance_sheet(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
"""Fetches and returns the latest balance sheet of the company as a DataFrame."""
"""Fetches and returns the latest balance sheet of the company."""
ticker = symbol
balance_sheet = ticker.balance_sheet
return balance_sheet
def get_cash_flow(symbol: Annotated[str, "ticker symbol"]) -> DataFrame:
"""Fetches and returns the latest cash flow statement of the company as a DataFrame."""
"""Fetches and returns the latest cash flow statement of the company."""
ticker = symbol
cash_flow = ticker.cashflow
return cash_flow
def get_analyst_recommendations(symbol: Annotated[str, "ticker symbol"]) -> tuple:
"""Fetches the latest analyst recommendations and returns the most common recommendation and its count."""
def get_analyst_recommendations(
symbol: Annotated[str, "ticker symbol"]
) -> tuple:
"""Fetches analyst recommendations with statistical validation."""
ticker = symbol
recommendations = ticker.recommendations
if recommendations.empty:
return None, 0 # No recommendations available
try:
recommendations = ticker.recommendations
except (AttributeError, ValueError) as e:
raise ValueError(
f"Failed to fetch recommendations for {ticker.ticker}: {e}"
)
# Assuming 'period' column exists and needs to be excluded
row_0 = recommendations.iloc[0, 1:] # Exclude 'period' column if necessary
if recommendations is None or recommendations.empty:
return None, 0
# Find the maximum voting result
max_votes = row_0.max()
majority_voting_result = row_0[row_0 == max_votes].index.tolist()
try:
# Get the most recent recommendations (first row)
latest_row = recommendations.iloc[0]
return majority_voting_result[0], max_votes
# Remove non-numeric columns (like 'period')
numeric_cols = latest_row.select_dtypes(include=['number'])
if numeric_cols.empty:
return None, 0
# Find maximum with statistical validation
max_votes = numeric_cols.max()
if pd.isna(max_votes) or max_votes <= 0:
return None, 0
# Get recommendation with highest count
max_recommendation = numeric_cols.idxmax()
# Convert to int for consistency
max_votes = int(max_votes)
return max_recommendation, max_votes
except (IndexError, KeyError, ValueError) as e:
raise ValueError(f"Error processing recommendations data: {e}")

View File

@ -1,22 +1,39 @@
import os
DEFAULT_CONFIG = {
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"project_dir": os.path.abspath(
os.path.join(os.path.dirname(__file__), ".")
),
"results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
"data_dir": "/Users/yluo/Documents/Code/ScAI/FR1-data",
"data_cache_dir": os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
"dataflows/data_cache",
),
# LLM settings
"llm_provider": "openai",
"deep_think_llm": "o4-mini",
"deep_think_llm": "o1-mini",
"quick_think_llm": "gpt-4o-mini",
"backend_url": "https://api.openai.com/v1",
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,
"max_recur_limit": 100,
# Tool settings
"online_tools": True,
}
# Catalog of OpenAI model names, grouped by family.
# NOTE(review): no consumer is visible in this file -- presumably used for
# provider/model selection or validation elsewhere; confirm before relying
# on the grouping keys.
OPENAI_MODELS = {
    "reasoning": ["o1", "o1-mini", "o3", "o3-mini"],
    "flagship": ["gpt-4o", "gpt-4o-mini"],
    "latest": ["gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano"],
}
# Catalog of Anthropic Claude model names, grouped by tier.
ANTHROPIC_MODELS = {
    "opus": [
        "claude-3-opus-20240229", "claude-opus-4", "claude-opus-4.1"
    ],
    "sonnet": [
        "claude-3-5-sonnet-20241022",
        "claude-sonnet-4",
        "claude-3.7-sonnet"
    ],
    "haiku": ["claude-3-haiku-20240307", "claude-3-5-haiku-20241022"]
}

View File

@ -44,24 +44,46 @@ class ConditionalLogic:
return "Msg Clear Fundamentals"
def should_continue_debate(self, state: AgentState) -> str:
"""Determine if debate should continue."""
if (
state["investment_debate_state"]["count"] >= 2 * self.max_debate_rounds
): # 3 rounds of back-and-forth between 2 agents
"""Determine if debate should continue with proper validation."""
if not state or "investment_debate_state" not in state:
return "Research Manager"
if state["investment_debate_state"]["current_response"].startswith("Bull"):
return "Bear Researcher"
debate_state = state["investment_debate_state"]
count = debate_state.get("count", 0)
current_response = debate_state.get("current_response", "")
if not isinstance(count, int) or count < 0:
return "Research Manager"
if count >= 2 * self.max_debate_rounds:
return "Research Manager"
if isinstance(current_response, str) and current_response.strip():
if current_response.upper().startswith("BULL"):
return "Bear Researcher"
return "Bull Researcher"
def should_continue_risk_analysis(self, state: AgentState) -> str:
"""Determine if risk analysis should continue."""
if (
state["risk_debate_state"]["count"] >= 3 * self.max_risk_discuss_rounds
): # 3 rounds of back-and-forth between 3 agents
"""Determine if risk analysis should continue with proper validation."""
if not state or "risk_debate_state" not in state:
return "Risk Judge"
if state["risk_debate_state"]["latest_speaker"].startswith("Risky"):
return "Safe Analyst"
if state["risk_debate_state"]["latest_speaker"].startswith("Safe"):
return "Neutral Analyst"
risk_state = state["risk_debate_state"]
count = risk_state.get("count", 0)
latest_speaker = risk_state.get("latest_speaker", "")
if not isinstance(count, int) or count < 0:
return "Risk Judge"
if count >= 3 * self.max_risk_discuss_rounds:
return "Risk Judge"
if isinstance(latest_speaker, str) and latest_speaker.strip():
speaker_upper = latest_speaker.upper()
if speaker_upper.startswith("RISKY"):
return "Safe Analyst"
elif speaker_upper.startswith("SAFE"):
return "Neutral Analyst"
return "Risky Analyst"

View File

@ -18,11 +18,29 @@ class Propagator:
def create_initial_state(
self, company_name: str, trade_date: str
) -> Dict[str, Any]:
"""Create the initial state for the agent graph."""
"""Create the initial state for the agent graph with validation."""
if not company_name or not isinstance(company_name, str):
raise ValueError(f"Invalid company_name: {company_name}")
if not trade_date or not isinstance(trade_date, str):
raise ValueError(f"Invalid trade_date: {trade_date}")
from datetime import datetime
try:
parsed_date = datetime.strptime(trade_date, "%Y-%m-%d")
if parsed_date.year < 1990 or parsed_date.year > 2030:
raise ValueError(f"Trade date out of reasonable range: {trade_date}")
except ValueError as e:
raise ValueError(f"Invalid date format. Expected YYYY-MM-DD, got: {trade_date}") from e
company_clean = company_name.strip().upper()
if len(company_clean) < 1 or len(company_clean) > 10:
raise ValueError(f"Company name must be 1-10 characters: {company_name}")
return {
"messages": [("human", company_name)],
"company_of_interest": company_name,
"trade_date": str(trade_date),
"messages": [("human", company_clean)],
"company_of_interest": company_clean,
"trade_date": trade_date,
"investment_debate_state": InvestDebateState(
{"history": "", "current_response": "", "count": 0}
),

View File

@ -20,12 +20,61 @@ class SignalProcessor:
Returns:
Extracted decision (BUY, SELL, or HOLD)
"""
messages = [
(
"system",
"You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.",
),
("human", full_signal),
if not full_signal or len(full_signal.strip()) < 10:
return "HOLD"
import re
signal_upper = full_signal.upper()
buy_patterns = [
r'FINAL\s+TRANSACTION\s+PROPOSAL:\s*\*\*BUY\*\*',
r'FINAL\s+DECISION:\s*BUY',
r'RECOMMENDATION:\s*BUY',
r'DECISION:\s*BUY',
r'PROPOSE:\s*BUY',
r'\bBUY\b(?=\s*[.!]|\s*$)',
]
return self.quick_thinking_llm.invoke(messages).content
sell_patterns = [
r'FINAL\s+TRANSACTION\s+PROPOSAL:\s*\*\*SELL\*\*',
r'FINAL\s+DECISION:\s*SELL',
r'RECOMMENDATION:\s*SELL',
r'DECISION:\s*SELL',
r'PROPOSE:\s*SELL',
r'\bSELL\b(?=\s*[.!]|\s*$)',
]
hold_patterns = [
r'FINAL\s+TRANSACTION\s+PROPOSAL:\s*\*\*HOLD\*\*',
r'FINAL\s+DECISION:\s*HOLD',
r'RECOMMENDATION:\s*HOLD',
r'DECISION:\s*HOLD',
r'PROPOSE:\s*HOLD',
r'\bHOLD\b(?=\s*[.!]|\s*$)',
]
for pattern in buy_patterns:
if re.search(pattern, signal_upper):
return "BUY"
for pattern in sell_patterns:
if re.search(pattern, signal_upper):
return "SELL"
for pattern in hold_patterns:
if re.search(pattern, signal_upper):
return "HOLD"
buy_count = len(re.findall(r'\bBUY\b', signal_upper))
sell_count = len(re.findall(r'\bSELL\b', signal_upper))
hold_count = len(re.findall(r'\bHOLD\b', signal_upper))
if buy_count > sell_count and buy_count > hold_count:
return "BUY"
elif sell_count > buy_count and sell_count > hold_count:
return "SELL"
elif hold_count > 0:
return "HOLD"
return "HOLD"

View File

@ -126,6 +126,7 @@ class TradingAgentsGraph:
[
# online tools
self.toolkit.get_stock_news_openai,
self.toolkit.get_x_stock_sentiment,
# offline tools
self.toolkit.get_reddit_stock_info,
]
@ -135,6 +136,8 @@ class TradingAgentsGraph:
# online tools
self.toolkit.get_global_news_openai,
self.toolkit.get_google_news,
self.toolkit.get_bloomberg_news,
self.toolkit.get_reuters_news,
# offline tools
self.toolkit.get_finnhub_news,
self.toolkit.get_reddit_news,