This commit is contained in:
Moshi Wei 2025-08-21 10:23:24 -03:00 committed by GitHub
commit 3dfec4f706
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
31 changed files with 1663 additions and 115 deletions

2
.gitignore vendored
View File

@ -7,3 +7,5 @@ eval_results/
eval_data/ eval_data/
*.egg-info/ *.egg-info/
.env .env
TA
/.chainlit

View File

@ -0,0 +1,67 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
def create_fundamentals_analyst(llm, toolkit):
    """Build a graph node that writes a fundamentals report for a ticker.

    Args:
        llm: Chat model that supports ``bind_tools``.
        toolkit: Data-tool collection; ``toolkit.config["online_tools"]``
            selects between the online tool and cached offline sources.

    Returns:
        A callable node mapping graph state to updated messages and the
        ``fundamentals_report`` field.
    """

    def fundamentals_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        if toolkit.config["online_tools"]:
            tools = [toolkit.get_fundamentals_openai]
        else:
            tools = [
                toolkit.get_finnhub_company_insider_sentiment,
                toolkit.get_finnhub_company_insider_transactions,
                toolkit.get_simfin_balance_sheet,
                toolkit.get_simfin_cashflow,
                toolkit.get_simfin_income_stmt,
            ]

        # BUG FIX: the original had a trailing comma after the closing quote,
        # which made ``system_message`` a 1-tuple; the prompt then rendered
        # the tuple repr ("('You are ...',)") instead of the text.
        system_message = (
            "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read. "
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}"
                    "Make sure to generate less than 500 tokens."
                    " Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the static template variables up front.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        # Only treat the content as the final report when the model stopped
        # calling tools; otherwise the graph loops back through the tool node.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "fundamentals_report": report,
        }

    return fundamentals_analyst_node

View File

@ -0,0 +1,91 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
def create_market_analyst(llm, toolkit):
    """Build a graph node that writes a technical/market report for a ticker.

    Args:
        llm: Chat model that supports ``bind_tools``.
        toolkit: Data-tool collection; ``toolkit.config["online_tools"]``
            selects between online Yahoo Finance tools and cached data.

    Returns:
        A callable node mapping graph state to updated messages and the
        ``market_report`` field.
    """

    def market_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        if toolkit.config["online_tools"]:
            tools = [
                toolkit.get_YFin_data_online,
                toolkit.get_stockstats_indicators_report_online,
            ]
        else:
            tools = [
                toolkit.get_YFin_data,
                toolkit.get_stockstats_indicators_report,
            ]

        system_message = (
            """You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:
Moving Averages:
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
- close_200_sma: 200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.
- close_10_ema: 10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.
MACD Related:
- macd: MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.
- macds: MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.
- macdh: MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.
Momentum Indicators:
- rsi: RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.
Volatility Indicators:
- boll: Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.
- boll_ub: Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.
- boll_lb: Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.
- atr: ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.
Volume-Based Indicators:
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.
- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_YFin_data first to retrieve the CSV that is needed to generate indicators. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The company we want to look at is {ticker}"
                    "Make sure to generate less than 500 tokens."
                    " Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the static template variables up front.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        # Only treat the content as the final report when the model stopped
        # calling tools; otherwise the graph loops back through the tool node.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "market_report": report,
        }

    return market_analyst_node

View File

@ -0,0 +1,64 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
def create_news_analyst(llm, toolkit):
    """Build a graph node that writes a macro/news report for the trade date.

    Args:
        llm: Chat model that supports ``bind_tools``.
        toolkit: Data-tool collection; ``toolkit.config["online_tools"]``
            selects between online news tools and cached offline sources.

    Returns:
        A callable node mapping graph state to updated messages and the
        ``news_report`` field.
    """

    def news_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        if toolkit.config["online_tools"]:
            tools = [toolkit.get_global_news_openai, toolkit.get_google_news]
        else:
            tools = [
                toolkit.get_finnhub_news,
                toolkit.get_reddit_news,
                toolkit.get_google_news,
            ]

        # FIX: corrected "Makrdown" -> "Markdown" typo in the prompt.
        system_message = (
            "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Look at news from EODHD, and finnhub to be comprehensive. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. We are looking at the company {ticker}"
                    "Make sure to generate less than 500 tokens."
                    " Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the static template variables up front.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        # Only treat the content as the final report when the model stopped
        # calling tools; otherwise the graph loops back through the tool node.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "news_report": report,
        }

    return news_analyst_node

View File

@ -0,0 +1,64 @@
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import time
import json
def create_social_media_analyst(llm, toolkit):
    """Build a graph node that writes a social-sentiment report for a ticker.

    Args:
        llm: Chat model that supports ``bind_tools``.
        toolkit: Data-tool collection; ``toolkit.config["online_tools"]``
            selects between the online tool and cached Reddit data.

    Returns:
        A callable node mapping graph state to updated messages and the
        ``sentiment_report`` field.
    """

    def social_media_analyst_node(state):
        current_date = state["trade_date"]
        ticker = state["company_of_interest"]

        if toolkit.config["online_tools"]:
            tools = [toolkit.get_stock_news_openai]
        else:
            tools = [
                toolkit.get_reddit_stock_info,
            ]

        # BUG FIX: the original had a trailing comma after the closing quote,
        # which made ``system_message`` a 1-tuple; the prompt then rendered
        # the tuple repr instead of the text. Also fixed "Makrdown" typo.
        system_message = (
            "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
            + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
        )

        prompt = ChatPromptTemplate.from_messages(
            [
                (
                    "system",
                    "You are a helpful AI assistant, collaborating with other assistants."
                    " Use the provided tools to progress towards answering the question."
                    " If you are unable to fully answer, that's OK; another assistant with different tools"
                    " will help where you left off. Execute what you can to make progress."
                    " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
                    " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
                    " You have access to the following tools: {tool_names}.\n{system_message}"
                    "For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}"
                    "Make sure to generate less than 500 tokens."
                    " Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
                ),
                MessagesPlaceholder(variable_name="messages"),
            ]
        )

        # Fill in the static template variables up front.
        prompt = prompt.partial(system_message=system_message)
        prompt = prompt.partial(tool_names=", ".join([tool.name for tool in tools]))
        prompt = prompt.partial(current_date=current_date)
        prompt = prompt.partial(ticker=ticker)

        chain = prompt | llm.bind_tools(tools)
        result = chain.invoke(state["messages"])

        # Only treat the content as the final report when the model stopped
        # calling tools; otherwise the graph loops back through the tool node.
        report = ""
        if len(result.tool_calls) == 0:
            report = result.content

        return {
            "messages": [result],
            "sentiment_report": report,
        }

    return social_media_analyst_node

View File

@ -0,0 +1,131 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from isek.adapter.base import Adapter, AdapterCard
from isek.node.etcd_registry import EtcdRegistry
from isek.node.node_v2 import Node
import dotenv
from isek.utils.log import LoggerManager
from isek.utils.log import log
import json
# Use plain (non-rich) log output for this service.
LoggerManager.plain_mode()
# Load API keys and other secrets from the local .env file.
dotenv.load_dotenv()
# Unique node id under which this agent registers in the etcd registry.
NODE_ID = "TA_Agent_Fundamentals"
# selected_analysts=["market", "social", "news", "fundamentals"]
# This node runs only the fundamentals analyst.
selected_analysts = ["fundamentals"]


def json_to_markdown(data: dict) -> str:
    """Convert a trading-analysis state dict to a formatted markdown report.

    Assumes *data* maps a single key (the trade date) to the trading state
    dict produced by the graph; only the first value is used.
    """
    # Extract the first value (trading data dict).
    trading_data = next(iter(data.values()))

    markdown = []
    markdown.append("# Analysis Report")
    markdown.append("")
    markdown.append("## Basic Information")
    markdown.append("")
    markdown.append(f"**Company:** {trading_data.get('company_of_interest', 'N/A')}")
    markdown.append(f"**Trade Date:** {trading_data.get('trade_date', 'N/A')}")
    markdown.append("")
    markdown.append(f"## {selected_analysts[0]} Analysis")
    markdown.append("")

    # Append every report section that is present and non-empty.
    report_keys = (
        "market_report",
        "news_report",
        "fundamentals_report",
        "sentiment_report",
    )
    found_any = False
    for key in report_keys:
        report = trading_data.get(key, "")
        if report:
            markdown.append(report)
            found_any = True

    # BUG FIX: previously this fallback was attached only to the final
    # ``sentiment_report`` check, so it fired whenever that one key was
    # missing even when other reports had been rendered. Emit it only
    # when no report at all was produced.
    if not found_any:
        markdown.append("*No market analysis available*")
    markdown.append("")

    return "\n".join(markdown)
class TradingAgentAdapter(Adapter):
    """ISEK adapter that runs the TradingAgents fundamentals analysis graph."""

    def __init__(self):
        # Custom config: cheap models, a single debate round, online tools.
        self.config = DEFAULT_CONFIG.copy()
        self.config["deep_think_llm"] = "gpt-4.1-nano"
        self.config["quick_think_llm"] = "gpt-4.1-nano"
        self.config["max_debate_rounds"] = 1
        self.config["online_tools"] = True
        self.config["max_completion_tokens"] = 1000
        self.ta = TradingAgentsGraph(
            debug=True,
            config=self.config,
            debate=False,
            selected_analysts=selected_analysts,
        )

    def run(self, prompt: str) -> str:
        """Run the analysis graph.

        ``prompt`` must be ``"Ticker,Date"``, optionally wrapped in the ISEK
        JSON message envelope (``{"parts": [{"text": ...}]}``).
        """
        try:
            # Try to parse as the JSON message envelope first.
            received = json.loads(prompt)
            if isinstance(received, dict) and 'parts' in received and received['parts']:
                result = received['parts'][0]['text']
            else:
                result = str(received)
        except (json.JSONDecodeError, KeyError, TypeError):
            # Not JSON (or unexpected shape): use the raw prompt as-is.
            result = prompt
        log.debug(f"prompt: {result}")
        # BUG FIX: split the extracted text, not the raw prompt -- otherwise
        # JSON-wrapped requests were parsed against the envelope string.
        ticker, date = (part.strip() for part in result.split(",", 1))
        print(f"Ticker: {ticker}, Date: {date}")
        final_state, _decision = self.ta.propagate(ticker, date)
        # final_state is already a dict; render it as markdown.
        return json_to_markdown(final_state)

    def get_adapter_card(self) -> AdapterCard:
        # FIX: the card name was a copy-paste leftover ("Random Number
        # Generator"); give it an accurate name.
        return AdapterCard(
            name="Trading Agent (fundamentals)",
            bio="",
            lore="",
            knowledge="",
            routine="",
        )
# Registry used for node discovery. NOTE(review): hard-coded public IP --
# consider moving it to configuration/.env.
etcd_registry = EtcdRegistry(host="47.236.116.81", port=2379)
# Create the server node exposing this adapter over p2p.
server_node = Node(node_id=NODE_ID, port=8866, p2p=True, p2p_server_port=9000, adapter=TradingAgentAdapter(), registry=etcd_registry)
# Start the server in the foreground (blocks).
server_node.build_server(daemon=False)
# print(server_node.adapter.run("random a number 0-10"))

View File

@ -0,0 +1,132 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from isek.adapter.base import Adapter, AdapterCard
from isek.node.etcd_registry import EtcdRegistry
from isek.node.node_v2 import Node
import dotenv
from isek.utils.log import LoggerManager
from isek.utils.log import log
import json
# Use plain (non-rich) log output for this service.
LoggerManager.plain_mode()
# Load API keys and other secrets from the local .env file.
dotenv.load_dotenv()
# Unique node id under which this agent registers in the etcd registry.
NODE_ID = "TA_Agent_Market"
# selected_analysts=["market", "social", "news", "fundamentals"]
# This node runs only the market analyst.
selected_analysts = ["market"]


def json_to_markdown(data: dict) -> str:
    """Convert a trading-analysis state dict to a formatted markdown report.

    Assumes *data* maps a single key (the trade date) to the trading state
    dict produced by the graph; only the first value is used.
    """
    # Extract the first value (trading data dict).
    trading_data = next(iter(data.values()))

    markdown = []
    markdown.append("# Market Analysis Report")
    markdown.append("")
    markdown.append("## Basic Information")
    markdown.append("")
    markdown.append(f"**Company:** {trading_data.get('company_of_interest', 'N/A')}")
    markdown.append(f"**Trade Date:** {trading_data.get('trade_date', 'N/A')}")
    markdown.append("")
    markdown.append(f"## {selected_analysts[0]} Analysis")
    markdown.append("")

    # Append every report section that is present and non-empty.
    report_keys = (
        "market_report",
        "news_report",
        "fundamentals_report",
        "sentiment_report",
    )
    found_any = False
    for key in report_keys:
        report = trading_data.get(key, "")
        if report:
            markdown.append(report)
            found_any = True

    # BUG FIX: previously this fallback was attached only to the final
    # ``sentiment_report`` check, so it fired whenever that one key was
    # missing even when other reports had been rendered. Emit it only
    # when no report at all was produced.
    if not found_any:
        markdown.append("*No market analysis available*")
    markdown.append("")

    return "\n".join(markdown)
class TradingAgentAdapter(Adapter):
    """ISEK adapter that runs the TradingAgents market analysis graph."""

    def __init__(self):
        # Custom config: cheap models, a single debate round, online tools.
        self.config = DEFAULT_CONFIG.copy()
        self.config["deep_think_llm"] = "gpt-4.1-nano"
        self.config["quick_think_llm"] = "gpt-4.1-nano"
        self.config["max_debate_rounds"] = 1
        self.config["online_tools"] = True
        self.config["max_completion_tokens"] = 1000
        self.ta = TradingAgentsGraph(
            debug=True,
            config=self.config,
            debate=False,
            selected_analysts=selected_analysts,
        )

    def run(self, prompt: str) -> str:
        """Run the analysis graph.

        ``prompt`` must be ``"Ticker,Date"``, optionally wrapped in the ISEK
        JSON message envelope (``{"parts": [{"text": ...}]}``).
        """
        try:
            # Try to parse as the JSON message envelope first.
            received = json.loads(prompt)
            if isinstance(received, dict) and 'parts' in received and received['parts']:
                result = received['parts'][0]['text']
            else:
                result = str(received)
        except (json.JSONDecodeError, KeyError, TypeError):
            # Not JSON (or unexpected shape): use the raw prompt as-is.
            result = prompt
        log.debug(f"prompt: {result}")
        # BUG FIX: split the extracted text, not the raw prompt -- otherwise
        # JSON-wrapped requests were parsed against the envelope string.
        ticker, date = (part.strip() for part in result.split(",", 1))
        print(f"Ticker: {ticker}, Date: {date}")
        final_state, _decision = self.ta.propagate(ticker, date)
        # final_state is already a dict; render it as markdown.
        return json_to_markdown(final_state)

    def get_adapter_card(self) -> AdapterCard:
        # FIX: the card name was a copy-paste leftover ("Random Number
        # Generator"); give it an accurate name.
        return AdapterCard(
            name="Trading Agent (market)",
            bio="",
            lore="",
            knowledge="",
            routine="",
        )
# Registry used for node discovery. NOTE(review): hard-coded public IP --
# consider moving it to configuration/.env.
etcd_registry = EtcdRegistry(host="47.236.116.81", port=2379)
# Create the server node exposing this adapter over p2p.
server_node = Node(node_id=NODE_ID, port=8867, p2p=True, p2p_server_port=9000, adapter=TradingAgentAdapter(), registry=etcd_registry)
# Start the server in the foreground (blocks).
server_node.build_server(daemon=False)
# print(server_node.adapter.run("random a number 0-10"))

View File

@ -0,0 +1,132 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from isek.adapter.base import Adapter, AdapterCard
from isek.node.etcd_registry import EtcdRegistry
from isek.node.node_v2 import Node
import dotenv
from isek.utils.log import LoggerManager
from isek.utils.log import log
import json
# Use plain (non-rich) log output for this service.
LoggerManager.plain_mode()
# Load API keys and other secrets from the local .env file.
dotenv.load_dotenv()
# Unique node id under which this agent registers in the etcd registry.
NODE_ID = "TA_Agent_News"
# selected_analysts=["market", "social", "news", "fundamentals"]
# This node runs only the news analyst.
selected_analysts = ["news"]


def json_to_markdown(data: dict) -> str:
    """Convert a trading-analysis state dict to a formatted markdown report.

    Assumes *data* maps a single key (the trade date) to the trading state
    dict produced by the graph; only the first value is used.
    """
    # Extract the first value (trading data dict).
    trading_data = next(iter(data.values()))

    markdown = []
    markdown.append("# Analysis Report")
    markdown.append("")
    markdown.append("## Basic Information")
    markdown.append("")
    markdown.append(f"**Company:** {trading_data.get('company_of_interest', 'N/A')}")
    markdown.append(f"**Trade Date:** {trading_data.get('trade_date', 'N/A')}")
    markdown.append("")
    markdown.append(f"## {selected_analysts[0]} Analysis")
    markdown.append("")

    # Append every report section that is present and non-empty.
    report_keys = (
        "market_report",
        "news_report",
        "fundamentals_report",
        "sentiment_report",
    )
    found_any = False
    for key in report_keys:
        report = trading_data.get(key, "")
        if report:
            markdown.append(report)
            found_any = True

    # BUG FIX: previously this fallback was attached only to the final
    # ``sentiment_report`` check, so it fired whenever that one key was
    # missing even when other reports had been rendered. Emit it only
    # when no report at all was produced.
    if not found_any:
        markdown.append("*No market analysis available*")
    markdown.append("")

    return "\n".join(markdown)
class TradingAgentAdapter(Adapter):
    """ISEK adapter that runs the TradingAgents news analysis graph."""

    def __init__(self):
        # Custom config: cheap models, a single debate round, online tools.
        self.config = DEFAULT_CONFIG.copy()
        self.config["deep_think_llm"] = "gpt-4.1-nano"
        self.config["quick_think_llm"] = "gpt-4.1-nano"
        self.config["max_debate_rounds"] = 1
        self.config["online_tools"] = True
        self.config["max_completion_tokens"] = 1000
        self.ta = TradingAgentsGraph(
            debug=True,
            config=self.config,
            debate=False,
            selected_analysts=selected_analysts,
        )

    def run(self, prompt: str) -> str:
        """Run the analysis graph.

        ``prompt`` must be ``"Ticker,Date"``, optionally wrapped in the ISEK
        JSON message envelope (``{"parts": [{"text": ...}]}``).
        """
        try:
            # Try to parse as the JSON message envelope first.
            received = json.loads(prompt)
            if isinstance(received, dict) and 'parts' in received and received['parts']:
                result = received['parts'][0]['text']
            else:
                result = str(received)
        except (json.JSONDecodeError, KeyError, TypeError):
            # Not JSON (or unexpected shape): use the raw prompt as-is.
            result = prompt
        log.debug(f"prompt: {result}")
        # BUG FIX: split the extracted text, not the raw prompt -- otherwise
        # JSON-wrapped requests were parsed against the envelope string.
        ticker, date = (part.strip() for part in result.split(",", 1))
        print(f"Ticker: {ticker}, Date: {date}")
        final_state, _decision = self.ta.propagate(ticker, date)
        # final_state is already a dict; render it as markdown.
        return json_to_markdown(final_state)

    def get_adapter_card(self) -> AdapterCard:
        # FIX: the card name was a copy-paste leftover ("Random Number
        # Generator"); give it an accurate name.
        return AdapterCard(
            name="Trading Agent (news)",
            bio="",
            lore="",
            knowledge="",
            routine="",
        )
# Registry used for node discovery. NOTE(review): hard-coded public IP --
# consider moving it to configuration/.env.
etcd_registry = EtcdRegistry(host="47.236.116.81", port=2379)
# Create the server node exposing this adapter over p2p.
server_node = Node(node_id=NODE_ID, port=8868, p2p=True, p2p_server_port=9000, adapter=TradingAgentAdapter(), registry=etcd_registry)
# Start the server in the foreground (blocks).
server_node.build_server(daemon=False)
# print(server_node.adapter.run("random a number 0-10"))

View File

@ -0,0 +1,132 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from isek.adapter.base import Adapter, AdapterCard
from isek.node.etcd_registry import EtcdRegistry
from isek.node.node_v2 import Node
import dotenv
from isek.utils.log import LoggerManager
from isek.utils.log import log
import json
# Use plain (non-rich) log output for this service.
LoggerManager.plain_mode()
# Load API keys and other secrets from the local .env file.
dotenv.load_dotenv()
# Unique node id under which this agent registers in the etcd registry.
NODE_ID = "TA_Agent_Social"
# selected_analysts=["market", "social", "news", "fundamentals"]
# This node runs only the social-media analyst.
selected_analysts = ["social"]


def json_to_markdown(data: dict) -> str:
    """Convert a trading-analysis state dict to a formatted markdown report.

    Assumes *data* maps a single key (the trade date) to the trading state
    dict produced by the graph; only the first value is used.
    """
    # Extract the first value (trading data dict).
    trading_data = next(iter(data.values()))

    markdown = []
    markdown.append("# Market Analysis Report")
    markdown.append("")
    markdown.append("## Basic Information")
    markdown.append("")
    markdown.append(f"**Company:** {trading_data.get('company_of_interest', 'N/A')}")
    markdown.append(f"**Trade Date:** {trading_data.get('trade_date', 'N/A')}")
    markdown.append("")
    markdown.append(f"## {selected_analysts[0]} Analysis")
    markdown.append("")

    # Append every report section that is present and non-empty.
    report_keys = (
        "market_report",
        "news_report",
        "fundamentals_report",
        "sentiment_report",
    )
    found_any = False
    for key in report_keys:
        report = trading_data.get(key, "")
        if report:
            markdown.append(report)
            found_any = True

    # BUG FIX: previously this fallback was attached only to the final
    # ``sentiment_report`` check, so it fired whenever that one key was
    # missing even when other reports had been rendered. Emit it only
    # when no report at all was produced.
    if not found_any:
        markdown.append("*No market analysis available*")
    markdown.append("")

    return "\n".join(markdown)
class TradingAgentAdapter(Adapter):
    """ISEK adapter that runs the TradingAgents social-sentiment graph."""

    def __init__(self):
        # Custom config: cheap models, a single debate round, online tools.
        self.config = DEFAULT_CONFIG.copy()
        self.config["deep_think_llm"] = "gpt-4.1-nano"
        self.config["quick_think_llm"] = "gpt-4.1-nano"
        self.config["max_debate_rounds"] = 1
        self.config["online_tools"] = True
        self.config["max_completion_tokens"] = 1000
        self.ta = TradingAgentsGraph(
            debug=True,
            config=self.config,
            debate=False,
            selected_analysts=selected_analysts,
        )

    def run(self, prompt: str) -> str:
        """Run the analysis graph.

        ``prompt`` must be ``"Ticker,Date"``, optionally wrapped in the ISEK
        JSON message envelope (``{"parts": [{"text": ...}]}``).
        """
        try:
            # Try to parse as the JSON message envelope first.
            received = json.loads(prompt)
            if isinstance(received, dict) and 'parts' in received and received['parts']:
                result = received['parts'][0]['text']
            else:
                result = str(received)
        except (json.JSONDecodeError, KeyError, TypeError):
            # Not JSON (or unexpected shape): use the raw prompt as-is.
            result = prompt
        log.debug(f"prompt: {result}")
        # BUG FIX: split the extracted text, not the raw prompt -- otherwise
        # JSON-wrapped requests were parsed against the envelope string.
        ticker, date = (part.strip() for part in result.split(",", 1))
        print(f"Ticker: {ticker}, Date: {date}")
        final_state, _decision = self.ta.propagate(ticker, date)
        # final_state is already a dict; render it as markdown.
        return json_to_markdown(final_state)

    def get_adapter_card(self) -> AdapterCard:
        # FIX: the card name was a copy-paste leftover ("Random Number
        # Generator"); give it an accurate name.
        return AdapterCard(
            name="Trading Agent (social)",
            bio="",
            lore="",
            knowledge="",
            routine="",
        )
# Registry used for node discovery. NOTE(review): hard-coded public IP --
# consider moving it to configuration/.env.
etcd_registry = EtcdRegistry(host="47.236.116.81", port=2379)
# Create the server node exposing this adapter over p2p.
server_node = Node(node_id=NODE_ID, port=8869, p2p=True, p2p_server_port=9000, adapter=TradingAgentAdapter(), registry=etcd_registry)
# Start the server in the foreground (blocks).
server_node.build_server(daemon=False)
# print(server_node.adapter.run("random a number 0-10"))

14
chainlit.md Normal file
View File

@ -0,0 +1,14 @@
# Welcome to Chainlit! 🚀🤖
Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
## Useful Links 🔗
- **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
- **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
We can't wait to see what you create with Chainlit! Happy coding! 💻😊
## Welcome screen
To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.

98
chainlit_ui.py Normal file
View File

@ -0,0 +1,98 @@
import chainlit as cl
import os
from dotenv import load_dotenv
from isek.node.node_v2 import Node
from isek.utils.log import log
from isek.node.etcd_registry import EtcdRegistry
# Load environment variables
load_dotenv()
# Server configuration
SERVER_NODE_ID = "TA_Agent"
# Global client node instance
client_node = None
@cl.on_chat_start
async def start():
    """Initialize the client connection to the ISEK server.

    Creates a client Node registered against the shared etcd registry and
    posts a welcome (or failure) message into the chat.
    """
    global client_node
    # Registry shared with the agent server nodes.
    EXAMPLE_REGISTRY_HOST = "47.236.116.81"
    try:
        etcd_registry = EtcdRegistry(host=EXAMPLE_REGISTRY_HOST, port=2379)
        # Client node used to send messages to the agent server.
        client_node = Node(node_id="Lyra_client", port=8889, p2p=True, p2p_server_port=9001, registry=etcd_registry)
        # Run the client's server loop in the background.
        client_node.build_server(daemon=True)
        await cl.Message(
            content=f"🤖 Welcome to ISEK Agent Interface!\n\n"
                    f"I'm connected to your ISEK agent server. You can now interact with the agent ",
            author="System"
        ).send()
        log.info("Chainlit client connected to ISEK server")
    except Exception as e:
        # BUG FIX: the old message told users to check "localhost:9006",
        # which contradicts the registry/ports actually used above.
        await cl.Message(
            content=f"❌ Failed to connect to ISEK server: {str(e)}\n\n"
                    f"Please make sure the agent server is registered with the registry at {EXAMPLE_REGISTRY_HOST}:2379",
            author="System"
        ).send()
        log.error(f"Failed to connect to ISEK server: {e}")
@cl.on_message
async def main(message: cl.Message):
    """Forward a user chat message to the ISEK agent and echo the reply."""
    global client_node

    async def _say(text: str, who: str) -> None:
        # Every branch posts a chat message the same way.
        await cl.Message(content=text, author=who).send()

    if client_node is None:
        await _say("❌ Client not initialized. Please refresh the page.", "System")
        return
    try:
        reply = client_node.send_message(SERVER_NODE_ID, message.content)
        if reply is None:
            await _say("No response received from agent", "System")
        else:
            await _say(str(reply), "ISEK Agent")
    except Exception as e:
        await _say(f"❌ Error communicating with agent: {str(e)}", "System")
        log.error(f"Error in message handling: {e}")
@cl.on_chat_end
async def end():
    """Clean up when chat ends"""
    global client_node
    # NOTE(review): only the reference is dropped — confirm the Node needs
    # no explicit shutdown/disconnect before being discarded.
    client_node = None
    log.info("Chainlit client disconnected")
# Note: Chat profile configuration has been removed as it's not supported in current Chainlit version

33
client_test.py Normal file
View File

@ -0,0 +1,33 @@
from isek.node.etcd_registry import EtcdRegistry
from isek.node.node_v2 import Node
from isek.utils.log import LoggerManager
from isek.utils.print_utils import print_send_message_result, print_panel

LoggerManager.plain_mode()

EXAMPLE_REGISTRY_HOST = "47.236.116.81"

# Target agent node. Pick exactly one of the available analyst agents:
#   "TA_Agent_News", "TA_Agent_Market", "TA_Agent_Social", "TA_Agent_Fundamentals"
# BUG FIX: the original assigned all four in a row, so only the last one
# ever took effect — the dead assignments are removed.
SERVER_NODE_ID = "TA_Agent_Fundamentals"

# Connect to the shared etcd registry and create the client node.
etcd_registry = EtcdRegistry(host=EXAMPLE_REGISTRY_HOST, port=2379)
client_node = Node(node_id="RN_client", port=8889, p2p=True, p2p_server_port=9001, registry=etcd_registry)
# Run the client's server loop in the background.
client_node.build_server(daemon=True)

print_panel(title="LV10 P2P Node Client",
            content="This Client accesses RN node through the p2p protocol."
            "\nAnd demonstrate the autonomous discovery of nodes through the registration center",
            color="bright_yellow")

# Send one analysis request ("Ticker,Date") and pretty-print the result.
print_send_message_result(
    lambda msg: client_node.send_message(SERVER_NODE_ID, msg),
    source_node_id=client_node.node_id,
    target_node_id=SERVER_NODE_ID,
    message="NVDA,2024-01-03"
)

307
display_json.py Executable file
View File

@ -0,0 +1,307 @@
#!/usr/bin/env python3
"""
JSON Pretty Display Script for Trading Analysis Data
Displays trading strategy logs with color coding and formatting
"""
import json
import sys
import os
from typing import Dict, Any, Optional
from datetime import datetime
import argparse
# Color codes for terminal output
class Colors:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'   # bright magenta — report headers
    BLUE = '\033[94m'
    CYAN = '\033[96m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'       # reset all attributes
def print_header(text: str, color: str = Colors.HEADER):
    """Print *text* centered between full-width '=' rulers, in *color*."""
    ruler = '=' * 80
    print(f"\n{color}{ruler}")
    print(text.center(80))
    print(f"{ruler}{Colors.END}\n")
def print_section(text: str, color: str = Colors.BLUE):
    """Print a section header between horizontal divider lines."""
    # BUG FIX: the divider literal was an empty string (''*60), so no rule
    # was ever printed — presumably a '─' character was lost; restore it.
    print(f"\n{color}{'─'*60}")
    print(f" {text}")
    print(f"{'─'*60}{Colors.END}")
def print_key_value(key: str, value: str, max_width: int = 100):
    """Print a colorized "key: value" line, truncating overlong values."""
    was_truncated = len(value) > max_width
    shown = value[:max_width - 3] + "..." if was_truncated else value
    print(f"{Colors.CYAN}{key}:{Colors.END} {shown}")
    if was_truncated:
        print(f"{Colors.YELLOW} (truncated - full text available in raw JSON){Colors.END}")
def format_text_block(text: str, indent: int = 2, max_width: int = 80) -> str:
    """Format a long text block paragraph by paragraph.

    Markdown-ish headers ('###' prefix, or '**...**') are colorized;
    plain paragraphs are greedily word-wrapped to *max_width* (the
    *indent* only widens the wrap budget — wrapped lines are emitted
    stripped, matching the original behavior).
    """
    if not text:
        return ""
    pieces = []
    for para in text.split('\n\n'):
        if not para.strip():
            continue
        if para.startswith('###'):
            pieces.append(f"{Colors.GREEN}{para}{Colors.END}")
            continue
        if para.startswith('**') and para.endswith('**'):
            pieces.append(f"{Colors.BOLD}{para}{Colors.END}")
            continue
        # Greedy word wrap.
        wrapped = []
        line = " " * indent
        for word in para.split():
            if len(line + word) < max_width:
                line += word + " "
            else:
                wrapped.append(line.strip())
                line = " " * indent + word + " "
        if line.strip():
            wrapped.append(line.strip())
        pieces.append('\n'.join(wrapped))
    return '\n\n'.join(pieces)
def display_trading_data(data: Dict[str, Any]):
    """Display the trading analysis data in a formatted way.

    *data* maps a single date key to a dict of report strings; each
    known report is printed as its own colorized section.
    """
    # NOTE(review): assumes exactly one top-level date key — confirm.
    date_key = list(data.keys())[0]
    trading_data = data[date_key]

    print_header(f"Trading Analysis Report - {date_key}", Colors.HEADER)

    print_section("Basic Information")
    print_key_value("Company", trading_data.get("company_of_interest", "N/A"))
    print_key_value("Trade Date", trading_data.get("trade_date", "N/A"))

    # (section title, data key, human label for the "missing" message).
    # Refactor: the original repeated this print block seven times.
    sections = [
        ("Market Analysis Report", "market_report", "market report"),
        ("Sentiment Analysis", "sentiment_report", "sentiment report"),
        ("News Analysis", "news_report", "news report"),
        ("Fundamentals Analysis", "fundamentals_report", "fundamentals report"),
        ("Investment Decision", "trader_investment_decision", "investment decision"),
        ("Investment Plan", "investment_plan", "investment plan"),
        ("Final Trade Decision", "final_trade_decision", "final trade decision"),
    ]
    for title, key, label in sections:
        print_section(title)
        content = trading_data.get(key, "")
        if content:
            print(format_text_block(content))
        else:
            print(f"{Colors.RED}No {label} available{Colors.END}")

    # Optional debate sections only appear when present in the data.
    if "investment_debate_state" in trading_data:
        print_section("Investment Debate Analysis")
        debate_state = trading_data["investment_debate_state"]
        if "judge_decision" in debate_state:
            print(f"{Colors.BOLD}Judge Decision:{Colors.END}")
            print(format_text_block(debate_state["judge_decision"]))

    if "risk_debate_state" in trading_data:
        print_section("Risk Management Debate")
        risk_state = trading_data["risk_debate_state"]
        if "judge_decision" in risk_state:
            print(f"{Colors.BOLD}Risk Judge Decision:{Colors.END}")
            print(format_text_block(risk_state["judge_decision"]))
def display_raw_json(data: Dict[str, Any], indent: int = 2):
    """Dump the entire payload as pretty-printed, non-ASCII-safe JSON."""
    print_header("Raw JSON Data", Colors.YELLOW)
    rendered = json.dumps(data, indent=indent, ensure_ascii=False)
    print(rendered)
def interactive_menu(data: Dict[str, Any]):
    """Loop over a numbered menu for exploring the analysis data."""
    # Menu choice -> action; "5" (exit) is handled separately.
    actions = {
        "1": lambda: display_trading_data(data),
        "2": lambda: display_raw_json(data),
        "3": lambda: search_content(data),
        "4": lambda: export_to_file(data),
    }
    while True:
        print(f"\n{Colors.CYAN}Interactive Menu:{Colors.END}")
        print("1. View formatted trading analysis")
        print("2. View raw JSON data")
        print("3. Search for specific content")
        print("4. Export to formatted text file")
        print("5. Exit")
        choice = input(f"\n{Colors.YELLOW}Enter your choice (1-5): {Colors.END}").strip()
        if choice == "5":
            print(f"{Colors.GREEN}Goodbye!{Colors.END}")
            break
        action = actions.get(choice)
        if action is None:
            print(f"{Colors.RED}Invalid choice. Please try again.{Colors.END}")
        else:
            action()
def search_content(data: Dict[str, Any]):
    """Prompt for a term and show where it appears in the report fields."""
    search_term = input(f"{Colors.YELLOW}Enter search term: {Colors.END}").strip().lower()
    if not search_term:
        return
    print(f"\n{Colors.CYAN}Searching for '{search_term}'...{Colors.END}\n")
    date_key = list(data.keys())[0]
    trading_data = data[date_key]
    found = False
    for field, text in trading_data.items():
        if not isinstance(text, str) or search_term not in text.lower():
            continue
        print_section(f"Found in: {field}")
        words = text.split()
        # Show ~5 words of context on either side of the first hit.
        for i, word in enumerate(words):
            if search_term in word.lower():
                context = " ".join(words[max(0, i - 5):min(len(words), i + 6)])
                print(f"...{context}...")
                found = True
                break
    if not found:
        print(f"{Colors.RED}No matches found for '{search_term}'{Colors.END}")
def export_to_file(data: Dict[str, Any]):
    """Export the formatted report to a text file chosen interactively."""
    filename = input(f"{Colors.YELLOW}Enter filename (default: trading_analysis.txt): {Colors.END}").strip()
    if not filename:
        filename = "trading_analysis.txt"
    if not filename.endswith('.txt'):
        filename += '.txt'
    try:
        import io
        import contextlib
        # Capture the formatted console output, then write it out in one go
        # (the original rendered while holding the file open).
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            display_trading_data(data)
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(output.getvalue())
        # BUG FIX: report the actual destination instead of "(unknown)".
        print(f"{Colors.GREEN}Data exported to {filename}{Colors.END}")
    except Exception as e:
        print(f"{Colors.RED}Error exporting file: {e}{Colors.END}")
def _export_formatted(data: Dict[str, Any], path: str) -> None:
    """Render the formatted report into *path* (helper for --export)."""
    try:
        import io
        import contextlib
        # Capture the formatted console output, then write it in one go.
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            display_trading_data(data)
        with open(path, 'w', encoding='utf-8') as f:
            f.write(output.getvalue())
        print(f"{Colors.GREEN}Data exported to {path}{Colors.END}")
    except Exception as e:
        print(f"{Colors.RED}Error exporting file: {e}{Colors.END}")


def main():
    """CLI entry point: parse args, load the JSON file, and dispatch."""
    parser = argparse.ArgumentParser(description="Pretty display JSON trading analysis data")
    parser.add_argument("json_file", help="Path to the JSON file to display")
    parser.add_argument("--raw", action="store_true", help="Display raw JSON only")
    parser.add_argument("--interactive", action="store_true", help="Start interactive mode")
    parser.add_argument("--export", help="Export formatted data to specified file")
    args = parser.parse_args()

    if not os.path.exists(args.json_file):
        print(f"{Colors.RED}Error: File '{args.json_file}' not found{Colors.END}")
        sys.exit(1)
    try:
        with open(args.json_file, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except json.JSONDecodeError as e:
        print(f"{Colors.RED}Error: Invalid JSON file - {e}{Colors.END}")
        sys.exit(1)
    except Exception as e:
        print(f"{Colors.RED}Error reading file: {e}{Colors.END}")
        sys.exit(1)

    # Dispatch on the requested display mode (default: formatted view).
    if args.raw:
        display_raw_json(data)
    elif args.interactive:
        interactive_menu(data)
    elif args.export:
        # Refactor: export logic extracted to _export_formatted (it was
        # duplicated inline here and in export_to_file).
        _export_formatted(data, args.export)
    else:
        display_trading_data(data)


if __name__ == "__main__":
    main()

16
mermaid_graph_plot.py Normal file
View File

@ -0,0 +1,16 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from dotenv import load_dotenv
load_dotenv()
# Build a custom config from the library defaults.
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "gpt-4.1-nano"  # Override the deep-think model
config["quick_think_llm"] = "gpt-4.1-nano"  # Override the quick-think model
config["max_debate_rounds"] = 1  # Single debate round (old comment said "increase" — 1 is the minimum)
config["online_tools"] = True  # Use live online tools rather than cached data
# Initialize the graph with the custom config (debug mode on).
ta = TradingAgentsGraph(debug=True, config=config)
# Print the compiled graph as a Mermaid diagram definition.
print(ta.graph.get_graph().draw_mermaid())

30
test.json Normal file
View File

@ -0,0 +1,30 @@
{
"final_state": {
"messages": [
{
"content": "Continue",
"additional_kwargs": {},
"response_metadata": {},
"id": "3a5825d2-ec8d-4560-9d08-1db61085ac2f"
}
],
"company_of_interest": "eth",
"trade_date": "2024-01-01",
"market_report": "The recent ETH/USD data reveals a nuanced trend landscape:\n\n**Price Action & Moving Averages:** The price has been oscillating around the 50 SMA (~2178), which has steadily increased from about 1887 in early December to over 2178 at present, indicating a persistent medium-term upward trend. The 200 SMA (~1864) remains below the current price, reaffirming a bullish bias on a long-term horizon. The price hasn't crossed below the 50 SMA, supporting ongoing upward momentum.\n\n**MACD:** The MACD value stands at approximately 39.50, significantly above zero, suggesting strong bullish momentum. Although recent MACD values have fluctuated, they have remained in positive territory, and the recent readings from December are still indicative of upward acceleration, with no clear divergence signaling a reversal.\n\n**RSI:** The RSI is around 58.7, which is in a neutral zone but leaning slightly towards overbought. This suggests that while the momentum remains positive, the asset is nearing levels where a short-term correction could occur if the RSI approaches or surpasses 70.\n\n**Bollinger Bands:** The current Bollinger middle line (~2270) and the price near the upper band (~2270.7) indicate that ETH is approaching overbought levels relative to recent volatility. 
The bands are widening, reflecting increased volatility, typical during trend sustainment phases.\n\n### Insights:\n- The consistent upward move above the 50 and 200 SMAs indicates a resilient bullish trend.\n- The MACD confirms strong momentum, but its high level warrants caution for potential short-term pullbacks.\n- The RSI's neutral reading suggests room for continuation, but nearing overbought territory signals watchfulness.\n- Price near the upper Bollinger Band indicates potential for short-term retracement or consolidation before further gains.\n\n### Trading considerations:\n- For bullish continuation: recent bullish momentum is strong, but confirmation with a sustainable MACD crossover or a pullback to the 50 SMA before resuming upward could be prudent.\n- For caution: the proximity to the upper Bollinger Band and RSI nearing overbought suggest possible short-term pullbacks.\n\n**Overall, the trend remains bullish, supported by several key indicators, but short-term caution is advisable given overbought signals.**\n\n| Aspect | Key Point |\n| --- | --- |\n| Price Trend | Upward, near 50 SMA (~2178) and below upper Bollinger Band (~2270) |\n| Long-term",
"sentiment_report": "",
"news_report": "",
"fundamentals_report": "",
"investment_debate_state": {
"history": "",
"current_response": "",
"count": 0
},
"risk_debate_state": {
"history": "",
"current_risky_response": "",
"current_safe_response": "",
"current_neutral_response": "",
"count": 0
}
}
}

184
test.py Normal file
View File

@ -0,0 +1,184 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
def json_to_markdown(json_file_path: str) -> str:
    """
    Convert trading analysis JSON file to formatted markdown string.

    Args:
        json_file_path (str): Path to the JSON file containing trading analysis data

    Returns:
        str: Formatted markdown string (an "# Error" document on failure)
    """
    import json
    import os

    if not os.path.exists(json_file_path):
        return f"# Error\nFile not found: {json_file_path}"
    try:
        with open(json_file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
    except json.JSONDecodeError as e:
        return f"# Error\nInvalid JSON file: {e}"
    except Exception as e:
        return f"# Error\nError reading file: {e}"

    # NOTE(review): assumes exactly one top-level date key — confirm.
    date_key = list(data.keys())[0]
    trading_data = data[date_key]

    markdown = []
    markdown.append(f"# Trading Analysis Report - {date_key}")
    markdown.append("")
    markdown.append("## Basic Information")
    markdown.append("")
    markdown.append(f"**Company:** {trading_data.get('company_of_interest', 'N/A')}")
    markdown.append(f"**Trade Date:** {trading_data.get('trade_date', 'N/A')}")
    markdown.append("")

    # (section heading, data key, human label for the "missing" note).
    # Refactor: the original repeated this append block seven times;
    # the rendered output is byte-identical.
    sections = [
        ("Market Analysis Report", "market_report", "market report"),
        ("Sentiment Analysis", "sentiment_report", "sentiment report"),
        ("News Analysis", "news_report", "news report"),
        ("Fundamentals Analysis", "fundamentals_report", "fundamentals report"),
        ("Investment Decision", "trader_investment_decision", "investment decision"),
        ("Investment Plan", "investment_plan", "investment plan"),
        ("Final Trade Decision", "final_trade_decision", "final trade decision"),
    ]
    for title, key, label in sections:
        markdown.append(f"## {title}")
        markdown.append("")
        content = trading_data.get(key, "")
        markdown.append(content if content else f"*No {label} available*")
        markdown.append("")

    # Optional debate sections only appear when present in the data.
    for state_key, heading, sub_heading in (
        ("investment_debate_state", "Investment Debate Analysis", "Judge Decision"),
        ("risk_debate_state", "Risk Management Debate", "Risk Judge Decision"),
    ):
        if state_key in trading_data:
            markdown.append(f"## {heading}")
            markdown.append("")
            state = trading_data[state_key]
            if "judge_decision" in state:
                markdown.append(f"### {sub_heading}")
                markdown.append("")
                markdown.append(state["judge_decision"])
                markdown.append("")

    return "\n".join(markdown)
# Example usage:
if __name__ == "__main__":
    # Build a custom config from the library defaults.
    config = DEFAULT_CONFIG.copy()
    config["deep_think_llm"] = "gpt-4.1-nano"  # Override the deep-think model
    config["quick_think_llm"] = "gpt-4.1-nano"  # Override the quick-think model
    config["max_debate_rounds"] = 1  # Single debate round (old comment said "increase")
    config["online_tools"] = True  # Use live online tools rather than cached data
    # Initialize with custom config (non-debug run)
    ta = TradingAgentsGraph(debug=False, config=config)
    # Run the full analysis pipeline for one ticker/date.
    Ticker = "BTC"
    Date = "2024-05-11"
    _, decision = ta.propagate(Ticker, Date)
    print("--------------------------------")
    print(decision)
    print("--------------------------------")
    # NOTE(review): assumes propagate() wrote its full state log to this
    # path as a side effect — confirm against TradingAgentsGraph.
    path = f"eval_results/{Ticker}/TradingAgentsStrategy_logs/full_states_log_{Date}.json"
    # Convert the JSON state log to markdown.
    markdown_content = json_to_markdown(path)
    # Print the markdown content.
    print("=" * 80)
    print("MARKDOWN OUTPUT:")
    print("=" * 80)
    print(markdown_content)
    # Save the rendered report next to the JSON log.
    markdown_file = f"eval_results/{Ticker}/TradingAgentsStrategy_logs/report_{Date}.md"
    with open(markdown_file, 'w', encoding='utf-8') as f:
        f.write(markdown_content)
    # print(f"\nMarkdown report saved to: {markdown_file}")

View File

@ -22,7 +22,7 @@ def create_fundamentals_analyst(llm, toolkit):
system_message = ( system_message = (
"You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, company financial history, insider sentiment and insider transactions to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
+ " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.", + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read. ",
) )
prompt = ChatPromptTemplate.from_messages( prompt = ChatPromptTemplate.from_messages(
@ -36,7 +36,10 @@ def create_fundamentals_analyst(llm, toolkit):
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}", "For your reference, the current date is {current_date}. The company we want to look at is {ticker}"
"Make sure to generate less than 500 tokens."
" Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]

View File

@ -61,7 +61,9 @@ Volume-Based Indicators:
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The company we want to look at is {ticker}", "For your reference, the current date is {current_date}. The company we want to look at is {ticker}"
"Make sure to generate less than 500 tokens."
" Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]

View File

@ -33,7 +33,11 @@ def create_news_analyst(llm, toolkit):
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. We are looking at the company {ticker}", "For your reference, the current date is {current_date}. We are looking at the company {ticker}"
"Make sure to generate less than 500 tokens."
" Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]

View File

@ -32,7 +32,11 @@ def create_social_media_analyst(llm, toolkit):
" If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable," " If you or any other assistant has the FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** or deliverable,"
" prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop." " prefix your response with FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL** so the team knows to stop."
" You have access to the following tools: {tool_names}.\n{system_message}" " You have access to the following tools: {tool_names}.\n{system_message}"
"For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}", "For your reference, the current date is {current_date}. The current company we want to analyze is {ticker}"
"Make sure to generate less than 500 tokens."
" Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. "
), ),
MessagesPlaceholder(variable_name="messages"), MessagesPlaceholder(variable_name="messages"),
] ]

View File

@ -29,6 +29,7 @@ Your Recommendation: A decisive stance supported by the most convincing argument
Rationale: An explanation of why these arguments lead to your conclusion. Rationale: An explanation of why these arguments lead to your conclusion.
Strategic Actions: Concrete steps for implementing the recommendation. Strategic Actions: Concrete steps for implementing the recommendation.
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting. Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.
Make sure to generate less than 500 tokens.
Here are your past reflections on mistakes: Here are your past reflections on mistakes:
\"{past_memory_str}\" \"{past_memory_str}\"

View File

@ -33,7 +33,7 @@ Guidelines for Decision-Making:
Deliverables: Deliverables:
- A clear and actionable recommendation: Buy, Sell, or Hold. - A clear and actionable recommendation: Buy, Sell, or Hold.
- Detailed reasoning anchored in the debate and past reflections. - Detailed reasoning anchored in the debate and past reflections.
- Make sure to generate less than 500 tokens.
--- ---
**Analysts Debate History:** **Analysts Debate History:**

View File

@ -42,6 +42,7 @@ Conversation history of the debate: {history}
Last bull argument: {current_response} Last bull argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str} Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past. Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
Make sure to generate less than 500 tokens.
""" """
response = llm.invoke(prompt) response = llm.invoke(prompt)

View File

@ -40,6 +40,8 @@ Conversation history of the debate: {history}
Last bear argument: {current_response} Last bear argument: {current_response}
Reflections from similar situations and lessons learned: {past_memory_str} Reflections from similar situations and lessons learned: {past_memory_str}
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past. Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
Make sure to generate less than 500 tokens.
""" """
response = llm.invoke(prompt) response = llm.invoke(prompt)

View File

@ -30,7 +30,10 @@ Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report} Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting.""" Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting.
Make sure to generate less than 500 tokens.
"""
response = llm.invoke(prompt) response = llm.invoke(prompt)

View File

@ -31,7 +31,10 @@ Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report} Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting.""" Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting.
Make sure to generate less than 500 tokens.
"""
response = llm.invoke(prompt) response = llm.invoke(prompt)

View File

@ -30,7 +30,10 @@ Latest World Affairs Report: {news_report}
Company Fundamentals Report: {fundamentals_report} Company Fundamentals Report: {fundamentals_report}
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting.""" Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting.
Make sure to generate less than 500 tokens.
"""
response = llm.invoke(prompt) response = llm.invoke(prompt)

View File

@ -24,7 +24,7 @@ def create_trader(llm, memory):
context = { context = {
"role": "user", "role": "user",
"content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.", "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision. Make sure to generate less than 500 tokens.",
} }
messages = [ messages = [

View File

@ -19,4 +19,5 @@ DEFAULT_CONFIG = {
"max_recur_limit": 100, "max_recur_limit": 100,
# Tool settings # Tool settings
"online_tools": True, "online_tools": True,
"max_completion_tokens": 500,
} }

View File

@ -27,6 +27,7 @@ class GraphSetup:
invest_judge_memory, invest_judge_memory,
risk_manager_memory, risk_manager_memory,
conditional_logic: ConditionalLogic, conditional_logic: ConditionalLogic,
debate: bool = False
): ):
"""Initialize with required components.""" """Initialize with required components."""
self.quick_thinking_llm = quick_thinking_llm self.quick_thinking_llm = quick_thinking_llm
@ -39,7 +40,7 @@ class GraphSetup:
self.invest_judge_memory = invest_judge_memory self.invest_judge_memory = invest_judge_memory
self.risk_manager_memory = risk_manager_memory self.risk_manager_memory = risk_manager_memory
self.conditional_logic = conditional_logic self.conditional_logic = conditional_logic
self.debate = debate
def setup_graph( def setup_graph(
self, selected_analysts=["market", "social", "news", "fundamentals"] self, selected_analysts=["market", "social", "news", "fundamentals"]
): ):
@ -88,25 +89,26 @@ class GraphSetup:
delete_nodes["fundamentals"] = create_msg_delete() delete_nodes["fundamentals"] = create_msg_delete()
tool_nodes["fundamentals"] = self.tool_nodes["fundamentals"] tool_nodes["fundamentals"] = self.tool_nodes["fundamentals"]
# Create researcher and manager nodes if self.debate:
bull_researcher_node = create_bull_researcher( # Create researcher and manager nodes
self.quick_thinking_llm, self.bull_memory bull_researcher_node = create_bull_researcher(
) self.quick_thinking_llm, self.bull_memory
bear_researcher_node = create_bear_researcher( )
self.quick_thinking_llm, self.bear_memory bear_researcher_node = create_bear_researcher(
) self.quick_thinking_llm, self.bear_memory
research_manager_node = create_research_manager( )
self.deep_thinking_llm, self.invest_judge_memory research_manager_node = create_research_manager(
) self.deep_thinking_llm, self.invest_judge_memory
trader_node = create_trader(self.quick_thinking_llm, self.trader_memory) )
trader_node = create_trader(self.quick_thinking_llm, self.trader_memory)
# Create risk analysis nodes # Create risk analysis nodes
risky_analyst = create_risky_debator(self.quick_thinking_llm) risky_analyst = create_risky_debator(self.quick_thinking_llm)
neutral_analyst = create_neutral_debator(self.quick_thinking_llm) neutral_analyst = create_neutral_debator(self.quick_thinking_llm)
safe_analyst = create_safe_debator(self.quick_thinking_llm) safe_analyst = create_safe_debator(self.quick_thinking_llm)
risk_manager_node = create_risk_manager( risk_manager_node = create_risk_manager(
self.deep_thinking_llm, self.risk_manager_memory self.deep_thinking_llm, self.risk_manager_memory
) )
# Create workflow # Create workflow
workflow = StateGraph(AgentState) workflow = StateGraph(AgentState)
@ -120,14 +122,15 @@ class GraphSetup:
workflow.add_node(f"tools_{analyst_type}", tool_nodes[analyst_type]) workflow.add_node(f"tools_{analyst_type}", tool_nodes[analyst_type])
# Add other nodes # Add other nodes
workflow.add_node("Bull Researcher", bull_researcher_node) if self.debate:
workflow.add_node("Bear Researcher", bear_researcher_node) workflow.add_node("Bull Researcher", bull_researcher_node)
workflow.add_node("Research Manager", research_manager_node) workflow.add_node("Bear Researcher", bear_researcher_node)
workflow.add_node("Trader", trader_node) workflow.add_node("Research Manager", research_manager_node)
workflow.add_node("Risky Analyst", risky_analyst) workflow.add_node("Trader", trader_node)
workflow.add_node("Neutral Analyst", neutral_analyst) workflow.add_node("Risky Analyst", risky_analyst)
workflow.add_node("Safe Analyst", safe_analyst) workflow.add_node("Neutral Analyst", neutral_analyst)
workflow.add_node("Risk Judge", risk_manager_node) workflow.add_node("Safe Analyst", safe_analyst)
workflow.add_node("Risk Judge", risk_manager_node)
# Define edges # Define edges
# Start with the first analyst # Start with the first analyst
@ -153,53 +156,56 @@ class GraphSetup:
next_analyst = f"{selected_analysts[i+1].capitalize()} Analyst" next_analyst = f"{selected_analysts[i+1].capitalize()} Analyst"
workflow.add_edge(current_clear, next_analyst) workflow.add_edge(current_clear, next_analyst)
else: else:
workflow.add_edge(current_clear, "Bull Researcher") if self.debate:
workflow.add_edge(current_clear, "Bull Researcher")
else:
workflow.add_edge(current_clear, END)
# Add remaining edges # Add remaining edges
workflow.add_conditional_edges( if self.debate:
"Bull Researcher", workflow.add_conditional_edges(
self.conditional_logic.should_continue_debate, "Bull Researcher",
{ self.conditional_logic.should_continue_debate,
"Bear Researcher": "Bear Researcher", {
"Research Manager": "Research Manager", "Bear Researcher": "Bear Researcher",
}, "Research Manager": "Research Manager",
) },
workflow.add_conditional_edges( )
"Bear Researcher", workflow.add_conditional_edges(
self.conditional_logic.should_continue_debate, "Bear Researcher",
{ self.conditional_logic.should_continue_debate,
"Bull Researcher": "Bull Researcher", {
"Research Manager": "Research Manager", "Bull Researcher": "Bull Researcher",
}, "Research Manager": "Research Manager",
) },
workflow.add_edge("Research Manager", "Trader") )
workflow.add_edge("Trader", "Risky Analyst") workflow.add_edge("Research Manager", "Trader")
workflow.add_conditional_edges( workflow.add_edge("Trader", "Risky Analyst")
"Risky Analyst", workflow.add_conditional_edges(
self.conditional_logic.should_continue_risk_analysis, "Risky Analyst",
{ self.conditional_logic.should_continue_risk_analysis,
"Safe Analyst": "Safe Analyst", {
"Risk Judge": "Risk Judge", "Safe Analyst": "Safe Analyst",
}, "Risk Judge": "Risk Judge",
) },
workflow.add_conditional_edges( )
"Safe Analyst", workflow.add_conditional_edges(
self.conditional_logic.should_continue_risk_analysis, "Safe Analyst",
{ self.conditional_logic.should_continue_risk_analysis,
"Neutral Analyst": "Neutral Analyst", {
"Risk Judge": "Risk Judge", "Neutral Analyst": "Neutral Analyst",
}, "Risk Judge": "Risk Judge",
) },
workflow.add_conditional_edges( )
"Neutral Analyst", workflow.add_conditional_edges(
self.conditional_logic.should_continue_risk_analysis, "Neutral Analyst",
{ self.conditional_logic.should_continue_risk_analysis,
"Risky Analyst": "Risky Analyst", {
"Risk Judge": "Risk Judge", "Risky Analyst": "Risky Analyst",
}, "Risk Judge": "Risk Judge",
) },
)
workflow.add_edge("Risk Judge", END) workflow.add_edge("Risk Judge", END)
# Compile and return # Compile and return
return workflow.compile() return workflow.compile()

View File

@ -37,6 +37,7 @@ class TradingAgentsGraph:
selected_analysts=["market", "social", "news", "fundamentals"], selected_analysts=["market", "social", "news", "fundamentals"],
debug=False, debug=False,
config: Dict[str, Any] = None, config: Dict[str, Any] = None,
debate=False
): ):
"""Initialize the trading agents graph and components. """Initialize the trading agents graph and components.
@ -47,6 +48,7 @@ class TradingAgentsGraph:
""" """
self.debug = debug self.debug = debug
self.config = config or DEFAULT_CONFIG self.config = config or DEFAULT_CONFIG
self.debate = debate
# Update the interface's config # Update the interface's config
set_config(self.config) set_config(self.config)
@ -59,8 +61,8 @@ class TradingAgentsGraph:
# Initialize LLMs # Initialize LLMs
if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter": if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"]) self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"],max_tokens=self.config["max_completion_tokens"])
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"]) self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"],max_tokens=self.config["max_completion_tokens"])
elif self.config["llm_provider"].lower() == "anthropic": elif self.config["llm_provider"].lower() == "anthropic":
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"]) self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"]) self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
@ -95,6 +97,7 @@ class TradingAgentsGraph:
self.invest_judge_memory, self.invest_judge_memory,
self.risk_manager_memory, self.risk_manager_memory,
self.conditional_logic, self.conditional_logic,
self.debate,
) )
self.propagator = Propagator() self.propagator = Propagator()
@ -184,42 +187,56 @@ class TradingAgentsGraph:
self.curr_state = final_state self.curr_state = final_state
# Log state # Log state
self._log_state(trade_date, final_state) log_state = self._log_state(trade_date, final_state)
# Return decision and processed signal # Return decision and processed signal
return final_state, self.process_signal(final_state["final_trade_decision"]) if self.debate:
return log_state, self.process_signal(final_state["final_trade_decision"])
else:
return log_state, ""
def _log_state(self, trade_date, final_state): def _log_state(self, trade_date, final_state):
"""Log the final state to a JSON file.""" """Log the final state to a JSON file."""
self.log_states_dict[str(trade_date)] = { if self.debate:
"company_of_interest": final_state["company_of_interest"], self.log_states_dict[str(trade_date)] = {
"trade_date": final_state["trade_date"], "company_of_interest": final_state["company_of_interest"],
"market_report": final_state["market_report"], "trade_date": final_state["trade_date"],
"sentiment_report": final_state["sentiment_report"], "market_report": final_state["market_report"],
"news_report": final_state["news_report"], "sentiment_report": final_state["sentiment_report"],
"fundamentals_report": final_state["fundamentals_report"], "news_report": final_state["news_report"],
"investment_debate_state": { "fundamentals_report": final_state["fundamentals_report"],
"bull_history": final_state["investment_debate_state"]["bull_history"], "investment_debate_state": {
"bear_history": final_state["investment_debate_state"]["bear_history"], "bull_history": final_state["investment_debate_state"]["bull_history"],
"history": final_state["investment_debate_state"]["history"], "bear_history": final_state["investment_debate_state"]["bear_history"],
"current_response": final_state["investment_debate_state"][ "history": final_state["investment_debate_state"]["history"],
"current_response" "current_response": final_state["investment_debate_state"][
], "current_response"
"judge_decision": final_state["investment_debate_state"][ ],
"judge_decision" "judge_decision": final_state["investment_debate_state"][
], "judge_decision"
}, ],
"trader_investment_decision": final_state["trader_investment_plan"], },
"risk_debate_state": { "trader_investment_decision": final_state["trader_investment_plan"],
"risky_history": final_state["risk_debate_state"]["risky_history"], "risk_debate_state": {
"safe_history": final_state["risk_debate_state"]["safe_history"], "risky_history": final_state["risk_debate_state"]["risky_history"],
"neutral_history": final_state["risk_debate_state"]["neutral_history"], "safe_history": final_state["risk_debate_state"]["safe_history"],
"history": final_state["risk_debate_state"]["history"], "neutral_history": final_state["risk_debate_state"]["neutral_history"],
"judge_decision": final_state["risk_debate_state"]["judge_decision"], "history": final_state["risk_debate_state"]["history"],
}, "judge_decision": final_state["risk_debate_state"]["judge_decision"],
"investment_plan": final_state["investment_plan"], },
"final_trade_decision": final_state["final_trade_decision"], "investment_plan": final_state["investment_plan"],
} "final_trade_decision": final_state["final_trade_decision"],
}
else:
self.log_states_dict[str(trade_date)] = {
"company_of_interest": final_state["company_of_interest"],
"trade_date": final_state["trade_date"],
"market_report": final_state["market_report"],
"sentiment_report": final_state["sentiment_report"],
"news_report": final_state["news_report"],
"fundamentals_report": final_state["fundamentals_report"],
}
# Save to file # Save to file
directory = Path(f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/") directory = Path(f"eval_results/{self.ticker}/TradingAgentsStrategy_logs/")
@ -230,6 +247,7 @@ class TradingAgentsGraph:
"w", "w",
) as f: ) as f:
json.dump(self.log_states_dict, f, indent=4) json.dump(self.log_states_dict, f, indent=4)
return self.log_states_dict
def reflect_and_remember(self, returns_losses): def reflect_and_remember(self, returns_losses):
"""Reflect on decisions and update memory based on returns.""" """Reflect on decisions and update memory based on returns."""