save
This commit is contained in:
parent
4aa2be9867
commit
817deb8247
|
|
@ -31,8 +31,8 @@ def create_trader(llm, memory):
|
|||
{
|
||||
"role": "system",
|
||||
"content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific position to long, short, or hold.
|
||||
End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **LONG/SHORT/HOLD**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}"""
|
||||
+ """Output strictly a JSON block with the following format: {"position": "Long" | "Short" | "Hold", explanation:""}""",
|
||||
End with a firm decision. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}"""
|
||||
+ """Output strictly a JSON block with the following format: {"position": "Long" | "Short" | "Hold", explanation: str, "profit_estimate_pct": float, "risk_level": "Low" | "Medium" | "High", "trade_duration_days": int}""",
|
||||
},
|
||||
context,
|
||||
]
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ from typing import Annotated
|
|||
|
||||
# Import from vendor-specific modules
|
||||
from .local import get_YFin_data, get_finnhub_news, get_finnhub_company_insider_sentiment, get_finnhub_company_insider_transactions, get_simfin_balance_sheet, get_simfin_cashflow, get_simfin_income_statements, get_reddit_global_news, get_reddit_company_news
|
||||
from .local_news import get_local_news
|
||||
from .y_finance import get_YFin_data_online, get_stock_stats_indicators_window, get_balance_sheet as get_yfinance_balance_sheet, get_cashflow as get_yfinance_cashflow, get_income_statement as get_yfinance_income_statement, get_insider_transactions as get_yfinance_insider_transactions
|
||||
from .google import get_google_news
|
||||
from .openai import get_stock_news_openai, get_global_news_openai, get_fundamentals_openai
|
||||
|
|
@ -58,7 +59,8 @@ VENDOR_LIST = [
|
|||
"local",
|
||||
"yfinance",
|
||||
"openai",
|
||||
"google"
|
||||
"google",
|
||||
"local_news"
|
||||
]
|
||||
|
||||
# Mapping of methods to their vendor-specific implementations
|
||||
|
|
@ -101,6 +103,7 @@ VENDOR_METHODS = {
|
|||
"openai": get_stock_news_openai,
|
||||
"google": get_google_news,
|
||||
"local": [get_finnhub_news, get_reddit_company_news, get_google_news],
|
||||
"local_news": get_local_news
|
||||
},
|
||||
"get_global_news": {
|
||||
"openai": get_global_news_openai,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,94 @@
|
|||
from datetime import date, timedelta, datetime
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from .config import DATA_DIR
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def get_local_news(ticker, start_date: str, end_date: str) -> str:
    """Return locally cached market news & sentiment data for a ticker.

    Covers stocks, cryptocurrencies, forex, and topics like fiscal policy,
    mergers & acquisitions, IPOs. Loads the per-day news cache for each day
    in the requested window, drops low-relevance articles, and renders the
    result as a JSON-style report string.

    Args:
        ticker: Stock symbol for news articles.
        start_date: Inclusive ISO start date (YYYY-MM-DD) for the news search.
        end_date: ISO end date (YYYY-MM-DD) for the news search.
            NOTE(review): the end date itself is excluded (half-open range) —
            confirm callers expect this.

    Returns:
        A JSON-formatted report string containing the filtered news feed
        keyed by date, plus score-definition metadata.
    """
    start_date_date = date.fromisoformat(start_date)
    end_date_date = date.fromisoformat(end_date)

    # Half-open range [start, end): range(total_days) never yields end_date.
    total_days = (end_date_date - start_date_date).days
    dates_to_fetch = [start_date_date + timedelta(days=i) for i in range(total_days)]

    feed = {}
    for date_ in dates_to_fetch:
        feed[str(date_)] = filter_irrelevant_news(load_news(ticker, date_))

    # Render inline rather than via an assigned lambda (PEP 8 / E731).
    return f"""{{
"items": {len(feed)},
"sentiment_score_definition": "x <= -0.65: Bearish; -0.65 < x <= -0.25: Somewhat-Bearish; -0.25 < x < 0.25: Neutral; 0.25 <= x < 0.65: Somewhat_Bullish; x >= 0.65: Bullish",
"relevance_score_definition": "0 < x <= 1, with a higher score indicating higher relevance.",
"feed": {feed}
}}"""
|
||||
|
||||
def load_news(ticker: str, date: date, save_dir: str = 'news/daily_news_processed') -> list:
    """
    Load news articles for one ticker/day from a cached JSON file.

    Args:
        ticker (str): The stock ticker symbol.
        date (date): The date for which to load news articles.
        save_dir (str): Directory under DATA_DIR holding per-ticker daily
            news files (default: 'news/daily_news_processed').

    Returns:
        list: A list of news articles loaded from the file, or an empty
        list when the file is missing or unreadable.
    """
    base_dir = os.path.join(DATA_DIR, save_dir)
    filename = f"{base_dir}/{ticker}/{date}.json"
    try:
        with open(filename, 'r') as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError) as e:
        # A missing or malformed per-day cache file is expected; log it
        # (with the actual path, not a placeholder) and fall back to
        # "no news" instead of aborting the whole date range.
        log.error(f"Error loading news from {filename}: {e}")
        return []
|
||||
|
||||
def filter_irrelevant_news(news_list: list, threshold: float = 0.6) -> list:
    """
    Filter news articles based on their relevancy score.

    Args:
        news_list (list): List of news-article dicts, each expected to
            carry a numeric 'relevancy_score'.
        threshold (float): Minimum relevancy score to include a news
            article (default: 0.6).

    Returns:
        list: Filtered articles (reduced to summary / relevancy_score /
        sentiment_score fields) with relevancy_score >= threshold. On an
        unexpected error the input list is returned unchanged as a
        best-effort fallback.
    """
    if not news_list:
        log.info("No news articles provided for filtering.")
        return []

    filtered_news = []
    try:
        for news in news_list:
            # Only items that lack the key are warned about; items whose
            # score is present but below threshold are silently dropped.
            if 'relevancy_score' not in news:
                log.warning(f"News item missing valid 'relevancy_score': {news}")
                continue
            score = news['relevancy_score']
            if isinstance(score, (float, int)) and score >= threshold:
                filtered_news.append(
                    {
                        "summary": news.get("summary", ""),
                        "relevancy_score": news.get("relevancy_score", 0),
                        "sentiment_score": news.get("sentiment_score", 0),
                    }
                )
        log.info(f"Filtered {len(filtered_news)} out of {len(news_list)} news articles with relevancy_score >= {threshold}.")
    except Exception as e:
        log.error(f"Error filtering news: {e}")
        filtered_news = news_list
    return filtered_news
|
||||
|
|
@ -2,33 +2,37 @@ import os
|
|||
|
||||
DEFAULT_CONFIG = {
|
||||
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
|
||||
"results_dir": os.getenv("TRADINGAGENTS_RESULTS_DIR", "./results"),
|
||||
"data_dir": "/data/coding/trading_agents",
|
||||
"results_dir": os.getenv("/data/coding/TradingAgents/results", "./results"),
|
||||
"data_dir": "/data",
|
||||
"data_cache_dir": os.path.join(
|
||||
os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
|
||||
"dataflows/data_cache",
|
||||
),
|
||||
# LLM settings
|
||||
"llm_provider": "openai",
|
||||
"llm_provider": "ollama",
|
||||
# "deep_think_llm": "hf.co/unsloth/Qwen3-30B-A3B-Instruct-2507-GGUF:Q5_K_XL",
|
||||
# "quick_think_llm": "hf.co/unsloth/Qwen3-30B-A3B-Instruct-2507-GGUF:Q5_K_XL",
|
||||
# "deep_think_llm": "hf.co/unsloth/gpt-oss-20b-GGUF:F16",
|
||||
# "quick_think_llm": "hf.co/unsloth/gpt-oss-20b-GGUF:F16",
|
||||
"deep_think_llm": "gemma3:27b",
|
||||
"quick_think_llm": "gemma3:27b",
|
||||
# "deep_think_llm": "qwen3:30b",
|
||||
# "quick_think_llm": "qwen3:30b",
|
||||
# "deep_think_llm": "gpt-oss",
|
||||
# "quick_think_llm": "gpt-oss",
|
||||
"deep_think_llm": "glm-4.7-flash",
|
||||
"quick_think_llm": "glm-4.7-flash",
|
||||
# "backend_url": "http://localhost:8080/v1",
|
||||
"backend_url": "http://localhost:11434/v1",
|
||||
# Debate and discussion settings
|
||||
"max_debate_rounds": 1,
|
||||
"max_risk_discuss_rounds": 1,
|
||||
"max_recur_limit": 500,
|
||||
"max_debate_rounds": 5,
|
||||
"max_risk_discuss_rounds": 5,
|
||||
"max_recur_limit": 100,
|
||||
# Data vendor configuration
|
||||
# Category-level configuration (default for all tools in category)
|
||||
"data_vendors": {
|
||||
"core_stock_apis": "yfinance", # Options: yfinance, alpha_vantage, local
|
||||
"technical_indicators": "yfinance", # Options: yfinance, alpha_vantage, local
|
||||
"fundamental_data": "alpha_vantage", # Options: openai, alpha_vantage, local
|
||||
"news_data": "google", # Options: openai, alpha_vantage, google, local
|
||||
"news_data": "local_news", # Options: openai, alpha_vantage, google, local_news
|
||||
},
|
||||
# Tool-level configuration (takes precedence over category-level)
|
||||
"tool_vendors": {
|
||||
|
|
@ -40,7 +44,7 @@ DEFAULT_CONFIG = {
|
|||
"get_balance_sheet": "yfinance",
|
||||
"get_cashflow": "yfinance",
|
||||
"get_income_statement": "yfinance",
|
||||
"get_news": "alpha_vantage",
|
||||
"get_news": "local_news",
|
||||
"get_global_news": "openai",
|
||||
# "get_insider_sentiment": "na",
|
||||
"get_insider_transactions": "yfinance",
|
||||
|
|
|
|||
|
|
@ -23,8 +23,8 @@ class SignalProcessor:
|
|||
messages = [
|
||||
(
|
||||
"system",
|
||||
"You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SHORT, LONG, or HOLD. Provide only the extracted decision (SHORT, LONG, or HOLD) as your output, without adding any additional text or information."
|
||||
+ """Output strictly a JSON block with the following format: {"position": "Long" | "Short" | "Hold", explanation:""}""",
|
||||
"You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: LONG, SHORT, or HOLD. Provide only the extracted decision (LONG, SHORT, or HOLD) as your output, without adding any additional text or information."
|
||||
+ """Output strictly a JSON block with the following format: {"position": "Long" | "Short" | "Hold", explanation: str, "profit_estimate_pct": float, "risk_level": "Low" | "Medium" | "High"}""",
|
||||
),
|
||||
("human", full_signal),
|
||||
]
|
||||
|
|
|
|||
|
|
@ -187,7 +187,7 @@ class TradingAgentsGraph:
|
|||
self.curr_state = final_state
|
||||
|
||||
# Log state
|
||||
self._log_state(trade_date, final_state)
|
||||
# self._log_state(trade_date, final_state)
|
||||
|
||||
# Return decision and processed signal
|
||||
return final_state, self.process_signal(final_state["final_trade_decision"])
|
||||
|
|
|
|||
|
|
@ -0,0 +1,39 @@
|
|||
import os
|
||||
from datetime import datetime
|
||||
from typing import Dict
|
||||
|
||||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
class TauricResearcher:
    """A trading research agent that analyzes stocks and makes trading decisions."""

    def __init__(self):
        """Initialize the TauricResearch agent and set up environment variables."""
        self.init_environ_vars()

    def init_environ_vars(self):
        """Initialize required API keys as environment variables.

        SECURITY(review): these credentials are hard-coded in source and
        have been committed. They should be rotated and loaded from the
        environment or a secrets manager; kept byte-identical here only
        to preserve current behavior.
        """
        os.environ["OPENAI_API_KEY"] = "sk-xxxx"  # IGNORE
        os.environ["ALPHA_VANTAGE_API_KEY"] = "J13IJQQOT4NLKF3A"
        os.environ["OLLAMA_API_KEY"] = "85a41aff1f814d3ca81f0a957ac02114.HGH8TZywvA0zbLe2y09Kvv4F"

    def run(self, stock_symbol: str = "NVDA", date: str = None, config: dict = None) -> dict:
        """
        Run the trading agent to generate a trading decision for a stock.

        Args:
            stock_symbol: Ticker symbol to analyze (default: "NVDA").
            date: Trade date forwarded to the graph propagation (default: None).
            config: Configuration dictionary for the trading graph. When
                omitted, a fresh copy of DEFAULT_CONFIG is made per call.
                (The previous default, ``DEFAULT_CONFIG.copy()``, was
                evaluated once at definition time, so every call shared
                the same mutable dict — the classic mutable-default bug.)

        Returns:
            The evaluated trading decision as a Python object.
        """
        import ast

        if config is None:
            config = DEFAULT_CONFIG.copy()

        # Initialize the trading graph with debug mode enabled
        ta = TradingAgentsGraph(debug=True, config=config)

        # Forward propagate through the graph to get the trading decision
        _, decision = ta.propagate(stock_symbol, date)

        # Parse the model's decision string. ast.literal_eval accepts only
        # Python literals, unlike the original eval(), which would execute
        # arbitrary code embedded in untrusted LLM output.
        return ast.literal_eval(decision)
|
||||
Loading…
Reference in New Issue