Update
This commit is contained in:
parent
8f5e2f6e5e
commit
ea4ee9176b
202
cli/main.py
202
cli/main.py
|
|
@ -836,6 +836,18 @@ def run_discovery_analysis(selections):
|
||||||
# Set config globally for route_to_vendor
|
# Set config globally for route_to_vendor
|
||||||
set_config(config)
|
set_config(config)
|
||||||
|
|
||||||
|
|
||||||
|
# Generate run timestamp
|
||||||
|
import datetime
|
||||||
|
run_timestamp = datetime.datetime.now().strftime("%H_%M_%S")
|
||||||
|
|
||||||
|
# Create results directory with run timestamp
|
||||||
|
results_dir = Path(config["results_dir"]) / "discovery" / selections["analysis_date"] / f"run_{run_timestamp}"
|
||||||
|
results_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Add results dir to config so graph can use it for logging
|
||||||
|
config["discovery_run_dir"] = str(results_dir)
|
||||||
|
|
||||||
console.print(f"[dim]Using {config['llm_provider'].upper()} - Shallow: {config['quick_think_llm']}, Deep: {config['deep_think_llm']}[/dim]")
|
console.print(f"[dim]Using {config['llm_provider'].upper()} - Shallow: {config['quick_think_llm']}, Deep: {config['deep_think_llm']}[/dim]")
|
||||||
|
|
||||||
# Initialize Discovery Graph (LLMs initialized internally like TradingAgentsGraph)
|
# Initialize Discovery Graph (LLMs initialized internally like TradingAgentsGraph)
|
||||||
|
|
@ -849,13 +861,10 @@ def run_discovery_analysis(selections):
|
||||||
"tickers": [],
|
"tickers": [],
|
||||||
"filtered_tickers": [],
|
"filtered_tickers": [],
|
||||||
"opportunities": [],
|
"opportunities": [],
|
||||||
|
"tool_logs": [],
|
||||||
"status": "start"
|
"status": "start"
|
||||||
})
|
})
|
||||||
|
|
||||||
# Create results directory
|
|
||||||
results_dir = Path(config["results_dir"]) / "discovery" / selections["analysis_date"]
|
|
||||||
results_dir.mkdir(parents=True, exist_ok=True)
|
|
||||||
|
|
||||||
# Save discovery results
|
# Save discovery results
|
||||||
final_ranking = result.get("final_ranking", "No ranking available")
|
final_ranking = result.get("final_ranking", "No ranking available")
|
||||||
final_ranking_text = extract_text_from_content(final_ranking)
|
final_ranking_text = extract_text_from_content(final_ranking)
|
||||||
|
|
@ -1046,7 +1055,7 @@ def run_trading_analysis(selections):
|
||||||
)
|
)
|
||||||
|
|
||||||
# Create result directory
|
# Create result directory
|
||||||
results_dir = Path(config["results_dir"]) / selections["ticker"] / selections["analysis_date"]
|
results_dir = Path(config["results_dir"]) / "trading" / selections["analysis_date"] / selections["ticker"]
|
||||||
results_dir.mkdir(parents=True, exist_ok=True)
|
results_dir.mkdir(parents=True, exist_ok=True)
|
||||||
report_dir = results_dir / "reports"
|
report_dir = results_dir / "reports"
|
||||||
report_dir.mkdir(parents=True, exist_ok=True)
|
report_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
@ -1394,6 +1403,189 @@ def run_trading_analysis(selections):
|
||||||
update_display(layout)
|
update_display(layout)
|
||||||
|
|
||||||
|
|
||||||
|
@app.command()
|
||||||
|
def build_memories(
|
||||||
|
start_date: str = typer.Option(
|
||||||
|
"2023-01-01",
|
||||||
|
"--start-date",
|
||||||
|
"-s",
|
||||||
|
help="Start date for scanning high movers (YYYY-MM-DD)"
|
||||||
|
),
|
||||||
|
end_date: str = typer.Option(
|
||||||
|
"2024-12-01",
|
||||||
|
"--end-date",
|
||||||
|
"-e",
|
||||||
|
help="End date for scanning high movers (YYYY-MM-DD)"
|
||||||
|
),
|
||||||
|
tickers: str = typer.Option(
|
||||||
|
None,
|
||||||
|
"--tickers",
|
||||||
|
"-t",
|
||||||
|
help="Comma-separated list of tickers to scan (overrides --use-alpha-vantage)"
|
||||||
|
),
|
||||||
|
use_alpha_vantage: bool = typer.Option(
|
||||||
|
False,
|
||||||
|
"--use-alpha-vantage",
|
||||||
|
"-a",
|
||||||
|
help="Use Alpha Vantage top gainers/losers to get ticker list"
|
||||||
|
),
|
||||||
|
av_limit: int = typer.Option(
|
||||||
|
20,
|
||||||
|
"--av-limit",
|
||||||
|
help="Number of tickers to get from each Alpha Vantage category (gainers/losers)"
|
||||||
|
),
|
||||||
|
min_move_pct: float = typer.Option(
|
||||||
|
15.0,
|
||||||
|
"--min-move",
|
||||||
|
"-m",
|
||||||
|
help="Minimum percentage move to qualify as high mover"
|
||||||
|
),
|
||||||
|
analysis_windows: str = typer.Option(
|
||||||
|
"7,30",
|
||||||
|
"--windows",
|
||||||
|
"-w",
|
||||||
|
help="Comma-separated list of days before move to analyze (e.g., '7,30')"
|
||||||
|
),
|
||||||
|
max_samples: int = typer.Option(
|
||||||
|
20,
|
||||||
|
"--max-samples",
|
||||||
|
help="Maximum number of high movers to analyze (reduces runtime)"
|
||||||
|
),
|
||||||
|
sample_strategy: str = typer.Option(
|
||||||
|
"diverse",
|
||||||
|
"--strategy",
|
||||||
|
help="Sampling strategy: diverse, largest, recent, or random"
|
||||||
|
),
|
||||||
|
):
|
||||||
|
"""
|
||||||
|
Build historical memories from high movers.
|
||||||
|
|
||||||
|
This command:
|
||||||
|
1. Scans for stocks with significant moves (>15% in 5 days by default)
|
||||||
|
2. Runs retrospective trading analyses at T-7 and T-30 days before the move
|
||||||
|
3. Stores situations, outcomes, and agent correctness in ChromaDB
|
||||||
|
4. Creates a memory bank for future trading decisions
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
# Use Alpha Vantage top movers
|
||||||
|
python cli/main.py build-memories --use-alpha-vantage
|
||||||
|
|
||||||
|
# Use specific tickers
|
||||||
|
python cli/main.py build-memories --tickers "AAPL,NVDA,TSLA"
|
||||||
|
|
||||||
|
# Customize date range and parameters
|
||||||
|
python cli/main.py build-memories --use-alpha-vantage --start-date 2023-01-01 --min-move 20.0
|
||||||
|
"""
|
||||||
|
console.print("\n[bold cyan]═══════════════════════════════════════════════════════[/bold cyan]")
|
||||||
|
console.print("[bold cyan] TRADINGAGENTS MEMORY BUILDER[/bold cyan]")
|
||||||
|
console.print("[bold cyan]═══════════════════════════════════════════════════════[/bold cyan]\n")
|
||||||
|
|
||||||
|
# Determine ticker source
|
||||||
|
if use_alpha_vantage and not tickers:
|
||||||
|
console.print("[bold yellow]📡 Using Alpha Vantage to fetch top movers...[/bold yellow]")
|
||||||
|
try:
|
||||||
|
from tradingagents.agents.utils.historical_memory_builder import HistoricalMemoryBuilder
|
||||||
|
builder_temp = HistoricalMemoryBuilder(DEFAULT_CONFIG)
|
||||||
|
ticker_list = builder_temp.get_tickers_from_alpha_vantage(limit=av_limit)
|
||||||
|
|
||||||
|
if not ticker_list:
|
||||||
|
console.print("\n[bold red]❌ No tickers found from Alpha Vantage. Please check your API key or try --tickers instead.[/bold red]\n")
|
||||||
|
raise typer.Exit(code=1)
|
||||||
|
except Exception as e:
|
||||||
|
console.print(f"\n[bold red]❌ Error fetching from Alpha Vantage: {e}[/bold red]")
|
||||||
|
console.print("[yellow]Please use --tickers to specify tickers manually.[/yellow]\n")
|
||||||
|
raise typer.Exit(code=1)
|
||||||
|
elif tickers:
|
||||||
|
ticker_list = [t.strip().upper() for t in tickers.split(",")]
|
||||||
|
console.print(f"[bold]Using {len(ticker_list)} specified tickers[/bold]")
|
||||||
|
else:
|
||||||
|
# Default tickers if neither option specified
|
||||||
|
default_tickers = "AAPL,MSFT,GOOGL,NVDA,TSLA,META,AMZN,AMD,NFLX,DIS"
|
||||||
|
ticker_list = [t.strip().upper() for t in default_tickers.split(",")]
|
||||||
|
console.print(f"[bold yellow]No ticker source specified. Using default list.[/bold yellow]")
|
||||||
|
console.print(f"[dim]Tip: Use --use-alpha-vantage for dynamic ticker discovery or --tickers for custom list[/dim]")
|
||||||
|
|
||||||
|
window_list = [int(w.strip()) for w in analysis_windows.split(",")]
|
||||||
|
|
||||||
|
console.print(f"\n[bold]Configuration:[/bold]")
|
||||||
|
console.print(f" Ticker Source: {'Alpha Vantage' if use_alpha_vantage else 'Manual/Default'}")
|
||||||
|
console.print(f" Date Range: {start_date} to {end_date}")
|
||||||
|
console.print(f" Tickers: {len(ticker_list)} stocks")
|
||||||
|
console.print(f" Min Move: {min_move_pct}%")
|
||||||
|
console.print(f" Max Samples: {max_samples}")
|
||||||
|
console.print(f" Sampling Strategy: {sample_strategy}")
|
||||||
|
console.print(f" Analysis Windows: {window_list} days before move")
|
||||||
|
console.print()
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Import here to avoid circular imports
|
||||||
|
from tradingagents.agents.utils.historical_memory_builder import HistoricalMemoryBuilder
|
||||||
|
|
||||||
|
# Create builder
|
||||||
|
builder = HistoricalMemoryBuilder(DEFAULT_CONFIG)
|
||||||
|
|
||||||
|
# Build memories
|
||||||
|
memories = builder.build_memories_from_high_movers(
|
||||||
|
tickers=ticker_list,
|
||||||
|
start_date=start_date,
|
||||||
|
end_date=end_date,
|
||||||
|
min_move_pct=min_move_pct,
|
||||||
|
analysis_windows=window_list,
|
||||||
|
max_samples=max_samples,
|
||||||
|
sample_strategy=sample_strategy
|
||||||
|
)
|
||||||
|
|
||||||
|
if not memories:
|
||||||
|
console.print("\n[bold yellow]⚠️ No memories created. Try adjusting parameters.[/bold yellow]\n")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Display summary table
|
||||||
|
console.print("\n[bold green]✅ Memory building complete![/bold green]\n")
|
||||||
|
|
||||||
|
table = Table(title="Memory Bank Summary", box=box.ROUNDED)
|
||||||
|
table.add_column("Agent Type", style="cyan", no_wrap=True)
|
||||||
|
table.add_column("Total Memories", justify="right", style="magenta")
|
||||||
|
table.add_column("Accuracy Rate", justify="right", style="green")
|
||||||
|
table.add_column("Avg Move %", justify="right", style="yellow")
|
||||||
|
|
||||||
|
for agent_type, memory in memories.items():
|
||||||
|
stats = memory.get_statistics()
|
||||||
|
table.add_row(
|
||||||
|
agent_type.upper(),
|
||||||
|
str(stats['total_memories']),
|
||||||
|
f"{stats['accuracy_rate']:.1f}%",
|
||||||
|
f"{stats['avg_move_pct']:.1f}%"
|
||||||
|
)
|
||||||
|
|
||||||
|
console.print(table)
|
||||||
|
console.print()
|
||||||
|
|
||||||
|
# Test memory retrieval
|
||||||
|
console.print("[bold]Testing Memory Retrieval:[/bold]")
|
||||||
|
test_situation = """
|
||||||
|
Strong earnings beat with positive sentiment and bullish technical indicators.
|
||||||
|
Volume spike detected. Analyst upgrades present. News sentiment is positive.
|
||||||
|
"""
|
||||||
|
|
||||||
|
console.print(f" Query: '{test_situation.strip()[:100]}...'\n")
|
||||||
|
|
||||||
|
for agent_type, memory in list(memories.items())[:2]: # Test first 2 agents
|
||||||
|
results = memory.get_memories(test_situation, n_matches=1)
|
||||||
|
if results:
|
||||||
|
console.print(f" [cyan]{agent_type.upper()}[/cyan]: Found {len(results)} relevant memory")
|
||||||
|
console.print(f" Similarity: {results[0]['similarity_score']:.2f}")
|
||||||
|
|
||||||
|
console.print("\n[bold green]🎉 Memory bank ready for use![/bold green]")
|
||||||
|
console.print("\n[dim]Note: These memories will be used automatically in future trading analyses when memory is enabled in config.[/dim]\n")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
console.print(f"\n[bold red]❌ Error building memories:[/bold red]")
|
||||||
|
console.print(f"[red]{str(e)}[/red]\n")
|
||||||
|
import traceback
|
||||||
|
console.print(f"[dim]{traceback.format_exc()}[/dim]")
|
||||||
|
raise typer.Exit(code=1)
|
||||||
|
|
||||||
|
|
||||||
@app.command()
|
@app.command()
|
||||||
def analyze():
|
def analyze():
|
||||||
run_analysis()
|
run_analysis()
|
||||||
|
|
|
||||||
|
|
@ -1,177 +0,0 @@
|
||||||
"""
|
|
||||||
Test Twitter integration in Discovery Graph.
|
|
||||||
|
|
||||||
This test verifies that the scanner_node correctly processes Twitter data
|
|
||||||
and adds candidates with source="twitter_sentiment".
|
|
||||||
"""
|
|
||||||
|
|
||||||
import pytest
|
|
||||||
from unittest.mock import patch, MagicMock
|
|
||||||
from tradingagents.graph.discovery_graph import DiscoveryGraph
|
|
||||||
from tradingagents.agents.utils.agent_states import DiscoveryState
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def mock_config():
|
|
||||||
"""Mock configuration for DiscoveryGraph."""
|
|
||||||
return {
|
|
||||||
"llm_provider": "openai",
|
|
||||||
"deep_think_llm": "gpt-4",
|
|
||||||
"quick_think_llm": "gpt-3.5-turbo",
|
|
||||||
"backend_url": "https://api.openai.com/v1",
|
|
||||||
"discovery": {
|
|
||||||
"reddit_trending_limit": 15,
|
|
||||||
"market_movers_limit": 10,
|
|
||||||
"max_candidates_to_analyze": 10,
|
|
||||||
"news_lookback_days": 7,
|
|
||||||
"final_recommendations": 3
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture
|
|
||||||
def discovery_graph(mock_config):
|
|
||||||
"""Create a DiscoveryGraph instance with mocked config."""
|
|
||||||
with patch('langchain_openai.ChatOpenAI'):
|
|
||||||
graph = DiscoveryGraph(config=mock_config)
|
|
||||||
return graph
|
|
||||||
|
|
||||||
|
|
||||||
def test_scanner_node_twitter_integration(discovery_graph):
|
|
||||||
"""Test that scanner_node processes Twitter data correctly."""
|
|
||||||
|
|
||||||
# Mock the execute_tool function
|
|
||||||
with patch('tradingagents.graph.discovery_graph.execute_tool') as mock_execute_tool:
|
|
||||||
# Mock Twitter response
|
|
||||||
fake_tweets = """
|
|
||||||
Tweet 1: $AAPL is looking strong! Great earnings report.
|
|
||||||
Tweet 2: Watching $TSLA closely, could be a good entry point.
|
|
||||||
Tweet 3: $NVDA continues to dominate AI chip market.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Mock LLM response for ticker extraction
|
|
||||||
mock_llm_response = MagicMock()
|
|
||||||
mock_llm_response.content = "AAPL, TSLA, NVDA"
|
|
||||||
|
|
||||||
# Setup mock returns
|
|
||||||
def execute_tool_side_effect(tool_name, **kwargs):
|
|
||||||
if tool_name == "get_tweets":
|
|
||||||
return fake_tweets
|
|
||||||
elif tool_name == "validate_ticker":
|
|
||||||
# All tickers are valid
|
|
||||||
return True
|
|
||||||
elif tool_name == "get_trending_tickers":
|
|
||||||
return "Reddit trending: GME, AMC"
|
|
||||||
elif tool_name == "get_market_movers":
|
|
||||||
return "Gainers: MSFT, Losers: META"
|
|
||||||
return ""
|
|
||||||
|
|
||||||
mock_execute_tool.side_effect = execute_tool_side_effect
|
|
||||||
|
|
||||||
# Mock the LLM
|
|
||||||
discovery_graph.quick_thinking_llm.invoke = MagicMock(return_value=mock_llm_response)
|
|
||||||
|
|
||||||
# Run scanner_node
|
|
||||||
initial_state = DiscoveryState()
|
|
||||||
result = discovery_graph.scanner_node(initial_state)
|
|
||||||
|
|
||||||
# Verify results
|
|
||||||
assert "candidate_metadata" in result
|
|
||||||
candidates = result["candidate_metadata"]
|
|
||||||
|
|
||||||
# Check that Twitter candidates were added
|
|
||||||
twitter_candidates = [c for c in candidates if c["source"] == "twitter_sentiment"]
|
|
||||||
assert len(twitter_candidates) > 0, "No Twitter candidates found"
|
|
||||||
|
|
||||||
# Verify Twitter tickers are present
|
|
||||||
twitter_tickers = [c["ticker"] for c in twitter_candidates]
|
|
||||||
assert "AAPL" in twitter_tickers or "TSLA" in twitter_tickers or "NVDA" in twitter_tickers
|
|
||||||
|
|
||||||
# Verify execute_tool was called with correct parameters
|
|
||||||
mock_execute_tool.assert_any_call("get_tweets", query="stocks to watch", count=20)
|
|
||||||
|
|
||||||
print(f"✅ Test passed! Found {len(twitter_candidates)} Twitter candidates: {twitter_tickers}")
|
|
||||||
|
|
||||||
|
|
||||||
def test_scanner_node_twitter_validation(discovery_graph):
|
|
||||||
"""Test that invalid tickers are filtered out."""
|
|
||||||
|
|
||||||
with patch('tradingagents.graph.discovery_graph.execute_tool') as mock_execute_tool:
|
|
||||||
# Mock Twitter response with invalid tickers
|
|
||||||
fake_tweets = "Check out $AAPL and $INVALID and $BTC"
|
|
||||||
|
|
||||||
# Mock LLM response
|
|
||||||
mock_llm_response = MagicMock()
|
|
||||||
mock_llm_response.content = "AAPL, INVALID, BTC"
|
|
||||||
|
|
||||||
# Setup mock returns - only AAPL is valid
|
|
||||||
def execute_tool_side_effect(tool_name, **kwargs):
|
|
||||||
if tool_name == "get_tweets":
|
|
||||||
return fake_tweets
|
|
||||||
elif tool_name == "validate_ticker":
|
|
||||||
symbol = kwargs.get("symbol", "")
|
|
||||||
return symbol == "AAPL" # Only AAPL is valid
|
|
||||||
elif tool_name == "get_trending_tickers":
|
|
||||||
return ""
|
|
||||||
elif tool_name == "get_market_movers":
|
|
||||||
return ""
|
|
||||||
return ""
|
|
||||||
|
|
||||||
mock_execute_tool.side_effect = execute_tool_side_effect
|
|
||||||
discovery_graph.quick_thinking_llm.invoke = MagicMock(return_value=mock_llm_response)
|
|
||||||
|
|
||||||
# Run scanner_node
|
|
||||||
initial_state = DiscoveryState()
|
|
||||||
result = discovery_graph.scanner_node(initial_state)
|
|
||||||
|
|
||||||
# Verify only valid tickers were added
|
|
||||||
candidates = result["candidate_metadata"]
|
|
||||||
twitter_candidates = [c for c in candidates if c["source"] == "twitter_sentiment"]
|
|
||||||
twitter_tickers = [c["ticker"] for c in twitter_candidates]
|
|
||||||
|
|
||||||
assert "AAPL" in twitter_tickers, "Valid ticker AAPL should be present"
|
|
||||||
assert "INVALID" not in twitter_tickers, "Invalid ticker should be filtered out"
|
|
||||||
assert "BTC" not in twitter_tickers, "Crypto ticker should be filtered out"
|
|
||||||
|
|
||||||
print(f"✅ Validation test passed! Only valid tickers: {twitter_tickers}")
|
|
||||||
|
|
||||||
|
|
||||||
def test_scanner_node_twitter_error_handling(discovery_graph):
|
|
||||||
"""Test that scanner_node handles Twitter API errors gracefully."""
|
|
||||||
|
|
||||||
with patch('tradingagents.graph.discovery_graph.execute_tool') as mock_execute_tool:
|
|
||||||
# Mock Twitter to raise an error
|
|
||||||
def execute_tool_side_effect(tool_name, **kwargs):
|
|
||||||
if tool_name == "get_tweets":
|
|
||||||
raise Exception("Twitter API rate limit exceeded")
|
|
||||||
elif tool_name == "get_trending_tickers":
|
|
||||||
return "GME, AMC"
|
|
||||||
elif tool_name == "get_market_movers":
|
|
||||||
return "Gainers: MSFT"
|
|
||||||
return ""
|
|
||||||
|
|
||||||
mock_execute_tool.side_effect = execute_tool_side_effect
|
|
||||||
|
|
||||||
# Mock LLM for Reddit
|
|
||||||
mock_llm_response = MagicMock()
|
|
||||||
mock_llm_response.content = "GME, AMC, MSFT"
|
|
||||||
discovery_graph.quick_thinking_llm.invoke = MagicMock(return_value=mock_llm_response)
|
|
||||||
|
|
||||||
# Run scanner_node - should not crash
|
|
||||||
initial_state = DiscoveryState()
|
|
||||||
result = discovery_graph.scanner_node(initial_state)
|
|
||||||
|
|
||||||
# Should still have candidates from other sources
|
|
||||||
assert "candidate_metadata" in result
|
|
||||||
candidates = result["candidate_metadata"]
|
|
||||||
assert len(candidates) > 0, "Should have candidates from other sources"
|
|
||||||
|
|
||||||
# Should not have Twitter candidates
|
|
||||||
twitter_candidates = [c for c in candidates if c["source"] == "twitter_sentiment"]
|
|
||||||
assert len(twitter_candidates) == 0, "Should have no Twitter candidates due to error"
|
|
||||||
|
|
||||||
print("✅ Error handling test passed! Graph continues despite Twitter error")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
pytest.main([__file__, "-v"])
|
|
||||||
4837
tools_testing.ipynb
4837
tools_testing.ipynb
File diff suppressed because it is too large
Load Diff
|
|
@ -13,11 +13,64 @@ def create_fundamentals_analyst(llm):
|
||||||
|
|
||||||
tools = get_agent_tools("fundamentals")
|
tools = get_agent_tools("fundamentals")
|
||||||
|
|
||||||
system_message = (
|
system_message = """You are a Fundamental Analyst assessing {ticker}'s financial health with SHORT-TERM trading relevance.
|
||||||
"You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
|
|
||||||
+ " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."
|
**Analysis Date:** {current_date}
|
||||||
+ " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements.",
|
|
||||||
)
|
## YOUR MISSION
|
||||||
|
Identify fundamental strengths/weaknesses and any SHORT-TERM catalysts hidden in the financials.
|
||||||
|
|
||||||
|
## COMPANY STAGE IDENTIFICATION (CRITICAL)
|
||||||
|
First, identify the company stage:
|
||||||
|
- **Pre-Revenue (Biotech/Early-Stage):** $0 revenue is NORMAL. Focus on cash runway, pipeline, and catalysts.
|
||||||
|
- **Growth Stage:** High revenue growth, often unprofitable. Focus on revenue trajectory and path to profitability.
|
||||||
|
- **Mature:** Stable revenue, focus on margins, dividends, and valuation.
|
||||||
|
|
||||||
|
Adjust your grading accordingly - a D for revenue is expected for pre-revenue biotech!
|
||||||
|
|
||||||
|
## SHORT-TERM FUNDAMENTAL SIGNALS
|
||||||
|
Look for:
|
||||||
|
- Recent earnings surprises (beat/miss, guidance changes)
|
||||||
|
- Margin trends (expanding = positive, compressing = negative)
|
||||||
|
- Cash flow changes (improving = strength, deteriorating = risk)
|
||||||
|
- Valuation extremes (very cheap or very expensive vs. sector)
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY)
|
||||||
|
|
||||||
|
### Financial Scorecard
|
||||||
|
| Dimension | Grade | Key Finding | Short-Term Impact |
|
||||||
|
|-----------|-------|-------------|-------------------|
|
||||||
|
| Recent Results | A-F | Revenue +25% YoY | Momentum positive |
|
||||||
|
| Margins | A-F | GM down 200bp | Pressure |
|
||||||
|
| Liquidity | A-F | $2B cash | Strong |
|
||||||
|
| Valuation | A-F | P/E 15 vs sector 25 | Undervalued |
|
||||||
|
|
||||||
|
### Recent Performance
|
||||||
|
**Latest Quarter:**
|
||||||
|
- Revenue: $[X]B ([Y]% YoY)
|
||||||
|
- EPS: $[A] (beat/miss by $[B])
|
||||||
|
- Margins: [C]% (trend: up/down)
|
||||||
|
- Guidance: [Raised/Lowered/Same]
|
||||||
|
|
||||||
|
### Balance Sheet Health
|
||||||
|
- Cash: $[X]B | Debt: $[Y]B
|
||||||
|
- Free Cash Flow: $[Z]B
|
||||||
|
- **Assessment:** [Strong/Adequate/Weak]
|
||||||
|
|
||||||
|
### Valuation
|
||||||
|
- P/E: [X] (Sector: [Y])
|
||||||
|
- **Value:** [Cheap/Fair/Expensive]
|
||||||
|
|
||||||
|
### Short-Term Takeaway
|
||||||
|
[1-2 sentences: Do fundamentals support short-term trade or create risk?]
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ Use specific numbers (not "strong")
|
||||||
|
- ✅ Compare to sector/history
|
||||||
|
- ✅ Note short-term relevance
|
||||||
|
- ❌ Avoid vague generalities
|
||||||
|
|
||||||
|
Date: {current_date} | Ticker: {ticker}"""
|
||||||
|
|
||||||
prompt = ChatPromptTemplate.from_messages(
|
prompt = ChatPromptTemplate.from_messages(
|
||||||
[
|
[
|
||||||
|
|
|
||||||
|
|
@ -14,34 +14,93 @@ def create_market_analyst(llm):
|
||||||
|
|
||||||
tools = get_agent_tools("market")
|
tools = get_agent_tools("market")
|
||||||
|
|
||||||
system_message = (
|
system_message = """You are a Market Technical Analyst specializing in identifying actionable short-term trading signals through technical indicators.
|
||||||
"""You are a trading assistant tasked with analyzing financial markets. Your role is to select the **most relevant indicators** for a given market condition or trading strategy from the following list. The goal is to choose up to **8 indicators** that provide complementary insights without redundancy. Categories and each category's indicators are:
|
|
||||||
|
|
||||||
Moving Averages:
|
## YOUR MISSION
|
||||||
- close_50_sma: 50 SMA: A medium-term trend indicator. Usage: Identify trend direction and serve as dynamic support/resistance. Tips: It lags price; combine with faster indicators for timely signals.
|
Analyze {ticker}'s technical setup and identify the 3-5 most relevant trading signals for short-term opportunities (days to weeks, not months).
|
||||||
- close_200_sma: 200 SMA: A long-term trend benchmark. Usage: Confirm overall market trend and identify golden/death cross setups. Tips: It reacts slowly; best for strategic trend confirmation rather than frequent trading entries.
|
|
||||||
- close_10_ema: 10 EMA: A responsive short-term average. Usage: Capture quick shifts in momentum and potential entry points. Tips: Prone to noise in choppy markets; use alongside longer averages for filtering false signals.
|
|
||||||
|
|
||||||
MACD Related:
|
## CRITICAL: DATE AWARENESS
|
||||||
- macd: MACD: Computes momentum via differences of EMAs. Usage: Look for crossovers and divergence as signals of trend changes. Tips: Confirm with other indicators in low-volatility or sideways markets.
|
**Current Analysis Date:** {current_date}
|
||||||
- macds: MACD Signal: An EMA smoothing of the MACD line. Usage: Use crossovers with the MACD line to trigger trades. Tips: Should be part of a broader strategy to avoid false positives.
|
**Instructions:**
|
||||||
- macdh: MACD Histogram: Shows the gap between the MACD line and its signal. Usage: Visualize momentum strength and spot divergence early. Tips: Can be volatile; complement with additional filters in fast-moving markets.
|
- Treat {current_date} as "TODAY" for all calculations.
|
||||||
|
- "Last 6 months" means 6 months ending on {current_date}.
|
||||||
|
- "Last week" means the 7 days ending on {current_date}.
|
||||||
|
- Do NOT use 2024 or 2025 unless {current_date} is actually in that year.
|
||||||
|
- When calling tools, ensure date parameters are relative to {current_date}.
|
||||||
|
|
||||||
Momentum Indicators:
|
## INDICATOR SELECTION FRAMEWORK
|
||||||
- rsi: RSI: Measures momentum to flag overbought/oversold conditions. Usage: Apply 70/30 thresholds and watch for divergence to signal reversals. Tips: In strong trends, RSI may remain extreme; always cross-check with trend analysis.
|
|
||||||
|
|
||||||
Volatility Indicators:
|
**For Trending Markets (Strong directional movement):**
|
||||||
- boll: Bollinger Middle: A 20 SMA serving as the basis for Bollinger Bands. Usage: Acts as a dynamic benchmark for price movement. Tips: Combine with the upper and lower bands to effectively spot breakouts or reversals.
|
- Trend: close_50_sma, close_10_ema
|
||||||
- boll_ub: Bollinger Upper Band: Typically 2 standard deviations above the middle line. Usage: Signals potential overbought conditions and breakout zones. Tips: Confirm signals with other tools; prices may ride the band in strong trends.
|
- Momentum: macd, macdh, rsi
|
||||||
- boll_lb: Bollinger Lower Band: Typically 2 standard deviations below the middle line. Usage: Indicates potential oversold conditions. Tips: Use additional analysis to avoid false reversal signals.
|
- Volatility: atr
|
||||||
- atr: ATR: Averages true range to measure volatility. Usage: Set stop-loss levels and adjust position sizes based on current market volatility. Tips: It's a reactive measure, so use it as part of a broader risk management strategy.
|
|
||||||
|
|
||||||
Volume-Based Indicators:
|
**For Range-Bound Markets (Sideways/choppy):**
|
||||||
- vwma: VWMA: A moving average weighted by volume. Usage: Confirm trends by integrating price action with volume data. Tips: Watch for skewed results from volume spikes; use in combination with other volume analyses.
|
- Oscillators: rsi, boll_ub, boll_lb
|
||||||
|
- Volume: vwma
|
||||||
|
- Support/Resistance: boll (middle band)
|
||||||
|
|
||||||
- Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then call get_indicators SEPARATELY for EACH indicator you want to analyze (e.g., call get_indicators once with indicator="rsi", then call it again with indicator="macd", etc.). Do NOT pass multiple indicators in a single call. Write a very detailed and nuanced report of the trends you observe. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."""
|
**For Breakout Setups:**
|
||||||
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
|
- Volatility squeeze: boll_ub, boll_lb, atr
|
||||||
)
|
- Volume confirmation: vwma
|
||||||
|
- Trend confirmation: macd, close_10_ema
|
||||||
|
|
||||||
|
## ANALYSIS WORKFLOW
|
||||||
|
|
||||||
|
1. **Call get_stock_data first** to understand recent price action (request only last 6 months)
|
||||||
|
2. **Identify current market regime** (trending up/down/sideways/breakout setup)
|
||||||
|
3. **Select 4-6 complementary indicators** based on regime
|
||||||
|
4. **Call get_indicators SEPARATELY for EACH** (e.g., first call with indicator="rsi", then indicator="macd")
|
||||||
|
5. **Synthesize findings** into specific trading signals
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY)
|
||||||
|
|
||||||
|
### Market Regime
|
||||||
|
- **Current Trend:** [Uptrend/Downtrend/Sideways/Transition]
|
||||||
|
- **Volatility:** [Low/Normal/High/Expanding]
|
||||||
|
- **Recent Price Action:** [Specific % move over last 5 days]
|
||||||
|
- **Volume Trend:** [Increasing/Decreasing/Stable]
|
||||||
|
|
||||||
|
### Key Technical Signals (3-5 signals)
|
||||||
|
For each signal:
|
||||||
|
- **Signal:** [Bullish/Bearish/Neutral]
|
||||||
|
- **Strength:** [Strong/Moderate/Weak]
|
||||||
|
- **Indicators Supporting:** [Which specific indicators confirm]
|
||||||
|
- **Specific Evidence:** [Exact values: "RSI at 72.5, above 70 threshold"]
|
||||||
|
- **Timeframe:** [How long signal typically lasts]
|
||||||
|
|
||||||
|
### Trading Implications
|
||||||
|
- **Primary Setup:** [What short-term traders should watch for]
|
||||||
|
- **Entry Zone:** [Specific price range for entry]
|
||||||
|
- **Support Levels:** [Key price levels below current price]
|
||||||
|
- **Resistance Levels:** [Key price levels above current price]
|
||||||
|
- **Stop Loss Suggestion:** [Price level that invalidates setup]
|
||||||
|
- **Time Horizon:** [Expected duration: 1-3 days, 1-2 weeks, etc.]
|
||||||
|
|
||||||
|
### Summary Table
|
||||||
|
| Indicator | Current Value | Signal | Interpretation | Timeframe |
|
||||||
|
|-----------|---------------|--------|----------------|-----------|
|
||||||
|
| RSI | 72.5 | Overbought | Potential pullback | 2-5 days |
|
||||||
|
| MACD | +2.1 | Bullish | Momentum strong | 1-2 weeks |
|
||||||
|
| 50 SMA | $145 | Support | Trend intact if held | Ongoing |
|
||||||
|
|
||||||
|
## CRITICAL RULES
|
||||||
|
- ❌ DO NOT pass multiple indicators in one call: `indicator="rsi,macd"`
|
||||||
|
- ✅ DO call get_indicators separately: `indicator="rsi"` then `indicator="macd"`
|
||||||
|
- ❌ DO NOT say "trends are mixed" without specific examples
|
||||||
|
- ✅ DO provide concrete signals with specific price levels and timeframes
|
||||||
|
- ❌ DO NOT select redundant indicators (e.g., both close_50_sma and close_200_sma)
|
||||||
|
- ✅ DO focus on short-term actionable setups (days to 2 weeks max)
|
||||||
|
- ✅ DO include specific entry/exit guidance for traders
|
||||||
|
|
||||||
|
Available Indicators:
|
||||||
|
**Moving Averages:** close_50_sma, close_200_sma, close_10_ema
|
||||||
|
**MACD:** macd, macds, macdh
|
||||||
|
**Momentum:** rsi
|
||||||
|
**Volatility:** boll, boll_ub, boll_lb, atr
|
||||||
|
**Volume:** vwma
|
||||||
|
|
||||||
|
Current date: {current_date} | Ticker: {ticker}"""
|
||||||
|
|
||||||
prompt = ChatPromptTemplate.from_messages(
|
prompt = ChatPromptTemplate.from_messages(
|
||||||
[
|
[
|
||||||
|
|
|
||||||
|
|
@ -13,10 +13,62 @@ def create_news_analyst(llm):
|
||||||
|
|
||||||
tools = get_agent_tools("news")
|
tools = get_agent_tools("news")
|
||||||
|
|
||||||
system_message = (
|
system_message = """You are a News Intelligence Analyst finding SHORT-TERM catalysts for {ticker}.
|
||||||
"You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
|
|
||||||
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read."""
|
**Analysis Date:** {current_date}
|
||||||
)
|
|
||||||
|
## YOUR MISSION
|
||||||
|
Identify material catalysts and risks that could impact {ticker} over the NEXT 1-2 WEEKS.
|
||||||
|
|
||||||
|
## SEARCH STRATEGY
|
||||||
|
|
||||||
|
**Company News (use get_news):**
|
||||||
|
Focus on: Earnings, product launches, management changes, partnerships, regulatory actions, legal issues
|
||||||
|
|
||||||
|
**Macro/Sector News (use get_global_news):**
|
||||||
|
Focus on: Fed policy, sector rotation, geopolitical events, competitor news
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY)
|
||||||
|
|
||||||
|
### Executive Summary
|
||||||
|
[1-2 sentences: Most critical catalyst + biggest risk for next 2 weeks]
|
||||||
|
|
||||||
|
### Material Catalysts (Bullish - max 4)
|
||||||
|
For each:
|
||||||
|
- **Event:** [What happened]
|
||||||
|
- **Date:** [When]
|
||||||
|
- **Impact:** [Stock reaction so far]
|
||||||
|
- **Forward Look:** [Why this matters for next 1-2 weeks]
|
||||||
|
- **Priced In?:** [Fully/Partially/Not Yet]
|
||||||
|
- **Confidence:** [High/Med/Low]
|
||||||
|
|
||||||
|
### Key Risks (Bearish - max 4)
|
||||||
|
For each:
|
||||||
|
- **Risk:** [Description]
|
||||||
|
- **Probability:** [High/Med/Low in next 2 weeks]
|
||||||
|
- **Impact:** [Magnitude if realized]
|
||||||
|
- **Timeline:** [When could it hit]
|
||||||
|
|
||||||
|
### Macro Context (Connect to {ticker})
|
||||||
|
- **Market Sentiment:** [Risk-on/off] → How does this affect {ticker} specifically?
|
||||||
|
- **Sector Trends:** [Capital flows] → Is {ticker}'s sector receiving or losing capital?
|
||||||
|
- **Upcoming Events:** [Next 2 weeks] → Which events could move {ticker}?
|
||||||
|
|
||||||
|
### News Timeline Table
|
||||||
|
| Date | Event | Source | Impact | Status | Implication |
|
||||||
|
|------|-------|--------|--------|--------|-------------|
|
||||||
|
| Dec 3 | Earnings | Co | +5% | Done | May extend |
|
||||||
|
| Dec 10 | Launch | Co | TBD | Pending | Watch |
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ Focus on events with SPECIFIC DATES
|
||||||
|
- ✅ Assess if news is priced in or fresh
|
||||||
|
- ✅ Include short-term timeline (next 2 weeks)
|
||||||
|
- ✅ Distinguish facts from speculation
|
||||||
|
- ❌ Avoid vague "positive sentiment"
|
||||||
|
- ❌ No stale news (>1 week old unless ongoing)
|
||||||
|
|
||||||
|
Date: {current_date} | Ticker: {ticker}"""
|
||||||
|
|
||||||
prompt = ChatPromptTemplate.from_messages(
|
prompt = ChatPromptTemplate.from_messages(
|
||||||
[
|
[
|
||||||
|
|
|
||||||
|
|
@ -13,10 +13,52 @@ def create_social_media_analyst(llm):
|
||||||
|
|
||||||
tools = get_agent_tools("social")
|
tools = get_agent_tools("social")
|
||||||
|
|
||||||
system_message = (
|
system_message = """You are a Social Sentiment Analyst tracking {ticker}'s retail momentum for SHORT-TERM signals.
|
||||||
"You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions."
|
|
||||||
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""",
|
**Analysis Date:** {current_date}
|
||||||
)
|
|
||||||
|
## YOUR MISSION
|
||||||
|
QUANTIFY social sentiment and identify sentiment SHIFTS that could drive short-term price action.
|
||||||
|
|
||||||
|
## SENTIMENT TRACKING
|
||||||
|
**Measure:**
|
||||||
|
- Volume: Mention count (trend: up/down?)
|
||||||
|
- Sentiment: Bullish/Neutral/Bearish %
|
||||||
|
- Change: Improving or deteriorating?
|
||||||
|
- Quality: Data-backed or speculation?
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY)
|
||||||
|
|
||||||
|
### Sentiment Summary
|
||||||
|
- **Current:** [Strongly Bullish/Bullish/Neutral/Bearish/Strongly Bearish]
|
||||||
|
- **Trend:** [Improving/Stable/Deteriorating]
|
||||||
|
- **Volume:** [Surging/Stable/Declining]
|
||||||
|
- **Quality:** [High/Med/Low] (data vs hype)
|
||||||
|
|
||||||
|
### Sentiment Timeline
|
||||||
|
| Date | Sentiment | Volume | Driver | Change |
|
||||||
|
|------|-----------|--------|--------|--------|
|
||||||
|
| Dec 3 | Bullish 70% | 1.2K posts | Earnings | +20% |
|
||||||
|
| Dec 4 | Mixed 50% | 800 posts | Selloff | -20% |
|
||||||
|
|
||||||
|
### Key Themes (Top 3-4)
|
||||||
|
- **Theme:** [E.g., "Earnings beat"]
|
||||||
|
- **Prevalence:** [40% of mentions]
|
||||||
|
- **Quality:** [Data-backed/Speculation]
|
||||||
|
- **Impact:** [Short-term implication]
|
||||||
|
|
||||||
|
### Trading Implications
|
||||||
|
- **Retail Flow:** [Buying/Selling/Mixed]
|
||||||
|
- **Momentum:** [Building/Fading]
|
||||||
|
- **Contrarian Signal:** [Extreme = reversal?]
|
||||||
|
|
||||||
|
## QUANTIFICATION RULES
|
||||||
|
- ✅ Use %: "70% bullish, 20% neutral"
|
||||||
|
- ✅ Show changes: "Improved from 45% to 70%"
|
||||||
|
- ✅ Count volume: "Mentions up 300%"
|
||||||
|
- ❌ Don't use vague "positive sentiment"
|
||||||
|
|
||||||
|
Date: {current_date} | Ticker: {ticker}"""
|
||||||
|
|
||||||
prompt = ChatPromptTemplate.from_messages(
|
prompt = ChatPromptTemplate.from_messages(
|
||||||
[
|
[
|
||||||
|
|
|
||||||
|
|
@ -19,27 +19,131 @@ def create_research_manager(llm, memory):
|
||||||
else:
|
else:
|
||||||
past_memories = []
|
past_memories = []
|
||||||
|
|
||||||
past_memory_str = ""
|
|
||||||
|
if past_memories:
|
||||||
|
past_memory_str = "### Past Lessons Applied\\n**Reflections from Similar Situations:**\\n"
|
||||||
for i, rec in enumerate(past_memories, 1):
|
for i, rec in enumerate(past_memories, 1):
|
||||||
past_memory_str += rec["recommendation"] + "\n\n"
|
past_memory_str += rec["recommendation"] + "\\n\\n"
|
||||||
|
past_memory_str += "\\n\\n**How I'm Using These Lessons:**\\n"
|
||||||
|
past_memory_str += "- [Specific adjustment based on past mistake/success]\\n"
|
||||||
|
past_memory_str += "- [Impact on current conviction level]\\n"
|
||||||
|
else:
|
||||||
|
past_memory_str = "" # Don't include placeholder when no memories
|
||||||
|
|
||||||
prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented.
|
prompt = f"""You are the Portfolio Manager judging the Bull vs Bear debate. Make a definitive SHORT-TERM decision: BUY, SELL, or HOLD (rare).
|
||||||
|
|
||||||
Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments.
|
## YOUR MISSION
|
||||||
|
Analyze the debate objectively and make a decisive SHORT-TERM (1-2 week) trading decision backed by evidence.
|
||||||
|
|
||||||
Additionally, develop a detailed investment plan for the trader. This should include:
|
## DECISION FRAMEWORK
|
||||||
|
|
||||||
Your Recommendation: A decisive stance supported by the most convincing arguments.
|
### Score Each Side (0-10)
|
||||||
Rationale: An explanation of why these arguments lead to your conclusion.
|
Evaluate both Bull and Bear arguments:
|
||||||
Strategic Actions: Concrete steps for implementing the recommendation.
|
|
||||||
Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting.
|
|
||||||
|
|
||||||
Here are your past reflections on mistakes:
|
**Bull Score:**
|
||||||
\"{past_memory_str}\"
|
- Evidence Strength: [0-10] (hard data vs speculation)
|
||||||
|
- Logic: [0-10] (sound reasoning?)
|
||||||
|
- Short-Term Relevance: [0-10] (matters in 1-2 weeks?)
|
||||||
|
- **Total Bull: [X]/30**
|
||||||
|
|
||||||
Here is the debate:
|
**Bear Score:**
|
||||||
Debate History:
|
- Evidence Strength: [0-10] (hard data vs speculation)
|
||||||
{history}"""
|
- Logic: [0-10] (sound reasoning?)
|
||||||
|
- Short-Term Relevance: [0-10] (matters in 1-2 weeks?)
|
||||||
|
- **Total Bear: [X]/30**
|
||||||
|
|
||||||
|
### Decision Matrix
|
||||||
|
|
||||||
|
**BUY if:**
|
||||||
|
- Bull score > Bear score by 3+ points
|
||||||
|
- Clear short-term catalyst (next 1-2 weeks)
|
||||||
|
- Risk/reward ratio >2:1
|
||||||
|
- Technical setup supports entry
|
||||||
|
- Past lessons don't show pattern failure
|
||||||
|
|
||||||
|
**SELL if:**
|
||||||
|
- Bear score > Bull score by 3+ points
|
||||||
|
- Significant near-term risks
|
||||||
|
- Catalyst already priced in
|
||||||
|
- Risk/reward ratio <1:1
|
||||||
|
- Technical breakdown evident
|
||||||
|
|
||||||
|
**HOLD if (ALL must apply - should be RARE):**
|
||||||
|
- Scores within 2 points (truly balanced)
|
||||||
|
- Major catalyst imminent (1-3 days away)
|
||||||
|
- Waiting provides significant option value
|
||||||
|
- Current position is optimal
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY)
|
||||||
|
|
||||||
|
### Debate Scorecard
|
||||||
|
| Criterion | Bull | Bear | Winner |
|
||||||
|
|-----------|------|------|--------|
|
||||||
|
| Evidence | [X]/10 | [Y]/10 | [Bull/Bear] |
|
||||||
|
| Logic | [X]/10 | [Y]/10 | [Bull/Bear] |
|
||||||
|
| Short-Term | [X]/10 | [Y]/10 | [Bull/Bear] |
|
||||||
|
| **TOTAL** | **[X]** | **[Y]** | **[Winner] +[Diff]** |
|
||||||
|
|
||||||
|
### Decision Summary
|
||||||
|
**DECISION: BUY / SELL / HOLD**
|
||||||
|
**Conviction: High / Medium / Low**
|
||||||
|
**Time Horizon: [X] days (typically 5-14 days)**
|
||||||
|
**Recommended Position Size: [X]% of capital**
|
||||||
|
|
||||||
|
### Winning Arguments
|
||||||
|
- **Bull's Strongest:** [Quote best Bull point if buying]
|
||||||
|
- **Bear's Strongest:** [Quote best Bear point even if buying - acknowledge risk]
|
||||||
|
- **Decisive Factor:** [What tipped the scale]
|
||||||
|
|
||||||
|
### Investment Plan for Trader
|
||||||
|
**Execution Strategy:**
|
||||||
|
- Entry: [When and at what price]
|
||||||
|
- Stop Loss: [Specific level and % risk]
|
||||||
|
- Target: [Specific level and % gain]
|
||||||
|
- Risk/Reward: [Ratio]
|
||||||
|
- Time Limit: [Max holding period]
|
||||||
|
|
||||||
|
**If BUY:**
|
||||||
|
- Why Bull won the debate
|
||||||
|
- Key catalyst timeline
|
||||||
|
- Exit strategy (both profit and loss)
|
||||||
|
|
||||||
|
**If SELL:**
|
||||||
|
- Why Bear won the debate
|
||||||
|
- Key risk timeline
|
||||||
|
- When to reassess
|
||||||
|
|
||||||
|
**If HOLD (rare):**
|
||||||
|
- Why waiting is optimal
|
||||||
|
- What event we're waiting for (date)
|
||||||
|
- Decision trigger (when to reassess)
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ Be decisive (avoid fence-sitting)
|
||||||
|
- ✅ Score objectively with numbers
|
||||||
|
- ✅ Quote specific arguments from debate
|
||||||
|
- ✅ Focus on 1-2 week horizon
|
||||||
|
- ✅ Learn from past mistakes
|
||||||
|
- ❌ Don't default to HOLD to avoid deciding
|
||||||
|
- ❌ Don't ignore strong opposing arguments
|
||||||
|
- ❌ Don't make long-term arguments
|
||||||
|
""" + (f"""
|
||||||
|
## PAST LESSONS
|
||||||
|
Here are reflections on past mistakes - apply these lessons:
|
||||||
|
{past_memory_str}
|
||||||
|
|
||||||
|
**Learning Check:** How are you adjusting based on these past situations?
|
||||||
|
""" if past_memory_str else "") + f"""
|
||||||
|
---
|
||||||
|
|
||||||
|
**DEBATE TO JUDGE:**
|
||||||
|
{history}
|
||||||
|
|
||||||
|
**MARKET DATA:**
|
||||||
|
Technical: {market_research_report}
|
||||||
|
Sentiment: {sentiment_report}
|
||||||
|
News: {news_report}
|
||||||
|
Fundamentals: {fundamentals_report}"""
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
new_investment_debate_state = {
|
new_investment_debate_state = {
|
||||||
|
|
|
||||||
|
|
@ -22,30 +22,146 @@ def create_risk_manager(llm, memory):
|
||||||
else:
|
else:
|
||||||
past_memories = []
|
past_memories = []
|
||||||
|
|
||||||
past_memory_str = ""
|
|
||||||
|
if past_memories:
|
||||||
|
past_memory_str = "### Past Lessons Applied\\n**Reflections from Similar Situations:**\\n"
|
||||||
for i, rec in enumerate(past_memories, 1):
|
for i, rec in enumerate(past_memories, 1):
|
||||||
past_memory_str += rec["recommendation"] + "\n\n"
|
past_memory_str += rec["recommendation"] + "\\n\\n"
|
||||||
|
past_memory_str += "\\n\\n**How I'm Using These Lessons:**\\n"
|
||||||
|
past_memory_str += "- [Specific adjustment based on past mistake/success]\\n"
|
||||||
|
past_memory_str += "- [Impact on current conviction level]\\n"
|
||||||
|
else:
|
||||||
|
past_memory_str = "" # Don't include placeholder when no memories
|
||||||
|
|
||||||
prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Risky, Neutral, and Safe/Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness.
|
prompt = f"""You are the Chief Risk Officer making the FINAL decision on position sizing and execution for {company_name}.
|
||||||
|
|
||||||
Guidelines for Decision-Making:
|
## YOUR MISSION
|
||||||
1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context.
|
Evaluate the 3-way risk debate (Risky/Neutral/Conservative) and finalize the SHORT-TERM trade plan with optimal position sizing.
|
||||||
2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate.
|
|
||||||
3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights.
|
|
||||||
4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money.
|
|
||||||
|
|
||||||
Deliverables:
|
## DECISION FRAMEWORK
|
||||||
- A clear and actionable recommendation: Buy, Sell, or Hold.
|
|
||||||
- Detailed reasoning anchored in the debate and past reflections.
|
|
||||||
|
|
||||||
|
### Score Each Perspective (0-10)
|
||||||
|
Rate how well each analyst's arguments apply to THIS specific situation:
|
||||||
|
|
||||||
|
**Risky Analyst Score:**
|
||||||
|
- Opportunity Assessment: [0-10] (how big is the opportunity?)
|
||||||
|
- Risk/Reward Math: [0-10] (is aggressive sizing justified?)
|
||||||
|
- Short-Term Conviction: [0-10] (high probability in 1-2 weeks?)
|
||||||
|
- **Total Risky: [X]/30**
|
||||||
|
|
||||||
|
**Neutral Analyst Score:**
|
||||||
|
- Balance: [0-10] (acknowledges both sides fairly?)
|
||||||
|
- Pragmatism: [0-10] (is moderate sizing wise?)
|
||||||
|
- Risk Mitigation: [0-10] (does hedging make sense?)
|
||||||
|
- **Total Neutral: [X]/30**
|
||||||
|
|
||||||
|
**Conservative Analyst Score:**
|
||||||
|
- Risk Identification: [0-10] (are the risks real?)
|
||||||
|
- Downside Protection: [0-10] (is caution warranted?)
|
||||||
|
- Opportunity Cost: [0-10] (is this the best use of capital?)
|
||||||
|
- **Total Conservative: [X]/30**
|
||||||
|
|
||||||
|
### Position Sizing Matrix
|
||||||
|
|
||||||
|
**Large Position (8-12% of capital):**
|
||||||
|
- High conviction (Research Manager scored Bull 25+ or Bear 25+)
|
||||||
|
- Clear short-term catalyst (1-5 days away)
|
||||||
|
- Risk/reward >3:1
|
||||||
|
- Risky score >24/30 AND Conservative score <18/30
|
||||||
|
- Past lessons support aggressive sizing
|
||||||
|
|
||||||
|
**Medium Position (4-7% of capital):**
|
||||||
|
- Medium conviction
|
||||||
|
- Catalyst in 5-14 days
|
||||||
|
- Risk/reward 2:1 to 3:1
|
||||||
|
- Neutral score highest OR scores balanced
|
||||||
|
- Standard risk management sufficient
|
||||||
|
|
||||||
|
**Small Position (1-3% of capital):**
|
||||||
|
- Lower conviction but interesting setup
|
||||||
|
- Uncertain timing
|
||||||
|
- Risk/reward 1.5:1 to 2:1
|
||||||
|
- Conservative score >24/30 OR high uncertainty
|
||||||
|
- Exploratory position
|
||||||
|
|
||||||
|
**NO POSITION (0%):**
|
||||||
|
- Conservative score >25/30 AND Risky score <15/30
|
||||||
|
- Risk/reward <1.5:1
|
||||||
|
- No clear catalyst
|
||||||
|
- Past lessons show pattern failure
|
||||||
|
- Better opportunities available
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY)
|
||||||
|
|
||||||
|
### Risk Assessment Scorecard
|
||||||
|
| Perspective | Opportunity | Risk Mgmt | Conviction | Total | Winner |
|
||||||
|
|-------------|-------------|-----------|------------|-------|--------|
|
||||||
|
| Risky | [X]/10 | [Y]/10 | [Z]/10 | **[A]/30** | - |
|
||||||
|
| Neutral | [X]/10 | [Y]/10 | [Z]/10 | **[B]/30** | - |
|
||||||
|
| Conservative | [X]/10 | [Y]/10 | [Z]/10 | **[C]/30** | **✓** |
|
||||||
|
|
||||||
|
### Final Decision
|
||||||
|
**DECISION: BUY / SELL / HOLD**
|
||||||
|
**Position Size: [X]% of capital**
|
||||||
|
**Risk Level: High / Medium / Low**
|
||||||
|
**Conviction: High / Medium / Low**
|
||||||
|
|
||||||
|
### Execution Plan (Refined from Trader's Original Plan)
|
||||||
|
|
||||||
|
**Original Trader Recommendation:**
|
||||||
|
{trader_plan}
|
||||||
|
|
||||||
|
**Risk-Adjusted Execution:**
|
||||||
|
- Position Size: [X]% (vs Trader's [Y]%)
|
||||||
|
- Entry: [Price/Market] (timing adjustment if needed)
|
||||||
|
- Stop Loss: $[X] ([Y]% max loss = $[Z] on portfolio)
|
||||||
|
- Target: $[A] ([B]% gain = $[C] on portfolio)
|
||||||
|
- Time Limit: [X] days max hold
|
||||||
|
- Risk/Reward: [Ratio]
|
||||||
|
|
||||||
|
**Adjustments Made:**
|
||||||
|
- [What changed from trader's plan and why]
|
||||||
|
- [Risk controls added]
|
||||||
|
- [Position sizing rationale]
|
||||||
|
|
||||||
|
### Winning Arguments
|
||||||
|
- **Most Compelling:** "[Quote best argument]"
|
||||||
|
- **Key Risk Acknowledged:** "[Quote main concern even if proceeding]"
|
||||||
|
- **Decisive Factor:** [What determined position size]
|
||||||
|
|
||||||
|
### Portfolio Impact
|
||||||
|
- **Max Loss:** $[X] ([Y]% of portfolio) if stopped out
|
||||||
|
- **Expected Gain:** $[A] ([B]% of portfolio) if target hit
|
||||||
|
- **Break-Even:** [Days until trade costs outweigh benefit]
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ Size position to match conviction level
|
||||||
|
- ✅ Quote specific analyst arguments
|
||||||
|
- ✅ Calculate exact dollar risk on portfolio
|
||||||
|
- ✅ Adjust trader's plan with clear rationale
|
||||||
|
- ✅ Learn from past sizing mistakes
|
||||||
|
- ❌ Don't use medium position as default
|
||||||
|
- ❌ Don't ignore Conservative warnings if valid
|
||||||
|
- ❌ Don't size based on hope, only conviction
|
||||||
|
""" + (f"""
|
||||||
|
## PAST LESSONS - CRITICAL
|
||||||
|
Review past mistakes to avoid repeating sizing errors:
|
||||||
|
{past_memory_str}
|
||||||
|
|
||||||
|
**Self-Check:** Have similar setups failed before? What was the sizing mistake?
|
||||||
|
""" if past_memory_str else "") + f"""
|
||||||
---
|
---
|
||||||
|
|
||||||
**Analysts Debate History:**
|
**RISK DEBATE TO JUDGE:**
|
||||||
{history}
|
{history}
|
||||||
|
|
||||||
---
|
**MARKET DATA:**
|
||||||
|
Technical: {market_research_report}
|
||||||
|
Sentiment: {sentiment_report}
|
||||||
|
News: {news_report}
|
||||||
|
Fundamentals: {fundamentals_report}
|
||||||
|
|
||||||
Focus on actionable insights and continuous improvement. Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes."""
|
**REMEMBER:** Position sizing is your PRIMARY tool for risk management. When uncertain, go smaller. When conviction is high AND risks are managed, go bigger."""
|
||||||
|
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -22,31 +22,72 @@ def create_bear_researcher(llm, memory):
|
||||||
else:
|
else:
|
||||||
past_memories = []
|
past_memories = []
|
||||||
|
|
||||||
past_memory_str = ""
|
|
||||||
|
if past_memories:
|
||||||
|
past_memory_str = "### Past Lessons Applied\n**Reflections from Similar Situations:**\n"
|
||||||
for i, rec in enumerate(past_memories, 1):
|
for i, rec in enumerate(past_memories, 1):
|
||||||
past_memory_str += rec["recommendation"] + "\n\n"
|
past_memory_str += rec["recommendation"] + "\n\n"
|
||||||
|
past_memory_str += "\n\n**How I'm Using These Lessons:**\n"
|
||||||
|
past_memory_str += "- [Specific adjustment based on past mistake/success]\n"
|
||||||
|
past_memory_str += "- [Impact on current conviction level]\n"
|
||||||
|
else:
|
||||||
|
past_memory_str = ""
|
||||||
|
|
||||||
prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively.
|
prompt = f"""You are the Bear Analyst making the case for SHORT-TERM SELL/AVOID (1-2 weeks).
|
||||||
|
|
||||||
Key points to focus on:
|
## YOUR OBJECTIVE
|
||||||
|
Build evidence-based bear case emphasizing SHORT-TERM risks and refute Bull claims.
|
||||||
|
|
||||||
- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance.
|
## STRUCTURE
|
||||||
- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors.
|
|
||||||
- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position.
|
|
||||||
- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions.
|
|
||||||
- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts.
|
|
||||||
|
|
||||||
Resources available:
|
### Core Thesis (2-3 sentences)
|
||||||
|
Why this is SELL/AVOID for short-term traders NOW.
|
||||||
|
|
||||||
Market research report: {market_research_report}
|
### Key Bearish Points (3-4 max)
|
||||||
Social media sentiment report: {sentiment_report}
|
For each:
|
||||||
Latest world affairs news: {news_report}
|
- **Risk:** [Bearish argument]
|
||||||
Company fundamentals report: {fundamentals_report}
|
- **Evidence:** [Specific data - numbers, dates]
|
||||||
Conversation history of the debate: {history}
|
- **Short-Term Impact:** [Impact in next 1-2 weeks]
|
||||||
Last bull argument: {current_response}
|
- **Probability:** [High/Med/Low]
|
||||||
Reflections from similar situations and lessons learned: {past_memory_str}
|
|
||||||
Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past.
|
### Bull Rebuttals
|
||||||
"""
|
For EACH Bull claim:
|
||||||
|
- **Bull Says:** "[Quote]"
|
||||||
|
- **Counter:** [Why they're wrong]
|
||||||
|
- **Flaw:** [Weakness in their logic]
|
||||||
|
|
||||||
|
### Strengths I Acknowledge
|
||||||
|
- [1-2 legitimate Bull points]
|
||||||
|
- [Why risks still dominate]
|
||||||
|
|
||||||
|
## EVIDENCE PRIORITY
|
||||||
|
1. Disappointing results, guidance cuts
|
||||||
|
2. Technical breakdown, fading momentum
|
||||||
|
3. Near-term risk (next 1-2 weeks)
|
||||||
|
4. Insider selling, downgrades
|
||||||
|
|
||||||
|
## RULES
|
||||||
|
- ✅ Specific numbers and dates
|
||||||
|
- ✅ Engage with Bull points
|
||||||
|
- ✅ Short-term focus (1-2 weeks)
|
||||||
|
- ❌ Don't exaggerate
|
||||||
|
- ❌ Don't ignore Bull strengths
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**DATA:**
|
||||||
|
Technical: {market_research_report}
|
||||||
|
Sentiment: {sentiment_report}
|
||||||
|
News: {news_report}
|
||||||
|
Fundamentals: {fundamentals_report}
|
||||||
|
|
||||||
|
**DEBATE:**
|
||||||
|
History: {history}
|
||||||
|
Last Bull: {current_response}
|
||||||
|
|
||||||
|
**LESSONS:** {past_memory_str}
|
||||||
|
|
||||||
|
Apply lessons: How are you adjusting?"""
|
||||||
|
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -22,29 +22,71 @@ def create_bull_researcher(llm, memory):
|
||||||
else:
|
else:
|
||||||
past_memories = []
|
past_memories = []
|
||||||
|
|
||||||
past_memory_str = ""
|
|
||||||
|
if past_memories:
|
||||||
|
past_memory_str = "### Past Lessons Applied\\n**Reflections from Similar Situations:**\\n"
|
||||||
for i, rec in enumerate(past_memories, 1):
|
for i, rec in enumerate(past_memories, 1):
|
||||||
past_memory_str += rec["recommendation"] + "\n\n"
|
past_memory_str += rec["recommendation"] + "\\n\\n"
|
||||||
|
past_memory_str += "\\n\\n**How I'm Using These Lessons:**\\n"
|
||||||
|
past_memory_str += "- [Specific adjustment based on past mistake/success]\\n"
|
||||||
|
past_memory_str += "- [Impact on current conviction level]\\n"
|
||||||
|
else:
|
||||||
|
past_memory_str = "" # Don't include placeholder when no memories
|
||||||
|
|
||||||
prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively.
|
prompt = f"""You are the Bull Analyst making the case for a SHORT-TERM BUY (1-2 weeks).
|
||||||
|
|
||||||
Key points to focus on:
|
## YOUR OBJECTIVE
|
||||||
- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability.
|
Build evidence-based bull case and directly refute Bear concerns.
|
||||||
- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning.
|
|
||||||
- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence.
|
|
||||||
- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit.
|
|
||||||
- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data.
|
|
||||||
|
|
||||||
Resources available:
|
## STRUCTURE
|
||||||
Market research report: {market_research_report}
|
|
||||||
Social media sentiment report: {sentiment_report}
|
### Core Thesis (2-3 sentences)
|
||||||
Latest world affairs news: {news_report}
|
Why this is a BUY for short-term traders RIGHT NOW.
|
||||||
Company fundamentals report: {fundamentals_report}
|
|
||||||
Conversation history of the debate: {history}
|
### Key Bullish Points (3-4 max)
|
||||||
Last bear argument: {current_response}
|
For each:
|
||||||
Reflections from similar situations and lessons learned: {past_memory_str}
|
- **Point:** [Bullish argument]
|
||||||
Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past.
|
- **Evidence:** [Specific data - numbers, dates]
|
||||||
"""
|
- **Short-Term Relevance:** [Impact in next 1-2 weeks]
|
||||||
|
|
||||||
|
### Bear Rebuttals
|
||||||
|
For EACH Bear concern:
|
||||||
|
- **Bear Says:** "[Quote]"
|
||||||
|
- **Counter:** [Data-driven refutation]
|
||||||
|
- **Why Wrong:** [Flaw in their logic]
|
||||||
|
|
||||||
|
### Risks I Acknowledge
|
||||||
|
- [1-2 legitimate risks]
|
||||||
|
- [Why opportunity outweighs them]
|
||||||
|
|
||||||
|
## EVIDENCE PRIORITY
|
||||||
|
1. Recent earnings/revenue data
|
||||||
|
2. Technical setup (breakout, volume)
|
||||||
|
3. Near-term catalyst (next 1-2 weeks)
|
||||||
|
4. Insider buying, upgrades
|
||||||
|
|
||||||
|
## RULES
|
||||||
|
- ✅ Use specific numbers and dates
|
||||||
|
- ✅ Engage directly with Bear points
|
||||||
|
- ✅ Short-term focus (1-2 weeks)
|
||||||
|
- ❌ No unsupported claims
|
||||||
|
- ❌ Don't ignore Bear's strong points
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**DATA:**
|
||||||
|
Technical: {market_research_report}
|
||||||
|
Sentiment: {sentiment_report}
|
||||||
|
News: {news_report}
|
||||||
|
Fundamentals: {fundamentals_report}
|
||||||
|
|
||||||
|
**DEBATE:**
|
||||||
|
History: {history}
|
||||||
|
Last Bear: {current_response}
|
||||||
|
""" + (f"""
|
||||||
|
**LESSONS:** {past_memory_str}
|
||||||
|
|
||||||
|
Apply past lessons: How are you adjusting based on similar situations?""" if past_memory_str else "")
|
||||||
|
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -18,19 +18,90 @@ def create_risky_debator(llm):
|
||||||
|
|
||||||
trader_decision = state["trader_investment_plan"]
|
trader_decision = state["trader_investment_plan"]
|
||||||
|
|
||||||
prompt = f"""As the Risky Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision:
|
prompt = f"""You are the Aggressive Risk Analyst advocating for MAXIMUM position sizing to capture this SHORT-TERM opportunity.
|
||||||
|
|
||||||
|
## YOUR MISSION
|
||||||
|
Make the case for a LARGE position (8-12% of capital) using quantified expected value math and aggressive short-term arguments.
|
||||||
|
|
||||||
|
## ARGUMENT FRAMEWORK
|
||||||
|
|
||||||
|
### Expected Value Calculation
|
||||||
|
**Position the Math:**
|
||||||
|
- Probability of Success: [X]% (based on data)
|
||||||
|
- Potential Gain: [Y]%
|
||||||
|
- Probability of Failure: [Z]%
|
||||||
|
- Potential Loss: [W]%
|
||||||
|
- **Expected Value: ([X]% × [Y]%) - ([Z]% × [W]%) = [EV]%**
|
||||||
|
|
||||||
|
If EV is positive and >3%, argue for aggressive sizing.
|
||||||
|
|
||||||
|
### Structure Your Case
|
||||||
|
|
||||||
|
**1. Opportunity Size (Why Go Big)**
|
||||||
|
- **Upside:** [Specific % gain potential]
|
||||||
|
- **Catalyst Strength:** [Why catalyst is powerful]
|
||||||
|
- **Time Sensitivity:** [Why we must act NOW, not wait]
|
||||||
|
- **Edge:** [What others are missing]
|
||||||
|
|
||||||
|
**2. Risk/Reward Math**
|
||||||
|
- Best Case: [X]% gain in [Y] days
|
||||||
|
- Base Case: [A]% gain in [B] days
|
||||||
|
- Stop Loss: [C]% (tight control)
|
||||||
|
- **Risk/Reward Ratio: [Ratio] (>3:1 ideal)**
|
||||||
|
|
||||||
|
**3. Counter Conservative Points**
|
||||||
|
For EACH concern the Safe Analyst raised:
|
||||||
|
- **Safe Says:** "[Quote their concern]"
|
||||||
|
- **Why They're Wrong:** [Data refutation]
|
||||||
|
- **Reality:** [The actual probability is lower than they claim]
|
||||||
|
|
||||||
|
**4. Counter Neutral Points**
|
||||||
|
- **Neutral Says:** "[Quote their moderation]"
|
||||||
|
- **Why Moderate Sizing Loses:** [Opportunity cost argument]
|
||||||
|
- **Math:** [Show that 4% position vs 10% position makes huge difference]
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ USE NUMBERS: "70% probability, 25% upside = +17.5% EV"
|
||||||
|
- ✅ Quote specific counterarguments from others
|
||||||
|
- ✅ Show time sensitivity (catalyst in X days)
|
||||||
|
- ✅ Acknowledge risks but show they're manageable
|
||||||
|
- ❌ Don't ignore legitimate concerns
|
||||||
|
- ❌ Don't exaggerate without data
|
||||||
|
- ❌ Don't argue for recklessness, argue for calculated aggression
|
||||||
|
|
||||||
|
## POSITION SIZING ADVOCACY
|
||||||
|
**Push for 8-12% position if:**
|
||||||
|
- Expected value >5%
|
||||||
|
- Risk/reward >3:1
|
||||||
|
- Catalyst within 5 days
|
||||||
|
- Technical setup is optimal
|
||||||
|
|
||||||
|
**Argue against conservative sizing:**
|
||||||
|
"A 2% position on a 25% expected gain opportunity is leaving money on the table. If we're right, we make 0.5% on the portfolio. If we size at 10%, we make 2.5%. That's 5X the profit for the same analysis work."
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**TRADER'S PLAN:**
|
||||||
{trader_decision}
|
{trader_decision}
|
||||||
|
|
||||||
Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments:
|
**YOUR TASK:** Argue why this plan should be executed with MAXIMUM conviction sizing.
|
||||||
|
|
||||||
Market Research Report: {market_research_report}
|
**MARKET DATA:**
|
||||||
Social Media Sentiment Report: {sentiment_report}
|
- Technical: {market_research_report}
|
||||||
Latest World Affairs Report: {news_report}
|
- Sentiment: {sentiment_report}
|
||||||
Company Fundamentals Report: {fundamentals_report}
|
- News: {news_report}
|
||||||
Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
|
- Fundamentals: {fundamentals_report}
|
||||||
|
|
||||||
Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. Output conversationally as if you are speaking without any special formatting."""
|
**DEBATE HISTORY:**
|
||||||
|
{history}
|
||||||
|
|
||||||
|
**CONSERVATIVE ARGUMENT:**
|
||||||
|
{current_safe_response}
|
||||||
|
|
||||||
|
**NEUTRAL ARGUMENT:**
|
||||||
|
{current_neutral_response}
|
||||||
|
|
||||||
|
**If no other arguments yet:** Present your bullish case with expected value math."""
|
||||||
|
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -19,19 +19,102 @@ def create_safe_debator(llm):
|
||||||
|
|
||||||
trader_decision = state["trader_investment_plan"]
|
trader_decision = state["trader_investment_plan"]
|
||||||
|
|
||||||
prompt = f"""As the Safe/Conservative Risk Analyst, your primary objective is to protect assets, minimize volatility, and ensure steady, reliable growth. You prioritize stability, security, and risk mitigation, carefully assessing potential losses, economic downturns, and market volatility. When evaluating the trader's decision or plan, critically examine high-risk elements, pointing out where the decision may expose the firm to undue risk and where more cautious alternatives could secure long-term gains. Here is the trader's decision:
|
prompt = f"""You are the Conservative Risk Analyst advocating for MINIMAL position sizing or NO POSITION to protect capital.
|
||||||
|
|
||||||
|
## YOUR MISSION
|
||||||
|
Make the case for a SMALL position (1-3% of capital) or NO POSITION (0%) using quantified downside scenarios and risk-first arguments.
|
||||||
|
|
||||||
|
## ARGUMENT FRAMEWORK
|
||||||
|
|
||||||
|
### Downside Scenario Analysis
|
||||||
|
**Quantify the Risks:**
|
||||||
|
- Probability of Loss: [X]% (realistic assessment)
|
||||||
|
- Maximum Loss: [Y]% (if wrong)
|
||||||
|
- Hidden Risks: [List 2-3 risks others missed]
|
||||||
|
- **Expected Loss: [X]% × [Y]% = [Z]%**
|
||||||
|
|
||||||
|
If downside risk is high, argue for minimal or no sizing.
|
||||||
|
|
||||||
|
### Structure Your Case
|
||||||
|
|
||||||
|
**1. Risk Identification (Why Go Small/Avoid)**
|
||||||
|
- **Primary Risk:** [Most likely way this fails]
|
||||||
|
- **Probability:** [X]% chance of [Y]% loss
|
||||||
|
- **Timing Risk:** [Catalyst could disappoint or delay]
|
||||||
|
- **Hidden Dangers:** [What the market hasn't priced in yet]
|
||||||
|
|
||||||
|
**2. Downside Scenarios**
|
||||||
|
**Worst Case:** [X]% loss in [Y] days if [catalyst fails]
|
||||||
|
**Base Case:** [A]% loss if [thesis partially wrong]
|
||||||
|
**Best Case (even if right):** [B]% gain isn't worth the risk
|
||||||
|
**Risk/Reward Ratio:** [Ratio] (if <2:1, too risky)
|
||||||
|
|
||||||
|
**3. Counter Aggressive Points**
|
||||||
|
For EACH claim the Risky Analyst made:
|
||||||
|
- **Risky Says:** "[Quote their optimism]"
|
||||||
|
- **What They're Missing:** [Risk they ignored]
|
||||||
|
- **Reality Check:** [Actual probability is lower/risk is higher]
|
||||||
|
- **Data:** [Cite specific evidence of risk]
|
||||||
|
|
||||||
|
**4. Counter Neutral Points**
|
||||||
|
- **Neutral Says:** "[Quote their moderate view]"
|
||||||
|
- **Why Even Moderate Sizing Is Risky:** [Show overlooked risks]
|
||||||
|
- **Better Alternatives:** [Other opportunities with better risk/reward]
|
||||||
|
|
||||||
|
### Recommend Alternative Actions
|
||||||
|
**Instead of this trade:**
|
||||||
|
- Wait for [specific trigger] to reduce risk
|
||||||
|
- Size at 1-2% instead of 5-10% (limit damage if wrong)
|
||||||
|
- Skip entirely and preserve capital for better opportunity
|
||||||
|
- Hedge with [specific strategy] to reduce downside
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ QUANTIFY RISKS: "40% chance of -15% loss = -6% expected loss"
|
||||||
|
- ✅ Quote specific aggressive claims and refute with data
|
||||||
|
- ✅ Identify overlooked risks (macro, technical, fundamental)
|
||||||
|
- ✅ Provide specific triggers that would change your view
|
||||||
|
- ❌ Don't be fearful without evidence
|
||||||
|
- ❌ Don't ignore legitimate opportunities
|
||||||
|
- ❌ Don't argue against all action, argue for prudent sizing
|
||||||
|
|
||||||
|
## POSITION SIZING ADVOCACY
|
||||||
|
**Argue for NO POSITION (0%) if:**
|
||||||
|
- Risk/reward <1.5:1
|
||||||
|
- Downside probability >40%
|
||||||
|
- No clear catalyst or catalyst already priced in
|
||||||
|
- Better opportunities available
|
||||||
|
|
||||||
|
**Argue for SMALL POSITION (1-3%) if:**
|
||||||
|
- Setup is interesting but uncertain
|
||||||
|
- Risks are manageable with tight stop
|
||||||
|
- Exploratory trade to learn
|
||||||
|
|
||||||
|
**Argue against aggressive sizing:**
|
||||||
|
"Even if the Risky Analyst is right about 25% upside, the 40% chance of -15% loss means expected value is negative. A 10% position could lose us 1.5% of the portfolio. That's three good trades' worth of profit."
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**TRADER'S PLAN:**
|
||||||
{trader_decision}
|
{trader_decision}
|
||||||
|
|
||||||
Your task is to actively counter the arguments of the Risky and Neutral Analysts, highlighting where their views may overlook potential threats or fail to prioritize sustainability. Respond directly to their points, drawing from the following data sources to build a convincing case for a low-risk approach adjustment to the trader's decision:
|
**YOUR TASK:** Identify the risks others are missing and argue for minimal or no position.
|
||||||
|
|
||||||
Market Research Report: {market_research_report}
|
**MARKET DATA:**
|
||||||
Social Media Sentiment Report: {sentiment_report}
|
- Technical: {market_research_report}
|
||||||
Latest World Affairs Report: {news_report}
|
- Sentiment: {sentiment_report}
|
||||||
Company Fundamentals Report: {fundamentals_report}
|
- News: {news_report}
|
||||||
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
|
- Fundamentals: {fundamentals_report}
|
||||||
|
|
||||||
Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. Output conversationally as if you are speaking without any special formatting."""
|
**DEBATE HISTORY:**
|
||||||
|
{history}
|
||||||
|
|
||||||
|
**AGGRESSIVE ARGUMENT:**
|
||||||
|
{current_risky_response}
|
||||||
|
|
||||||
|
**NEUTRAL ARGUMENT:**
|
||||||
|
{current_neutral_response}
|
||||||
|
|
||||||
|
**If no other arguments yet:** Present your bearish case with downside scenario analysis."""
|
||||||
|
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -18,19 +18,100 @@ def create_neutral_debator(llm):
|
||||||
|
|
||||||
trader_decision = state["trader_investment_plan"]
|
trader_decision = state["trader_investment_plan"]
|
||||||
|
|
||||||
prompt = f"""As the Neutral Risk Analyst, your role is to provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies.Here is the trader's decision:
|
prompt = f"""You are the Neutral Risk Analyst advocating for BALANCED position sizing (4-7% of capital) that optimizes risk-adjusted returns.
|
||||||
|
|
||||||
|
## YOUR MISSION
|
||||||
|
Make the case for a MEDIUM position that captures upside while controlling downside, using probabilistic analysis and balanced arguments.
|
||||||
|
|
||||||
|
## ARGUMENT FRAMEWORK
|
||||||
|
|
||||||
|
### Probabilistic Analysis
|
||||||
|
**Balance the Probabilities:**
|
||||||
|
- Bull Case Probability: [X]%
|
||||||
|
- Bear Case Probability: [Y]%
|
||||||
|
- Neutral Case Probability: [Z]%
|
||||||
|
- **Most Likely Outcome:** [Describe scenario with highest probability]
|
||||||
|
- **Expected Value:** [Calculate using all scenarios]
|
||||||
|
|
||||||
|
### Structure Your Case
|
||||||
|
|
||||||
|
**1. Balanced Assessment**
|
||||||
|
- **Opportunity Recognition:** [What's real about the bull case]
|
||||||
|
- **Risk Recognition:** [What's valid about the bear case]
|
||||||
|
- **Optimal Sizing:** [Why 4-7% captures both]
|
||||||
|
- **Middle Ground:** [The scenario both extremes are missing]
|
||||||
|
|
||||||
|
**2. Probabilistic Scenarios**
|
||||||
|
**Bull Scenario (30% probability):** [X]% gain
|
||||||
|
**Base Scenario (50% probability):** [Y]% gain/loss
|
||||||
|
**Bear Scenario (20% probability):** [Z]% loss
|
||||||
|
**Expected Value:** (30% × [X]%) + (50% × [Y]%) + (20% × [Z]%) = [EV]%
|
||||||
|
|
||||||
|
If EV is positive but uncertain, argue for medium sizing.
|
||||||
|
|
||||||
|
**3. Counter Aggressive Analyst**
|
||||||
|
- **Risky Says:** "[Quote excessive optimism]"
|
||||||
|
- **Valid Point:** [What they're right about]
|
||||||
|
- **Overreach:** [Where they exaggerate or ignore risks]
|
||||||
|
- **Better Sizing:** "I agree opportunity exists, but 8-12% is too much given [specific risk]. 5-6% captures upside with better risk control."
|
||||||
|
|
||||||
|
**4. Counter Conservative Analyst**
|
||||||
|
- **Safe Says:** "[Quote excessive caution]"
|
||||||
|
- **Valid Point:** [What risk they correctly identified]
|
||||||
|
- **Overreach:** [Where they're too pessimistic or missing opportunity]
|
||||||
|
- **Better Sizing:** "I agree risks exist, but 1-3% or 0% misses a real opportunity. 5-6% with tight stop manages risk while participating."
|
||||||
|
|
||||||
|
### Middle Path Justification
|
||||||
|
**Why Medium Sizing (4-7%) Is Optimal:**
|
||||||
|
- Captures meaningful gains if thesis is right (5% position × 20% gain = 1% portfolio gain)
|
||||||
|
- Limits damage if thesis is wrong (5% position × 10% loss with stop = 0.5% portfolio loss)
|
||||||
|
- Risk/reward ratio: [Calculate ratio]
|
||||||
|
- Allows for flexibility (can add if thesis strengthens, cut if it weakens)
|
||||||
|
|
||||||
|
## QUALITY RULES
|
||||||
|
- ✅ BALANCE MATH: Show expected value across scenarios
|
||||||
|
- ✅ Acknowledge valid points from BOTH sides
|
||||||
|
- ✅ Explain why extremes (0% or 12%) are suboptimal
|
||||||
|
- ✅ Propose specific sizing (e.g., "5.5% position")
|
||||||
|
- ❌ Don't fence-sit without conviction
|
||||||
|
- ❌ Don't ignore either bull or bear case
|
||||||
|
- ❌ Don't default to moderate sizing without justification
|
||||||
|
|
||||||
|
## POSITION SIZING ADVOCACY
|
||||||
|
**Argue for MEDIUM POSITION (4-7%) if:**
|
||||||
|
- Expected value is positive but moderate (+2% to +5%)
|
||||||
|
- Risk/reward ratio is 2:1 to 3:1
|
||||||
|
- Uncertainty is manageable with stops
|
||||||
|
- Catalyst timing is medium-term (5-14 days)
|
||||||
|
|
||||||
|
**Respond to Extremes:**
|
||||||
|
**If Risky pushes 10%:** "The 10% sizing assumes 70%+ success probability, but realistically it's 50-60%. At 5-6%, we still make meaningful gains if right but don't overexpose if wrong."
|
||||||
|
|
||||||
|
**If Safe pushes 0-2%:** "The risks are real but manageable. A 1% position makes only 0.2% on the portfolio even if we're right. That's not enough return for the analysis effort. 5% with a tight stop is prudent."
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**TRADER'S PLAN:**
|
||||||
{trader_decision}
|
{trader_decision}
|
||||||
|
|
||||||
Your task is to challenge both the Risky and Safe Analysts, pointing out where each perspective may be overly optimistic or overly cautious. Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision:
|
**YOUR TASK:** Find the balanced position size that maximizes risk-adjusted returns.
|
||||||
|
|
||||||
Market Research Report: {market_research_report}
|
**MARKET DATA:**
|
||||||
Social Media Sentiment Report: {sentiment_report}
|
- Technical: {market_research_report}
|
||||||
Latest World Affairs Report: {news_report}
|
- Sentiment: {sentiment_report}
|
||||||
Company Fundamentals Report: {fundamentals_report}
|
- News: {news_report}
|
||||||
Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point.
|
- Fundamentals: {fundamentals_report}
|
||||||
|
|
||||||
Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. Output conversationally as if you are speaking without any special formatting."""
|
**DEBATE HISTORY:**
|
||||||
|
{history}
|
||||||
|
|
||||||
|
**AGGRESSIVE ARGUMENT:**
|
||||||
|
{current_risky_response}
|
||||||
|
|
||||||
|
**CONSERVATIVE ARGUMENT:**
|
||||||
|
{current_safe_response}
|
||||||
|
|
||||||
|
**If no other arguments yet:** Present your balanced case with probabilistic scenarios."""
|
||||||
|
|
||||||
response = llm.invoke(prompt)
|
response = llm.invoke(prompt)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -19,12 +19,15 @@ def create_trader(llm, memory):
|
||||||
else:
|
else:
|
||||||
past_memories = []
|
past_memories = []
|
||||||
|
|
||||||
past_memory_str = ""
|
|
||||||
if past_memories:
|
if past_memories:
|
||||||
|
past_memory_str = "### Past Lessons Applied\\n**Reflections from Similar Situations:**\\n"
|
||||||
for i, rec in enumerate(past_memories, 1):
|
for i, rec in enumerate(past_memories, 1):
|
||||||
past_memory_str += rec["recommendation"] + "\n\n"
|
past_memory_str += rec["recommendation"] + "\\n\\n"
|
||||||
|
past_memory_str += "\\n\\n**How I'm Using These Lessons:**\\n"
|
||||||
|
past_memory_str += "- [Specific adjustment based on past mistake/success]\\n"
|
||||||
|
past_memory_str += "- [Impact on current conviction level]\\n"
|
||||||
else:
|
else:
|
||||||
past_memory_str = "No past memories found."
|
past_memory_str = "" # Don't include placeholder when no memories
|
||||||
|
|
||||||
context = {
|
context = {
|
||||||
"role": "user",
|
"role": "user",
|
||||||
|
|
@ -34,7 +37,87 @@ def create_trader(llm, memory):
|
||||||
messages = [
|
messages = [
|
||||||
{
|
{
|
||||||
"role": "system",
|
"role": "system",
|
||||||
"content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}""",
|
"content": f"""You are the Lead Trader making the final SHORT-TERM trading decision on {company_name}.
|
||||||
|
|
||||||
|
## YOUR RESPONSIBILITIES
|
||||||
|
1. **Validate the Plan:** Review for logic, data support, and risks
|
||||||
|
2. **Add Trading Details:** Entry price, position size, stop loss, targets
|
||||||
|
3. **Apply Past Lessons:** Learn from history (see reflections below)
|
||||||
|
4. **Make Final Call:** Clear BUY/HOLD/SELL with execution plan
|
||||||
|
|
||||||
|
## IMPORTANT: DECISION HIERARCHY
|
||||||
|
Your decision will be reviewed by the Risk Manager who may:
|
||||||
|
- Reduce position size if risks are high
|
||||||
|
- Override to NO POSITION if risks outweigh opportunity
|
||||||
|
- Adjust stop-loss levels for better risk management
|
||||||
|
|
||||||
|
Make your best recommendation - the Risk Manager will apply final risk controls.
|
||||||
|
|
||||||
|
## SHORT-TERM TRADING CRITERIA (1-2 week horizon)
|
||||||
|
|
||||||
|
**BUY if:**
|
||||||
|
- Clear catalyst in next 5-10 days
|
||||||
|
- Technical setup favorable (not overextended)
|
||||||
|
- Risk/reward ratio >2:1
|
||||||
|
- Specific entry and stop loss levels identified
|
||||||
|
|
||||||
|
**SELL if:**
|
||||||
|
- Catalyst played out (news priced in, earnings passed)
|
||||||
|
- Technical breakdown or trend reversal
|
||||||
|
- Risk/reward deteriorated
|
||||||
|
- Better opportunities available
|
||||||
|
|
||||||
|
**HOLD if (rare, needs strong justification):**
|
||||||
|
- Major catalyst imminent (1-3 days away)
|
||||||
|
- Current position is optimal
|
||||||
|
- Waiting provides option value
|
||||||
|
|
||||||
|
## OUTPUT STRUCTURE (MANDATORY SECTIONS)
|
||||||
|
|
||||||
|
### Decision Summary
|
||||||
|
**DECISION: BUY / SELL / HOLD**
|
||||||
|
**Conviction: High / Medium / Low**
|
||||||
|
**Position Size: [X]% of capital**
|
||||||
|
**Time Horizon: [Y] days**
|
||||||
|
|
||||||
|
### Plan Evaluation
|
||||||
|
**What I Agree With:** [Key strengths from the plan]
|
||||||
|
**What I'm Concerned About:** [Gaps or risks in the plan]
|
||||||
|
**My Adjustments:** [How I'm modifying based on trading experience]
|
||||||
|
|
||||||
|
### Trade Execution Details
|
||||||
|
|
||||||
|
**If BUY:**
|
||||||
|
- Entry: $[X] (or market)
|
||||||
|
- Size: [Y]% portfolio
|
||||||
|
- Stop Loss: $[A] ([B]% risk)
|
||||||
|
- Target: $[C] ([D]% gain)
|
||||||
|
- Horizon: [E] days
|
||||||
|
- Risk/Reward: [Ratio]
|
||||||
|
|
||||||
|
**If SELL:**
|
||||||
|
- Exit: $[X] (or market)
|
||||||
|
- Timing: [When/how to exit]
|
||||||
|
- Re-entry: [What would change my mind]
|
||||||
|
|
||||||
|
**If HOLD:**
|
||||||
|
- Why: [Specific justification]
|
||||||
|
- BUY trigger: [Event/price]
|
||||||
|
- SELL trigger: [Event/price]
|
||||||
|
- Review: [When to reassess]
|
||||||
|
|
||||||
|
{past_memory_str}
|
||||||
|
|
||||||
|
### Risk Management
|
||||||
|
- Max Loss: $[X] or [Y]%
|
||||||
|
- What Invalidates Thesis: [Specific condition]
|
||||||
|
- Portfolio Impact: [Effect on overall risk]
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**FINAL TRANSACTION PROPOSAL: BUY/HOLD/SELL**
|
||||||
|
|
||||||
|
End with clear decision statement.""",
|
||||||
},
|
},
|
||||||
context,
|
context,
|
||||||
]
|
]
|
||||||
|
|
|
||||||
|
|
@ -84,4 +84,5 @@ class DiscoveryState(TypedDict):
|
||||||
opportunities: Annotated[list[dict], "List of final opportunities with rationale"]
|
opportunities: Annotated[list[dict], "List of final opportunities with rationale"]
|
||||||
final_ranking: Annotated[str, "Final ranking from LLM"]
|
final_ranking: Annotated[str, "Final ranking from LLM"]
|
||||||
status: Annotated[str, "Current status of discovery"]
|
status: Annotated[str, "Current status of discovery"]
|
||||||
|
tool_logs: Annotated[list[dict], "Detailed logs of all tool calls across all nodes (scanner, filter, deep_dive)"]
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -2,15 +2,20 @@
|
||||||
Historical Memory Builder for TradingAgents
|
Historical Memory Builder for TradingAgents
|
||||||
|
|
||||||
This module creates agent memories from historical stock data by:
|
This module creates agent memories from historical stock data by:
|
||||||
1. Analyzing market conditions at time T
|
1. Finding high movers (>15% in 5 days)
|
||||||
2. Observing actual stock performance at time T + delta
|
2. Running retrospective trading graph analysis at T-7 and T-30 days before the move
|
||||||
3. Creating situation -> outcome mappings for each agent type
|
3. Extracting structured signals and agent decisions
|
||||||
4. Storing memories in ChromaDB for future retrieval
|
4. Creating situation -> outcome mappings with enhanced metadata
|
||||||
|
5. Storing memories in ChromaDB for future retrieval
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
import yfinance as yf
|
||||||
|
import pandas as pd
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import List, Dict, Tuple, Optional
|
from typing import List, Dict, Tuple, Optional, Any
|
||||||
from tradingagents.tools.executor import execute_tool
|
from tradingagents.tools.executor import execute_tool
|
||||||
from tradingagents.agents.utils.memory import FinancialSituationMemory
|
from tradingagents.agents.utils.memory import FinancialSituationMemory
|
||||||
|
|
||||||
|
|
@ -33,6 +38,639 @@ class HistoricalMemoryBuilder:
|
||||||
"risk_manager": 0
|
"risk_manager": 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def get_tickers_from_alpha_vantage(self, limit: int = 20) -> List[str]:
|
||||||
|
"""
|
||||||
|
Get ticker list from Alpha Vantage top gainers/losers.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
limit: Number of tickers to get from each category (gainers/losers)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of ticker symbols from top gainers and losers
|
||||||
|
"""
|
||||||
|
print(f"\n🔍 Fetching top movers from Alpha Vantage...")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Use execute_tool to call the alpha vantage function
|
||||||
|
response = execute_tool("get_market_movers", limit=limit)
|
||||||
|
|
||||||
|
# Parse the markdown table response to extract tickers
|
||||||
|
tickers = set()
|
||||||
|
|
||||||
|
lines = response.split('\n')
|
||||||
|
for line in lines:
|
||||||
|
# Look for table rows with ticker data
|
||||||
|
if '|' in line and not line.strip().startswith('|---'):
|
||||||
|
parts = [p.strip() for p in line.split('|')]
|
||||||
|
# Table format: | Ticker | Price | Change % | Volume |
|
||||||
|
if len(parts) >= 2 and parts[1] and parts[1] not in ['Ticker', '']:
|
||||||
|
ticker = parts[1].strip()
|
||||||
|
|
||||||
|
# Filter out warrants, units, and problematic tickers
|
||||||
|
if ticker and self._is_valid_ticker(ticker):
|
||||||
|
tickers.add(ticker)
|
||||||
|
|
||||||
|
ticker_list = sorted(list(tickers))
|
||||||
|
print(f" ✅ Found {len(ticker_list)} unique tickers from Alpha Vantage")
|
||||||
|
print(f" Tickers: {', '.join(ticker_list[:10])}{'...' if len(ticker_list) > 10 else ''}")
|
||||||
|
|
||||||
|
return ticker_list
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f" ⚠️ Error fetching from Alpha Vantage: {e}")
|
||||||
|
print(f" Falling back to empty list")
|
||||||
|
return []
|
||||||
|
|
||||||
|
def _is_valid_ticker(self, ticker: str) -> bool:
|
||||||
|
"""
|
||||||
|
Validate if a ticker is suitable for analysis.
|
||||||
|
|
||||||
|
Filters out:
|
||||||
|
- Warrants (ending in W, WW, WS)
|
||||||
|
- Units (ending in U)
|
||||||
|
- Preferred shares (containing -, /)
|
||||||
|
- Rights (ending in R)
|
||||||
|
- Other derivative instruments
|
||||||
|
|
||||||
|
Args:
|
||||||
|
ticker: Stock ticker symbol
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if ticker is a regular stock, False otherwise
|
||||||
|
"""
|
||||||
|
if not ticker or len(ticker) > 6:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Must be uppercase letters and numbers only
|
||||||
|
if not re.match(r'^[A-Z]{1,5}$', ticker):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Filter out warrants (W, WW, WS suffix)
|
||||||
|
if ticker.endswith('W') or ticker.endswith('WW') or ticker.endswith('WS'):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Filter out units
|
||||||
|
if ticker.endswith('U'):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Filter out rights
|
||||||
|
if ticker.endswith('R') and len(ticker) > 1:
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Filter out other suffixes that indicate derivatives
|
||||||
|
if ticker.endswith('Z'): # Often used for special situations
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def find_high_movers(
|
||||||
|
self,
|
||||||
|
tickers: List[str],
|
||||||
|
start_date: str,
|
||||||
|
end_date: str,
|
||||||
|
min_move_pct: float = 15.0,
|
||||||
|
window_days: int = 5
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Find stocks that had significant moves (>15% in 5 days).
|
||||||
|
|
||||||
|
Args:
|
||||||
|
tickers: List of tickers to scan
|
||||||
|
start_date: Start date for scanning (YYYY-MM-DD)
|
||||||
|
end_date: End date for scanning (YYYY-MM-DD)
|
||||||
|
min_move_pct: Minimum percentage move (default: 15%)
|
||||||
|
window_days: Rolling window in days (default: 5)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
List of dicts with keys:
|
||||||
|
- ticker: Stock symbol
|
||||||
|
- move_start_date: Start of the move (YYYY-MM-DD)
|
||||||
|
- move_end_date: End of the move (YYYY-MM-DD)
|
||||||
|
- move_pct: Percentage change
|
||||||
|
- direction: "up" or "down"
|
||||||
|
- start_price: Price at start
|
||||||
|
- end_price: Price at end
|
||||||
|
"""
|
||||||
|
high_movers = []
|
||||||
|
|
||||||
|
print(f"\n🔍 Scanning for high movers ({min_move_pct}%+ in {window_days} days)")
|
||||||
|
print(f" Period: {start_date} to {end_date}")
|
||||||
|
print(f" Tickers: {len(tickers)}\n")
|
||||||
|
|
||||||
|
for ticker in tickers:
|
||||||
|
try:
|
||||||
|
print(f" Scanning {ticker}...", end=" ")
|
||||||
|
|
||||||
|
# Download historical data using yfinance
|
||||||
|
stock = yf.Ticker(ticker)
|
||||||
|
df = stock.history(start=start_date, end=end_date)
|
||||||
|
|
||||||
|
if df.empty:
|
||||||
|
print("No data")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Calculate rolling returns over window_days
|
||||||
|
df['rolling_return'] = df['Close'].pct_change(periods=window_days) * 100
|
||||||
|
|
||||||
|
# Find periods with moves >= min_move_pct
|
||||||
|
significant_moves = df[abs(df['rolling_return']) >= min_move_pct]
|
||||||
|
|
||||||
|
if not significant_moves.empty:
|
||||||
|
for idx, row in significant_moves.iterrows():
|
||||||
|
# Get the start date (window_days before this date)
|
||||||
|
move_end_date = idx.strftime('%Y-%m-%d')
|
||||||
|
move_start_date = (idx - timedelta(days=window_days)).strftime('%Y-%m-%d')
|
||||||
|
|
||||||
|
# Get prices
|
||||||
|
try:
|
||||||
|
start_price = df.loc[df.index >= move_start_date, 'Close'].iloc[0]
|
||||||
|
end_price = row['Close']
|
||||||
|
move_pct = row['rolling_return']
|
||||||
|
|
||||||
|
high_movers.append({
|
||||||
|
'ticker': ticker,
|
||||||
|
'move_start_date': move_start_date,
|
||||||
|
'move_end_date': move_end_date,
|
||||||
|
'move_pct': move_pct,
|
||||||
|
'direction': 'up' if move_pct > 0 else 'down',
|
||||||
|
'start_price': start_price,
|
||||||
|
'end_price': end_price
|
||||||
|
})
|
||||||
|
except (IndexError, KeyError):
|
||||||
|
continue
|
||||||
|
|
||||||
|
print(f"Found {len([m for m in high_movers if m['ticker'] == ticker])} moves")
|
||||||
|
else:
|
||||||
|
print("No significant moves")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
print(f"\n✅ Total high movers found: {len(high_movers)}\n")
|
||||||
|
return high_movers
|
||||||
|
|
||||||
|
def run_retrospective_analysis(
    self,
    ticker: str,
    analysis_date: str
) -> Optional[Dict[str, Any]]:
    """Run the trading graph for *ticker* as of a historical date.

    Simulates what the agent would have seen/decided on that date.

    Args:
        ticker: Stock ticker symbol.
        analysis_date: Date to run the analysis at (YYYY-MM-DD).

    Returns:
        Dict with the analyst reports (market/sentiment/news/fundamentals),
        the investment plan and final decision when available, plus a
        'structured_signals' dict of extracted features; ``None`` if the
        graph run fails for any reason.
    """
    try:
        # Imported lazily to avoid a circular import at module load time.
        from tradingagents.graph.trading_graph import TradingAgentsGraph

        print(f" Running analysis for {ticker} on {analysis_date}...")

        # Only two analysts are enabled to keep token usage down.
        graph = TradingAgentsGraph(
            selected_analysts=["market", "fundamentals"],  # Skip social/news to reduce tokens
            config=self.config,
            debug=False
        )

        # propagate() yields (final_state, processed_signal); the processed
        # signal is not needed for retrospective memory building.
        final_state, _ = graph.propagate(ticker, analysis_date)

        def coerce_text(state, key, fallback=''):
            """Pull a string out of the graph state, flattening message lists."""
            raw = state.get(key, fallback)
            if isinstance(raw, list):
                return ' '.join(str(part) for part in raw)
            return str(raw) if raw else fallback

        # Map each output field to the state key it is sourced from.
        field_sources = {
            'market_report': 'market_report',
            'sentiment_report': 'sentiment_report',
            'news_report': 'news_report',
            'fundamentals_report': 'fundamentals_report',
            'investment_plan': 'investment_plan',
            'final_decision': 'final_trade_decision',
        }
        analysis_data = {
            out_key: coerce_text(final_state, state_key)
            for out_key, state_key in field_sources.items()
        }

        # Derive the coarse signal features from the raw report texts.
        analysis_data['structured_signals'] = self.extract_structured_signals(analysis_data)

        return analysis_data

    except Exception as e:
        print(f" Error running analysis: {e}")
        import traceback
        print(f" Traceback: {traceback.format_exc()}")
        return None
|
||||||
|
|
||||||
|
def extract_structured_signals(self, reports: Dict[str, str]) -> Dict[str, Any]:
    """Derive coarse signal features from the analyst report texts.

    Args:
        reports: Mapping that may contain market_report, sentiment_report,
            news_report and fundamentals_report (missing keys default to '').

    Returns:
        Dict with: unusual_volume (bool), analyst_sentiment
        (bullish/bearish/neutral), news_sentiment (positive/negative/neutral),
        short_interest (high/medium/low), insider_activity
        (buying/selling/none), price_trend (uptrend/downtrend/sideways),
        volatility (high/medium/low).
    """
    market = reports.get('market_report', '')
    sentiment = reports.get('sentiment_report', '')
    news = reports.get('news_report', '')
    fundamentals = reports.get('fundamentals_report', '')

    def seen(pattern, text):
        # Case-insensitive keyword probe over the given report text.
        return re.search(pattern, text, re.IGNORECASE) is not None

    signals: Dict[str, Any] = {}

    # Volume: any of the spike phrasings in the market report.
    signals['unusual_volume'] = seen(
        r'(unusual volume|volume spike|high volume|increased volume)', market)

    # Analyst sentiment is read from sentiment + news; first match wins.
    signals['analyst_sentiment'] = (
        'bullish' if seen(r'(bullish|positive outlook|strong buy|buy)', sentiment + news)
        else 'bearish' if seen(r'(bearish|negative outlook|strong sell|sell)', sentiment + news)
        else 'neutral'
    )

    signals['news_sentiment'] = (
        'positive' if seen(r'(positive|good news|beat expectations|upgrade|growth)', news)
        else 'negative' if seen(r'(negative|bad news|miss expectations|downgrade|decline)', news)
        else 'neutral'
    )

    # "high" is probed over market + news; "low" over the market report only.
    signals['short_interest'] = (
        'high' if seen(r'(high short interest|heavily shorted|short squeeze)', market + news)
        else 'low' if seen(r'(low short interest|minimal short)', market)
        else 'medium'
    )

    signals['insider_activity'] = (
        'buying' if seen(r'(insider buying|executive purchased|insider purchases)', news + fundamentals)
        else 'selling' if seen(r'(insider selling|executive sold|insider sales)', news + fundamentals)
        else 'none'
    )

    signals['price_trend'] = (
        'uptrend' if seen(r'(uptrend|bullish trend|rising|moving higher|higher highs)', market)
        else 'downtrend' if seen(r'(downtrend|bearish trend|falling|moving lower|lower lows)', market)
        else 'sideways'
    )

    signals['volatility'] = (
        'high' if seen(r'(high volatility|volatile|wild swings|sharp movements)', market)
        else 'low' if seen(r'(low volatility|stable|steady)', market)
        else 'medium'
    )

    return signals
|
||||||
|
|
||||||
|
def build_memories_from_high_movers(
|
||||||
|
self,
|
||||||
|
tickers: List[str],
|
||||||
|
start_date: str,
|
||||||
|
end_date: str,
|
||||||
|
min_move_pct: float = 15.0,
|
||||||
|
analysis_windows: List[int] = [7, 30],
|
||||||
|
max_samples: int = 50,
|
||||||
|
sample_strategy: str = "diverse"
|
||||||
|
) -> Dict[str, FinancialSituationMemory]:
|
||||||
|
"""
|
||||||
|
Build memories by finding high movers and running retrospective analyses.
|
||||||
|
|
||||||
|
This is the main method for the new learning system.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
tickers: List of tickers to scan
|
||||||
|
start_date: Start date for scanning (YYYY-MM-DD)
|
||||||
|
end_date: End date for scanning (YYYY-MM-DD)
|
||||||
|
min_move_pct: Minimum percentage move (default: 15%)
|
||||||
|
analysis_windows: Days before move to analyze (default: [7, 30])
|
||||||
|
max_samples: Maximum number of high movers to analyze (default: 50)
|
||||||
|
sample_strategy: How to select samples from high movers:
|
||||||
|
- "diverse": Mix of up/down moves, different magnitudes (recommended)
|
||||||
|
- "largest": Take the largest moves only
|
||||||
|
- "recent": Take the most recent moves only
|
||||||
|
- "random": Random sampling
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Dictionary of populated memory instances for each agent type
|
||||||
|
"""
|
||||||
|
print("=" * 70)
|
||||||
|
print("🏗️ BUILDING MEMORIES FROM HIGH MOVERS")
|
||||||
|
print("=" * 70)
|
||||||
|
|
||||||
|
# Step 1: Find high movers
|
||||||
|
high_movers = self.find_high_movers(tickers, start_date, end_date, min_move_pct)
|
||||||
|
|
||||||
|
if not high_movers:
|
||||||
|
print("⚠️ No high movers found. Try a different date range or lower threshold.")
|
||||||
|
return {}
|
||||||
|
|
||||||
|
# Step 1.5: Sample/filter high movers based on strategy
|
||||||
|
sampled_movers = self._sample_high_movers(high_movers, max_samples, sample_strategy)
|
||||||
|
|
||||||
|
print(f"\n📊 Sampling Strategy: {sample_strategy}")
|
||||||
|
print(f" Total high movers found: {len(high_movers)}")
|
||||||
|
print(f" Samples to analyze: {len(sampled_movers)}")
|
||||||
|
print(f" Estimated runtime: ~{len(sampled_movers) * len(analysis_windows) * 2} minutes")
|
||||||
|
print()
|
||||||
|
|
||||||
|
# Initialize memory stores
|
||||||
|
agent_memories = {
|
||||||
|
"bull": FinancialSituationMemory("bull_memory", self.config),
|
||||||
|
"bear": FinancialSituationMemory("bear_memory", self.config),
|
||||||
|
"trader": FinancialSituationMemory("trader_memory", self.config),
|
||||||
|
"invest_judge": FinancialSituationMemory("invest_judge_memory", self.config),
|
||||||
|
"risk_manager": FinancialSituationMemory("risk_manager_memory", self.config)
|
||||||
|
}
|
||||||
|
|
||||||
|
# Step 2: For each high mover, run retrospective analyses
|
||||||
|
print("\n📊 Running retrospective analyses...\n")
|
||||||
|
|
||||||
|
for idx, mover in enumerate(sampled_movers, 1):
|
||||||
|
ticker = mover['ticker']
|
||||||
|
move_pct = mover['move_pct']
|
||||||
|
direction = mover['direction']
|
||||||
|
move_start_date = mover['move_start_date']
|
||||||
|
|
||||||
|
print(f" [{idx}/{len(sampled_movers)}] {ticker}: {move_pct:+.1f}% {direction}")
|
||||||
|
|
||||||
|
# Run analyses at different time windows before the move
|
||||||
|
for days_before in analysis_windows:
|
||||||
|
# Calculate analysis date
|
||||||
|
try:
|
||||||
|
analysis_date = (
|
||||||
|
datetime.strptime(move_start_date, '%Y-%m-%d') - timedelta(days=days_before)
|
||||||
|
).strftime('%Y-%m-%d')
|
||||||
|
|
||||||
|
print(f" Analyzing T-{days_before} days ({analysis_date})...")
|
||||||
|
|
||||||
|
# Run trading graph analysis
|
||||||
|
analysis = self.run_retrospective_analysis(ticker, analysis_date)
|
||||||
|
|
||||||
|
if not analysis:
|
||||||
|
print(f" ⚠️ Analysis failed, skipping...")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Create combined situation text
|
||||||
|
situation_text = f"""
|
||||||
|
**Ticker**: {ticker}
|
||||||
|
**Analysis Date**: {analysis_date}
|
||||||
|
**Time Before Move**: {days_before} days
|
||||||
|
|
||||||
|
**Market Analysis**:
|
||||||
|
{analysis['market_report'][:500]}...
|
||||||
|
|
||||||
|
**Sentiment Analysis**:
|
||||||
|
{analysis['sentiment_report'][:500]}...
|
||||||
|
|
||||||
|
**News Analysis**:
|
||||||
|
{analysis['news_report'][:500]}...
|
||||||
|
|
||||||
|
**Fundamentals**:
|
||||||
|
{analysis['fundamentals_report'][:500]}...
|
||||||
|
""".strip()
|
||||||
|
|
||||||
|
# Extract agent recommendation from investment plan and final decision
|
||||||
|
agent_recommendation = self._extract_recommendation(
|
||||||
|
analysis.get('investment_plan', ''),
|
||||||
|
analysis.get('final_decision', '')
|
||||||
|
)
|
||||||
|
|
||||||
|
# Determine if agent was correct
|
||||||
|
was_correct = self._compute_correctness(agent_recommendation, direction)
|
||||||
|
|
||||||
|
# Create metadata
|
||||||
|
metadata = {
|
||||||
|
'ticker': ticker,
|
||||||
|
'analysis_date': analysis_date,
|
||||||
|
'days_before_move': days_before,
|
||||||
|
'move_pct': abs(move_pct),
|
||||||
|
'move_direction': direction,
|
||||||
|
'agent_recommendation': agent_recommendation,
|
||||||
|
'was_correct': was_correct,
|
||||||
|
'structured_signals': analysis['structured_signals']
|
||||||
|
}
|
||||||
|
|
||||||
|
# Create recommendation text
|
||||||
|
lesson_text = f"This signal combination is reliable for predicting {direction} moves." if was_correct else "This signal combination can be misleading. Need to consider other factors."
|
||||||
|
|
||||||
|
recommendation_text = f"""
|
||||||
|
Agent Decision: {agent_recommendation}
|
||||||
|
Actual Outcome: {direction} {abs(move_pct):.1f}%
|
||||||
|
Correctness: {'✓ CORRECT' if was_correct else '✗ INCORRECT'}
|
||||||
|
|
||||||
|
{days_before} days before this {direction} move, the agent recommended {agent_recommendation}.
|
||||||
|
The stock moved {direction} by {abs(move_pct):.1f}%, so the agent was {'correct' if was_correct else 'incorrect'}.
|
||||||
|
|
||||||
|
Structured Signals Present:
|
||||||
|
{self._format_signals(analysis.get('structured_signals', {}))}
|
||||||
|
|
||||||
|
Lesson: {lesson_text}
|
||||||
|
""".strip()
|
||||||
|
|
||||||
|
# Store in all agent memories
|
||||||
|
for agent_type, memory in agent_memories.items():
|
||||||
|
memory.add_situations_with_metadata([
|
||||||
|
(situation_text, recommendation_text, metadata)
|
||||||
|
])
|
||||||
|
|
||||||
|
self.memories_created[agent_type] = self.memories_created.get(agent_type, 0) + 1
|
||||||
|
|
||||||
|
print(f" ✅ Memory created: {agent_recommendation} -> {direction} ({was_correct})")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f" ⚠️ Error: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Print summary
|
||||||
|
print("\n" + "=" * 70)
|
||||||
|
print("📊 MEMORY CREATION SUMMARY")
|
||||||
|
print("=" * 70)
|
||||||
|
print(f" High movers analyzed: {len(sampled_movers)}")
|
||||||
|
print(f" Analysis windows: {analysis_windows} days before move")
|
||||||
|
for agent_type, count in self.memories_created.items():
|
||||||
|
print(f" {agent_type.ljust(15)}: {count} memories")
|
||||||
|
|
||||||
|
# Print statistics
|
||||||
|
print("\n📈 MEMORY BANK STATISTICS")
|
||||||
|
print("=" * 70)
|
||||||
|
for agent_type, memory in agent_memories.items():
|
||||||
|
stats = memory.get_statistics()
|
||||||
|
print(f"\n {agent_type.upper()}:")
|
||||||
|
print(f" Total memories: {stats['total_memories']}")
|
||||||
|
print(f" Accuracy rate: {stats['accuracy_rate']:.1f}%")
|
||||||
|
print(f" Avg move: {stats['avg_move_pct']:.1f}%")
|
||||||
|
|
||||||
|
print("=" * 70 + "\n")
|
||||||
|
|
||||||
|
return agent_memories
|
||||||
|
|
||||||
|
def _extract_recommendation(self, investment_plan: str, final_decision: str) -> str:
|
||||||
|
"""
|
||||||
|
Extract agent's recommendation from investment plan and final decision.
|
||||||
|
|
||||||
|
Returns: "buy", "sell", "hold", or "unclear"
|
||||||
|
"""
|
||||||
|
combined_text = (investment_plan + " " + final_decision).lower()
|
||||||
|
|
||||||
|
# Check for clear buy/sell/hold signals
|
||||||
|
if re.search(r'\b(strong buy|buy|long position|bullish|recommend buying)\b', combined_text):
|
||||||
|
return "buy"
|
||||||
|
elif re.search(r'\b(strong sell|sell|short position|bearish|recommend selling)\b', combined_text):
|
||||||
|
return "sell"
|
||||||
|
elif re.search(r'\b(hold|neutral|wait|avoid)\b', combined_text):
|
||||||
|
return "hold"
|
||||||
|
else:
|
||||||
|
return "unclear"
|
||||||
|
|
||||||
|
def _compute_correctness(self, recommendation: str, actual_direction: str) -> bool:
|
||||||
|
"""
|
||||||
|
Determine if the agent's recommendation matched the actual outcome.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
recommendation: "buy", "sell", "hold", or "unclear"
|
||||||
|
actual_direction: "up" or "down"
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
True if agent was correct, False otherwise
|
||||||
|
"""
|
||||||
|
if recommendation == "buy" and actual_direction == "up":
|
||||||
|
return True
|
||||||
|
elif recommendation == "sell" and actual_direction == "down":
|
||||||
|
return True
|
||||||
|
elif recommendation == "hold":
|
||||||
|
# Hold is considered neutral, so not correct/incorrect for big moves
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _format_signals(self, signals: Dict[str, Any]) -> str:
|
||||||
|
"""Format structured signals for display."""
|
||||||
|
lines = []
|
||||||
|
for key, value in signals.items():
|
||||||
|
lines.append(f" - {key}: {value}")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
def _sample_high_movers(
|
||||||
|
self,
|
||||||
|
high_movers: List[Dict[str, Any]],
|
||||||
|
max_samples: int,
|
||||||
|
strategy: str
|
||||||
|
) -> List[Dict[str, Any]]:
|
||||||
|
"""
|
||||||
|
Sample high movers based on strategy to reduce analysis time.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
high_movers: List of all high movers found
|
||||||
|
max_samples: Maximum number to return
|
||||||
|
strategy: Sampling strategy (diverse, largest, recent, random)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Sampled list of high movers
|
||||||
|
"""
|
||||||
|
import random
|
||||||
|
|
||||||
|
if len(high_movers) <= max_samples:
|
||||||
|
return high_movers
|
||||||
|
|
||||||
|
if strategy == "diverse":
|
||||||
|
# Get balanced mix of up/down moves across different magnitudes
|
||||||
|
up_moves = [m for m in high_movers if m['direction'] == 'up']
|
||||||
|
down_moves = [m for m in high_movers if m['direction'] == 'down']
|
||||||
|
|
||||||
|
# Sort each by magnitude
|
||||||
|
up_moves.sort(key=lambda x: abs(x['move_pct']), reverse=True)
|
||||||
|
down_moves.sort(key=lambda x: abs(x['move_pct']), reverse=True)
|
||||||
|
|
||||||
|
# Take half from each direction (or proportional if imbalanced)
|
||||||
|
up_count = min(len(up_moves), max_samples // 2)
|
||||||
|
down_count = min(len(down_moves), max_samples - up_count)
|
||||||
|
|
||||||
|
# If one side has fewer, take more from the other
|
||||||
|
if up_count < max_samples // 2:
|
||||||
|
down_count = min(len(down_moves), max_samples - up_count)
|
||||||
|
if down_count < max_samples - up_count:
|
||||||
|
up_count = min(len(up_moves), max_samples - down_count)
|
||||||
|
|
||||||
|
# Stratified sampling - take from different magnitude ranges
|
||||||
|
def stratified_sample(moves, count):
|
||||||
|
if len(moves) <= count:
|
||||||
|
return moves
|
||||||
|
|
||||||
|
# Divide into 3 buckets by magnitude
|
||||||
|
bucket_size = len(moves) // 3
|
||||||
|
large = moves[:bucket_size]
|
||||||
|
medium = moves[bucket_size:bucket_size*2]
|
||||||
|
small = moves[bucket_size*2:]
|
||||||
|
|
||||||
|
# Sample proportionally from each bucket
|
||||||
|
samples = []
|
||||||
|
samples.extend(large[:count // 3])
|
||||||
|
samples.extend(medium[:count // 3])
|
||||||
|
samples.extend(small[:count - (2 * (count // 3))])
|
||||||
|
return samples
|
||||||
|
|
||||||
|
sampled = []
|
||||||
|
sampled.extend(stratified_sample(up_moves, up_count))
|
||||||
|
sampled.extend(stratified_sample(down_moves, down_count))
|
||||||
|
|
||||||
|
return sampled
|
||||||
|
|
||||||
|
elif strategy == "largest":
|
||||||
|
# Take the largest absolute moves
|
||||||
|
sorted_movers = sorted(high_movers, key=lambda x: abs(x['move_pct']), reverse=True)
|
||||||
|
return sorted_movers[:max_samples]
|
||||||
|
|
||||||
|
elif strategy == "recent":
|
||||||
|
# Take the most recent moves
|
||||||
|
sorted_movers = sorted(high_movers, key=lambda x: x['move_end_date'], reverse=True)
|
||||||
|
return sorted_movers[:max_samples]
|
||||||
|
|
||||||
|
elif strategy == "random":
|
||||||
|
# Random sampling
|
||||||
|
return random.sample(high_movers, max_samples)
|
||||||
|
|
||||||
|
else:
|
||||||
|
# Default to diverse
|
||||||
|
return self._sample_high_movers(high_movers, max_samples, "diverse")
|
||||||
|
|
||||||
def _get_stock_data_for_period(self, ticker: str, date: str) -> Dict[str, str]:
|
def _get_stock_data_for_period(self, ticker: str, date: str) -> Dict[str, str]:
|
||||||
"""Gather all available data for a stock on a specific date.
|
"""Gather all available data for a stock on a specific date.
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,6 +1,8 @@
|
||||||
|
import os
|
||||||
import chromadb
|
import chromadb
|
||||||
from chromadb.config import Settings
|
from chromadb.config import Settings
|
||||||
from openai import OpenAI
|
from openai import OpenAI
|
||||||
|
from typing import List, Dict, Any, Optional, Tuple
|
||||||
|
|
||||||
|
|
||||||
class FinancialSituationMemory:
|
class FinancialSituationMemory:
|
||||||
|
|
@ -15,8 +17,18 @@ class FinancialSituationMemory:
|
||||||
self.embedding_backend = "https://api.openai.com/v1"
|
self.embedding_backend = "https://api.openai.com/v1"
|
||||||
self.embedding = "text-embedding-3-small"
|
self.embedding = "text-embedding-3-small"
|
||||||
|
|
||||||
self.client = OpenAI(base_url=self.embedding_backend)
|
self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
||||||
self.chroma_client = chromadb.Client(Settings(allow_reset=True))
|
|
||||||
|
# Use persistent storage in project directory
|
||||||
|
persist_directory = os.path.join(config.get("project_dir", "."), "memory_db")
|
||||||
|
os.makedirs(persist_directory, exist_ok=True)
|
||||||
|
|
||||||
|
self.chroma_client = chromadb.PersistentClient(path=persist_directory)
|
||||||
|
|
||||||
|
# Get or create collection
|
||||||
|
try:
|
||||||
|
self.situation_collection = self.chroma_client.get_collection(name=name)
|
||||||
|
except:
|
||||||
self.situation_collection = self.chroma_client.create_collection(name=name)
|
self.situation_collection = self.chroma_client.create_collection(name=name)
|
||||||
|
|
||||||
def get_embedding(self, text):
|
def get_embedding(self, text):
|
||||||
|
|
@ -50,6 +62,81 @@ class FinancialSituationMemory:
|
||||||
ids=ids,
|
ids=ids,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def add_situations_with_metadata(
    self,
    situations_and_outcomes: List[Tuple[str, str, Dict[str, Any]]]
):
    """Store (situation, recommendation, metadata) triples in the collection.

    Args:
        situations_and_outcomes: Tuples of (situation_text, recommendation,
            metadata) where metadata carries the learning-system fields
            (ticker, analysis_date, days_before_move, move_pct,
            move_direction, agent_recommendation, was_correct and an
            optional structured_signals dict).

    IDs continue from the current collection size. The recommendation is
    folded into the metadata, which is then sanitized so ChromaDB only
    receives str/int/float/bool values.
    """
    # New ids continue after the existing entries.
    base_id = self.situation_collection.count()

    documents: List[str] = []
    doc_ids: List[str] = []
    vectors = []
    doc_metadatas: List[Dict[str, Any]] = []

    for index, (situation, recommendation, metadata) in enumerate(situations_and_outcomes):
        documents.append(situation)
        doc_ids.append(str(base_id + index))
        vectors.append(self.get_embedding(situation))

        # Merge the recommendation into the metadata (metadata keys win on
        # collision, matching dict.update semantics), then sanitize for
        # ChromaDB's primitive-values-only constraint.
        merged = {"recommendation": recommendation, **metadata}
        doc_metadatas.append(self._sanitize_metadata(merged))

    self.situation_collection.add(
        documents=documents,
        metadatas=doc_metadatas,
        embeddings=vectors,
        ids=doc_ids,
    )
|
||||||
|
|
||||||
|
def _sanitize_metadata(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
|
||||||
|
"""
|
||||||
|
Sanitize metadata for ChromaDB compatibility.
|
||||||
|
ChromaDB requires metadata values to be str, int, float, or bool.
|
||||||
|
Nested dicts are flattened with dot notation.
|
||||||
|
"""
|
||||||
|
sanitized = {}
|
||||||
|
|
||||||
|
for key, value in metadata.items():
|
||||||
|
if isinstance(value, dict):
|
||||||
|
# Flatten nested dicts
|
||||||
|
for nested_key, nested_value in value.items():
|
||||||
|
flat_key = f"{key}.{nested_key}"
|
||||||
|
if isinstance(nested_value, (str, int, float, bool, type(None))):
|
||||||
|
sanitized[flat_key] = nested_value if nested_value is not None else "none"
|
||||||
|
elif isinstance(value, (str, int, float, bool, type(None))):
|
||||||
|
sanitized[key] = value if value is not None else "none"
|
||||||
|
else:
|
||||||
|
# Convert other types to string
|
||||||
|
sanitized[key] = str(value)
|
||||||
|
|
||||||
|
return sanitized
|
||||||
|
|
||||||
def get_memories(self, current_situation, n_matches=1):
|
def get_memories(self, current_situation, n_matches=1):
|
||||||
"""Find matching recommendations using OpenAI embeddings"""
|
"""Find matching recommendations using OpenAI embeddings"""
|
||||||
query_embedding = self.get_embedding(current_situation)
|
query_embedding = self.get_embedding(current_situation)
|
||||||
|
|
@ -72,6 +159,133 @@ class FinancialSituationMemory:
|
||||||
|
|
||||||
return matched_results
|
return matched_results
|
||||||
|
|
||||||
|
def get_memories_hybrid(
    self,
    current_situation: str,
    signal_filters: Optional[Dict[str, Any]] = None,
    n_matches: int = 3,
    min_similarity: float = 0.5
) -> List[Dict[str, Any]]:
    """Hybrid retrieval: metadata filtering plus embedding similarity.

    Args:
        current_situation: Text description of the current market situation.
        signal_filters: Optional structured-signal filters passed through to
            the collection query (dot notation for flattened fields, e.g.
            {"structured_signals.unusual_volume": True}).
        n_matches: Number of results to return.
        min_similarity: Minimum similarity (0-1) for a result to be kept.

    Returns:
        List of dicts with matched_situation, recommendation,
        similarity_score, the full metadata, and convenience copies of
        ticker / move_pct / move_direction / was_correct / days_before_move.
    """
    query_embedding = self.get_embedding(current_situation)

    # Over-fetch so the similarity threshold still leaves enough candidates;
    # the final list is trimmed to n_matches at the end.
    query_params: Dict[str, Any] = {
        "query_embeddings": [query_embedding],
        "n_results": min(n_matches * 3, 100),
        "include": ["metadatas", "documents", "distances"],
    }
    if signal_filters:
        # NOTE(review): filters are forwarded verbatim as a Chroma `where`
        # clause; multi-key filters may need an explicit $and — confirm.
        query_params["where"] = dict(signal_filters)

    results = self.situation_collection.query(**query_params)

    documents = results["documents"][0]
    distances = results["distances"][0]
    metadatas = results["metadatas"][0]

    matches: List[Dict[str, Any]] = []
    for document, distance, metadata in zip(documents, distances, metadatas):
        # Chroma returns distances; convert to a 0-1 similarity score.
        score = 1 - distance
        if score < min_similarity:
            continue

        matches.append({
            "matched_situation": document,
            "recommendation": metadata.get("recommendation", ""),
            "similarity_score": score,
            "metadata": metadata,
            # Convenience copies of the frequently-used metadata fields.
            "ticker": metadata.get("ticker", ""),
            "move_pct": metadata.get("move_pct", 0),
            "move_direction": metadata.get("move_direction", ""),
            "was_correct": metadata.get("was_correct", False),
            "days_before_move": metadata.get("days_before_move", 0),
        })

    return matches[:n_matches]
|
||||||
|
|
||||||
|
def get_statistics(self) -> Dict[str, Any]:
    """Summarize the memory bank: size, accuracy, average move, signals.

    Returns:
        Dict with total_memories, accuracy_rate (percent of memories whose
        was_correct equals True), avg_move_pct (mean of stored move_pct
        values) and signal_distribution (value counts per flattened
        "structured_signals.*" metadata key).
    """
    total = self.situation_collection.count()

    # Empty bank: return the zeroed summary rather than dividing by zero.
    if total == 0:
        return {
            "total_memories": 0,
            "accuracy_rate": 0.0,
            "avg_move_pct": 0.0,
            "signal_distribution": {}
        }

    stored = self.situation_collection.get(
        include=["metadatas"]
    )
    metadatas = stored["metadatas"]

    # Equality (not identity) check matches legacy metadata encodings.
    hits = sum(1 for meta in metadatas if meta.get("was_correct") == True)
    accuracy = (hits / total * 100) if total > 0 else 0

    moves = [meta.get("move_pct", 0) for meta in metadatas if "move_pct" in meta]
    mean_move = sum(moves) / len(moves) if moves else 0

    # Count how often each flattened signal takes each value.
    prefix = "structured_signals."
    distribution: Dict[str, Dict[Any, int]] = {}
    for meta in metadatas:
        for key, value in meta.items():
            if not key.startswith(prefix):
                continue
            counts = distribution.setdefault(key[len(prefix):], {})
            counts[value] = counts.get(value, 0) + 1

    return {
        "total_memories": total,
        "accuracy_rate": accuracy,
        "avg_move_pct": mean_move,
        "signal_distribution": distribution
    }
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
# Example usage
|
# Example usage
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,176 @@
|
||||||
|
"""
|
||||||
|
Alpha Vantage Analyst Rating Changes Detection
|
||||||
|
Tracks recent analyst upgrades/downgrades and price target changes
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import requests
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from typing import Annotated, List
|
||||||
|
|
||||||
|
|
||||||
|
def get_analyst_rating_changes(
    lookback_days: Annotated[int, "Number of days to look back for rating changes"] = 7,
    change_types: Annotated[List[str], "Types of changes to track"] = None,
    top_n: Annotated[int, "Number of top results to return"] = 20,
) -> str:
    """
    Track recent analyst upgrades/downgrades and rating changes.

    Fresh analyst actions (<72 hours) are strong catalysts for short-term moves.

    Args:
        lookback_days: Number of days to look back (default 7)
        change_types: Types of changes ["upgrade", "downgrade", "initiated", "reiterated"]
        top_n: Maximum number of results to return

    Returns:
        Formatted markdown report of recent analyst rating changes, or an
        error string if the API key is missing or the request fails.
    """
    api_key = os.getenv("ALPHA_VANTAGE_API_KEY")
    if not api_key:
        return "Error: ALPHA_VANTAGE_API_KEY not set in environment variables"

    if change_types is None:
        change_types = ["upgrade", "downgrade", "initiated"]

    # Note: Alpha Vantage doesn't have a direct analyst ratings endpoint in the
    # free tier, so we mine the news sentiment feed for analyst-action keywords.
    # For production, consider Financial Modeling Prep or Benzinga API.
    url = "https://www.alphavantage.co/query"

    try:
        # Get market news which includes analyst actions
        params = {
            "function": "NEWS_SENTIMENT",
            "topics": "earnings,technology,finance",
            "sort": "LATEST",
            "limit": 200,  # Cast a wide net of news to find analyst actions
            "apikey": api_key,
        }

        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        data = response.json()

        if "Note" in data:
            return f"API Rate Limit: {data['Note']}"

        if "Error Message" in data:
            return f"API Error: {data['Error Message']}"

        # Parse news for analyst actions
        analyst_changes = []
        cutoff_date = datetime.now() - timedelta(days=lookback_days)

        if "feed" in data:
            for article in data["feed"]:
                try:
                    # BUG FIX: articles without a parseable timestamp used to
                    # leave `article_date` undefined; the later freshness
                    # calculation then raised NameError, which escaped the
                    # narrow (ValueError, KeyError) handler and aborted the
                    # whole scan. Skip such articles instead.
                    time_published = article.get("time_published", "")
                    if not time_published:
                        continue
                    article_date = datetime.strptime(time_published[:8], "%Y%m%d")
                    if article_date < cutoff_date:
                        continue

                    title = article.get("title", "").lower()
                    summary = article.get("summary", "").lower()
                    text = f"{title} {summary}"

                    # Look for analyst action keywords
                    is_upgrade = any(word in text for word in ["upgrade", "upgrades", "raised", "raises rating"])
                    is_downgrade = any(word in text for word in ["downgrade", "downgrades", "lowered", "lowers rating"])
                    is_initiated = any(word in text for word in ["initiates", "initiated", "coverage", "starts coverage"])
                    is_reiterated = any(word in text for word in ["reiterates", "reiterated", "maintains", "confirms"])

                    # Extract tickers from article
                    tickers = []
                    if "ticker_sentiment" in article:
                        for ticker_data in article["ticker_sentiment"]:
                            ticker = ticker_data.get("ticker", "")
                            if ticker and len(ticker) <= 5:  # Valid ticker format
                                tickers.append(ticker)

                    # Classify action type (first match wins; order = priority)
                    action_type = None
                    if is_upgrade and "upgrade" in change_types:
                        action_type = "upgrade"
                    elif is_downgrade and "downgrade" in change_types:
                        action_type = "downgrade"
                    elif is_initiated and "initiated" in change_types:
                        action_type = "initiated"
                    elif is_reiterated and "reiterated" in change_types:
                        action_type = "reiterated"

                    if action_type and tickers:
                        # Calculate freshness (hours since published)
                        hours_old = (datetime.now() - article_date).total_seconds() / 3600

                        for ticker in tickers[:3]:  # Max 3 tickers per article
                            analyst_changes.append({
                                "ticker": ticker,
                                "action": action_type,
                                "date": time_published[:8],
                                "hours_old": int(hours_old),
                                "headline": article.get("title", "")[:100],
                                "source": article.get("source", "Unknown"),
                                "url": article.get("url", ""),
                            })

                except (ValueError, KeyError):
                    # Malformed article entry - skip it rather than fail the scan
                    continue

        # Remove duplicates (keep most recent per ticker)
        seen_tickers = {}
        for change in analyst_changes:
            ticker = change["ticker"]
            if ticker not in seen_tickers or change["hours_old"] < seen_tickers[ticker]["hours_old"]:
                seen_tickers[ticker] = change

        # Sort by freshness (most recent first)
        sorted_changes = sorted(
            seen_tickers.values(),
            key=lambda x: x["hours_old"]
        )[:top_n]

        # Format output
        if not sorted_changes:
            return f"No analyst rating changes found in the last {lookback_days} days"

        report = f"# Analyst Rating Changes - Last {lookback_days} Days\n\n"
        report += f"**Tracking**: {', '.join(change_types)}\n\n"
        report += f"**Found**: {len(sorted_changes)} recent analyst actions\n\n"
        report += "## Recent Analyst Actions\n\n"
        report += "| Ticker | Action | Source | Hours Ago | Headline |\n"
        report += "|--------|--------|--------|-----------|----------|\n"

        for change in sorted_changes:
            freshness = "🔥 FRESH" if change["hours_old"] < 24 else "🟢 Recent" if change["hours_old"] < 72 else "Older"

            report += f"| {change['ticker']} | "
            report += f"{change['action'].upper()} | "
            report += f"{change['source']} | "
            report += f"{change['hours_old']}h ({freshness}) | "
            report += f"{change['headline']} |\n"

        report += "\n\n## Freshness Legend\n\n"
        report += "- 🔥 **FRESH** (<24h): Highest impact, market may not have fully reacted\n"
        report += "- 🟢 **Recent** (24-72h): Still relevant for short-term trading\n"
        report += "- **Older** (>72h): Lower priority, likely partially priced in\n"

        return report

    except requests.exceptions.RequestException as e:
        return f"Error fetching analyst rating changes: {str(e)}"
    except Exception as e:
        return f"Unexpected error in analyst rating detection: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
def get_alpha_vantage_analyst_changes(
    lookback_days: int = 7,
    change_types: List[str] = None,
    top_n: int = 20,
) -> str:
    """Registry-facing alias: delegates to get_analyst_rating_changes."""
    return get_analyst_rating_changes(
        lookback_days=lookback_days,
        change_types=change_types,
        top_n=top_n,
    )
|
||||||
|
|
@ -0,0 +1,162 @@
|
||||||
|
"""
|
||||||
|
Alpha Vantage Unusual Volume Detection
|
||||||
|
Identifies stocks with unusual volume but minimal price movement (accumulation signal)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import requests
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from typing import Annotated, List, Dict
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
|
||||||
|
def get_unusual_volume(
    date: Annotated[str, "Analysis date in yyyy-mm-dd format"] = None,
    min_volume_multiple: Annotated[float, "Minimum volume multiple vs average"] = 3.0,
    max_price_change: Annotated[float, "Maximum price change percentage"] = 5.0,
    top_n: Annotated[int, "Number of top results to return"] = 20,
) -> str:
    """
    Find stocks with unusual volume but minimal price movement.

    This is a strong accumulation signal - smart money buying before a breakout.

    Args:
        date: Analysis date in yyyy-mm-dd format (used only to label the report)
        min_volume_multiple: Minimum volume multiple vs 30-day average.
            NOTE(review): currently unused - the TOP_GAINERS_LOSERS endpoint
            exposes no average-volume figure to compare against. Kept for
            interface compatibility; confirm before relying on it.
        max_price_change: Maximum absolute price change percentage
        top_n: Number of top results to return

    Returns:
        Formatted markdown report of stocks with unusual volume, or an error
        string if the API key is missing or the request fails.
    """
    api_key = os.getenv("ALPHA_VANTAGE_API_KEY")
    if not api_key:
        return "Error: ALPHA_VANTAGE_API_KEY not set in environment variables"

    # Alpha Vantage has no direct "unusual volume" endpoint, so we approximate:
    # pull the market movers list and filter for high volume with small moves.
    url = "https://www.alphavantage.co/query"

    def _parse_mover(stock):
        """Extract (ticker, abs price change %, volume, price) from one API row, or None if malformed."""
        try:
            ticker = stock.get("ticker", "")
            price_change = abs(float(stock.get("change_percentage", "0").replace("%", "")))
            volume = int(stock.get("volume", 0))
            price = float(stock.get("price", 0))
            return ticker, price_change, volume, price
        except (ValueError, KeyError):
            return None

    try:
        # Get top active stocks by volume
        params = {
            "function": "TOP_GAINERS_LOSERS",
            "apikey": api_key,
        }

        response = requests.get(url, params=params, timeout=30)
        response.raise_for_status()
        data = response.json()

        if "Note" in data:
            return f"API Rate Limit: {data['Note']}"

        if "Error Message" in data:
            return f"API Error: {data['Error Message']}"

        unusual_candidates = []

        # Most actively traded stocks carry high absolute volume; a small price
        # change on top of that is our accumulation proxy.
        if "most_actively_traded" in data:
            for stock in data["most_actively_traded"][:50]:  # Check top 50
                parsed = _parse_mover(stock)
                if parsed is None:
                    continue
                ticker, price_change, volume, price = parsed

                # Filter: high volume but low price change (accumulation signal)
                if price_change <= max_price_change and volume > 0:
                    unusual_candidates.append({
                        "ticker": ticker,
                        "volume": volume,
                        "price": price,
                        "price_change_pct": price_change,
                        "signal": "accumulation" if price_change < 2.0 else "moderate_activity"
                    })

        # Gainers/losers with volume indicate strong conviction in the move.
        for category in ["top_gainers", "top_losers"]:
            if category in data:
                for stock in data[category][:30]:
                    parsed = _parse_mover(stock)
                    if parsed is None:
                        continue
                    ticker, price_change, volume, price = parsed

                    if volume > 0:
                        unusual_candidates.append({
                            "ticker": ticker,
                            "volume": volume,
                            "price": price,
                            "price_change_pct": price_change,
                            "signal": "breakout" if price_change > 5.0 else "building_momentum"
                        })

        # Remove duplicates (keep the highest-volume entry per ticker)
        seen_tickers = {}
        for candidate in unusual_candidates:
            ticker = candidate["ticker"]
            if ticker not in seen_tickers or candidate["volume"] > seen_tickers[ticker]["volume"]:
                seen_tickers[ticker] = candidate

        # Sort by volume (highest first) and take top N
        sorted_candidates = sorted(
            seen_tickers.values(),
            key=lambda x: x["volume"],
            reverse=True
        )[:top_n]

        # Format output
        if not sorted_candidates:
            return "No stocks found with unusual volume patterns matching criteria"

        report = f"# Unusual Volume Detected - {date or 'Latest'}\n\n"
        report += f"**Criteria**: Volume signal detected, Price Change <{max_price_change}% preferred\n\n"
        report += f"**Found**: {len(sorted_candidates)} stocks with unusual activity\n\n"
        report += "## Top Unusual Volume Candidates\n\n"
        report += "| Ticker | Price | Volume | Price Change % | Signal |\n"
        report += "|--------|-------|--------|----------------|--------|\n"

        for candidate in sorted_candidates:
            report += f"| {candidate['ticker']} | "
            report += f"${candidate['price']:.2f} | "
            report += f"{candidate['volume']:,} | "
            report += f"{candidate['price_change_pct']:.2f}% | "
            report += f"{candidate['signal']} |\n"

        report += "\n\n## Signal Definitions\n\n"
        report += "- **accumulation**: High volume, minimal price change (<2%) - Smart money building position\n"
        report += "- **moderate_activity**: Elevated volume with 2-5% price change - Early momentum\n"
        report += "- **building_momentum**: Losers/Gainers with strong volume - Conviction in direction\n"
        report += "- **breakout**: Strong price move (>5%) on high volume - Already in motion\n"

        return report

    except requests.exceptions.RequestException as e:
        return f"Error fetching unusual volume data: {str(e)}"
    except Exception as e:
        return f"Unexpected error in unusual volume detection: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
def get_alpha_vantage_unusual_volume(
    date: str = None,
    min_volume_multiple: float = 3.0,
    max_price_change: float = 5.0,
    top_n: int = 20,
) -> str:
    """Registry-facing alias: delegates to get_unusual_volume."""
    return get_unusual_volume(
        date=date,
        min_volume_multiple=min_volume_multiple,
        max_price_change=max_price_change,
        top_n=top_n,
    )
|
||||||
|
|
@ -0,0 +1,222 @@
|
||||||
|
"""
|
||||||
|
Finviz + Yahoo Finance Hybrid - Short Interest Discovery
|
||||||
|
Uses Finviz to discover tickers with high short interest, then Yahoo Finance for exact data
|
||||||
|
"""
|
||||||
|
|
||||||
|
import requests
|
||||||
|
from bs4 import BeautifulSoup
|
||||||
|
from typing import Annotated
|
||||||
|
import re
|
||||||
|
import yfinance as yf
|
||||||
|
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||||
|
|
||||||
|
|
||||||
|
def get_short_interest(
    min_short_interest_pct: Annotated[float, "Minimum short interest % of float"] = 10.0,
    min_days_to_cover: Annotated[float, "Minimum days to cover ratio"] = 2.0,
    top_n: Annotated[int, "Number of top results to return"] = 20,
) -> str:
    """
    Discover stocks with high short interest using Finviz + Yahoo Finance.

    Strategy: Finviz filters stocks by short interest (discovery),
    then Yahoo Finance provides exact short % data.

    This is a TRUE DISCOVERY tool - finds stocks we may not know about,
    not checking a predefined watchlist.

    Args:
        min_short_interest_pct: Minimum short interest as % of float
        min_days_to_cover: Minimum days to cover ratio.
            NOTE(review): accepted but never referenced in the body below -
            no days-to-cover filtering actually happens; confirm intent.
        top_n: Number of top results to return

    Returns:
        Formatted markdown report of discovered high short interest stocks
    """
    try:
        # Step 1: Use Finviz screener to DISCOVER tickers with high short interest
        print(f" Discovering tickers with short interest >{min_short_interest_pct}% from Finviz...")

        # Determine Finviz filter: Finviz only offers bucketed thresholds
        # (>5/10/15/20%), so we pick the coarsest bucket at or below the request
        # and re-verify the exact percentage with Yahoo Finance in step 2.
        if min_short_interest_pct >= 20:
            short_filter = "sh_short_o20"
        elif min_short_interest_pct >= 15:
            short_filter = "sh_short_o15"
        elif min_short_interest_pct >= 10:
            short_filter = "sh_short_o10"
        else:
            short_filter = "sh_short_o5"

        # Build Finviz URL (v=152 is simple view)
        base_url = f"https://finviz.com/screener.ashx?v=152&f={short_filter}"

        # Browser-like headers; Finviz rejects default python-requests UAs.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
            'Accept': 'text/html',
        }

        discovered_tickers = []

        # Scrape first 3 pages (60 stocks)
        for page_num in range(1, 4):
            if page_num == 1:
                url = base_url
            else:
                # Finviz paginates 20 rows per page via the 1-based &r= offset
                offset = (page_num - 1) * 20 + 1
                url = f"{base_url}&r={offset}"

            response = requests.get(url, headers=headers, timeout=30)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # Find ticker links in the page (screener rows link to quote.ashx?t=<TICKER>)
            ticker_links = soup.find_all('a', href=re.compile(r'quote\.ashx\?t='))

            for link in ticker_links:
                ticker = link.get_text(strip=True)
                # Validate it's a ticker (1-5 uppercase letters)
                if re.match(r'^[A-Z]{1,5}$', ticker) and ticker not in discovered_tickers:
                    discovered_tickers.append(ticker)

        if not discovered_tickers:
            return f"No stocks discovered with short interest >{min_short_interest_pct}% on Finviz."

        print(f" Discovered {len(discovered_tickers)} tickers from Finviz")
        print(f" Fetching detailed short interest data from Yahoo Finance...")

        # Step 2: Use Yahoo Finance to get EXACT short interest data for discovered tickers
        def fetch_short_data(ticker):
            # Returns a candidate dict for a confirmed high-short-interest ticker,
            # or None when data is missing, below threshold, or the fetch fails.
            try:
                stock = yf.Ticker(ticker)
                info = stock.info

                # Get short interest data (fractional value, e.g. 0.25 == 25%);
                # falls back to shares-outstanding-based field when float-based
                # figure is absent.
                short_pct = info.get('shortPercentOfFloat', info.get('sharesPercentSharesOut', 0))
                if short_pct and isinstance(short_pct, (int, float)):
                    short_pct = short_pct * 100  # Convert to percentage
                else:
                    return None

                # Verify it meets criteria (Finviz filter might be outdated)
                if short_pct >= min_short_interest_pct:
                    price = info.get('currentPrice', info.get('regularMarketPrice', 0))
                    market_cap = info.get('marketCap', 0)
                    volume = info.get('volume', info.get('regularMarketVolume', 0))

                    # Categorize squeeze potential
                    if short_pct >= 30:
                        signal = "extreme_squeeze_risk"
                    elif short_pct >= 20:
                        signal = "high_squeeze_potential"
                    elif short_pct >= 15:
                        signal = "moderate_squeeze_potential"
                    else:
                        signal = "low_squeeze_potential"

                    return {
                        "ticker": ticker,
                        "price": price,
                        "market_cap": market_cap,
                        "volume": volume,
                        "short_interest_pct": short_pct,
                        "signal": signal,
                    }
            except Exception:
                # Best-effort per ticker: any Yahoo failure just drops the ticker
                return None

        # Fetch data in parallel (faster); 10 workers keeps request rate modest
        all_candidates = []
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = {executor.submit(fetch_short_data, ticker): ticker for ticker in discovered_tickers}

            for future in as_completed(futures):
                result = future.result()
                if result:
                    all_candidates.append(result)

        if not all_candidates:
            return f"No stocks with verified short interest >{min_short_interest_pct}% (Finviz found {len(discovered_tickers)} tickers but Yahoo Finance data didn't confirm)."

        # Sort by short interest percentage (highest first)
        sorted_candidates = sorted(
            all_candidates,
            key=lambda x: x["short_interest_pct"],
            reverse=True
        )[:top_n]

        # Format output as a markdown table; format_market_cap is a sibling
        # helper defined later in this module.
        report = f"# Discovered High Short Interest Stocks\n\n"
        report += f"**Criteria**: Short Interest >{min_short_interest_pct}%\n"
        report += f"**Data Source**: Finviz Screener (Web Scraping)\n"
        report += f"**Total Discovered**: {len(all_candidates)} stocks\n\n"
        report += f"**Top {len(sorted_candidates)} Candidates**:\n\n"
        report += "| Ticker | Price | Market Cap | Volume | Short % | Signal |\n"
        report += "|--------|-------|------------|--------|---------|--------|\n"

        for candidate in sorted_candidates:
            market_cap_str = format_market_cap(candidate['market_cap'])
            report += f"| {candidate['ticker']} | "
            report += f"${candidate['price']:.2f} | "
            report += f"{market_cap_str} | "
            report += f"{candidate['volume']:,} | "
            report += f"{candidate['short_interest_pct']:.1f}% | "
            report += f"{candidate['signal']} |\n"

        report += "\n\n## Signal Definitions\n\n"
        report += "- **extreme_squeeze_risk**: Short interest >30% - Very high squeeze potential\n"
        report += "- **high_squeeze_potential**: Short interest 20-30% - High squeeze risk\n"
        report += "- **moderate_squeeze_potential**: Short interest 15-20% - Moderate squeeze risk\n"
        report += "- **low_squeeze_potential**: Short interest 10-15% - Lower squeeze risk\n\n"
        report += "**Note**: High short interest alone doesn't guarantee a squeeze. Look for positive catalysts.\n"

        return report

    except requests.exceptions.RequestException as e:
        return f"Error scraping Finviz: {str(e)}"
    except Exception as e:
        return f"Unexpected error discovering short interest stocks: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
def parse_market_cap(market_cap_text: str) -> float:
    """Parse market cap from Finviz format (e.g., '1.23B', '456M') into a raw float."""
    # Finviz uses '-' (or an empty cell) when the figure is unavailable.
    if not market_cap_text or market_cap_text == '-':
        return 0.0

    normalized = market_cap_text.upper().strip()

    # Leading numeric part with an optional B/M/K suffix.
    parsed = re.match(r'([0-9.]+)([BMK])?', normalized)
    if parsed is None:
        return 0.0

    value = float(parsed.group(1))
    scale = {'B': 1_000_000_000, 'M': 1_000_000, 'K': 1_000}.get(parsed.group(2), 1)
    return value * scale
|
||||||
|
|
||||||
|
|
||||||
|
def format_market_cap(market_cap: float) -> str:
    """Format a raw market-cap value as a human-readable dollar string."""
    billion = 1_000_000_000
    million = 1_000_000
    # Two-decimal B/M abbreviations for large caps, comma-grouped dollars otherwise.
    if market_cap >= billion:
        return f"${market_cap / billion:.2f}B"
    if market_cap >= million:
        return f"${market_cap / million:.2f}M"
    return f"${market_cap:,.0f}"
|
||||||
|
|
||||||
|
|
||||||
|
def get_finviz_short_interest(
    min_short_interest_pct: float = 10.0,
    min_days_to_cover: float = 2.0,
    top_n: int = 20,
) -> str:
    """Registry-facing alias: delegates to get_short_interest."""
    return get_short_interest(
        min_short_interest_pct=min_short_interest_pct,
        min_days_to_cover=min_days_to_cover,
        top_n=top_n,
    )
|
||||||
|
|
@ -1,3 +1,4 @@
|
||||||
|
import os
|
||||||
from openai import OpenAI
|
from openai import OpenAI
|
||||||
from .config import get_config
|
from .config import get_config
|
||||||
|
|
||||||
|
|
@ -20,8 +21,7 @@ def get_stock_news_openai(query=None, ticker=None, start_date=None, end_date=Non
|
||||||
else:
|
else:
|
||||||
raise ValueError("Must provide either 'query' or 'ticker' parameter")
|
raise ValueError("Must provide either 'query' or 'ticker' parameter")
|
||||||
|
|
||||||
config = get_config()
|
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
||||||
client = OpenAI(base_url=config["backend_url"])
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
response = client.responses.create(
|
response = client.responses.create(
|
||||||
|
|
@ -35,14 +35,13 @@ def get_stock_news_openai(query=None, ticker=None, start_date=None, end_date=Non
|
||||||
|
|
||||||
|
|
||||||
def get_global_news_openai(date, look_back_days=7, limit=5):
|
def get_global_news_openai(date, look_back_days=7, limit=5):
|
||||||
config = get_config()
|
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
||||||
client = OpenAI(base_url=config["backend_url"])
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
response = client.responses.create(
|
response = client.responses.create(
|
||||||
model="gpt-4o-mini",
|
model="gpt-4o-mini",
|
||||||
tools=[{"type": "web_search_preview"}],
|
tools=[{"type": "web_search_preview"}],
|
||||||
input=f"Search global or macroeconomics news from {look_back_days} days before {date} to {date} that would be informative for trading purposes. Make sure you only get the data posted during that period. Limit the results to {limit} articles."
|
input=f"Search global or macroeconomics news from {look_back_days} days before {date} that would be informative for trading purposes. Make sure you only get the data posted during that period. Limit the results to {limit} articles."
|
||||||
)
|
)
|
||||||
return response.output_text
|
return response.output_text
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|
@ -50,8 +49,7 @@ def get_global_news_openai(date, look_back_days=7, limit=5):
|
||||||
|
|
||||||
|
|
||||||
def get_fundamentals_openai(ticker, curr_date):
|
def get_fundamentals_openai(ticker, curr_date):
|
||||||
config = get_config()
|
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
|
||||||
client = OpenAI(base_url=config["backend_url"])
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
response = client.responses.create(
|
response = client.responses.create(
|
||||||
|
|
|
||||||
|
|
@ -186,7 +186,7 @@ def get_reddit_global_news(
|
||||||
start_dt = curr_dt - timedelta(days=look_back_days)
|
start_dt = curr_dt - timedelta(days=look_back_days)
|
||||||
|
|
||||||
# Subreddits for global news
|
# Subreddits for global news
|
||||||
subreddits = "worldnews+economics+finance"
|
subreddits = "financenews+finance+economics+stockmarket"
|
||||||
|
|
||||||
posts = []
|
posts = []
|
||||||
subreddit = reddit.subreddit(subreddits)
|
subreddit = reddit.subreddit(subreddits)
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,177 @@
|
||||||
|
"""
|
||||||
|
Tradier API - Options Activity Detection
|
||||||
|
Detects unusual options activity indicating smart money positioning
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import requests
|
||||||
|
from datetime import datetime
|
||||||
|
from typing import Annotated, List
|
||||||
|
|
||||||
|
|
||||||
|
def get_unusual_options_activity(
    tickers: Annotated[List[str], "List of ticker symbols to analyze"] = None,
    date: Annotated[str, "Analysis date in yyyy-mm-dd format"] = None,
    min_volume_multiple: Annotated[float, "Minimum options volume multiple"] = 2.0,
    top_n: Annotated[int, "Number of top results to return"] = 20,
) -> str:
    """
    Detect unusual options activity for given tickers (confirmation signal).

    This function is designed as a CONFIRMATION tool - it analyzes options activity
    for candidates found by other discovery methods (unusual volume, analyst changes, etc.)

    Unusual options volume is a leading indicator of price moves - institutions
    positioning before catalysts.

    Args:
        tickers: List of ticker symbols to analyze (if None, returns error message)
        date: Analysis date in yyyy-mm-dd format (used only to label the report)
        min_volume_multiple: Minimum volume multiple vs 20-day average.
            NOTE(review): accepted but not referenced in the body - no
            historical-average comparison is performed; confirm intent.
        top_n: Number of top results to return

    Returns:
        Formatted markdown report of unusual options activity, or an error
        string when the API key or tickers are missing.
    """
    api_key = os.getenv("TRADIER_API_KEY")
    if not api_key:
        return "Error: TRADIER_API_KEY not set in environment variables. Get a free key at https://tradier.com"

    if not tickers or len(tickers) == 0:
        return "Error: No tickers provided. This function analyzes options activity for specific tickers found by other discovery methods."

    # Tradier API base URLs
    # Use sandbox for testing: https://sandbox.tradier.com
    # Use production: https://api.tradier.com
    base_url = os.getenv("TRADIER_BASE_URL", "https://sandbox.tradier.com")

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Accept": "application/json"
    }

    try:
        # Analyze options activity per ticker to confirm smart-money positioning
        # for candidates found by other discovery methods.
        unusual_activity = []

        for ticker in tickers:
            try:
                # Get options chain
                options_url = f"{base_url}/v1/markets/options/chains"
                params = {
                    "symbol": ticker,
                    "expiration": "",  # NOTE(review): relies on API behavior for empty expiration - confirm
                    "greeks": "true"
                }

                response = requests.get(options_url, headers=headers, params=params, timeout=10)

                if response.status_code == 200:
                    data = response.json()

                    if "options" in data and "option" in data["options"]:
                        options = data["options"]["option"]

                        # Aggregate call and put volume / open interest
                        total_call_volume = 0
                        total_put_volume = 0
                        total_call_oi = 0
                        total_put_oi = 0

                        for option in options[:50]:  # Check first 50 options
                            option_type = option.get("option_type", "")
                            # `or 0` guards against null volume/open_interest in
                            # the API payload, which would crash int(None).
                            volume = int(option.get("volume") or 0)
                            open_interest = int(option.get("open_interest") or 0)

                            if option_type == "call":
                                total_call_volume += volume
                                total_call_oi += open_interest
                            elif option_type == "put":
                                total_put_volume += volume
                                total_put_oi += open_interest

                        total_volume = total_call_volume + total_put_volume

                        if total_volume > 10000:  # Significant volume threshold
                            # BUG FIX: with zero call volume the old code set the
                            # put/call ratio to 0, which mislabeled pure put flow
                            # as "bullish_calls". Treat it as infinite so it lands
                            # in the bearish bucket instead.
                            if total_call_volume > 0:
                                put_call_ratio = total_put_volume / total_call_volume
                            else:
                                put_call_ratio = float("inf")

                            # Unusual signals:
                            # - Very low P/C ratio (<0.7) = Bullish (heavy call buying)
                            # - Very high P/C ratio (>1.5) = Bearish (heavy put buying)
                            # - High volume (>50k) = Strong conviction
                            signal = "neutral"
                            if put_call_ratio < 0.7:
                                signal = "bullish_calls"
                            elif put_call_ratio > 1.5:
                                signal = "bearish_puts"
                            elif total_volume > 50000:
                                signal = "high_volume"

                            unusual_activity.append({
                                "ticker": ticker,
                                "total_volume": total_volume,
                                "call_volume": total_call_volume,
                                "put_volume": total_put_volume,
                                "put_call_ratio": put_call_ratio,
                                "signal": signal,
                                "call_oi": total_call_oi,
                                "put_oi": total_put_oi,
                            })

            except Exception:
                # Skip this ticker on any per-ticker failure (bad JSON, timeout, ...)
                continue

        # Sort by total volume (highest first)
        sorted_activity = sorted(
            unusual_activity,
            key=lambda x: x["total_volume"],
            reverse=True
        )[:top_n]

        # Format output
        if not sorted_activity:
            return "No unusual options activity detected"

        report = f"# Unusual Options Activity - {date or 'Latest'}\n\n"
        report += f"**Criteria**: P/C Ratio extremes (<0.7 bullish, >1.5 bearish), High volume (>50k)\n\n"
        report += f"**Found**: {len(sorted_activity)} stocks with notable options activity\n\n"
        report += "## Top Options Activity\n\n"
        report += "| Ticker | Total Volume | Call Vol | Put Vol | P/C Ratio | Signal |\n"
        report += "|--------|--------------|----------|---------|-----------|--------|\n"

        for activity in sorted_activity:
            report += f"| {activity['ticker']} | "
            report += f"{activity['total_volume']:,} | "
            report += f"{activity['call_volume']:,} | "
            report += f"{activity['put_volume']:,} | "
            report += f"{activity['put_call_ratio']:.2f} | "
            report += f"{activity['signal']} |\n"

        report += "\n\n## Signal Definitions\n\n"
        report += "- **bullish_calls**: P/C ratio <0.7 - Heavy call buying, bullish positioning\n"
        report += "- **bearish_puts**: P/C ratio >1.5 - Heavy put buying, bearish positioning\n"
        report += "- **high_volume**: Exceptional volume (>50k) - Strong conviction move\n"
        report += "- **neutral**: Balanced activity\n\n"
        report += "**Note**: Options activity is a leading indicator. Smart money often positions 1-2 weeks before catalysts.\n"

        return report

    except requests.exceptions.RequestException as e:
        return f"Error fetching options activity from Tradier: {str(e)}"
    except Exception as e:
        return f"Unexpected error in options activity detection: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
def get_tradier_unusual_options(
|
||||||
|
tickers: List[str] = None,
|
||||||
|
date: str = None,
|
||||||
|
min_volume_multiple: float = 2.0,
|
||||||
|
top_n: int = 20,
|
||||||
|
) -> str:
|
||||||
|
"""Alias for get_unusual_options_activity to match registry naming convention"""
|
||||||
|
return get_unusual_options_activity(tickers, date, min_volume_multiple, top_n)
|
||||||
|
|
@ -2,6 +2,7 @@ from typing import Annotated
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from dateutil.relativedelta import relativedelta
|
from dateutil.relativedelta import relativedelta
|
||||||
import yfinance as yf
|
import yfinance as yf
|
||||||
|
import pandas as pd
|
||||||
import os
|
import os
|
||||||
from .stockstats_utils import StockstatsUtils
|
from .stockstats_utils import StockstatsUtils
|
||||||
|
|
||||||
|
|
@ -220,7 +221,7 @@ def _get_stock_stats_bulk(
|
||||||
curr_date_dt = pd.to_datetime(curr_date)
|
curr_date_dt = pd.to_datetime(curr_date)
|
||||||
|
|
||||||
end_date = today_date
|
end_date = today_date
|
||||||
start_date = today_date - pd.DateOffset(years=15)
|
start_date = today_date - pd.DateOffset(years=2)
|
||||||
start_date_str = start_date.strftime("%Y-%m-%d")
|
start_date_str = start_date.strftime("%Y-%m-%d")
|
||||||
end_date_str = end_date.strftime("%Y-%m-%d")
|
end_date_str = end_date.strftime("%Y-%m-%d")
|
||||||
|
|
||||||
|
|
@ -414,7 +415,269 @@ def validate_ticker(symbol: str) -> bool:
|
||||||
try:
|
try:
|
||||||
ticker = yf.Ticker(symbol.upper())
|
ticker = yf.Ticker(symbol.upper())
|
||||||
# Try to fetch 1 day of history
|
# Try to fetch 1 day of history
|
||||||
|
# Suppress yfinance error output
|
||||||
|
import sys
|
||||||
|
from io import StringIO
|
||||||
|
|
||||||
|
# Redirect stderr to suppress yfinance error messages
|
||||||
|
original_stderr = sys.stderr
|
||||||
|
sys.stderr = StringIO()
|
||||||
|
|
||||||
|
try:
|
||||||
history = ticker.history(period="1d")
|
history = ticker.history(period="1d")
|
||||||
return not history.empty
|
return not history.empty
|
||||||
|
finally:
|
||||||
|
# Restore stderr
|
||||||
|
sys.stderr = original_stderr
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_fundamentals(
|
||||||
|
ticker: Annotated[str, "ticker symbol of the company"],
|
||||||
|
curr_date: Annotated[str, "current date (for reference)"] = None
|
||||||
|
) -> str:
|
||||||
|
"""
|
||||||
|
Get comprehensive fundamental data for a ticker using yfinance.
|
||||||
|
Returns data in a format similar to Alpha Vantage's OVERVIEW endpoint.
|
||||||
|
|
||||||
|
This is a FREE alternative to Alpha Vantage with no rate limits.
|
||||||
|
"""
|
||||||
|
import json
|
||||||
|
|
||||||
|
try:
|
||||||
|
ticker_obj = yf.Ticker(ticker.upper())
|
||||||
|
info = ticker_obj.info
|
||||||
|
|
||||||
|
if not info or info.get('regularMarketPrice') is None:
|
||||||
|
return f"No fundamental data found for symbol '{ticker}'"
|
||||||
|
|
||||||
|
# Build a structured response similar to Alpha Vantage
|
||||||
|
fundamentals = {
|
||||||
|
# Company Info
|
||||||
|
"Symbol": ticker.upper(),
|
||||||
|
"AssetType": info.get("quoteType", "N/A"),
|
||||||
|
"Name": info.get("longName", info.get("shortName", "N/A")),
|
||||||
|
"Description": info.get("longBusinessSummary", "N/A"),
|
||||||
|
"Exchange": info.get("exchange", "N/A"),
|
||||||
|
"Currency": info.get("currency", "USD"),
|
||||||
|
"Country": info.get("country", "N/A"),
|
||||||
|
"Sector": info.get("sector", "N/A"),
|
||||||
|
"Industry": info.get("industry", "N/A"),
|
||||||
|
"Address": f"{info.get('address1', '')} {info.get('city', '')}, {info.get('state', '')} {info.get('zip', '')}".strip(),
|
||||||
|
"OfficialSite": info.get("website", "N/A"),
|
||||||
|
"FiscalYearEnd": info.get("fiscalYearEnd", "N/A"),
|
||||||
|
|
||||||
|
# Valuation
|
||||||
|
"MarketCapitalization": str(info.get("marketCap", "N/A")),
|
||||||
|
"EBITDA": str(info.get("ebitda", "N/A")),
|
||||||
|
"PERatio": str(info.get("trailingPE", "N/A")),
|
||||||
|
"ForwardPE": str(info.get("forwardPE", "N/A")),
|
||||||
|
"PEGRatio": str(info.get("pegRatio", "N/A")),
|
||||||
|
"BookValue": str(info.get("bookValue", "N/A")),
|
||||||
|
"PriceToBookRatio": str(info.get("priceToBook", "N/A")),
|
||||||
|
"PriceToSalesRatioTTM": str(info.get("priceToSalesTrailing12Months", "N/A")),
|
||||||
|
"EVToRevenue": str(info.get("enterpriseToRevenue", "N/A")),
|
||||||
|
"EVToEBITDA": str(info.get("enterpriseToEbitda", "N/A")),
|
||||||
|
|
||||||
|
# Earnings & Revenue
|
||||||
|
"EPS": str(info.get("trailingEps", "N/A")),
|
||||||
|
"ForwardEPS": str(info.get("forwardEps", "N/A")),
|
||||||
|
"RevenueTTM": str(info.get("totalRevenue", "N/A")),
|
||||||
|
"RevenuePerShareTTM": str(info.get("revenuePerShare", "N/A")),
|
||||||
|
"GrossProfitTTM": str(info.get("grossProfits", "N/A")),
|
||||||
|
"QuarterlyRevenueGrowthYOY": str(info.get("revenueGrowth", "N/A")),
|
||||||
|
"QuarterlyEarningsGrowthYOY": str(info.get("earningsGrowth", "N/A")),
|
||||||
|
|
||||||
|
# Margins & Returns
|
||||||
|
"ProfitMargin": str(info.get("profitMargins", "N/A")),
|
||||||
|
"OperatingMarginTTM": str(info.get("operatingMargins", "N/A")),
|
||||||
|
"GrossMargins": str(info.get("grossMargins", "N/A")),
|
||||||
|
"ReturnOnAssetsTTM": str(info.get("returnOnAssets", "N/A")),
|
||||||
|
"ReturnOnEquityTTM": str(info.get("returnOnEquity", "N/A")),
|
||||||
|
|
||||||
|
# Dividend
|
||||||
|
"DividendPerShare": str(info.get("dividendRate", "N/A")),
|
||||||
|
"DividendYield": str(info.get("dividendYield", "N/A")),
|
||||||
|
"ExDividendDate": str(info.get("exDividendDate", "N/A")),
|
||||||
|
"PayoutRatio": str(info.get("payoutRatio", "N/A")),
|
||||||
|
|
||||||
|
# Balance Sheet
|
||||||
|
"TotalCash": str(info.get("totalCash", "N/A")),
|
||||||
|
"TotalDebt": str(info.get("totalDebt", "N/A")),
|
||||||
|
"CurrentRatio": str(info.get("currentRatio", "N/A")),
|
||||||
|
"QuickRatio": str(info.get("quickRatio", "N/A")),
|
||||||
|
"DebtToEquity": str(info.get("debtToEquity", "N/A")),
|
||||||
|
"FreeCashFlow": str(info.get("freeCashflow", "N/A")),
|
||||||
|
"OperatingCashFlow": str(info.get("operatingCashflow", "N/A")),
|
||||||
|
|
||||||
|
# Trading Info
|
||||||
|
"Beta": str(info.get("beta", "N/A")),
|
||||||
|
"52WeekHigh": str(info.get("fiftyTwoWeekHigh", "N/A")),
|
||||||
|
"52WeekLow": str(info.get("fiftyTwoWeekLow", "N/A")),
|
||||||
|
"50DayMovingAverage": str(info.get("fiftyDayAverage", "N/A")),
|
||||||
|
"200DayMovingAverage": str(info.get("twoHundredDayAverage", "N/A")),
|
||||||
|
"SharesOutstanding": str(info.get("sharesOutstanding", "N/A")),
|
||||||
|
"SharesFloat": str(info.get("floatShares", "N/A")),
|
||||||
|
"SharesShort": str(info.get("sharesShort", "N/A")),
|
||||||
|
"ShortRatio": str(info.get("shortRatio", "N/A")),
|
||||||
|
"ShortPercentOfFloat": str(info.get("shortPercentOfFloat", "N/A")),
|
||||||
|
|
||||||
|
# Ownership
|
||||||
|
"PercentInsiders": str(info.get("heldPercentInsiders", "N/A")),
|
||||||
|
"PercentInstitutions": str(info.get("heldPercentInstitutions", "N/A")),
|
||||||
|
|
||||||
|
# Analyst
|
||||||
|
"AnalystTargetPrice": str(info.get("targetMeanPrice", "N/A")),
|
||||||
|
"AnalystTargetHigh": str(info.get("targetHighPrice", "N/A")),
|
||||||
|
"AnalystTargetLow": str(info.get("targetLowPrice", "N/A")),
|
||||||
|
"NumberOfAnalysts": str(info.get("numberOfAnalystOpinions", "N/A")),
|
||||||
|
"RecommendationKey": info.get("recommendationKey", "N/A"),
|
||||||
|
"RecommendationMean": str(info.get("recommendationMean", "N/A")),
|
||||||
|
}
|
||||||
|
|
||||||
|
# Return as formatted JSON string
|
||||||
|
return json.dumps(fundamentals, indent=4)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
return f"Error retrieving fundamentals for {ticker}: {str(e)}"
|
||||||
|
|
||||||
|
|
||||||
|
def get_options_activity(
|
||||||
|
ticker: Annotated[str, "ticker symbol of the company"],
|
||||||
|
num_expirations: Annotated[int, "number of nearest expiration dates to analyze"] = 3,
|
||||||
|
curr_date: Annotated[str, "current date (for reference)"] = None
|
||||||
|
) -> str:
|
||||||
|
"""
|
||||||
|
Get options activity for a specific ticker using yfinance.
|
||||||
|
Analyzes volume, open interest, and put/call ratios.
|
||||||
|
|
||||||
|
This is a FREE alternative to Tradier with no API key required.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
ticker_obj = yf.Ticker(ticker.upper())
|
||||||
|
|
||||||
|
# Get available expiration dates
|
||||||
|
expirations = ticker_obj.options
|
||||||
|
if not expirations:
|
||||||
|
return f"No options data available for {ticker}"
|
||||||
|
|
||||||
|
# Analyze the nearest N expiration dates
|
||||||
|
expirations_to_analyze = expirations[:min(num_expirations, len(expirations))]
|
||||||
|
|
||||||
|
report = f"## Options Activity for {ticker.upper()}\n\n"
|
||||||
|
report += f"**Available Expirations:** {len(expirations)} dates\n"
|
||||||
|
report += f"**Analyzing:** {', '.join(expirations_to_analyze)}\n\n"
|
||||||
|
|
||||||
|
total_call_volume = 0
|
||||||
|
total_put_volume = 0
|
||||||
|
total_call_oi = 0
|
||||||
|
total_put_oi = 0
|
||||||
|
|
||||||
|
unusual_activity = []
|
||||||
|
|
||||||
|
for exp_date in expirations_to_analyze:
|
||||||
|
try:
|
||||||
|
opt = ticker_obj.option_chain(exp_date)
|
||||||
|
calls = opt.calls
|
||||||
|
puts = opt.puts
|
||||||
|
|
||||||
|
if calls.empty and puts.empty:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Calculate totals for this expiration
|
||||||
|
call_vol = calls['volume'].sum() if 'volume' in calls.columns else 0
|
||||||
|
put_vol = puts['volume'].sum() if 'volume' in puts.columns else 0
|
||||||
|
call_oi = calls['openInterest'].sum() if 'openInterest' in calls.columns else 0
|
||||||
|
put_oi = puts['openInterest'].sum() if 'openInterest' in puts.columns else 0
|
||||||
|
|
||||||
|
# Handle NaN values
|
||||||
|
call_vol = 0 if pd.isna(call_vol) else int(call_vol)
|
||||||
|
put_vol = 0 if pd.isna(put_vol) else int(put_vol)
|
||||||
|
call_oi = 0 if pd.isna(call_oi) else int(call_oi)
|
||||||
|
put_oi = 0 if pd.isna(put_oi) else int(put_oi)
|
||||||
|
|
||||||
|
total_call_volume += call_vol
|
||||||
|
total_put_volume += put_vol
|
||||||
|
total_call_oi += call_oi
|
||||||
|
total_put_oi += put_oi
|
||||||
|
|
||||||
|
# Find unusual activity (high volume relative to OI)
|
||||||
|
for _, row in calls.iterrows():
|
||||||
|
vol = row.get('volume', 0)
|
||||||
|
oi = row.get('openInterest', 0)
|
||||||
|
if pd.notna(vol) and pd.notna(oi) and oi > 0 and vol > oi * 0.5 and vol > 100:
|
||||||
|
unusual_activity.append({
|
||||||
|
'type': 'CALL',
|
||||||
|
'expiration': exp_date,
|
||||||
|
'strike': row['strike'],
|
||||||
|
'volume': int(vol),
|
||||||
|
'openInterest': int(oi),
|
||||||
|
'vol_oi_ratio': round(vol / oi, 2) if oi > 0 else 0,
|
||||||
|
'impliedVolatility': round(row.get('impliedVolatility', 0) * 100, 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, row in puts.iterrows():
|
||||||
|
vol = row.get('volume', 0)
|
||||||
|
oi = row.get('openInterest', 0)
|
||||||
|
if pd.notna(vol) and pd.notna(oi) and oi > 0 and vol > oi * 0.5 and vol > 100:
|
||||||
|
unusual_activity.append({
|
||||||
|
'type': 'PUT',
|
||||||
|
'expiration': exp_date,
|
||||||
|
'strike': row['strike'],
|
||||||
|
'volume': int(vol),
|
||||||
|
'openInterest': int(oi),
|
||||||
|
'vol_oi_ratio': round(vol / oi, 2) if oi > 0 else 0,
|
||||||
|
'impliedVolatility': round(row.get('impliedVolatility', 0) * 100, 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
report += f"*Error fetching {exp_date}: {str(e)}*\n"
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Calculate put/call ratios
|
||||||
|
pc_volume_ratio = round(total_put_volume / total_call_volume, 3) if total_call_volume > 0 else 0
|
||||||
|
pc_oi_ratio = round(total_put_oi / total_call_oi, 3) if total_call_oi > 0 else 0
|
||||||
|
|
||||||
|
# Summary
|
||||||
|
report += "### Summary\n"
|
||||||
|
report += "| Metric | Calls | Puts | Put/Call Ratio |\n"
|
||||||
|
report += "|--------|-------|------|----------------|\n"
|
||||||
|
report += f"| Volume | {total_call_volume:,} | {total_put_volume:,} | {pc_volume_ratio} |\n"
|
||||||
|
report += f"| Open Interest | {total_call_oi:,} | {total_put_oi:,} | {pc_oi_ratio} |\n\n"
|
||||||
|
|
||||||
|
# Sentiment interpretation
|
||||||
|
report += "### Sentiment Analysis\n"
|
||||||
|
if pc_volume_ratio < 0.7:
|
||||||
|
report += "- **Volume P/C Ratio:** Bullish (more call volume)\n"
|
||||||
|
elif pc_volume_ratio > 1.3:
|
||||||
|
report += "- **Volume P/C Ratio:** Bearish (more put volume)\n"
|
||||||
|
else:
|
||||||
|
report += "- **Volume P/C Ratio:** Neutral\n"
|
||||||
|
|
||||||
|
if pc_oi_ratio < 0.7:
|
||||||
|
report += "- **OI P/C Ratio:** Bullish positioning\n"
|
||||||
|
elif pc_oi_ratio > 1.3:
|
||||||
|
report += "- **OI P/C Ratio:** Bearish positioning\n"
|
||||||
|
else:
|
||||||
|
report += "- **OI P/C Ratio:** Neutral positioning\n"
|
||||||
|
|
||||||
|
# Unusual activity
|
||||||
|
if unusual_activity:
|
||||||
|
# Sort by volume/OI ratio
|
||||||
|
unusual_activity.sort(key=lambda x: x['vol_oi_ratio'], reverse=True)
|
||||||
|
top_unusual = unusual_activity[:10]
|
||||||
|
|
||||||
|
report += "\n### Unusual Activity (High Volume vs Open Interest)\n"
|
||||||
|
report += "| Type | Expiry | Strike | Volume | OI | Vol/OI | IV |\n"
|
||||||
|
report += "|------|--------|--------|--------|----|---------|----|---|\n"
|
||||||
|
for item in top_unusual:
|
||||||
|
report += f"| {item['type']} | {item['expiration']} | ${item['strike']} | {item['volume']:,} | {item['openInterest']:,} | {item['vol_oi_ratio']}x | {item['impliedVolatility']}% |\n"
|
||||||
|
else:
|
||||||
|
report += "\n*No unusual options activity detected.*\n"
|
||||||
|
|
||||||
|
return report
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
return f"Error retrieving options activity for {ticker}: {str(e)}"
|
||||||
|
|
@ -9,10 +9,10 @@ DEFAULT_CONFIG = {
|
||||||
"dataflows/data_cache",
|
"dataflows/data_cache",
|
||||||
),
|
),
|
||||||
# LLM settings
|
# LLM settings
|
||||||
"llm_provider": "openai",
|
"llm_provider": "google",
|
||||||
"deep_think_llm": "gpt-4o", # For Google: gemini-2.0-flash or gemini-1.5-pro-latest
|
"deep_think_llm": "gemini-3-pro-preview", # For Google: gemini-2.0-flash or gemini-1.5-pro-latest
|
||||||
"quick_think_llm": "gpt-4o-mini", # For Google: gemini-2.0-flash or gemini-1.5-flash-latest
|
"quick_think_llm": "gemini-2.5-flash-lite", # For Google: gemini-2.0-flash or gemini-1.5-flash-latest
|
||||||
"backend_url": "https://api.openai.com/v1",
|
"backend_url": "https://api.google.com/v1",
|
||||||
# Debate and discussion settings
|
# Debate and discussion settings
|
||||||
"max_debate_rounds": 1,
|
"max_debate_rounds": 1,
|
||||||
"max_risk_discuss_rounds": 1,
|
"max_risk_discuss_rounds": 1,
|
||||||
|
|
@ -24,6 +24,12 @@ DEFAULT_CONFIG = {
|
||||||
"max_candidates_to_analyze": 20, # Maximum candidates for deep dive analysis
|
"max_candidates_to_analyze": 20, # Maximum candidates for deep dive analysis
|
||||||
"news_lookback_days": 7, # Days of news history to analyze
|
"news_lookback_days": 7, # Days of news history to analyze
|
||||||
"final_recommendations": 10, # Number of final opportunities to recommend
|
"final_recommendations": 10, # Number of final opportunities to recommend
|
||||||
|
# New data source settings
|
||||||
|
"unusual_volume_multiple": 3.0, # Minimum volume multiple for unusual volume detection
|
||||||
|
"unusual_options_volume_multiple": 2.0, # Minimum options volume multiple
|
||||||
|
"analyst_lookback_days": 7, # Days to look back for analyst rating changes
|
||||||
|
"min_short_interest_pct": 15.0, # Minimum short interest % for squeeze candidates
|
||||||
|
"min_days_to_cover": 2.0, # Minimum days to cover ratio
|
||||||
},
|
},
|
||||||
# Memory settings
|
# Memory settings
|
||||||
"enable_memory": False, # Enable/disable embeddings and memory system
|
"enable_memory": False, # Enable/disable embeddings and memory system
|
||||||
|
|
|
||||||
|
|
@ -11,7 +11,7 @@ from tradingagents.agents.utils.agent_utils import (
|
||||||
get_indicators
|
get_indicators
|
||||||
)
|
)
|
||||||
from tradingagents.tools.executor import execute_tool
|
from tradingagents.tools.executor import execute_tool
|
||||||
from tradingagents.schemas import TickerList, MarketMovers, ThemeList
|
from tradingagents.schemas import TickerList, TickerContextList, MarketMovers, ThemeList
|
||||||
|
|
||||||
class DiscoveryGraph:
|
class DiscoveryGraph:
|
||||||
def __init__(self, config=None):
|
def __init__(self, config=None):
|
||||||
|
|
@ -52,8 +52,94 @@ class DiscoveryGraph:
|
||||||
self.max_candidates_to_analyze = discovery_config.get("max_candidates_to_analyze", 10)
|
self.max_candidates_to_analyze = discovery_config.get("max_candidates_to_analyze", 10)
|
||||||
self.news_lookback_days = discovery_config.get("news_lookback_days", 7)
|
self.news_lookback_days = discovery_config.get("news_lookback_days", 7)
|
||||||
self.final_recommendations = discovery_config.get("final_recommendations", 3)
|
self.final_recommendations = discovery_config.get("final_recommendations", 3)
|
||||||
|
|
||||||
|
# Store run directory for saving results
|
||||||
|
self.run_dir = self.config.get("discovery_run_dir", None)
|
||||||
|
|
||||||
self.graph = self._create_graph()
|
self.graph = self._create_graph()
|
||||||
|
|
||||||
|
def _log_tool_call(self, tool_logs: list, node: str, step_name: str, tool_name: str, params: dict, output: str, context: str = ""):
|
||||||
|
"""Log a tool call with metadata for debugging and analysis."""
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
log_entry = {
|
||||||
|
"timestamp": datetime.now().isoformat(),
|
||||||
|
"node": node,
|
||||||
|
"step": step_name,
|
||||||
|
"tool": tool_name,
|
||||||
|
"parameters": params,
|
||||||
|
"context": context,
|
||||||
|
"output": output[:1000] + "..." if len(output) > 1000 else output,
|
||||||
|
"output_length": len(output)
|
||||||
|
}
|
||||||
|
tool_logs.append(log_entry)
|
||||||
|
return log_entry
|
||||||
|
|
||||||
|
def _save_results(self, state: dict, trade_date: str):
|
||||||
|
"""Save discovery results and tool logs to files."""
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
import json
|
||||||
|
|
||||||
|
# Get or create results directory
|
||||||
|
if self.run_dir:
|
||||||
|
results_dir = Path(self.run_dir)
|
||||||
|
else:
|
||||||
|
run_timestamp = datetime.now().strftime("%H_%M_%S")
|
||||||
|
results_dir = Path(self.config.get("results_dir", "./results")) / "discovery" / trade_date / f"run_{run_timestamp}"
|
||||||
|
results_dir.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
# Save main results as markdown
|
||||||
|
try:
|
||||||
|
with open(results_dir / "discovery_results.md", "w") as f:
|
||||||
|
f.write(f"# Discovery Results - {trade_date}\n\n")
|
||||||
|
f.write(f"## Final Ranking\n\n")
|
||||||
|
f.write(state.get("final_ranking", "No ranking available"))
|
||||||
|
f.write("\n\n## Candidates Analyzed\n\n")
|
||||||
|
for opp in state.get("opportunities", []):
|
||||||
|
f.write(f"### {opp['ticker']} ({opp['strategy']})\n\n")
|
||||||
|
except Exception as e:
|
||||||
|
print(f" Error saving results: {e}")
|
||||||
|
|
||||||
|
# Save as JSON
|
||||||
|
try:
|
||||||
|
with open(results_dir / "discovery_result.json", "w") as f:
|
||||||
|
json_state = {
|
||||||
|
"trade_date": trade_date,
|
||||||
|
"tickers": state.get("tickers", []),
|
||||||
|
"filtered_tickers": state.get("filtered_tickers", []),
|
||||||
|
"final_ranking": state.get("final_ranking", ""),
|
||||||
|
"status": state.get("status", "")
|
||||||
|
}
|
||||||
|
json.dump(json_state, f, indent=2)
|
||||||
|
except Exception as e:
|
||||||
|
print(f" Error saving JSON: {e}")
|
||||||
|
|
||||||
|
# Save tool logs
|
||||||
|
tool_logs = state.get("tool_logs", [])
|
||||||
|
if tool_logs:
|
||||||
|
try:
|
||||||
|
with open(results_dir / "tool_execution_logs.json", "w") as f:
|
||||||
|
json.dump(tool_logs, f, indent=2)
|
||||||
|
|
||||||
|
with open(results_dir / "tool_execution_logs.md", "w") as f:
|
||||||
|
f.write(f"# Tool Execution Logs - {trade_date}\n\n")
|
||||||
|
for i, log in enumerate(tool_logs, 1):
|
||||||
|
f.write(f"## {i}. {log['step']}\n\n")
|
||||||
|
f.write(f"- **Tool:** `{log['tool']}`\n")
|
||||||
|
f.write(f"- **Node:** {log['node']}\n")
|
||||||
|
f.write(f"- **Timestamp:** {log['timestamp']}\n")
|
||||||
|
if log.get('context'):
|
||||||
|
f.write(f"- **Context:** {log['context']}\n")
|
||||||
|
f.write(f"- **Parameters:** `{log['parameters']}`\n")
|
||||||
|
f.write(f"- **Output Length:** {log['output_length']} chars\n\n")
|
||||||
|
f.write(f"### Output\n```\n{log['output']}\n```\n\n")
|
||||||
|
f.write("---\n\n")
|
||||||
|
except Exception as e:
|
||||||
|
print(f" Error saving tool logs: {e}")
|
||||||
|
|
||||||
|
print(f" Results saved to: {results_dir}")
|
||||||
|
|
||||||
def _create_graph(self):
|
def _create_graph(self):
|
||||||
workflow = StateGraph(DiscoveryState)
|
workflow = StateGraph(DiscoveryState)
|
||||||
|
|
||||||
|
|
@ -75,156 +161,79 @@ class DiscoveryGraph:
|
||||||
print("🔍 Scanning market for opportunities...")
|
print("🔍 Scanning market for opportunities...")
|
||||||
|
|
||||||
candidates = []
|
candidates = []
|
||||||
|
tool_logs = state.get("tool_logs", [])
|
||||||
|
|
||||||
# 0. Macro Theme Discovery (Top-Down)
|
# 0. Macro Theme Discovery (Top-Down) - DISABLED
|
||||||
try:
|
# This section used Twitter API which has rate limit issues
|
||||||
from datetime import datetime
|
# try:
|
||||||
today = datetime.now().strftime("%Y-%m-%d")
|
# from datetime import datetime
|
||||||
|
# today = datetime.now().strftime("%Y-%m-%d")
|
||||||
# Get Global News
|
# global_news = execute_tool("get_global_news", date=today, limit=5)
|
||||||
global_news = execute_tool("get_global_news", date=today, limit=5)
|
# ... (macro theme code disabled)
|
||||||
|
# except Exception as e:
|
||||||
# Extract Themes
|
# print(f" Error in Macro Theme Discovery: {e}")
|
||||||
prompt = f"""Based on this global news, identify 3 trending market themes or sectors (e.g., 'Artificial Intelligence', 'Oil', 'Biotech').
|
|
||||||
Return a JSON object with a 'themes' array of strings.
|
|
||||||
|
|
||||||
News:
|
|
||||||
{global_news}
|
|
||||||
"""
|
|
||||||
|
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
|
||||||
schema=ThemeList.model_json_schema(),
|
|
||||||
method="json_schema"
|
|
||||||
)
|
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
|
||||||
themes = response.get("themes", [])
|
|
||||||
|
|
||||||
print(f" Identified Macro Themes: {themes}")
|
|
||||||
|
|
||||||
# Find tickers for each theme
|
|
||||||
for theme in themes:
|
|
||||||
try:
|
|
||||||
tweets_report = execute_tool("get_tweets", query=f"{theme} stocks", count=15)
|
|
||||||
|
|
||||||
prompt = f"""Extract ONLY valid stock ticker symbols related to the theme '{theme}' from this report.
|
|
||||||
Return a comma-separated list of tickers (1-5 uppercase letters).
|
|
||||||
|
|
||||||
Report:
|
|
||||||
{tweets_report}
|
|
||||||
|
|
||||||
Return a JSON object with a 'tickers' array."""
|
|
||||||
|
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
|
||||||
schema=TickerList.model_json_schema(),
|
|
||||||
method="json_schema"
|
|
||||||
)
|
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
|
||||||
theme_tickers = response.get("tickers", [])
|
|
||||||
|
|
||||||
for t in theme_tickers:
|
|
||||||
t = t.upper().strip()
|
|
||||||
if re.match(r'^[A-Z]{1,5}$', t):
|
|
||||||
# Use validate_ticker tool logic (via execute_tool)
|
|
||||||
try:
|
|
||||||
if execute_tool("validate_ticker", symbol=t):
|
|
||||||
candidates.append({"ticker": t, "source": f"macro_theme_{theme}", "sentiment": "unknown"})
|
|
||||||
except Exception:
|
|
||||||
continue
|
|
||||||
except Exception as e:
|
|
||||||
print(f" Error fetching tickers for theme {theme}: {e}")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f" Error in Macro Theme Discovery: {e}")
|
|
||||||
|
|
||||||
# 1. Get Reddit Trending (Social Sentiment)
|
# 1. Get Reddit Trending (Social Sentiment)
|
||||||
try:
|
try:
|
||||||
reddit_report = execute_tool("get_trending_tickers", limit=self.reddit_trending_limit)
|
reddit_report = execute_tool("get_trending_tickers", limit=self.reddit_trending_limit)
|
||||||
# Use LLM to extract tickers
|
# Use LLM to extract tickers WITH context
|
||||||
prompt = """Extract ONLY valid stock ticker symbols from this Reddit report.
|
prompt = """Extract valid stock ticker symbols from this Reddit report, along with context about why they're trending.
|
||||||
Return a comma-separated list of tickers (1-5 uppercase letters).
|
|
||||||
Do not include currencies (like RMB), cryptocurrencies (like BTC unless it's an ETF), or explanations.
|
|
||||||
Only include actual stock tickers.
|
|
||||||
|
|
||||||
Examples of valid tickers: AAPL, GOOGL, MSFT, TSLA, NVDA
|
For each ticker, include:
|
||||||
Examples of invalid: RMB (currency), BTC (crypto)
|
- ticker: The stock symbol (1-5 uppercase letters)
|
||||||
|
- context: Brief description of sentiment, mentions, or key discussion points
|
||||||
|
|
||||||
|
Do not include currencies (RMB), cryptocurrencies (BTC), or invalid symbols.
|
||||||
|
|
||||||
Report:
|
Report:
|
||||||
{report}
|
{report}
|
||||||
|
|
||||||
Return a JSON object with a 'tickers' array containing only valid stock ticker symbols.""".format(report=reddit_report)
|
Return a JSON object with a 'candidates' array of objects, each having 'ticker' and 'context' fields.""".format(report=reddit_report)
|
||||||
|
|
||||||
# Use structured output for ticker extraction
|
# Use structured output for ticker+context extraction
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
structured_llm = self.quick_thinking_llm.with_structured_output(
|
||||||
schema=TickerList.model_json_schema(),
|
schema=TickerContextList.model_json_schema(),
|
||||||
method="json_schema"
|
method="json_schema"
|
||||||
)
|
)
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
||||||
|
|
||||||
# Validate and add tickers
|
# Validate and add tickers with context
|
||||||
reddit_tickers = response.get("tickers", [])
|
reddit_candidates = response.get("candidates", [])
|
||||||
for t in reddit_tickers:
|
for c in reddit_candidates:
|
||||||
t = t.upper().strip()
|
ticker = c.get("ticker", "").upper().strip()
|
||||||
|
context = c.get("context", "Trending on Reddit")
|
||||||
# Validate ticker format (1-5 uppercase letters)
|
# Validate ticker format (1-5 uppercase letters)
|
||||||
if re.match(r'^[A-Z]{1,5}$', t):
|
if re.match(r'^[A-Z]{1,5}$', ticker):
|
||||||
candidates.append({"ticker": t, "source": "social_trending", "sentiment": "unknown"})
|
candidates.append({"ticker": ticker, "source": "social_trending", "context": context, "sentiment": "unknown"})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f" Error fetching Reddit tickers: {e}")
|
print(f" Error fetching Reddit tickers: {e}")
|
||||||
|
|
||||||
# 2. Get Twitter Trending (Social Sentiment)
|
# 2. Get Twitter Trending (Social Sentiment) - DISABLED due to API issues
|
||||||
try:
|
# try:
|
||||||
# Search for general market discussions
|
# # Search for general market discussions
|
||||||
tweets_report = execute_tool("get_tweets", query="stocks to watch", count=20)
|
# tweets_report = execute_tool("get_tweets", query="stocks to watch", count=20)
|
||||||
|
#
|
||||||
# Use LLM to extract tickers
|
# # Use LLM to extract tickers
|
||||||
prompt = """Extract ONLY valid stock ticker symbols from this Twitter report.
|
# prompt = """Extract ONLY valid stock ticker symbols from this Twitter report.
|
||||||
Return a comma-separated list of tickers (1-5 uppercase letters).
|
# ... (Twitter extraction code disabled)
|
||||||
Do not include currencies (like RMB), cryptocurrencies (like BTC unless it's an ETF), or explanations.
|
# except Exception as e:
|
||||||
Only include actual stock tickers.
|
# print(f" Error fetching Twitter tickers: {e}")
|
||||||
|
|
||||||
Examples of valid tickers: AAPL, GOOGL, MSFT, TSLA, NVDA
|
|
||||||
Examples of invalid: RMB (currency), BTC (crypto)
|
|
||||||
|
|
||||||
Report:
|
|
||||||
{report}
|
|
||||||
|
|
||||||
Return a JSON object with a 'tickers' array containing only valid stock ticker symbols.""".format(report=tweets_report)
|
|
||||||
|
|
||||||
# Use structured output for ticker extraction
|
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
|
||||||
schema=TickerList.model_json_schema(),
|
|
||||||
method="json_schema"
|
|
||||||
)
|
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
|
||||||
|
|
||||||
# Validate and add tickers
|
|
||||||
twitter_tickers = response.get("tickers", [])
|
|
||||||
valid_twitter_tickers = []
|
|
||||||
for t in twitter_tickers:
|
|
||||||
t = t.upper().strip()
|
|
||||||
# Validate ticker format (1-5 uppercase letters)
|
|
||||||
if re.match(r'^[A-Z]{1,5}$', t):
|
|
||||||
# Use validate_ticker tool logic (via execute_tool)
|
|
||||||
try:
|
|
||||||
if execute_tool("validate_ticker", symbol=t):
|
|
||||||
valid_twitter_tickers.append(t)
|
|
||||||
except Exception:
|
|
||||||
continue
|
|
||||||
|
|
||||||
for t in valid_twitter_tickers:
|
|
||||||
candidates.append({"ticker": t, "source": "twitter_sentiment", "sentiment": "unknown"})
|
|
||||||
except Exception as e:
|
|
||||||
print(f" Error fetching Twitter tickers: {e}")
|
|
||||||
|
|
||||||
# 2. Get Market Movers (Gainers & Losers)
|
# 2. Get Market Movers (Gainers & Losers)
|
||||||
try:
|
try:
|
||||||
movers_report = execute_tool("get_market_movers", limit=self.market_movers_limit)
|
movers_report = execute_tool("get_market_movers", limit=self.market_movers_limit)
|
||||||
# We need to parse this to separate Gainers vs Losers
|
# Use LLM to extract movers with context
|
||||||
# Since it's a markdown report, we'll use LLM to structure it
|
prompt = f"""Extract stock tickers from this market movers data with context about their performance.
|
||||||
prompt = f"""Based on the following market movers data, extract the top {self.market_movers_limit} tickers.
|
|
||||||
Return a JSON object with a 'movers' array containing objects with 'ticker' and 'type' (either 'gainer' or 'loser') fields.
|
For each ticker, include:
|
||||||
|
- ticker: The stock symbol (1-5 uppercase letters)
|
||||||
|
- type: Either 'gainer' or 'loser'
|
||||||
|
- reason: Brief description of the price movement (%, volume, catalyst if mentioned)
|
||||||
|
|
||||||
Data:
|
Data:
|
||||||
{movers_report}"""
|
{movers_report}
|
||||||
|
|
||||||
|
Return a JSON object with a 'movers' array containing objects with 'ticker', 'type', and 'reason' fields."""
|
||||||
|
|
||||||
# Use structured output for market movers
|
# Use structured output for market movers
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
structured_llm = self.quick_thinking_llm.with_structured_output(
|
||||||
|
|
@ -233,16 +242,17 @@ Data:
|
||||||
)
|
)
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
||||||
|
|
||||||
# Validate and add tickers
|
# Validate and add tickers with context
|
||||||
movers = response.get("movers", [])
|
movers = response.get("movers", [])
|
||||||
for m in movers:
|
for m in movers:
|
||||||
ticker = m.get('ticker', '').upper().strip()
|
ticker = m.get('ticker', '').upper().strip()
|
||||||
# Only add valid tickers (1-5 uppercase letters)
|
|
||||||
if ticker and re.match(r'^[A-Z]{1,5}$', ticker):
|
if ticker and re.match(r'^[A-Z]{1,5}$', ticker):
|
||||||
mover_type = m.get('type', 'gainer')
|
mover_type = m.get('type', 'gainer')
|
||||||
|
reason = m.get('reason', f"Top {mover_type}")
|
||||||
candidates.append({
|
candidates.append({
|
||||||
"ticker": ticker,
|
"ticker": ticker,
|
||||||
"source": mover_type,
|
"source": mover_type,
|
||||||
|
"context": reason,
|
||||||
"sentiment": "negative" if mover_type == "loser" else "positive"
|
"sentiment": "negative" if mover_type == "loser" else "positive"
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
@ -258,27 +268,30 @@ Data:
|
||||||
|
|
||||||
earnings_report = execute_tool("get_earnings_calendar", from_date=from_date, to_date=to_date)
|
earnings_report = execute_tool("get_earnings_calendar", from_date=from_date, to_date=to_date)
|
||||||
|
|
||||||
# Extract tickers from earnings calendar
|
# Extract tickers with earnings context
|
||||||
prompt = """Extract ONLY valid stock ticker symbols from this earnings calendar.
|
prompt = """Extract stock tickers from this earnings calendar with context about their upcoming earnings.
|
||||||
Return a comma-separated list of tickers (1-5 uppercase letters).
|
|
||||||
Only include actual stock tickers, not indexes or other symbols.
|
For each ticker, include:
|
||||||
|
- ticker: The stock symbol (1-5 uppercase letters)
|
||||||
|
- context: Earnings date, expected EPS, and any other relevant info
|
||||||
|
|
||||||
Earnings Calendar:
|
Earnings Calendar:
|
||||||
{report}
|
{report}
|
||||||
|
|
||||||
Return a JSON object with a 'tickers' array containing only valid stock ticker symbols.""".format(report=earnings_report)
|
Return a JSON object with a 'candidates' array of objects, each having 'ticker' and 'context' fields.""".format(report=earnings_report)
|
||||||
|
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
structured_llm = self.quick_thinking_llm.with_structured_output(
|
||||||
schema=TickerList.model_json_schema(),
|
schema=TickerContextList.model_json_schema(),
|
||||||
method="json_schema"
|
method="json_schema"
|
||||||
)
|
)
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
||||||
|
|
||||||
earnings_tickers = response.get("tickers", [])
|
earnings_candidates = response.get("candidates", [])
|
||||||
for t in earnings_tickers:
|
for c in earnings_candidates:
|
||||||
t = t.upper().strip()
|
ticker = c.get("ticker", "").upper().strip()
|
||||||
if re.match(r'^[A-Z]{1,5}$', t):
|
context = c.get("context", "Upcoming earnings")
|
||||||
candidates.append({"ticker": t, "source": "earnings_catalyst", "sentiment": "unknown"})
|
if re.match(r'^[A-Z]{1,5}$', ticker):
|
||||||
|
candidates.append({"ticker": ticker, "source": "earnings_catalyst", "context": context, "sentiment": "unknown"})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f" Error fetching Earnings Calendar: {e}")
|
print(f" Error fetching Earnings Calendar: {e}")
|
||||||
|
|
||||||
|
|
@ -291,30 +304,72 @@ Return a JSON object with a 'tickers' array containing only valid stock ticker s
|
||||||
|
|
||||||
ipo_report = execute_tool("get_ipo_calendar", from_date=from_date, to_date=to_date)
|
ipo_report = execute_tool("get_ipo_calendar", from_date=from_date, to_date=to_date)
|
||||||
|
|
||||||
# Extract tickers from IPO calendar
|
# Extract tickers with IPO context
|
||||||
prompt = """Extract ONLY valid stock ticker symbols from this IPO calendar.
|
prompt = """Extract stock tickers from this IPO calendar with context about the offering.
|
||||||
Return a comma-separated list of tickers (1-5 uppercase letters).
|
|
||||||
Only include actual stock tickers that are listed or about to be listed.
|
For each ticker, include:
|
||||||
|
- ticker: The stock symbol (1-5 uppercase letters)
|
||||||
|
- context: IPO date, price range, shares offered, and company description
|
||||||
|
|
||||||
IPO Calendar:
|
IPO Calendar:
|
||||||
{report}
|
{report}
|
||||||
|
|
||||||
Return a JSON object with a 'tickers' array containing only valid stock ticker symbols.""".format(report=ipo_report)
|
Return a JSON object with a 'candidates' array of objects, each having 'ticker' and 'context' fields.""".format(report=ipo_report)
|
||||||
|
|
||||||
structured_llm = self.quick_thinking_llm.with_structured_output(
|
structured_llm = self.quick_thinking_llm.with_structured_output(
|
||||||
schema=TickerList.model_json_schema(),
|
schema=TickerContextList.model_json_schema(),
|
||||||
method="json_schema"
|
method="json_schema"
|
||||||
)
|
)
|
||||||
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
||||||
|
|
||||||
ipo_tickers = response.get("tickers", [])
|
ipo_candidates = response.get("candidates", [])
|
||||||
for t in ipo_tickers:
|
for c in ipo_candidates:
|
||||||
t = t.upper().strip()
|
ticker = c.get("ticker", "").upper().strip()
|
||||||
if re.match(r'^[A-Z]{1,5}$', t):
|
context = c.get("context", "Recent/upcoming IPO")
|
||||||
candidates.append({"ticker": t, "source": "ipo_listing", "sentiment": "unknown"})
|
if re.match(r'^[A-Z]{1,5}$', ticker):
|
||||||
|
candidates.append({"ticker": ticker, "source": "ipo_listing", "context": context, "sentiment": "unknown"})
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f" Error fetching IPO Calendar: {e}")
|
print(f" Error fetching IPO Calendar: {e}")
|
||||||
|
|
||||||
|
# 5. Short Squeeze Detection (High Short Interest)
|
||||||
|
try:
|
||||||
|
# Get stocks with high short interest - potential squeeze candidates
|
||||||
|
short_interest_report = execute_tool(
|
||||||
|
"get_short_interest",
|
||||||
|
min_short_interest_pct=15.0, # 15%+ short interest
|
||||||
|
min_days_to_cover=3.0, # 3+ days to cover
|
||||||
|
top_n=15
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extract tickers with short squeeze context
|
||||||
|
prompt = """Extract stock tickers from this short interest report with context about squeeze potential.
|
||||||
|
|
||||||
|
For each ticker, include:
|
||||||
|
- ticker: The stock symbol (1-5 uppercase letters)
|
||||||
|
- context: Short interest %, days to cover, squeeze potential rating, and any other relevant metrics
|
||||||
|
|
||||||
|
Short Interest Report:
|
||||||
|
{report}
|
||||||
|
|
||||||
|
Return a JSON object with a 'candidates' array of objects, each having 'ticker' and 'context' fields.""".format(report=short_interest_report)
|
||||||
|
|
||||||
|
structured_llm = self.quick_thinking_llm.with_structured_output(
|
||||||
|
schema=TickerContextList.model_json_schema(),
|
||||||
|
method="json_schema"
|
||||||
|
)
|
||||||
|
response = structured_llm.invoke([HumanMessage(content=prompt)])
|
||||||
|
|
||||||
|
short_candidates = response.get("candidates", [])
|
||||||
|
for c in short_candidates:
|
||||||
|
ticker = c.get("ticker", "").upper().strip()
|
||||||
|
context = c.get("context", "High short interest")
|
||||||
|
if re.match(r'^[A-Z]{1,5}$', ticker):
|
||||||
|
candidates.append({"ticker": ticker, "source": "short_squeeze", "context": context, "sentiment": "unknown"})
|
||||||
|
|
||||||
|
print(f" Found {len(short_candidates)} short squeeze candidates")
|
||||||
|
except Exception as e:
|
||||||
|
print(f" Error fetching Short Interest: {e}")
|
||||||
|
|
||||||
# Deduplicate
|
# Deduplicate
|
||||||
unique_candidates = {}
|
unique_candidates = {}
|
||||||
for c in candidates:
|
for c in candidates:
|
||||||
|
|
@ -323,7 +378,7 @@ Return a JSON object with a 'tickers' array containing only valid stock ticker s
|
||||||
|
|
||||||
final_candidates = list(unique_candidates.values())
|
final_candidates = list(unique_candidates.values())
|
||||||
print(f" Found {len(final_candidates)} unique candidates.")
|
print(f" Found {len(final_candidates)} unique candidates.")
|
||||||
return {"tickers": [c['ticker'] for c in final_candidates], "candidate_metadata": final_candidates, "status": "scanned"}
|
return {"tickers": [c['ticker'] for c in final_candidates], "candidate_metadata": final_candidates, "tool_logs": tool_logs, "status": "scanned"}
|
||||||
|
|
||||||
def filter_node(self, state: DiscoveryState):
|
def filter_node(self, state: DiscoveryState):
|
||||||
"""Filter candidates based on strategy (Contrarian vs Momentum)."""
|
"""Filter candidates based on strategy (Contrarian vs Momentum)."""
|
||||||
|
|
@ -451,6 +506,8 @@ Return a JSON object with a 'tickers' array containing only valid stock ticker s
|
||||||
|
|
||||||
def ranker_node(self, state: DiscoveryState):
|
def ranker_node(self, state: DiscoveryState):
|
||||||
"""Rank opportunities and select the best ones."""
|
"""Rank opportunities and select the best ones."""
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
opportunities = state["opportunities"]
|
opportunities = state["opportunities"]
|
||||||
print("🔍 Ranking opportunities...")
|
print("🔍 Ranking opportunities...")
|
||||||
|
|
||||||
|
|
@ -490,4 +547,17 @@ Return a JSON object with a 'tickers' array containing only valid stock ticker s
|
||||||
response = self.deep_thinking_llm.invoke([HumanMessage(content=prompt)])
|
response = self.deep_thinking_llm.invoke([HumanMessage(content=prompt)])
|
||||||
|
|
||||||
print(" Ranking complete.")
|
print(" Ranking complete.")
|
||||||
return {"status": "complete", "opportunities": opportunities, "final_ranking": response.content}
|
|
||||||
|
# Build result state
|
||||||
|
result_state = {
|
||||||
|
"status": "complete",
|
||||||
|
"opportunities": opportunities,
|
||||||
|
"final_ranking": response.content,
|
||||||
|
"tool_logs": state.get("tool_logs", [])
|
||||||
|
}
|
||||||
|
|
||||||
|
# Save results to files
|
||||||
|
trade_date = state.get("trade_date", datetime.now().strftime("%Y-%m-%d"))
|
||||||
|
self._save_results(result_state, trade_date)
|
||||||
|
|
||||||
|
return result_state
|
||||||
|
|
|
||||||
|
|
@ -62,11 +62,11 @@ class TradingAgentsGraph:
|
||||||
|
|
||||||
# Initialize LLMs
|
# Initialize LLMs
|
||||||
if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
|
if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
|
||||||
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
|
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], api_key=os.getenv("OPENAI_API_KEY"))
|
||||||
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
|
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], api_key=os.getenv("OPENAI_API_KEY"))
|
||||||
elif self.config["llm_provider"].lower() == "anthropic":
|
elif self.config["llm_provider"].lower() == "anthropic":
|
||||||
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
|
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], api_key=os.getenv("ANTHROPIC_API_KEY"))
|
||||||
self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
|
self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], api_key=os.getenv("ANTHROPIC_API_KEY"))
|
||||||
elif self.config["llm_provider"].lower() == "google":
|
elif self.config["llm_provider"].lower() == "google":
|
||||||
# Explicitly pass Google API key from environment
|
# Explicitly pass Google API key from environment
|
||||||
google_api_key = os.getenv("GOOGLE_API_KEY")
|
google_api_key = os.getenv("GOOGLE_API_KEY")
|
||||||
|
|
|
||||||
|
|
@ -3,6 +3,8 @@
|
||||||
from .llm_outputs import (
|
from .llm_outputs import (
|
||||||
TradeDecision,
|
TradeDecision,
|
||||||
TickerList,
|
TickerList,
|
||||||
|
TickerWithContext,
|
||||||
|
TickerContextList,
|
||||||
ThemeList,
|
ThemeList,
|
||||||
MarketMover,
|
MarketMover,
|
||||||
MarketMovers,
|
MarketMovers,
|
||||||
|
|
@ -15,6 +17,8 @@ from .llm_outputs import (
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"TradeDecision",
|
"TradeDecision",
|
||||||
"TickerList",
|
"TickerList",
|
||||||
|
"TickerWithContext",
|
||||||
|
"TickerContextList",
|
||||||
"ThemeList",
|
"ThemeList",
|
||||||
"MarketMovers",
|
"MarketMovers",
|
||||||
"MarketMover",
|
"MarketMover",
|
||||||
|
|
|
||||||
|
|
@ -35,6 +35,25 @@ class TickerList(BaseModel):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TickerWithContext(BaseModel):
|
||||||
|
"""Individual ticker with context description."""
|
||||||
|
|
||||||
|
ticker: str = Field(
|
||||||
|
description="Stock ticker symbol (1-5 uppercase letters)"
|
||||||
|
)
|
||||||
|
context: str = Field(
|
||||||
|
description="Brief description of why this ticker is relevant (key metrics, catalyst, etc.)"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class TickerContextList(BaseModel):
|
||||||
|
"""Structured output for tickers with context."""
|
||||||
|
|
||||||
|
candidates: List[TickerWithContext] = Field(
|
||||||
|
description="List of stock tickers with context explaining their relevance"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class ThemeList(BaseModel):
|
class ThemeList(BaseModel):
|
||||||
"""Structured output for market themes."""
|
"""Structured output for market themes."""
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,6 +21,8 @@ from tradingagents.dataflows.y_finance import (
|
||||||
get_income_statement as get_yfinance_income_statement,
|
get_income_statement as get_yfinance_income_statement,
|
||||||
get_insider_transactions as get_yfinance_insider_transactions,
|
get_insider_transactions as get_yfinance_insider_transactions,
|
||||||
validate_ticker as validate_ticker_yfinance,
|
validate_ticker as validate_ticker_yfinance,
|
||||||
|
get_fundamentals as get_yfinance_fundamentals,
|
||||||
|
get_options_activity as get_yfinance_options_activity,
|
||||||
)
|
)
|
||||||
from tradingagents.dataflows.alpha_vantage import (
|
from tradingagents.dataflows.alpha_vantage import (
|
||||||
get_stock as get_alpha_vantage_stock,
|
get_stock as get_alpha_vantage_stock,
|
||||||
|
|
@ -59,6 +61,18 @@ from tradingagents.dataflows.finnhub_api import (
|
||||||
from tradingagents.dataflows.twitter_data import (
|
from tradingagents.dataflows.twitter_data import (
|
||||||
get_tweets as get_twitter_tweets,
|
get_tweets as get_twitter_tweets,
|
||||||
)
|
)
|
||||||
|
from tradingagents.dataflows.alpha_vantage_volume import (
|
||||||
|
get_alpha_vantage_unusual_volume,
|
||||||
|
)
|
||||||
|
from tradingagents.dataflows.alpha_vantage_analysts import (
|
||||||
|
get_alpha_vantage_analyst_changes,
|
||||||
|
)
|
||||||
|
from tradingagents.dataflows.tradier_api import (
|
||||||
|
get_tradier_unusual_options,
|
||||||
|
)
|
||||||
|
from tradingagents.dataflows.finviz_scraper import (
|
||||||
|
get_finviz_short_interest,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|
@ -77,7 +91,7 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
|
||||||
"yfinance": get_YFin_data_online,
|
"yfinance": get_YFin_data_online,
|
||||||
"alpha_vantage": get_alpha_vantage_stock,
|
"alpha_vantage": get_alpha_vantage_stock,
|
||||||
},
|
},
|
||||||
"vendor_priority": ["yfinance", "alpha_vantage"],
|
"vendor_priority": ["yfinance"],
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"symbol": {"type": "str", "description": "Ticker symbol of the company (e.g., AAPL)"},
|
"symbol": {"type": "str", "description": "Ticker symbol of the company (e.g., AAPL)"},
|
||||||
"start_date": {"type": "str", "description": "Start date in yyyy-mm-dd format"},
|
"start_date": {"type": "str", "description": "Start date in yyyy-mm-dd format"},
|
||||||
|
|
@ -110,9 +124,9 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
|
||||||
"yfinance": get_stock_stats_indicators_window,
|
"yfinance": get_stock_stats_indicators_window,
|
||||||
"alpha_vantage": get_alpha_vantage_indicator,
|
"alpha_vantage": get_alpha_vantage_indicator,
|
||||||
},
|
},
|
||||||
"vendor_priority": ["yfinance", "alpha_vantage"],
|
"vendor_priority": ["yfinance"],
|
||||||
"execution_mode": "aggregate",
|
"execution_mode": "aggregate",
|
||||||
"aggregate_vendors": ["yfinance", "alpha_vantage"],
|
"aggregate_vendors": ["yfinance"],
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"symbol": {"type": "str", "description": "Ticker symbol"},
|
"symbol": {"type": "str", "description": "Ticker symbol"},
|
||||||
"indicator": {"type": "str", "description": "Technical indicator (rsi, macd, sma, ema, etc.)"},
|
"indicator": {"type": "str", "description": "Technical indicator (rsi, macd, sma, ema, etc.)"},
|
||||||
|
|
@ -129,10 +143,11 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
|
||||||
"category": "fundamental_data",
|
"category": "fundamental_data",
|
||||||
"agents": ["fundamentals"],
|
"agents": ["fundamentals"],
|
||||||
"vendors": {
|
"vendors": {
|
||||||
|
"yfinance": get_yfinance_fundamentals,
|
||||||
"alpha_vantage": get_alpha_vantage_fundamentals,
|
"alpha_vantage": get_alpha_vantage_fundamentals,
|
||||||
"openai": get_fundamentals_openai,
|
"openai": get_fundamentals_openai,
|
||||||
},
|
},
|
||||||
"vendor_priority": ["alpha_vantage", "openai"],
|
"vendor_priority": ["yfinance", "openai"],
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"ticker": {"type": "str", "description": "Ticker symbol"},
|
"ticker": {"type": "str", "description": "Ticker symbol"},
|
||||||
"curr_date": {"type": "str", "description": "Current date, yyyy-mm-dd"},
|
"curr_date": {"type": "str", "description": "Current date, yyyy-mm-dd"},
|
||||||
|
|
@ -232,7 +247,7 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
|
||||||
"reddit": get_reddit_api_global_news,
|
"reddit": get_reddit_api_global_news,
|
||||||
"alpha_vantage": get_alpha_vantage_global_news,
|
"alpha_vantage": get_alpha_vantage_global_news,
|
||||||
},
|
},
|
||||||
"vendor_priority": ["openai", "google", "reddit", "alpha_vantage"],
|
"vendor_priority": ["openai", "google", "reddit"],
|
||||||
"execution_mode": "aggregate",
|
"execution_mode": "aggregate",
|
||||||
"parameters": {
|
"parameters": {
|
||||||
"date": {"type": "str", "description": "Date for news, yyyy-mm-dd"},
|
"date": {"type": "str", "description": "Date for news, yyyy-mm-dd"},
|
||||||
|
|
@ -347,6 +362,72 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
|
||||||
"returns": "str: Formatted IPO calendar with pricing and share details",
|
"returns": "str: Formatted IPO calendar with pricing and share details",
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"get_unusual_volume": {
|
||||||
|
"description": "Find stocks with unusual volume but minimal price movement (accumulation signal)",
|
||||||
|
"category": "discovery",
|
||||||
|
"agents": [],
|
||||||
|
"vendors": {
|
||||||
|
"alpha_vantage": get_alpha_vantage_unusual_volume,
|
||||||
|
},
|
||||||
|
"vendor_priority": ["alpha_vantage"],
|
||||||
|
"parameters": {
|
||||||
|
"date": {"type": "str", "description": "Analysis date in yyyy-mm-dd format", "default": None},
|
||||||
|
"min_volume_multiple": {"type": "float", "description": "Minimum volume multiple vs average", "default": 3.0},
|
||||||
|
"max_price_change": {"type": "float", "description": "Maximum price change percentage", "default": 5.0},
|
||||||
|
"top_n": {"type": "int", "description": "Number of top results to return", "default": 20},
|
||||||
|
},
|
||||||
|
"returns": "str: Formatted report of stocks with unusual volume patterns",
|
||||||
|
},
|
||||||
|
|
||||||
|
"get_unusual_options_activity": {
|
||||||
|
"description": "Analyze options activity for specific tickers as confirmation signal (not for primary discovery)",
|
||||||
|
"category": "discovery",
|
||||||
|
"agents": [],
|
||||||
|
"vendors": {
|
||||||
|
"yfinance": get_yfinance_options_activity,
|
||||||
|
"tradier": get_tradier_unusual_options,
|
||||||
|
},
|
||||||
|
"vendor_priority": ["yfinance"],
|
||||||
|
"parameters": {
|
||||||
|
"ticker": {"type": "str", "description": "Ticker symbol to analyze"},
|
||||||
|
"num_expirations": {"type": "int", "description": "Number of nearest expiration dates to analyze", "default": 3},
|
||||||
|
"curr_date": {"type": "str", "description": "Analysis date for reference", "default": None},
|
||||||
|
},
|
||||||
|
"returns": "str: Formatted report of options activity with put/call ratios",
|
||||||
|
},
|
||||||
|
|
||||||
|
"get_analyst_rating_changes": {
|
||||||
|
"description": "Track recent analyst upgrades/downgrades and price target changes",
|
||||||
|
"category": "discovery",
|
||||||
|
"agents": [],
|
||||||
|
"vendors": {
|
||||||
|
"alpha_vantage": get_alpha_vantage_analyst_changes,
|
||||||
|
},
|
||||||
|
"vendor_priority": ["alpha_vantage"],
|
||||||
|
"parameters": {
|
||||||
|
"lookback_days": {"type": "int", "description": "Number of days to look back", "default": 7},
|
||||||
|
"change_types": {"type": "list", "description": "Types of changes to track", "default": ["upgrade", "downgrade", "initiated"]},
|
||||||
|
"top_n": {"type": "int", "description": "Number of top results", "default": 20},
|
||||||
|
},
|
||||||
|
"returns": "str: Formatted report of recent analyst rating changes with freshness indicators",
|
||||||
|
},
|
||||||
|
|
||||||
|
"get_short_interest": {
|
||||||
|
"description": "Discover stocks with high short interest by scraping Finviz screener (squeeze candidates)",
|
||||||
|
"category": "discovery",
|
||||||
|
"agents": [],
|
||||||
|
"vendors": {
|
||||||
|
"finviz": get_finviz_short_interest,
|
||||||
|
},
|
||||||
|
"vendor_priority": ["finviz"],
|
||||||
|
"parameters": {
|
||||||
|
"min_short_interest_pct": {"type": "float", "description": "Minimum short interest % of float", "default": 10.0},
|
||||||
|
"min_days_to_cover": {"type": "float", "description": "Minimum days to cover ratio", "default": 2.0},
|
||||||
|
"top_n": {"type": "int", "description": "Number of top results", "default": 20},
|
||||||
|
},
|
||||||
|
"returns": "str: Formatted report of discovered high short interest stocks with squeeze potential",
|
||||||
|
},
|
||||||
|
|
||||||
"get_reddit_discussions": {
|
"get_reddit_discussions": {
|
||||||
"description": "Get Reddit discussions about a specific ticker",
|
"description": "Get Reddit discussions about a specific ticker",
|
||||||
"category": "news_data",
|
"category": "news_data",
|
||||||
|
|
@ -362,6 +443,23 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
|
||||||
},
|
},
|
||||||
"returns": "str: Reddit discussions and sentiment",
|
"returns": "str: Reddit discussions and sentiment",
|
||||||
},
|
},
|
||||||
|
|
||||||
|
"get_options_activity": {
|
||||||
|
"description": "Get options activity for a specific ticker (volume, open interest, put/call ratios, unusual activity)",
|
||||||
|
"category": "discovery",
|
||||||
|
"agents": ["fundamentals"],
|
||||||
|
"vendors": {
|
||||||
|
"yfinance": get_yfinance_options_activity,
|
||||||
|
"tradier": get_tradier_unusual_options,
|
||||||
|
},
|
||||||
|
"vendor_priority": ["yfinance"],
|
||||||
|
"parameters": {
|
||||||
|
"ticker": {"type": "str", "description": "Ticker symbol"},
|
||||||
|
"num_expirations": {"type": "int", "description": "Number of nearest expiration dates to analyze", "default": 3},
|
||||||
|
"curr_date": {"type": "str", "description": "Current date for reference", "default": None},
|
||||||
|
},
|
||||||
|
"returns": "str: Options activity report with volume, OI, P/C ratios, and unusual activity",
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue