feat: add daily discovery workflow, recommendation history, and scanner improvements

- Add GitHub Actions workflow for daily discovery (8:30 AM ET, weekdays)
- Add headless run_daily_discovery.py script for scheduling
- Expand options_flow scanner to use tickers.txt with parallel execution
- Add recommendation history section to Performance page with filters and charts
- Fix strategy name normalization (momentum/Momentum/Momentum-Hype → momentum)
- Fix strategy metrics to count all recs, not just evaluated ones
- Add error handling to Streamlit page rendering

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Youssef Aitousarrah 2026-02-11 22:07:02 -08:00
parent ab8d174990
commit f4aceef857
10 changed files with 1871 additions and 888 deletions

.github/workflows/daily-discovery.yml (new file, +120)

@@ -0,0 +1,120 @@
name: Daily Discovery
on:
schedule:
    # 8:30 AM EST (13:30 UTC) on weekdays; GitHub cron is UTC-only, so this fires at 9:30 AM ET during daylight saving
- cron: "30 13 * * 1-5"
workflow_dispatch:
# Manual trigger with optional overrides
inputs:
date:
description: "Analysis date (YYYY-MM-DD, blank = today)"
required: false
default: ""
provider:
description: "LLM provider"
required: false
default: "google"
type: choice
options:
- google
- openai
- anthropic
env:
PYTHON_VERSION: "3.10"
jobs:
discovery:
runs-on: ubuntu-latest
environment: TradingAgent
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: pip
- name: Install dependencies
run: |
pip install --upgrade pip
pip install -e .
- name: Determine analysis date
id: date
run: |
if [ -n "${{ github.event.inputs.date }}" ]; then
echo "analysis_date=${{ github.event.inputs.date }}" >> "$GITHUB_OUTPUT"
else
echo "analysis_date=$(date -u +%Y-%m-%d)" >> "$GITHUB_OUTPUT"
fi
- name: Run discovery pipeline
env:
# LLM keys (set whichever provider you use)
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
# Data source keys
FINNHUB_API_KEY: ${{ secrets.FINNHUB_API_KEY }}
ALPHA_VANTAGE_API_KEY: ${{ secrets.ALPHA_VANTAGE_API_KEY }}
FMP_API_KEY: ${{ secrets.FMP_API_KEY }}
REDDIT_CLIENT_ID: ${{ secrets.REDDIT_CLIENT_ID }}
REDDIT_CLIENT_SECRET: ${{ secrets.REDDIT_CLIENT_SECRET }}
TRADIER_API_KEY: ${{ secrets.TRADIER_API_KEY }}
run: |
python scripts/run_daily_discovery.py \
--date "${{ steps.date.outputs.analysis_date }}" \
--no-update-positions
- name: Commit recommendations to repo
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
# Stage new/updated recommendation files
git add data/recommendations/ || true
git add results/ || true
# Only commit if there are changes
if git diff --cached --quiet; then
echo "No new recommendations to commit"
else
git commit -m "chore: daily discovery ${{ steps.date.outputs.analysis_date }}"
git push
fi
- name: Update positions
if: success()
env:
FINNHUB_API_KEY: ${{ secrets.FINNHUB_API_KEY }}
run: |
python scripts/update_positions.py
- name: Commit position updates
if: success()
run: |
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add data/recommendations/ || true
if git diff --cached --quiet; then
echo "No position updates"
else
git commit -m "chore: update positions ${{ steps.date.outputs.analysis_date }}"
git push
fi
- name: Upload results as artifact
if: always()
uses: actions/upload-artifact@v4
with:
name: discovery-${{ steps.date.outputs.analysis_date }}
path: |
data/recommendations/${{ steps.date.outputs.analysis_date }}*.json
results/discovery/${{ steps.date.outputs.analysis_date }}/
retention-days: 30

File diff suppressed because it is too large.

@@ -1,36 +1,36 @@
{
"total_recommendations": 185,
"total_recommendations": 200,
"by_strategy": {
"momentum": {
"count": 92,
"wins_1d": 45,
"losses_1d": 33,
"wins_1d": 47,
"losses_1d": 45,
"wins_7d": 25,
"losses_7d": 23,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 1.0,
"avg_return_1d": 0.32,
"avg_return_7d": 0.71,
"avg_return_30d": 0,
"win_rate_1d": 57.7,
"win_rate_1d": 51.1,
"win_rate_7d": 52.1
},
"volume_accumulation": {
"count": 2,
"wins_1d": 1,
"losses_1d": 0,
"losses_1d": 1,
"wins_7d": 1,
"losses_7d": 0,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 19.7,
"avg_return_1d": 7.41,
"avg_return_7d": 19.7,
"avg_return_30d": 0,
"win_rate_1d": 100.0,
"win_rate_1d": 50.0,
"win_rate_7d": 100.0
},
"insider_buying": {
"count": 21,
"count": 25,
"wins_1d": 15,
"losses_1d": 6,
"wins_7d": 10,
@@ -44,7 +44,7 @@
"win_rate_7d": 66.7
},
"options_flow": {
"count": 5,
"count": 11,
"wins_1d": 4,
"losses_1d": 1,
"wins_7d": 0,
@@ -57,18 +57,18 @@
"win_rate_1d": 80.0
},
"earnings_calendar": {
"count": 17,
"count": 20,
"wins_1d": 6,
"losses_1d": 11,
"wins_7d": 4,
"wins_7d": 5,
"losses_7d": 8,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": -0.23,
"avg_return_7d": 0.36,
"avg_return_7d": 2.79,
"avg_return_30d": 0,
"win_rate_1d": 35.3,
"win_rate_7d": 33.3
"win_rate_7d": 38.5
},
"contrarian_value": {
"count": 6,
@@ -102,15 +102,15 @@
"count": 10,
"wins_1d": 5,
"losses_1d": 5,
"wins_7d": 4,
"losses_7d": 3,
"wins_7d": 6,
"losses_7d": 4,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 0.56,
"avg_return_7d": 0.85,
"avg_return_7d": 2.15,
"avg_return_30d": 0,
"win_rate_1d": 50.0,
"win_rate_7d": 57.1
"win_rate_7d": 60.0
},
"early_accumulation": {
"count": 1,
@@ -131,28 +131,28 @@
"wins_1d": 2,
"losses_1d": 5,
"wins_7d": 2,
"losses_7d": 4,
"losses_7d": 5,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": -2.0,
"avg_return_7d": -2.06,
"avg_return_7d": -1.94,
"avg_return_30d": 0,
"win_rate_1d": 28.6,
"win_rate_7d": 33.3
"win_rate_7d": 28.6
},
"analyst_upgrade": {
"count": 8,
"wins_1d": 6,
"losses_1d": 2,
"wins_7d": 4,
"wins_7d": 6,
"losses_7d": 2,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 1.32,
"avg_return_7d": -0.1,
"avg_return_7d": 0.99,
"avg_return_30d": 0,
"win_rate_1d": 75.0,
"win_rate_7d": 66.7
"win_rate_7d": 75.0
},
"ipo_opportunity": {
"count": 1,
@@ -201,78 +201,95 @@
"wins_1d": 1,
"losses_1d": 1,
"wins_7d": 0,
"losses_7d": 0,
"losses_7d": 2,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": -3.38,
"avg_return_7d": 0,
"avg_return_7d": -3.85,
"avg_return_30d": 0,
"win_rate_1d": 50.0
"win_rate_1d": 50.0,
"win_rate_7d": 0.0
},
"momentum_options": {
"count": 2,
"wins_1d": 1,
"losses_1d": 1,
"wins_7d": 0,
"wins_7d": 2,
"losses_7d": 0,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 0.93,
"avg_return_7d": 0,
"avg_return_7d": 2.27,
"avg_return_30d": 0,
"win_rate_1d": 50.0
"win_rate_1d": 50.0,
"win_rate_7d": 100.0
},
"oversold_reversal": {
"count": 1,
"wins_1d": 0,
"losses_1d": 1,
"wins_7d": 0,
"losses_7d": 0,
"losses_7d": 1,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": -5.11,
"avg_return_7d": 0,
"avg_return_7d": -7.41,
"avg_return_30d": 0,
"win_rate_1d": 0.0
"win_rate_1d": 0.0,
"win_rate_7d": 0.0
},
"earnings_reversal": {
"count": 2,
"wins_1d": 1,
"losses_1d": 1,
"wins_7d": 0,
"losses_7d": 0,
"wins_7d": 1,
"losses_7d": 1,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": -1.47,
"avg_return_7d": 0,
"avg_return_7d": -2.82,
"avg_return_30d": 0,
"win_rate_1d": 50.0
"win_rate_1d": 50.0,
"win_rate_7d": 50.0
},
"earnings_growth": {
"count": 1,
"wins_1d": 1,
"losses_1d": 0,
"wins_7d": 0,
"losses_7d": 0,
"losses_7d": 1,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 1.36,
"avg_return_7d": 0,
"avg_return_7d": -1.94,
"avg_return_30d": 0,
"win_rate_1d": 100.0
"win_rate_1d": 100.0,
"win_rate_7d": 0.0
},
"reddit_dd": {
"count": 2,
"wins_1d": 0,
"losses_1d": 0,
"wins_7d": 0,
"losses_7d": 0,
"wins_30d": 0,
"losses_30d": 0,
"avg_return_1d": 0,
"avg_return_7d": 0,
"avg_return_30d": 0
}
},
"overall_1d": {
"count": 170,
"wins": 94,
"avg_return": 0.26,
"win_rate": 55.3
"count": 185,
"wins": 96,
"avg_return": -0.05,
"win_rate": 51.9
},
"overall_7d": {
"count": 110,
"wins": 56,
"avg_return": -0.18,
"win_rate": 50.9
"count": 125,
"wins": 64,
"avg_return": 0.13,
"win_rate": 51.2
},
"overall_30d": {
"count": 0,

scripts/run_daily_discovery.py (new executable file, +146)

@@ -0,0 +1,146 @@
#!/usr/bin/env python3
"""
Daily Discovery Runner: a non-interactive script for cron/launchd scheduling.
Runs the full discovery pipeline (scan → filter → rank), saves recommendations,
and updates position tracking. Designed to run before market open (~8:30 AM ET).
Usage:
python scripts/run_daily_discovery.py # Uses defaults
python scripts/run_daily_discovery.py --date 2026-02-12 # Specific date
python scripts/run_daily_discovery.py --provider google # Override LLM provider
Scheduling (macOS launchd):
See the companion plist at scripts/com.tradingagents.discovery.plist
Scheduling (cron):
30 13 * * 1-5 cd /path/to/TradingAgents && .venv/bin/python scripts/run_daily_discovery.py >> logs/discovery_cron.log 2>&1
    (13:30 UTC = 8:30 AM EST / 9:30 AM EDT, weekdays only; cron runs in UTC, so adjust during daylight saving if needed)
"""
import argparse
import json
import os
import sys
from datetime import datetime
from pathlib import Path
# Ensure project root is on sys.path
ROOT = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(ROOT))
os.chdir(ROOT)
from tradingagents.dataflows.config import set_config
from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.graph.discovery_graph import DiscoveryGraph
from tradingagents.utils.logger import get_logger
logger = get_logger("daily_discovery")
def parse_args():
parser = argparse.ArgumentParser(description="Run daily discovery pipeline")
parser.add_argument(
"--date",
default=datetime.now().strftime("%Y-%m-%d"),
help="Analysis date (YYYY-MM-DD), defaults to today",
)
parser.add_argument(
"--provider",
default=None,
help="LLM provider override (openai, google, anthropic)",
)
parser.add_argument(
"--shallow-model",
default=None,
help="Override quick_think_llm model name",
)
parser.add_argument(
"--deep-model",
default=None,
help="Override deep_think_llm model name",
)
parser.add_argument(
"--update-positions",
action="store_true",
default=True,
help="Update position tracking after discovery (default: True)",
)
parser.add_argument(
"--no-update-positions",
action="store_false",
dest="update_positions",
)
return parser.parse_args()
def run_discovery(args):
"""Run the discovery pipeline with the given arguments."""
config = DEFAULT_CONFIG.copy()
# Apply overrides
if args.provider:
config["llm_provider"] = args.provider.lower()
if args.shallow_model:
config["quick_think_llm"] = args.shallow_model
if args.deep_model:
config["deep_think_llm"] = args.deep_model
set_config(config)
# Create results directory
run_timestamp = datetime.now().strftime("%H_%M_%S")
results_dir = Path(config["results_dir"]) / "discovery" / args.date / f"run_{run_timestamp}"
results_dir.mkdir(parents=True, exist_ok=True)
config["discovery_run_dir"] = str(results_dir)
logger.info(f"Starting daily discovery for {args.date}")
logger.info(
f"Provider: {config['llm_provider']} | "
f"Shallow: {config['quick_think_llm']} | "
f"Deep: {config['deep_think_llm']}"
)
# Run discovery
graph = DiscoveryGraph(config=config)
result = graph.run(trade_date=args.date)
final_ranking = result.get("final_ranking", "No ranking available")
logger.info(f"Discovery complete. Results saved to {results_dir}")
return result
def update_positions():
"""Run position updates after discovery."""
try:
from scripts.update_positions import main as update_main
logger.info("Updating position tracking...")
update_main()
except Exception as e:
logger.error(f"Position update failed: {e}")
def main():
args = parse_args()
logger.info("=" * 60)
logger.info(f"DAILY DISCOVERY RUN — {datetime.now().isoformat()}")
logger.info("=" * 60)
try:
result = run_discovery(args)
if args.update_positions:
update_positions()
logger.info("Daily discovery completed successfully")
except Exception as e:
logger.error(f"Discovery failed: {e}", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main()
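For reference, a minimal sketch (not part of the commit) of driving the pipeline programmatically instead of via the CLI; the Namespace fields mirror parse_args() above, and the import path assumes the repo root is on sys.path:

from argparse import Namespace
from scripts.run_daily_discovery import run_discovery

args = Namespace(
    date="2026-02-12",        # analysis date, YYYY-MM-DD
    provider="google",        # or None to keep the provider from DEFAULT_CONFIG
    shallow_model=None,       # keep quick_think_llm default
    deep_model=None,          # keep deep_think_llm default
    update_positions=False,   # skip position tracking for a dry run
)
result = run_discovery(args)
print(result.get("final_ranking", "No ranking available"))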


@@ -1,6 +1,12 @@
"""Unusual options activity scanner."""
"""Unusual options activity scanner.

Scans a ticker universe (loaded from data/tickers.txt by default) for
unusual options volume relative to open interest. Uses ThreadPoolExecutor
for parallel chain fetching so large universes remain practical.
"""
from typing import Any, Dict, List
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Optional
from tradingagents.dataflows.discovery.scanner_registry import SCANNER_REGISTRY, BaseScanner
from tradingagents.dataflows.y_finance import get_option_chain, get_ticker_options
@@ -8,9 +14,30 @@ from tradingagents.utils.logger import get_logger
logger = get_logger(__name__)
DEFAULT_TICKER_FILE = "data/tickers.txt"
def _load_tickers_from_file(path: str) -> List[str]:
"""Load ticker symbols from a text file (one per line, # comments allowed)."""
try:
with open(path) as f:
tickers = [
line.strip().upper()
for line in f
if line.strip() and not line.strip().startswith("#")
]
if tickers:
logger.info(f"Options scanner: loaded {len(tickers)} tickers from {path}")
return tickers
except FileNotFoundError:
logger.warning(f"Ticker file not found: {path}")
except Exception as e:
logger.warning(f"Failed to load ticker file {path}: {e}")
return []
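# Illustrative note (not in the original file): with a data/tickers.txt such as
#
#     # mega-cap tech
#     AAPL
#     MSFT
#     nvda
#
# _load_tickers_from_file("data/tickers.txt") returns ["AAPL", "MSFT", "NVDA"]:
# comment lines and blanks are skipped, and symbols are upper-cased.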
class OptionsFlowScanner(BaseScanner):
"""Scan for unusual options activity."""
"""Scan for unusual options activity across a ticker universe."""
name = "options_flow"
pipeline = "edge"
@@ -20,32 +47,55 @@ class OptionsFlowScanner(BaseScanner):
self.min_volume_oi_ratio = self.scanner_config.get("unusual_volume_multiple", 2.0)
self.min_volume = self.scanner_config.get("min_volume", 1000)
self.min_premium = self.scanner_config.get("min_premium", 25000)
self.ticker_universe = self.scanner_config.get(
"ticker_universe", ["AAPL", "MSFT", "GOOGL", "AMZN", "META", "NVDA", "AMD", "TSLA"]
)
self.max_tickers = self.scanner_config.get("max_tickers", 150)
self.max_workers = self.scanner_config.get("max_workers", 8)
# Load universe: explicit list > ticker_file > default file
if "ticker_universe" in self.scanner_config:
self.ticker_universe = self.scanner_config["ticker_universe"]
else:
ticker_file = self.scanner_config.get(
"ticker_file",
config.get("tickers_file", DEFAULT_TICKER_FILE),
)
self.ticker_universe = _load_tickers_from_file(ticker_file)
if not self.ticker_universe:
logger.warning("No tickers loaded — options scanner will be empty")
def scan(self, state: Dict[str, Any]) -> List[Dict[str, Any]]:
if not self.is_enabled():
return []
logger.info("Scanning unusual options activity...")
universe = self.ticker_universe[: self.max_tickers]
logger.info(
f"Scanning {len(universe)} tickers for unusual options activity "
f"({self.max_workers} workers)..."
)
candidates = []
candidates: List[Dict[str, Any]] = []
for ticker in self.ticker_universe[:20]: # Limit for speed
try:
unusual = self._analyze_ticker_options(ticker)
if unusual:
candidates.append(unusual)
if len(candidates) >= self.limit:
break
except Exception:
continue
with ThreadPoolExecutor(max_workers=self.max_workers) as pool:
futures = {
pool.submit(self._analyze_ticker_options, ticker): ticker
for ticker in universe
}
for future in as_completed(futures):
try:
result = future.result()
if result:
candidates.append(result)
if len(candidates) >= self.limit:
# Cancel remaining futures
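# Added note: Future.cancel() only prevents tasks that have not started yet;
# option-chain fetches already running on worker threads still complete.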
for f in futures:
f.cancel()
break
except Exception:
continue
logger.info(f"Found {len(candidates)} unusual options flows")
return candidates
def _analyze_ticker_options(self, ticker: str) -> Dict[str, Any]:
def _analyze_ticker_options(self, ticker: str) -> Optional[Dict[str, Any]]:
try:
expirations = get_ticker_options(ticker)
if not expirations:
@@ -58,8 +108,8 @@ class OptionsFlowScanner(BaseScanner):
# Find unusual strikes
unusual_strikes = []
for _, opt in calls.iterrows():
vol = opt.get("volume", 0)
oi = opt.get("openInterest", 0)
vol = opt.get("volume", 0) or 0
oi = opt.get("openInterest", 0) or 0
if oi > 0 and vol > self.min_volume and (vol / oi) >= self.min_volume_oi_ratio:
unusual_strikes.append(
{"type": "call", "strike": opt["strike"], "volume": vol, "oi": oi}
@@ -78,7 +128,10 @@ class OptionsFlowScanner(BaseScanner):
return {
"ticker": ticker,
"source": self.name,
"context": f"Unusual options: {len(unusual_strikes)} strikes, P/C={pc_ratio:.2f} ({sentiment})",
"context": (
f"Unusual options: {len(unusual_strikes)} strikes, "
f"P/C={pc_ratio:.2f} ({sentiment})"
),
"priority": "high" if sentiment == "bullish" else "medium",
"strategy": "options_flow",
"put_call_ratio": round(pc_ratio, 2),


@@ -137,28 +137,10 @@ DEFAULT_CONFIG = {
"unusual_volume_multiple": 2.0, # Min volume/OI ratio for unusual activity
"min_premium": 25000, # Minimum premium ($) to filter noise
"min_volume": 1000, # Minimum option volume to consider
"ticker_universe": [
"AAPL",
"MSFT",
"GOOGL",
"AMZN",
"META",
"NVDA",
"AMD",
"TSLA",
"TSMC",
"ASML",
"AVGO",
"ORCL",
"CRM",
"ADBE",
"INTC",
"QCOM",
"TXN",
"AMAT",
"LRCX",
"KLAC",
], # Top 20 liquid options
# ticker_file: path to ticker list (defaults to tickers_file from root config)
# ticker_universe: explicit list overrides ticker_file if set
"max_tickers": 150, # Max tickers to scan (from start of file)
"max_workers": 8, # Parallel option chain fetch threads
},
"congress_trades": {
"enabled": False,
@@ -178,7 +160,7 @@ DEFAULT_CONFIG = {
"compression_min_volume_ratio": 1.3, # Min volume ratio for compression
},
"market_movers": {
"enabled": True,
"enabled": False,
"pipeline": "momentum",
"limit": 10,
},
@@ -195,7 +177,7 @@ DEFAULT_CONFIG = {
"news_lookback_days": 0.5, # Days of news history to analyze
},
"analyst_upgrade": {
"enabled": False,
"enabled": True,
"pipeline": "news",
"limit": 5,
"lookback_days": 1, # Days to look back for rating changes
@@ -221,7 +203,7 @@ DEFAULT_CONFIG = {
"min_market_cap": 0, # Minimum market cap in billions (0 = no filter)
},
"short_squeeze": {
"enabled": False,
"enabled": True,
"pipeline": "events",
"limit": 5,
"min_short_interest_pct": 15.0, # Minimum short interest %


@@ -119,10 +119,16 @@ def route_page(page):
"Config": pages.settings,
}
module = page_map.get(page)
if module:
module.render()
else:
if module is None:
st.error(f"Unknown page: {page}")
return
try:
module.render()
except Exception as exc:
st.error(f"Error rendering {page}: {exc}")
import traceback
st.code(traceback.format_exc(), language="python")
def main():


@@ -5,29 +5,38 @@ This package contains all page modules that can be rendered in the dashboard.
Each module should have a render() function that displays the page content.
"""
import logging
_logger = logging.getLogger(__name__)
try:
from tradingagents.ui.pages import home
except ImportError:
except Exception as _e:
_logger.error("Failed to import home page: %s", _e, exc_info=True)
home = None
try:
from tradingagents.ui.pages import todays_picks
except ImportError:
except Exception as _e:
_logger.error("Failed to import todays_picks page: %s", _e, exc_info=True)
todays_picks = None
try:
from tradingagents.ui.pages import portfolio
except ImportError:
except Exception as _e:
_logger.error("Failed to import portfolio page: %s", _e, exc_info=True)
portfolio = None
try:
from tradingagents.ui.pages import performance
except ImportError:
except Exception as _e:
_logger.error("Failed to import performance page: %s", _e, exc_info=True)
performance = None
try:
from tradingagents.ui.pages import settings
except ImportError:
except Exception as _e:
_logger.error("Failed to import settings page: %s", _e, exc_info=True)
settings = None


@@ -2,16 +2,21 @@
Performance analytics page: strategy comparison and win/loss analysis.
Shows strategy scatter plot with themed Plotly charts, per-strategy
breakdown table, and win rate distribution.
breakdown table, win rate distribution, and full recommendation history.
"""
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
from tradingagents.ui.theme import COLORS, get_plotly_template, page_header
from tradingagents.ui.utils import load_statistics, load_strategy_metrics
from tradingagents.ui.theme import COLORS, get_plotly_template, page_header, pnl_color
from tradingagents.ui.utils import (
load_performance_database,
load_statistics,
load_strategy_metrics,
)
def render() -> None:
@@ -35,8 +40,19 @@ def render() -> None:
# ---- Summary KPIs ----
total_trades = df["Count"].sum()
avg_wr = (df["Win Rate"] * df["Count"]).sum() / total_trades if total_trades > 0 else 0
avg_ret = (df["Avg Return"] * df["Count"]).sum() / total_trades if total_trades > 0 else 0
# Weighted averages only over strategies that have evaluated data (non-NaN)
eval_df = df.dropna(subset=["Win Rate", "Avg Return"])
eval_trades = eval_df["Count"].sum()
avg_wr = (
(eval_df["Win Rate"] * eval_df["Count"]).sum() / eval_trades
if eval_trades > 0
else 0
)
avg_ret = (
(eval_df["Avg Return"] * eval_df["Count"]).sum() / eval_trades
if eval_trades > 0
else 0
)
n_strategies = len(df)
cols = st.columns(4)
@@ -107,16 +123,16 @@ def render() -> None:
unsafe_allow_html=True,
)
df_sorted = df.sort_values("Win Rate", ascending=True)
colors = [COLORS["green"] if wr >= 50 else COLORS["red"] for wr in df_sorted["Win Rate"]]
df_bar = df.dropna(subset=["Win Rate"]).sort_values("Win Rate", ascending=True)
colors = [COLORS["green"] if wr >= 50 else COLORS["red"] for wr in df_bar["Win Rate"]]
fig_bar = go.Figure(
go.Bar(
x=df_sorted["Win Rate"],
y=df_sorted["Strategy"],
x=df_bar["Win Rate"],
y=df_bar["Strategy"],
orientation="h",
marker_color=colors,
text=[f"{wr:.0f}%" for wr in df_sorted["Win Rate"]],
text=[f"{wr:.0f}%" for wr in df_bar["Win Rate"]],
textposition="auto",
textfont=dict(family="JetBrains Mono", size=11, color=COLORS["text_primary"]),
)
@@ -169,18 +185,334 @@
{
"Strategy": strat_name,
"Count": data.get("count", 0),
"Win Rate 1d": (
f"{data.get('win_rate_1d', 0):.0f}%" if "win_rate_1d" in data else "N/A"
"Win Rate 1d": data.get("win_rate_1d") if "win_rate_1d" in data else None,
"Avg Ret 1d": data.get("avg_return_1d") if "avg_return_1d" in data else None,
"W/L 1d": (
f"{data.get('wins_1d', 0)}W/{data.get('losses_1d', 0)}L"
if data.get("wins_1d", 0) + data.get("losses_1d", 0) > 0
else ""
),
"Win Rate 7d": (
f"{data.get('win_rate_7d', 0):.0f}%" if "win_rate_7d" in data else "N/A"
"Win Rate 7d": data.get("win_rate_7d") if "win_rate_7d" in data else None,
"Avg Ret 7d": data.get("avg_return_7d") if "avg_return_7d" in data else None,
"W/L 7d": (
f"{data.get('wins_7d', 0)}W/{data.get('losses_7d', 0)}L"
if data.get("wins_7d", 0) + data.get("losses_7d", 0) > 0
else ""
),
"Wins 1d": data.get("wins_1d", 0),
"Losses 1d": data.get("losses_1d", 0),
"Wins 7d": data.get("wins_7d", 0),
"Losses 7d": data.get("losses_7d", 0),
}
)
if rows:
st.dataframe(pd.DataFrame(rows), width="stretch", hide_index=True)
period_df = pd.DataFrame(rows).sort_values("Count", ascending=False)
st.dataframe(
period_df,
width="stretch",
hide_index=True,
column_config={
"Count": st.column_config.NumberColumn(format="%d"),
"Win Rate 1d": st.column_config.NumberColumn(format="%.1f%%"),
"Avg Ret 1d": st.column_config.NumberColumn(format="%+.2f%%"),
"Win Rate 7d": st.column_config.NumberColumn(format="%.1f%%"),
"Avg Ret 7d": st.column_config.NumberColumn(format="%+.2f%%"),
},
)
# ---- Recommendation History ----
_render_recommendation_history(template)
# ---------------------------------------------------------------------------
# Recommendation history helpers
# ---------------------------------------------------------------------------
def _return_cell(val) -> str:
"""Format a return value as a colored HTML span."""
if val is None or (isinstance(val, float) and np.isnan(val)):
return '<span style="color:{c};">—</span>'.format(c=COLORS["text_muted"])
color = pnl_color(val)
return f'<span style="color:{color};font-weight:600;">{val:+.2f}%</span>'
def _win_dot(val) -> str:
"""Green/red dot for win/loss boolean."""
if val is None or (isinstance(val, float) and np.isnan(val)):
return ""
color = COLORS["green"] if val else COLORS["red"]
return f'<span style="color:{color};font-size:0.7rem;">●</span>'
def _render_recommendation_history(template: dict) -> None:
"""Full recommendation history with charts and filterable table."""
recs = load_performance_database()
if not recs:
return
st.markdown("<div style='height:2rem;'></div>", unsafe_allow_html=True)
st.markdown(
'<div class="section-title">Recommendation History '
'<span class="accent">// all picks</span></div>',
unsafe_allow_html=True,
)
# Build DataFrame
hist_df = pd.DataFrame(recs)
# Ensure numeric types
for col in ["return_1d", "return_7d", "return_30d", "return_pct", "final_score", "confidence"]:
if col in hist_df.columns:
hist_df[col] = pd.to_numeric(hist_df[col], errors="coerce")
# Parse dates
if "discovery_date" in hist_df.columns:
hist_df["discovery_date"] = pd.to_datetime(hist_df["discovery_date"], errors="coerce")
# ---- Filters row ----
filter_cols = st.columns([2, 2, 2, 1])
with filter_cols[0]:
strategies = sorted(hist_df["strategy_match"].dropna().unique())
selected_strategies = st.multiselect(
"Strategy",
strategies,
default=[],
placeholder="All strategies",
)
with filter_cols[1]:
dates = hist_df["discovery_date"].dropna().sort_values()
min_date = dates.min().date() if len(dates) > 0 else None
max_date = dates.max().date() if len(dates) > 0 else None
if min_date and max_date:
date_range = st.date_input(
"Date range",
value=(min_date, max_date),
min_value=min_date,
max_value=max_date,
)
else:
date_range = None
with filter_cols[2]:
outcome_filter = st.selectbox(
"Outcome (7d)",
["All", "Winners", "Losers", "Pending"],
index=0,
)
with filter_cols[3]:
sort_by = st.selectbox("Sort", ["Date", "Return 1d", "Return 7d", "Score"], index=0)
# Apply filters
mask = pd.Series(True, index=hist_df.index)
if selected_strategies:
mask &= hist_df["strategy_match"].isin(selected_strategies)
if date_range and len(date_range) == 2:
start, end = pd.Timestamp(date_range[0]), pd.Timestamp(date_range[1])
mask &= (hist_df["discovery_date"] >= start) & (hist_df["discovery_date"] <= end)
if outcome_filter == "Winners":
mask &= hist_df.get("win_7d", pd.Series(dtype=bool)) == True # noqa: E712
elif outcome_filter == "Losers":
mask &= hist_df.get("win_7d", pd.Series(dtype=bool)) == False # noqa: E712
elif outcome_filter == "Pending":
mask &= hist_df.get("return_7d").isna() if "return_7d" in hist_df.columns else True
filtered = hist_df[mask].copy()
# Sort
sort_map = {
"Date": ("discovery_date", False),
"Return 1d": ("return_1d", False),
"Return 7d": ("return_7d", False),
"Score": ("final_score", False),
}
sort_col, sort_asc = sort_map.get(sort_by, ("discovery_date", False))
if sort_col in filtered.columns:
filtered = filtered.sort_values(sort_col, ascending=sort_asc, na_position="last")
st.caption(f"Showing {len(filtered)} of {len(hist_df)} recommendations")
# ---- Two-column charts ----
if len(filtered) > 0:
left_ch, right_ch = st.columns(2)
with left_ch:
st.markdown(
'<div class="section-title">Return Distribution '
'<span class="accent">// 1d vs 7d</span></div>',
unsafe_allow_html=True,
)
_render_return_distribution(filtered, template)
with right_ch:
st.markdown(
'<div class="section-title">Cumulative P/L by Date '
'<span class="accent">// equity curve</span></div>',
unsafe_allow_html=True,
)
_render_cumulative_pnl(filtered, template)
# ---- Full history table ----
st.markdown("<div style='height:1rem;'></div>", unsafe_allow_html=True)
st.markdown(
'<div class="section-title">All Picks '
'<span class="accent">// detail table</span></div>',
unsafe_allow_html=True,
)
_render_history_table(filtered)
def _render_return_distribution(df: pd.DataFrame, template: dict) -> None:
"""Box plot comparing 1d vs 7d return distributions."""
ret_data = []
for _, row in df.iterrows():
if pd.notna(row.get("return_1d")):
ret_data.append({"Period": "1-Day", "Return (%)": row["return_1d"]})
if pd.notna(row.get("return_7d")):
ret_data.append({"Period": "7-Day", "Return (%)": row["return_7d"]})
if not ret_data:
st.info("No return data available for the selected filters.")
return
ret_df = pd.DataFrame(ret_data)
fig = go.Figure()
for period, color in [("1-Day", COLORS["blue"]), ("7-Day", COLORS["cyan"])]:
subset = ret_df[ret_df["Period"] == period]["Return (%)"]
if len(subset) == 0:
continue
fig.add_trace(
go.Box(
y=subset,
name=period,
marker_color=color,
boxmean=True,
jitter=0.3,
pointpos=-1.5,
boxpoints="outliers",
)
)
fig.add_hline(y=0, line_dash="dot", line_color=COLORS["text_muted"], opacity=0.4)
fig.update_layout(
**template,
height=350,
showlegend=True,
legend=dict(orientation="h", y=1.02, x=0.5, xanchor="center"),
yaxis_title="Return (%)",
)
st.plotly_chart(fig, width="stretch")
def _render_cumulative_pnl(df: pd.DataFrame, template: dict) -> None:
"""Cumulative average return by discovery date (equity curve style)."""
if "discovery_date" not in df.columns:
st.info("No date data available.")
return
# Use 7d return where available, fall back to 1d
df_dated = df.dropna(subset=["discovery_date"]).copy()
df_dated["best_return"] = df_dated["return_7d"].fillna(df_dated.get("return_1d", 0))
df_dated = df_dated.dropna(subset=["best_return"])
if len(df_dated) == 0:
st.info("No return data available for equity curve.")
return
# Group by date, get mean return per day
daily = (
df_dated.groupby("discovery_date")["best_return"]
.mean()
.reset_index()
.sort_values("discovery_date")
)
daily.columns = ["Date", "Avg Return"]
daily["Cumulative"] = daily["Avg Return"].cumsum()
# Color based on cumulative being positive/negative
colors = [COLORS["green"] if v >= 0 else COLORS["red"] for v in daily["Cumulative"]]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=daily["Date"],
y=daily["Cumulative"],
mode="lines+markers",
line=dict(color=COLORS["green"], width=2),
marker=dict(color=colors, size=7, line=dict(color=COLORS["bg_card"], width=1)),
fill="tozeroy",
fillcolor="rgba(34, 197, 94, 0.08)",
hovertemplate="Date: %{x|%b %d}<br>Cumulative: %{y:+.2f}%<extra></extra>",
)
)
fig.add_hline(y=0, line_dash="dot", line_color=COLORS["text_muted"], opacity=0.4)
fig.update_layout(
**template,
height=350,
showlegend=False,
yaxis_title="Cumulative Avg Return (%)",
xaxis_title="",
)
st.plotly_chart(fig, width="stretch")
def _render_history_table(df: pd.DataFrame) -> None:
"""Render the full recommendation history as a styled dataframe."""
if len(df) == 0:
st.info("No recommendations match the selected filters.")
return
# Build display dataframe with readable columns
display_rows = []
for _, row in df.iterrows():
disc_date = row.get("discovery_date")
date_str = disc_date.strftime("%Y-%m-%d") if pd.notna(disc_date) else ""
display_rows.append(
{
"Date": date_str,
"Ticker": row.get("ticker", ""),
"#": int(row["rank"]) if pd.notna(row.get("rank")) else 0,
"Strategy": row.get("strategy_match", ""),
"Score": row.get("final_score"),
"Conf": int(row["confidence"]) if pd.notna(row.get("confidence")) else None,
"Entry $": row.get("entry_price"),
"Now $": row.get("current_price"),
"Ret 1d %": row.get("return_1d"),
"Ret 7d %": row.get("return_7d"),
"Ret 30d %": row.get("return_30d") if "return_30d" in row.index else None,
"Current %": row.get("return_pct"),
"Days": int(row["days_held"]) if pd.notna(row.get("days_held")) else None,
"Status": row.get("status", ""),
}
)
table_df = pd.DataFrame(display_rows)
st.dataframe(
table_df,
width="stretch",
hide_index=True,
height=min(len(table_df) * 35 + 38, 600),
column_config={
"Date": st.column_config.TextColumn(width="small"),
"Ticker": st.column_config.TextColumn(width="small"),
"#": st.column_config.NumberColumn(format="%d", width="small"),
"Strategy": st.column_config.TextColumn(width="medium"),
"Score": st.column_config.NumberColumn(format="%.0f", width="small"),
"Conf": st.column_config.NumberColumn(format="%d/10", width="small"),
"Entry $": st.column_config.NumberColumn(format="$%.2f"),
"Now $": st.column_config.NumberColumn(format="$%.2f"),
"Ret 1d %": st.column_config.NumberColumn(format="%+.2f%%"),
"Ret 7d %": st.column_config.NumberColumn(format="%+.2f%%"),
"Ret 30d %": st.column_config.NumberColumn(format="%+.2f%%"),
"Current %": st.column_config.NumberColumn(format="%+.2f%%"),
"Days": st.column_config.NumberColumn(format="%d"),
"Status": st.column_config.TextColumn(width="small"),
},
)
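A toy illustration (hypothetical numbers, not taken from the repo data) of the evaluated-only weighting used in the KPI block above: strategies with no 7-day data yet still add to total_trades, but are excluded from the weighted win rate and average return:

import pandas as pd

df = pd.DataFrame(
    {
        "Strategy": ["momentum", "insider_buying", "reddit_dd"],
        "Count": [92, 25, 2],
        "Win Rate": [51.1, 71.4, None],   # reddit_dd has no evaluated picks yet
        "Avg Return": [0.71, 1.90, None],
    }
)
total_trades = df["Count"].sum()                      # 119 picks shown in the KPIs
eval_df = df.dropna(subset=["Win Rate", "Avg Return"])
eval_trades = eval_df["Count"].sum()                  # 117 evaluated picks
avg_wr = (eval_df["Win Rate"] * eval_df["Count"]).sum() / eval_trades
print(round(avg_wr, 1))  # 55.4, weighted over evaluated strategies only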


@@ -196,42 +196,73 @@ def load_performance_database() -> List[Dict[str, Any]]:
return []
_STRATEGY_ALIASES: Dict[str, str] = {
"momentum": "momentum",
"momentum/hype": "momentum",
"momentum/hype / short squeeze": "momentum",
"insider play": "insider_buying",
"insider_buying": "insider_buying",
"earnings play": "earnings_play",
"earnings_play": "earnings_play",
"earnings_calendar": "earnings_calendar",
"news catalyst": "news_catalyst",
"news_catalyst": "news_catalyst",
"volume accumulation": "volume_accumulation",
"volume_accumulation": "volume_accumulation",
"contrarian value": "contrarian_value",
"contrarian_value": "contrarian_value",
}
def normalize_strategy(raw: str) -> str:
"""Map strategy name variants to a canonical lowercase form."""
key = raw.strip().lower()
return _STRATEGY_ALIASES.get(key, key)
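# Illustrative behavior (not in the original file):
#   normalize_strategy("Momentum")        -> "momentum"
#   normalize_strategy("Momentum/Hype")   -> "momentum"
#   normalize_strategy("Insider Play")    -> "insider_buying"
#   normalize_strategy("brand_new_name")  -> "brand_new_name"  (unknown names pass through lowercased)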
def load_strategy_metrics() -> List[Dict[str, Any]]:
"""
Build per-strategy metrics from the performance database if available.
Falls back to statistics.json when performance database is missing.
Normalizes strategy names so variants like 'Momentum', 'momentum',
and 'Momentum/Hype' all merge into a single bucket. Counts ALL
recommendations per strategy; win rate and avg return are computed
from the subset that has 7-day return data.
"""
recs = load_performance_database()
if recs:
metrics: Dict[str, Dict[str, float]] = {}
for rec in recs:
strategy = rec.get("strategy_match", "unknown")
strategy = normalize_strategy(rec.get("strategy_match", "unknown"))
if strategy not in metrics:
metrics[strategy] = {
"count": 0,
"total": 0,
"evaluated": 0,
"wins": 0,
"sum_return": 0.0,
}
if "return_7d" in rec:
metrics[strategy]["count"] += 1
metrics[strategy]["total"] += 1
if "return_7d" in rec and rec["return_7d"] is not None:
metrics[strategy]["evaluated"] += 1
metrics[strategy]["sum_return"] += float(rec.get("return_7d", 0.0) or 0.0)
if rec.get("win_7d"):
metrics[strategy]["wins"] += 1
results = []
for strategy, data in metrics.items():
count = int(data["count"])
if count == 0:
continue
win_rate = round((data["wins"] / count) * 100, 1)
avg_return = round(data["sum_return"] / count, 2)
total = int(data["total"])
evaluated = int(data["evaluated"])
win_rate = round((data["wins"] / evaluated) * 100, 1) if evaluated > 0 else None
avg_return = round(data["sum_return"] / evaluated, 2) if evaluated > 0 else None
results.append(
{
"Strategy": strategy,
"Win Rate": win_rate,
"Avg Return": avg_return,
"Count": count,
"Count": total,
}
)
return results
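# Worked example (hypothetical): a strategy with 10 recommendations of which only 4 have a
# 7-day return recorded (3 wins; returns +2.0, +1.0, +3.0, -1.0) now reports Count=10,
# Win Rate=75.0 and Avg Return=1.25, instead of dropping the 6 pending picks from the count.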
@@ -242,10 +273,10 @@ def load_strategy_metrics() -> List[Dict[str, Any]]:
for strategy, data in by_strategy.items():
win_rate = data.get("win_rate_7d") or data.get("win_rate", 0)
avg_return = data.get("avg_return_7d", 0)
count = data.get("wins_7d", 0) + data.get("losses_7d", 0)
count = data.get("count", data.get("wins_7d", 0) + data.get("losses_7d", 0))
results.append(
{
"Strategy": strategy,
"Strategy": normalize_strategy(strategy),
"Win Rate": win_rate,
"Avg Return": avg_return,
"Count": count,