diff --git a/.playwright-mcp/01-dashboard.png b/.playwright-mcp/01-dashboard.png deleted file mode 100644 index 43f80757..00000000 Binary files a/.playwright-mcp/01-dashboard.png and /dev/null differ diff --git a/.playwright-mcp/02-settings-modal.png b/.playwright-mcp/02-settings-modal.png deleted file mode 100644 index 3b67507e..00000000 Binary files a/.playwright-mcp/02-settings-modal.png and /dev/null differ diff --git a/.playwright-mcp/03-stock-detail-overview.png b/.playwright-mcp/03-stock-detail-overview.png deleted file mode 100644 index 07f7fb7f..00000000 Binary files a/.playwright-mcp/03-stock-detail-overview.png and /dev/null differ diff --git a/.playwright-mcp/04-analysis-pipeline.png b/.playwright-mcp/04-analysis-pipeline.png deleted file mode 100644 index ff13ced7..00000000 Binary files a/.playwright-mcp/04-analysis-pipeline.png and /dev/null differ diff --git a/.playwright-mcp/05-debates-tab.png b/.playwright-mcp/05-debates-tab.png deleted file mode 100644 index 45c40c5d..00000000 Binary files a/.playwright-mcp/05-debates-tab.png and /dev/null differ diff --git a/.playwright-mcp/06-investment-debate-expanded.png b/.playwright-mcp/06-investment-debate-expanded.png deleted file mode 100644 index 02bc602f..00000000 Binary files a/.playwright-mcp/06-investment-debate-expanded.png and /dev/null differ diff --git a/.playwright-mcp/07-data-sources-tab.png b/.playwright-mcp/07-data-sources-tab.png deleted file mode 100644 index 2df93b64..00000000 Binary files a/.playwright-mcp/07-data-sources-tab.png and /dev/null differ diff --git a/.playwright-mcp/08-dashboard-dark-mode.png b/.playwright-mcp/08-dashboard-dark-mode.png deleted file mode 100644 index 36680cd3..00000000 Binary files a/.playwright-mcp/08-dashboard-dark-mode.png and /dev/null differ diff --git a/.playwright-mcp/09-how-it-works.png b/.playwright-mcp/09-how-it-works.png deleted file mode 100644 index 95140dae..00000000 Binary files a/.playwright-mcp/09-how-it-works.png and /dev/null differ diff 
--git a/.playwright-mcp/10-history-page.png b/.playwright-mcp/10-history-page.png deleted file mode 100644 index 0a5d01c4..00000000 Binary files a/.playwright-mcp/10-history-page.png and /dev/null differ diff --git a/.playwright-mcp/analysis-in-progress.png b/.playwright-mcp/analysis-in-progress.png deleted file mode 100644 index e9314a9f..00000000 Binary files a/.playwright-mcp/analysis-in-progress.png and /dev/null differ diff --git a/.playwright-mcp/analysis-running.png b/.playwright-mcp/analysis-running.png deleted file mode 100644 index 4f6a5a9e..00000000 Binary files a/.playwright-mcp/analysis-running.png and /dev/null differ diff --git a/.playwright-mcp/analysis-working-network.png b/.playwright-mcp/analysis-working-network.png deleted file mode 100644 index 2a8d1247..00000000 Binary files a/.playwright-mcp/analysis-working-network.png and /dev/null differ diff --git a/.playwright-mcp/analysis-working-tcs.png b/.playwright-mcp/analysis-working-tcs.png deleted file mode 100644 index 7e6c7dfa..00000000 Binary files a/.playwright-mcp/analysis-working-tcs.png and /dev/null differ diff --git a/.playwright-mcp/chrome-headless-test.png b/.playwright-mcp/chrome-headless-test.png deleted file mode 100644 index 1bd12bc4..00000000 Binary files a/.playwright-mcp/chrome-headless-test.png and /dev/null differ diff --git a/.playwright-mcp/current-state.png b/.playwright-mcp/current-state.png deleted file mode 100644 index 5ed48249..00000000 Binary files a/.playwright-mcp/current-state.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-analyze-all.png b/.playwright-mcp/dashboard-analyze-all.png deleted file mode 100644 index 93a8a4f8..00000000 Binary files a/.playwright-mcp/dashboard-analyze-all.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-before.png b/.playwright-mcp/dashboard-before.png deleted file mode 100644 index f45dc554..00000000 Binary files a/.playwright-mcp/dashboard-before.png and /dev/null differ diff --git 
a/.playwright-mcp/dashboard-buy-filter-active.png b/.playwright-mcp/dashboard-buy-filter-active.png deleted file mode 100644 index c42e1fdb..00000000 Binary files a/.playwright-mcp/dashboard-buy-filter-active.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-compact.png b/.playwright-mcp/dashboard-compact.png deleted file mode 100644 index 5a6e3049..00000000 Binary files a/.playwright-mcp/dashboard-compact.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-hold-filter-final.png b/.playwright-mcp/dashboard-hold-filter-final.png deleted file mode 100644 index b9dbea78..00000000 Binary files a/.playwright-mcp/dashboard-hold-filter-final.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-scrolled.png b/.playwright-mcp/dashboard-scrolled.png deleted file mode 100644 index 367aae9b..00000000 Binary files a/.playwright-mcp/dashboard-scrolled.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-search-visible.png b/.playwright-mcp/dashboard-search-visible.png deleted file mode 100644 index 1efea958..00000000 Binary files a/.playwright-mcp/dashboard-search-visible.png and /dev/null differ diff --git a/.playwright-mcp/dashboard-with-search.png b/.playwright-mcp/dashboard-with-search.png deleted file mode 100644 index 25855731..00000000 Binary files a/.playwright-mcp/dashboard-with-search.png and /dev/null differ diff --git a/.playwright-mcp/history-compact.png b/.playwright-mcp/history-compact.png deleted file mode 100644 index 9a2afb97..00000000 Binary files a/.playwright-mcp/history-compact.png and /dev/null differ diff --git a/.playwright-mcp/history-new-calc.png b/.playwright-mcp/history-new-calc.png deleted file mode 100644 index 95f70d68..00000000 Binary files a/.playwright-mcp/history-new-calc.png and /dev/null differ diff --git a/.playwright-mcp/history-page-current.png b/.playwright-mcp/history-page-current.png deleted file mode 100644 index 39990d98..00000000 Binary files a/.playwright-mcp/history-page-current.png 
and /dev/null differ diff --git a/.playwright-mcp/history-page-updated.png b/.playwright-mcp/history-page-updated.png deleted file mode 100644 index 608254ca..00000000 Binary files a/.playwright-mcp/history-page-updated.png and /dev/null differ diff --git a/.playwright-mcp/history-sparklines-2.png b/.playwright-mcp/history-sparklines-2.png deleted file mode 100644 index 7d9c5af3..00000000 Binary files a/.playwright-mcp/history-sparklines-2.png and /dev/null differ diff --git a/.playwright-mcp/history-sparklines-more.png b/.playwright-mcp/history-sparklines-more.png deleted file mode 100644 index 71225674..00000000 Binary files a/.playwright-mcp/history-sparklines-more.png and /dev/null differ diff --git a/.playwright-mcp/history-sparklines-normalized.png b/.playwright-mcp/history-sparklines-normalized.png deleted file mode 100644 index 0d5ce53b..00000000 Binary files a/.playwright-mcp/history-sparklines-normalized.png and /dev/null differ diff --git a/.playwright-mcp/history-sparklines-scrolled.png b/.playwright-mcp/history-sparklines-scrolled.png deleted file mode 100644 index 33129dd4..00000000 Binary files a/.playwright-mcp/history-sparklines-scrolled.png and /dev/null differ diff --git a/.playwright-mcp/history-sparklines.png b/.playwright-mcp/history-sparklines.png deleted file mode 100644 index 7d9c5af3..00000000 Binary files a/.playwright-mcp/history-sparklines.png and /dev/null differ diff --git a/.playwright-mcp/history-stock-list.png b/.playwright-mcp/history-stock-list.png deleted file mode 100644 index a3c88907..00000000 Binary files a/.playwright-mcp/history-stock-list.png and /dev/null differ diff --git a/.playwright-mcp/mobile-view.png b/.playwright-mcp/mobile-view.png deleted file mode 100644 index eae6f0b6..00000000 Binary files a/.playwright-mcp/mobile-view.png and /dev/null differ diff --git a/.playwright-mcp/overall-modal-fixed.png b/.playwright-mcp/overall-modal-fixed.png deleted file mode 100644 index 3a06d7f0..00000000 Binary files 
a/.playwright-mcp/overall-modal-fixed.png and /dev/null differ diff --git a/.playwright-mcp/overall-modal-table.png b/.playwright-mcp/overall-modal-table.png deleted file mode 100644 index cdcbcf36..00000000 Binary files a/.playwright-mcp/overall-modal-table.png and /dev/null differ diff --git a/.playwright-mcp/overall-modal.png b/.playwright-mcp/overall-modal.png deleted file mode 100644 index 3a06d7f0..00000000 Binary files a/.playwright-mcp/overall-modal.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-39-38-424Z.png b/.playwright-mcp/page-2026-01-31T10-39-38-424Z.png deleted file mode 100644 index cd6f8045..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-39-38-424Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-41-56-205Z.png b/.playwright-mcp/page-2026-01-31T10-41-56-205Z.png deleted file mode 100644 index e0df99eb..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-41-56-205Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-42-07-250Z.png b/.playwright-mcp/page-2026-01-31T10-42-07-250Z.png deleted file mode 100644 index 96ab48c6..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-42-07-250Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-42-21-398Z.png b/.playwright-mcp/page-2026-01-31T10-42-21-398Z.png deleted file mode 100644 index 71be46f7..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-42-21-398Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-43-02-673Z.png b/.playwright-mcp/page-2026-01-31T10-43-02-673Z.png deleted file mode 100644 index 5970ecb6..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-43-02-673Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-43-38-177Z.png b/.playwright-mcp/page-2026-01-31T10-43-38-177Z.png deleted file mode 100644 index ad8898b0..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-43-38-177Z.png and /dev/null differ diff 
--git a/.playwright-mcp/page-2026-01-31T10-44-36-104Z.png b/.playwright-mcp/page-2026-01-31T10-44-36-104Z.png deleted file mode 100644 index fc31ccb0..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-44-36-104Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-44-56-012Z.png b/.playwright-mcp/page-2026-01-31T10-44-56-012Z.png deleted file mode 100644 index 8bb9d2ae..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-44-56-012Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-45-15-489Z.png b/.playwright-mcp/page-2026-01-31T10-45-15-489Z.png deleted file mode 100644 index 916b50bf..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-45-15-489Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-45-42-676Z.png b/.playwright-mcp/page-2026-01-31T10-45-42-676Z.png deleted file mode 100644 index 4d1b71e4..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-45-42-676Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-45-58-686Z.png b/.playwright-mcp/page-2026-01-31T10-45-58-686Z.png deleted file mode 100644 index c50f2026..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-45-58-686Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-46-33-307Z.png b/.playwright-mcp/page-2026-01-31T10-46-33-307Z.png deleted file mode 100644 index ca2e0763..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-46-33-307Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-47-05-151Z.png b/.playwright-mcp/page-2026-01-31T10-47-05-151Z.png deleted file mode 100644 index 40448610..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-47-05-151Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-47-42-171Z.png b/.playwright-mcp/page-2026-01-31T10-47-42-171Z.png deleted file mode 100644 index 742cb46f..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-47-42-171Z.png and /dev/null 
differ diff --git a/.playwright-mcp/page-2026-01-31T10-49-11-278Z.png b/.playwright-mcp/page-2026-01-31T10-49-11-278Z.png deleted file mode 100644 index 425d3016..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-49-11-278Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-49-27-614Z.png b/.playwright-mcp/page-2026-01-31T10-49-27-614Z.png deleted file mode 100644 index 4c04faae..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-49-27-614Z.png and /dev/null differ diff --git a/.playwright-mcp/page-2026-01-31T10-49-46-409Z.png b/.playwright-mcp/page-2026-01-31T10-49-46-409Z.png deleted file mode 100644 index 68827663..00000000 Binary files a/.playwright-mcp/page-2026-01-31T10-49-46-409Z.png and /dev/null differ diff --git a/.playwright-mcp/return-modal-formula.png b/.playwright-mcp/return-modal-formula.png deleted file mode 100644 index bd655874..00000000 Binary files a/.playwright-mcp/return-modal-formula.png and /dev/null differ diff --git a/.playwright-mcp/return-modal-scrolled.png b/.playwright-mcp/return-modal-scrolled.png deleted file mode 100644 index 468e1059..00000000 Binary files a/.playwright-mcp/return-modal-scrolled.png and /dev/null differ diff --git a/.playwright-mcp/return-modal.png b/.playwright-mcp/return-modal.png deleted file mode 100644 index 57878255..00000000 Binary files a/.playwright-mcp/return-modal.png and /dev/null differ diff --git a/.playwright-mcp/settings-api-key.png b/.playwright-mcp/settings-api-key.png deleted file mode 100644 index 3b67507e..00000000 Binary files a/.playwright-mcp/settings-api-key.png and /dev/null differ diff --git a/.playwright-mcp/settings-modal.png b/.playwright-mcp/settings-modal.png deleted file mode 100644 index dc1b3608..00000000 Binary files a/.playwright-mcp/settings-modal.png and /dev/null differ diff --git a/.playwright-mcp/stock-detail-compact.png b/.playwright-mcp/stock-detail-compact.png deleted file mode 100644 index 1d5665a6..00000000 Binary files 
a/.playwright-mcp/stock-detail-compact.png and /dev/null differ diff --git a/.playwright-mcp/stocks-page-compact.png b/.playwright-mcp/stocks-page-compact.png deleted file mode 100644 index 3cbec6f0..00000000 Binary files a/.playwright-mcp/stocks-page-compact.png and /dev/null differ diff --git a/analysis-cancelled.png b/analysis-cancelled.png new file mode 100644 index 00000000..1dea274f Binary files /dev/null and b/analysis-cancelled.png differ diff --git a/analysis-live-fullpage.png b/analysis-live-fullpage.png new file mode 100644 index 00000000..296d7dda Binary files /dev/null and b/analysis-live-fullpage.png differ diff --git a/analysis-live-progress.png b/analysis-live-progress.png new file mode 100644 index 00000000..1ad5969e Binary files /dev/null and b/analysis-live-progress.png differ diff --git a/analysis-running-with-cancel.png b/analysis-running-with-cancel.png new file mode 100644 index 00000000..3638ce15 Binary files /dev/null and b/analysis-running-with-cancel.png differ diff --git a/analyze-all-skipped.png b/analyze-all-skipped.png new file mode 100644 index 00000000..b8f95d46 Binary files /dev/null and b/analyze-all-skipped.png differ diff --git a/current-state.png b/current-state.png new file mode 100644 index 00000000..737cd6e9 Binary files /dev/null and b/current-state.png differ diff --git a/data-source-raw-content.png b/data-source-raw-content.png new file mode 100644 index 00000000..88e5274f Binary files /dev/null and b/data-source-raw-content.png differ diff --git a/data-source-raw-viewer.png b/data-source-raw-viewer.png new file mode 100644 index 00000000..667cb45f Binary files /dev/null and b/data-source-raw-viewer.png differ diff --git a/data-sources-all.png b/data-sources-all.png new file mode 100644 index 00000000..986e62f3 Binary files /dev/null and b/data-sources-all.png differ diff --git a/data-sources-fixed.png b/data-sources-fixed.png new file mode 100644 index 00000000..9a3c5960 Binary files /dev/null and 
b/data-sources-fixed.png differ diff --git a/detail-drawer-bottom.png b/detail-drawer-bottom.png new file mode 100644 index 00000000..9b7f986a Binary files /dev/null and b/detail-drawer-bottom.png differ diff --git a/detail-drawer-test.png b/detail-drawer-test.png new file mode 100644 index 00000000..ce7e2908 Binary files /dev/null and b/detail-drawer-test.png differ diff --git a/drawer-header.png b/drawer-header.png new file mode 100644 index 00000000..7d2ac1b6 Binary files /dev/null and b/drawer-header.png differ diff --git a/frontend/backend/backtest_service.py b/frontend/backend/backtest_service.py new file mode 100644 index 00000000..4df17615 --- /dev/null +++ b/frontend/backend/backtest_service.py @@ -0,0 +1,237 @@ +"""Backtest service for calculating real prediction accuracy.""" +import yfinance as yf +import pandas as pd +from datetime import datetime, timedelta +from typing import Optional +import database as db + + +def get_trading_day_price(ticker: yf.Ticker, target_date: datetime, + direction: str = 'forward', max_days: int = 7) -> Optional[float]: + """ + Get the closing price for a trading day near the target date. + + Args: + ticker: yfinance Ticker object + target_date: The date we want price for + direction: 'forward' to look for next trading day, 'backward' for previous + max_days: Maximum days to search + + Returns: + Closing price or None if not found + """ + for i in range(max_days): + if direction == 'forward': + check_date = target_date + timedelta(days=i) + else: + check_date = target_date - timedelta(days=i) + + start = check_date + end = check_date + timedelta(days=1) + + hist = ticker.history(start=start.strftime('%Y-%m-%d'), + end=end.strftime('%Y-%m-%d')) + if not hist.empty: + return hist['Close'].iloc[0] + + return None + + +def calculate_backtest_for_recommendation(date: str, symbol: str, decision: str, + hold_days: int = None) -> Optional[dict]: + """ + Calculate backtest results for a single recommendation. 
+ + Args: + date: Prediction date (YYYY-MM-DD) + symbol: Stock symbol (NSE format like RELIANCE.NS) + decision: BUY, SELL, or HOLD + hold_days: Recommended holding period in days (for BUY/HOLD) + + Returns: + Dict with backtest results or None if calculation failed + """ + try: + # Convert date + pred_date = datetime.strptime(date, '%Y-%m-%d') + + # For Indian stocks, append .NS suffix if not present + yf_symbol = symbol if '.' in symbol else f"{symbol}.NS" + + ticker = yf.Ticker(yf_symbol) + + # Get price at prediction date (or next trading day) + price_at_pred = get_trading_day_price(ticker, pred_date, 'forward') + if price_at_pred is None: + return None + + # Get prices for 1 day, 1 week, 1 month later + date_1d = pred_date + timedelta(days=1) + date_1w = pred_date + timedelta(weeks=1) + date_1m = pred_date + timedelta(days=30) + + price_1d = get_trading_day_price(ticker, date_1d, 'forward') + price_1w = get_trading_day_price(ticker, date_1w, 'forward') + price_1m = get_trading_day_price(ticker, date_1m, 'forward') + + # Calculate returns + return_1d = ((price_1d - price_at_pred) / price_at_pred * 100) if price_1d else None + return_1w = ((price_1w - price_at_pred) / price_at_pred * 100) if price_1w else None + return_1m = ((price_1m - price_at_pred) / price_at_pred * 100) if price_1m else None + + # Calculate return at hold_days horizon if specified + return_at_hold = None + if hold_days and hold_days > 0: + date_hold = pred_date + timedelta(days=hold_days) + price_at_hold = get_trading_day_price(ticker, date_hold, 'forward') + if price_at_hold: + return_at_hold = round(((price_at_hold - price_at_pred) / price_at_pred * 100), 2) + + # Determine if prediction was correct + # Use hold_days return when available, fall back to 1-week return + prediction_correct = None + check_return = return_at_hold if return_at_hold is not None else return_1w + if check_return is not None: + if decision == 'BUY' or decision == 'HOLD': + prediction_correct = check_return > 0 + elif 
decision == 'SELL': + prediction_correct = check_return < 0 + + return { + 'date': date, + 'symbol': symbol, + 'decision': decision, + 'price_at_prediction': round(price_at_pred, 2), + 'price_1d_later': round(price_1d, 2) if price_1d else None, + 'price_1w_later': round(price_1w, 2) if price_1w else None, + 'price_1m_later': round(price_1m, 2) if price_1m else None, + 'return_1d': round(return_1d, 2) if return_1d is not None else None, + 'return_1w': round(return_1w, 2) if return_1w is not None else None, + 'return_1m': round(return_1m, 2) if return_1m is not None else None, + 'return_at_hold': return_at_hold, + 'hold_days': hold_days, + 'prediction_correct': prediction_correct + } + + except Exception as e: + print(f"Error calculating backtest for {symbol} on {date}: {e}") + return None + + +def calculate_and_save_backtest(date: str, symbol: str, decision: str, + hold_days: int = None) -> Optional[dict]: + """Calculate backtest and save to database.""" + result = calculate_backtest_for_recommendation(date, symbol, decision, hold_days) + + if result: + db.save_backtest_result( + date=result['date'], + symbol=result['symbol'], + decision=result['decision'], + price_at_prediction=result['price_at_prediction'], + price_1d_later=result['price_1d_later'], + price_1w_later=result['price_1w_later'], + price_1m_later=result['price_1m_later'], + return_1d=result['return_1d'], + return_1w=result['return_1w'], + return_1m=result['return_1m'], + prediction_correct=result['prediction_correct'], + hold_days=result.get('hold_days') + ) + + return result + + +def backtest_all_recommendations_for_date(date: str) -> dict: + """ + Calculate backtest for all recommendations on a given date. + + Returns summary statistics. 
+ """ + rec = db.get_recommendation_by_date(date) + if not rec or 'analysis' not in rec: + return {'error': 'No recommendations found for date', 'date': date} + + analysis = rec['analysis'] # Dict keyed by symbol + results = [] + errors = [] + + for symbol, stock_data in analysis.items(): + decision = stock_data['decision'] + hold_days = stock_data.get('hold_days') + + # Check if we already have a backtest result + existing = db.get_backtest_result(date, symbol) + if existing: + results.append(existing) + continue + + # Calculate new backtest + result = calculate_and_save_backtest(date, symbol, decision, hold_days) + if result: + results.append(result) + else: + errors.append(symbol) + + # Calculate summary + correct = sum(1 for r in results if r.get('prediction_correct')) + total_with_result = sum(1 for r in results if r.get('prediction_correct') is not None) + + return { + 'date': date, + 'total_stocks': len(analysis), + 'calculated': len(results), + 'errors': errors, + 'correct_predictions': correct, + 'total_with_result': total_with_result, + 'accuracy': round(correct / total_with_result * 100, 1) if total_with_result > 0 else 0 + } + + +def get_backtest_data_for_frontend(date: str, symbol: str) -> dict: + """ + Get backtest data formatted for frontend display. + Includes price history for charts. + """ + result = db.get_backtest_result(date, symbol) + + if not result: + # Try to calculate it + rec = db.get_recommendation_by_date(date) + if rec and 'analysis' in rec: + stock_data = rec['analysis'].get(symbol) + if stock_data: + result = calculate_and_save_backtest(date, symbol, stock_data['decision'], stock_data.get('hold_days')) + + if not result: + return {'available': False, 'reason': 'Could not calculate backtest'} + + # Get price history for chart + try: + pred_date = datetime.strptime(date, '%Y-%m-%d') + yf_symbol = symbol if '.' 
in symbol else f"{symbol}.NS" + ticker = yf.Ticker(yf_symbol) + + # Get 30 days of history starting from prediction date + end_date = pred_date + timedelta(days=35) + hist = ticker.history(start=pred_date.strftime('%Y-%m-%d'), + end=end_date.strftime('%Y-%m-%d')) + + price_history = [ + {'date': idx.strftime('%Y-%m-%d'), 'price': round(row['Close'], 2)} + for idx, row in hist.iterrows() + ][:30] # Limit to 30 data points + + except Exception: + price_history = [] + + return { + 'available': True, + 'prediction_correct': result['prediction_correct'], + 'actual_return_1d': result['return_1d'], + 'actual_return_1w': result['return_1w'], + 'actual_return_1m': result['return_1m'], + 'price_at_prediction': result['price_at_prediction'], + 'current_price': result.get('price_1m_later') or result.get('price_1w_later'), + 'price_history': price_history + } diff --git a/frontend/backend/database.py b/frontend/backend/database.py index da3a72a2..fbdf5154 100644 --- a/frontend/backend/database.py +++ b/frontend/backend/database.py @@ -105,10 +105,17 @@ def init_db(): completed_at TEXT, duration_ms INTEGER, output_summary TEXT, + step_details TEXT, UNIQUE(date, symbol, step_number) ) """) + # Add step_details column if it doesn't exist (migration for existing DBs) + try: + cursor.execute("ALTER TABLE pipeline_steps ADD COLUMN step_details TEXT") + except sqlite3.OperationalError: + pass # Column already exists + # Create data_source_logs table (stores what raw data was fetched) cursor.execute(""" CREATE TABLE IF NOT EXISTS data_source_logs ( @@ -117,6 +124,8 @@ def init_db(): symbol TEXT NOT NULL, source_type TEXT, source_name TEXT, + method TEXT, + args TEXT, data_fetched TEXT, fetch_timestamp TEXT, success INTEGER DEFAULT 1, @@ -124,6 +133,46 @@ def init_db(): ) """) + # Migrate: add method/args columns if missing (existing databases) + try: + cursor.execute("ALTER TABLE data_source_logs ADD COLUMN method TEXT") + except Exception: + pass # Column already exists + try: + 
cursor.execute("ALTER TABLE data_source_logs ADD COLUMN args TEXT") + except Exception: + pass # Column already exists + + # Create backtest_results table (stores calculated backtest accuracy) + cursor.execute(""" + CREATE TABLE IF NOT EXISTS backtest_results ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + date TEXT NOT NULL, + symbol TEXT NOT NULL, + decision TEXT, + price_at_prediction REAL, + price_1d_later REAL, + price_1w_later REAL, + price_1m_later REAL, + return_1d REAL, + return_1w REAL, + return_1m REAL, + prediction_correct INTEGER, + calculated_at TEXT DEFAULT CURRENT_TIMESTAMP, + UNIQUE(date, symbol) + ) + """) + + # Add hold_days column if it doesn't exist (migration for existing DBs) + try: + cursor.execute("ALTER TABLE stock_analysis ADD COLUMN hold_days INTEGER") + except sqlite3.OperationalError: + pass # Column already exists + try: + cursor.execute("ALTER TABLE backtest_results ADD COLUMN hold_days INTEGER") + except sqlite3.OperationalError: + pass # Column already exists + # Create indexes for new tables cursor.execute(""" CREATE INDEX IF NOT EXISTS idx_agent_reports_date_symbol ON agent_reports(date, symbol) @@ -137,6 +186,9 @@ def init_db(): cursor.execute(""" CREATE INDEX IF NOT EXISTS idx_data_source_logs_date_symbol ON data_source_logs(date, symbol) """) + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_backtest_results_date ON backtest_results(date) + """) conn.commit() conn.close() @@ -168,8 +220,8 @@ def save_recommendation(date: str, analysis_data: dict, summary: dict, for symbol, analysis in analysis_data.items(): cursor.execute(""" INSERT OR REPLACE INTO stock_analysis - (date, symbol, company_name, decision, confidence, risk, raw_analysis) - VALUES (?, ?, ?, ?, ?, ?, ?) + (date, symbol, company_name, decision, confidence, risk, raw_analysis, hold_days) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
""", ( date, symbol, @@ -177,7 +229,8 @@ def save_recommendation(date: str, analysis_data: dict, summary: dict, analysis.get('decision'), analysis.get('confidence'), analysis.get('risk'), - analysis.get('raw_analysis', '') + analysis.get('raw_analysis', ''), + analysis.get('hold_days') )) conn.commit() @@ -185,6 +238,52 @@ def save_recommendation(date: str, analysis_data: dict, summary: dict, conn.close() +def save_single_stock_analysis(date: str, symbol: str, analysis: dict): + """Save analysis for a single stock. + + Args: + date: Date string (YYYY-MM-DD) + symbol: Stock symbol + analysis: Dict with keys: company_name, decision, confidence, risk, raw_analysis, hold_days + """ + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute(""" + INSERT OR REPLACE INTO stock_analysis + (date, symbol, company_name, decision, confidence, risk, raw_analysis, hold_days) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, ( + date, + symbol, + analysis.get('company_name', symbol), + analysis.get('decision', 'HOLD'), + analysis.get('confidence', 'MEDIUM'), + analysis.get('risk', 'MEDIUM'), + analysis.get('raw_analysis', ''), + analysis.get('hold_days') + )) + conn.commit() + finally: + conn.close() + + +def get_analyzed_symbols_for_date(date: str) -> list: + """Get list of symbols that already have analysis for a given date. + + Used by bulk analysis to skip already-completed stocks when resuming. 
+ """ + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute("SELECT symbol FROM stock_analysis WHERE date = ?", (date,)) + return [row['symbol'] for row in cursor.fetchall()] + finally: + conn.close() + + def get_recommendation_by_date(date: str) -> Optional[dict]: """Get recommendation for a specific date.""" conn = get_connection() @@ -197,37 +296,60 @@ def get_recommendation_by_date(date: str) -> Optional[dict]: """, (date,)) row = cursor.fetchone() - if not row: - return None - # Get stock analysis for this date cursor.execute(""" SELECT * FROM stock_analysis WHERE date = ? """, (date,)) analysis_rows = cursor.fetchall() + # If no daily_recommendations AND no stock_analysis, return None + if not row and not analysis_rows: + return None + analysis = {} for a in analysis_rows: + decision = (a['decision'] or '').strip().upper() + if decision not in ('BUY', 'SELL', 'HOLD'): + decision = 'HOLD' analysis[a['symbol']] = { 'symbol': a['symbol'], 'company_name': a['company_name'], - 'decision': a['decision'], - 'confidence': a['confidence'], - 'risk': a['risk'], - 'raw_analysis': a['raw_analysis'] + 'decision': decision, + 'confidence': a['confidence'] or 'MEDIUM', + 'risk': a['risk'] or 'MEDIUM', + 'raw_analysis': a['raw_analysis'], + 'hold_days': a['hold_days'] if 'hold_days' in a.keys() else None } + if row: + return { + 'date': row['date'], + 'analysis': analysis, + 'summary': { + 'total': row['summary_total'], + 'buy': row['summary_buy'], + 'sell': row['summary_sell'], + 'hold': row['summary_hold'] + }, + 'top_picks': json.loads(row['top_picks']) if row['top_picks'] else [], + 'stocks_to_avoid': json.loads(row['stocks_to_avoid']) if row['stocks_to_avoid'] else [] + } + + # Fallback: build summary from stock_analysis when daily_recommendations is missing + buy_count = sum(1 for a in analysis.values() if a['decision'] == 'BUY') + sell_count = sum(1 for a in analysis.values() if a['decision'] == 'SELL') + hold_count = sum(1 for a in 
analysis.values() if a['decision'] == 'HOLD') return { - 'date': row['date'], + 'date': date, 'analysis': analysis, 'summary': { - 'total': row['summary_total'], - 'buy': row['summary_buy'], - 'sell': row['summary_sell'], - 'hold': row['summary_hold'] + 'total': len(analysis), + 'buy': buy_count, + 'sell': sell_count, + 'hold': hold_count }, - 'top_picks': json.loads(row['top_picks']) if row['top_picks'] else [], - 'stocks_to_avoid': json.loads(row['stocks_to_avoid']) if row['stocks_to_avoid'] else [] + 'top_picks': [], + 'stocks_to_avoid': [] } finally: conn.close() @@ -253,13 +375,17 @@ def get_latest_recommendation() -> Optional[dict]: def get_all_dates() -> list: - """Get all available dates.""" + """Get all available dates (union of daily_recommendations and stock_analysis).""" conn = get_connection() cursor = conn.cursor() try: cursor.execute(""" - SELECT date FROM daily_recommendations ORDER BY date DESC + SELECT DISTINCT date FROM ( + SELECT date FROM daily_recommendations + UNION + SELECT date FROM stock_analysis + ) ORDER BY date DESC """) return [row['date'] for row in cursor.fetchall()] finally: @@ -273,21 +399,26 @@ def get_stock_history(symbol: str) -> list: try: cursor.execute(""" - SELECT date, decision, confidence, risk + SELECT date, decision, confidence, risk, hold_days FROM stock_analysis WHERE symbol = ? 
ORDER BY date DESC """, (symbol,)) - return [ - { + results = [] + for row in cursor.fetchall(): + decision = (row['decision'] or '').strip().upper() + # Sanitize: only allow BUY/SELL/HOLD + if decision not in ('BUY', 'SELL', 'HOLD'): + decision = 'HOLD' + results.append({ 'date': row['date'], - 'decision': row['decision'], - 'confidence': row['confidence'], - 'risk': row['risk'] - } - for row in cursor.fetchall() - ] + 'decision': decision, + 'confidence': row['confidence'] or 'MEDIUM', + 'risk': row['risk'] or 'MEDIUM', + 'hold_days': row['hold_days'] if 'hold_days' in row.keys() else None + }) + return results finally: conn.close() @@ -467,11 +598,14 @@ def save_pipeline_steps_bulk(date: str, symbol: str, steps: list): try: for step in steps: + step_details = step.get('step_details') + if step_details and not isinstance(step_details, str): + step_details = json.dumps(step_details) cursor.execute(""" INSERT OR REPLACE INTO pipeline_steps (date, symbol, step_number, step_name, status, - started_at, completed_at, duration_ms, output_summary) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) + started_at, completed_at, duration_ms, output_summary, step_details) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
""", ( date, symbol, step.get('step_number'), @@ -480,7 +614,8 @@ def save_pipeline_steps_bulk(date: str, symbol: str, steps: list): step.get('started_at'), step.get('completed_at'), step.get('duration_ms'), - step.get('output_summary') + step.get('output_summary'), + step_details )) conn.commit() finally: @@ -499,18 +634,26 @@ def get_pipeline_steps(date: str, symbol: str) -> list: ORDER BY step_number """, (date, symbol)) - return [ - { + results = [] + for row in cursor.fetchall(): + step_details = None + raw_details = row['step_details'] if 'step_details' in row.keys() else None + if raw_details: + try: + step_details = json.loads(raw_details) + except (json.JSONDecodeError, TypeError): + step_details = None + results.append({ 'step_number': row['step_number'], 'step_name': row['step_name'], 'status': row['status'], 'started_at': row['started_at'], 'completed_at': row['completed_at'], 'duration_ms': row['duration_ms'], - 'output_summary': row['output_summary'] - } - for row in cursor.fetchall() - ] + 'output_summary': row['output_summary'], + 'step_details': step_details, + }) + return results finally: conn.close() @@ -550,13 +693,15 @@ def save_data_source_logs_bulk(date: str, symbol: str, logs: list): for log in logs: cursor.execute(""" INSERT INTO data_source_logs - (date, symbol, source_type, source_name, data_fetched, + (date, symbol, source_type, source_name, method, args, data_fetched, fetch_timestamp, success, error_message) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
""", ( date, symbol, log.get('source_type'), log.get('source_name'), + log.get('method'), + log.get('args'), json.dumps(log.get('data_fetched')) if log.get('data_fetched') else None, log.get('fetch_timestamp') or datetime.now().isoformat(), 1 if log.get('success', True) else 0, @@ -568,7 +713,8 @@ def save_data_source_logs_bulk(date: str, symbol: str, logs: list): def get_data_source_logs(date: str, symbol: str) -> list: - """Get all data source logs for a stock on a date.""" + """Get all data source logs for a stock on a date. + Falls back to generating entries from agent_reports if no explicit logs exist.""" conn = get_connection() cursor = conn.cursor() @@ -579,10 +725,12 @@ def get_data_source_logs(date: str, symbol: str) -> list: ORDER BY fetch_timestamp """, (date, symbol)) - return [ + logs = [ { 'source_type': row['source_type'], 'source_name': row['source_name'], + 'method': row['method'] if 'method' in row.keys() else None, + 'args': row['args'] if 'args' in row.keys() else None, 'data_fetched': json.loads(row['data_fetched']) if row['data_fetched'] else None, 'fetch_timestamp': row['fetch_timestamp'], 'success': bool(row['success']), @@ -590,6 +738,39 @@ def get_data_source_logs(date: str, symbol: str) -> list: } for row in cursor.fetchall() ] + + if logs: + return logs + + # No explicit logs — generate from agent_reports with full raw content + AGENT_TO_SOURCE = { + 'market': ('market_data', 'Yahoo Finance'), + 'news': ('news', 'Google News'), + 'social_media': ('social_media', 'Social Sentiment'), + 'fundamentals': ('fundamentals', 'Financial Data'), + } + + cursor.execute(""" + SELECT agent_type, report_content, created_at + FROM agent_reports + WHERE date = ? AND symbol = ? 
+ """, (date, symbol)) + + generated = [] + for row in cursor.fetchall(): + source_type, source_name = AGENT_TO_SOURCE.get( + row['agent_type'], ('other', row['agent_type']) + ) + generated.append({ + 'source_type': source_type, + 'source_name': source_name, + 'data_fetched': row['report_content'], + 'fetch_timestamp': row['created_at'], + 'success': True, + 'error_message': None + }) + + return generated finally: conn.close() @@ -698,5 +879,283 @@ def get_pipeline_summary_for_date(date: str) -> list: conn.close() +def save_backtest_result(date: str, symbol: str, decision: str, + price_at_prediction: float, price_1d_later: float = None, + price_1w_later: float = None, price_1m_later: float = None, + return_1d: float = None, return_1w: float = None, + return_1m: float = None, prediction_correct: bool = None, + hold_days: int = None): + """Save a backtest result for a stock recommendation.""" + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute(""" + INSERT OR REPLACE INTO backtest_results + (date, symbol, decision, price_at_prediction, + price_1d_later, price_1w_later, price_1m_later, + return_1d, return_1w, return_1m, prediction_correct, hold_days) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, ( + date, symbol, decision, price_at_prediction, + price_1d_later, price_1w_later, price_1m_later, + return_1d, return_1w, return_1m, + 1 if prediction_correct else 0 if prediction_correct is not None else None, + hold_days + )) + conn.commit() + finally: + conn.close() + + +def get_backtest_result(date: str, symbol: str) -> Optional[dict]: + """Get backtest result for a specific stock and date.""" + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute(""" + SELECT * FROM backtest_results WHERE date = ? AND symbol = ? 
+ """, (date, symbol)) + row = cursor.fetchone() + + if row: + return { + 'date': row['date'], + 'symbol': row['symbol'], + 'decision': row['decision'], + 'price_at_prediction': row['price_at_prediction'], + 'price_1d_later': row['price_1d_later'], + 'price_1w_later': row['price_1w_later'], + 'price_1m_later': row['price_1m_later'], + 'return_1d': row['return_1d'], + 'return_1w': row['return_1w'], + 'return_1m': row['return_1m'], + 'prediction_correct': bool(row['prediction_correct']) if row['prediction_correct'] is not None else None, + 'hold_days': row['hold_days'] if 'hold_days' in row.keys() else None, + 'calculated_at': row['calculated_at'] + } + return None + finally: + conn.close() + + +def get_backtest_results_by_date(date: str) -> list: + """Get all backtest results for a specific date.""" + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute(""" + SELECT * FROM backtest_results WHERE date = ? + """, (date,)) + + return [ + { + 'symbol': row['symbol'], + 'decision': row['decision'], + 'price_at_prediction': row['price_at_prediction'], + 'price_1d_later': row['price_1d_later'], + 'price_1w_later': row['price_1w_later'], + 'price_1m_later': row['price_1m_later'], + 'return_1d': row['return_1d'], + 'return_1w': row['return_1w'], + 'return_1m': row['return_1m'], + 'prediction_correct': bool(row['prediction_correct']) if row['prediction_correct'] is not None else None, + 'hold_days': row['hold_days'] if 'hold_days' in row.keys() else None + } + for row in cursor.fetchall() + ] + finally: + conn.close() + + +def get_all_backtest_results() -> list: + """Get all backtest results for accuracy calculation.""" + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute(""" + SELECT br.*, sa.confidence, sa.risk + FROM backtest_results br + LEFT JOIN stock_analysis sa ON br.date = sa.date AND br.symbol = sa.symbol + WHERE br.prediction_correct IS NOT NULL + ORDER BY br.date DESC + """) + + return [ + { + 'date': row['date'], + 
'symbol': row['symbol'], + 'decision': row['decision'], + 'confidence': row['confidence'], + 'risk': row['risk'], + 'price_at_prediction': row['price_at_prediction'], + 'return_1d': row['return_1d'], + 'return_1w': row['return_1w'], + 'return_1m': row['return_1m'], + 'prediction_correct': bool(row['prediction_correct']) + } + for row in cursor.fetchall() + ] + finally: + conn.close() + + +def calculate_accuracy_metrics() -> dict: + """Calculate overall backtest accuracy metrics.""" + results = get_all_backtest_results() + + if not results: + return { + 'overall_accuracy': 0, + 'total_predictions': 0, + 'correct_predictions': 0, + 'by_decision': {'BUY': {'accuracy': 0, 'total': 0}, 'SELL': {'accuracy': 0, 'total': 0}, 'HOLD': {'accuracy': 0, 'total': 0}}, + 'by_confidence': {'High': {'accuracy': 0, 'total': 0}, 'Medium': {'accuracy': 0, 'total': 0}, 'Low': {'accuracy': 0, 'total': 0}} + } + + total = len(results) + correct = sum(1 for r in results if r['prediction_correct']) + + # By decision type + by_decision = {} + for decision in ['BUY', 'SELL', 'HOLD']: + decision_results = [r for r in results if r['decision'] == decision] + if decision_results: + decision_correct = sum(1 for r in decision_results if r['prediction_correct']) + by_decision[decision] = { + 'accuracy': round(decision_correct / len(decision_results) * 100, 1), + 'total': len(decision_results), + 'correct': decision_correct + } + else: + by_decision[decision] = {'accuracy': 0, 'total': 0, 'correct': 0} + + # By confidence level + by_confidence = {} + for conf in ['High', 'Medium', 'Low']: + conf_results = [r for r in results if r.get('confidence') == conf] + if conf_results: + conf_correct = sum(1 for r in conf_results if r['prediction_correct']) + by_confidence[conf] = { + 'accuracy': round(conf_correct / len(conf_results) * 100, 1), + 'total': len(conf_results), + 'correct': conf_correct + } + else: + by_confidence[conf] = {'accuracy': 0, 'total': 0, 'correct': 0} + + return { + 
'overall_accuracy': round(correct / total * 100, 1) if total > 0 else 0, + 'total_predictions': total, + 'correct_predictions': correct, + 'by_decision': by_decision, + 'by_confidence': by_confidence + } + + +def update_daily_recommendation_summary(date: str): + """Auto-create/update daily_recommendations from stock_analysis for a date. + + Counts BUY/SELL/HOLD decisions, generates top_picks and stocks_to_avoid, + and upserts the daily_recommendations row. + """ + conn = get_connection() + cursor = conn.cursor() + + try: + # Get all stock analyses for this date + cursor.execute(""" + SELECT symbol, company_name, decision, confidence, risk, raw_analysis + FROM stock_analysis WHERE date = ? + """, (date,)) + rows = cursor.fetchall() + + if not rows: + return + + buy_count = 0 + sell_count = 0 + hold_count = 0 + buy_stocks = [] + sell_stocks = [] + + for row in rows: + decision = (row['decision'] or '').upper() + if decision == 'BUY': + buy_count += 1 + buy_stocks.append({ + 'symbol': row['symbol'], + 'company_name': row['company_name'] or row['symbol'], + 'decision': 'BUY', + 'confidence': row['confidence'] or 'MEDIUM', + 'reason': (row['raw_analysis'] or '')[:200] + }) + elif decision == 'SELL': + sell_count += 1 + sell_stocks.append({ + 'symbol': row['symbol'], + 'company_name': row['company_name'] or row['symbol'], + 'decision': 'SELL', + 'confidence': row['confidence'] or 'MEDIUM', + 'reason': (row['raw_analysis'] or '')[:200] + }) + else: + hold_count += 1 + + total = buy_count + sell_count + hold_count + + # Top picks: up to 5 BUY stocks + top_picks = [ + {'symbol': s['symbol'], 'company_name': s['company_name'], + 'confidence': s['confidence'], 'reason': s['reason']} + for s in buy_stocks[:5] + ] + + # Stocks to avoid: up to 5 SELL stocks + stocks_to_avoid = [ + {'symbol': s['symbol'], 'company_name': s['company_name'], + 'confidence': s['confidence'], 'reason': s['reason']} + for s in sell_stocks[:5] + ] + + cursor.execute(""" + INSERT OR REPLACE INTO 
daily_recommendations + (date, summary_total, summary_buy, summary_sell, summary_hold, top_picks, stocks_to_avoid) + VALUES (?, ?, ?, ?, ?, ?, ?) + """, ( + date, total, buy_count, sell_count, hold_count, + json.dumps(top_picks), + json.dumps(stocks_to_avoid) + )) + conn.commit() + finally: + conn.close() + + +def rebuild_all_daily_recommendations(): + """Rebuild daily_recommendations for all dates that have stock_analysis data. + + This ensures dates with stock_analysis but missing daily_recommendations + entries become visible to the API. + """ + conn = get_connection() + cursor = conn.cursor() + + try: + cursor.execute("SELECT DISTINCT date FROM stock_analysis") + dates = [row['date'] for row in cursor.fetchall()] + finally: + conn.close() + + for date in dates: + update_daily_recommendation_summary(date) + + if dates: + print(f"[DB] Rebuilt daily_recommendations for {len(dates)} dates: {sorted(dates)}") + + # Initialize database on module import init_db() diff --git a/frontend/backend/recommendations.db b/frontend/backend/recommendations.db index 0ec5c455..9d729e07 100644 Binary files a/frontend/backend/recommendations.db and b/frontend/backend/recommendations.db differ diff --git a/frontend/backend/server.py b/frontend/backend/server.py index 110b4fd3..38b537b4 100644 --- a/frontend/backend/server.py +++ b/frontend/backend/server.py @@ -1,6 +1,7 @@ """FastAPI server for Nifty50 AI recommendations.""" from fastapi import FastAPI, HTTPException, BackgroundTasks from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import StreamingResponse from pydantic import BaseModel from typing import Optional import database as db @@ -9,11 +10,18 @@ import os from pathlib import Path from datetime import datetime import threading +from concurrent.futures import ThreadPoolExecutor, as_completed +import asyncio +import json +import time # Add parent directories to path for importing trading agents PROJECT_ROOT = Path(__file__).parent.parent.parent 
sys.path.insert(0, str(PROJECT_ROOT)) +# Import shared logging system +from tradingagents.log_utils import add_log, analysis_logs, log_lock, log_subscribers + # Track running analyses # NOTE: This is not thread-safe for production multi-worker deployments. # For production, use Redis or a database-backed job queue instead. @@ -145,6 +153,11 @@ class RunAnalysisRequest(BaseModel): config: Optional[AnalysisConfig] = None +def _is_cancelled(symbol: str) -> bool: + """Check if an analysis has been cancelled.""" + return running_analyses.get(symbol, {}).get("cancelled", False) + + def run_analysis_task(symbol: str, date: str, analysis_config: dict = None): """Background task to run trading analysis for a stock.""" global running_analyses @@ -163,14 +176,20 @@ def run_analysis_task(symbol: str, date: str, analysis_config: dict = None): running_analyses[symbol] = { "status": "initializing", "started_at": datetime.now().isoformat(), - "progress": "Loading trading agents..." + "progress": "Loading trading agents...", + "cancelled": False, } + add_log("info", "system", f"🚀 Starting analysis for {symbol} on {date}") + add_log("info", "system", f"Config: deep_think={deep_think_model}, quick_think={quick_think_model}") + # Import trading agents + add_log("info", "system", "Loading TradingAgentsGraph module...") from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.default_config import DEFAULT_CONFIG running_analyses[symbol]["progress"] = "Initializing analysis pipeline..." 
+ add_log("info", "system", "Initializing analysis pipeline...") # Create config from user settings config = DEFAULT_CONFIG.copy() @@ -183,14 +202,77 @@ def run_analysis_task(symbol: str, date: str, analysis_config: dict = None): if provider == "anthropic_api" and api_key: os.environ["ANTHROPIC_API_KEY"] = api_key + # Check cancellation before starting + if _is_cancelled(symbol): + add_log("info", "system", f"Analysis for {symbol} was cancelled before starting") + running_analyses[symbol]["status"] = "cancelled" + running_analyses[symbol]["progress"] = "Analysis cancelled" + return + running_analyses[symbol]["status"] = "running" running_analyses[symbol]["progress"] = f"Running market analysis (model: {deep_think_model})..." + add_log("agent", "system", f"Creating TradingAgentsGraph for {symbol}...") + # Initialize and run ta = TradingAgentsGraph(debug=False, config=config) + # Check cancellation before graph execution + if _is_cancelled(symbol): + add_log("info", "system", f"Analysis for {symbol} was cancelled before graph execution") + running_analyses[symbol]["status"] = "cancelled" + running_analyses[symbol]["progress"] = "Analysis cancelled" + return + running_analyses[symbol]["progress"] = f"Analyzing {symbol}..." 
- final_state, decision = ta.propagate(symbol, date) + add_log("agent", "system", f"Starting propagation for {symbol}...") + add_log("data", "data_fetch", f"Fetching market data for {symbol}...") + + final_state, decision, hold_days = ta.propagate(symbol, date) + + # Check cancellation after graph execution (skip saving results) + if _is_cancelled(symbol): + add_log("info", "system", f"Analysis for {symbol} was cancelled after completion — results discarded") + running_analyses[symbol]["status"] = "cancelled" + running_analyses[symbol]["progress"] = "Analysis cancelled (results discarded)" + return + + add_log("success", "system", f"✅ Analysis complete for {symbol}: {decision}") + + # Extract raw analysis from final_state if available + raw_analysis = "" + if final_state: + if "final_trade_decision" in final_state: + raw_analysis = final_state.get("final_trade_decision", "") + elif "risk_debate_state" in final_state: + raw_analysis = final_state.get("risk_debate_state", {}).get("judge_decision", "") + + # Save the analysis result to the database + analysis_data = { + "company_name": symbol, + "decision": decision.upper() if decision else "HOLD", + "confidence": "MEDIUM", + "risk": "MEDIUM", + "raw_analysis": raw_analysis, + "hold_days": hold_days + } + db.save_single_stock_analysis(date, symbol, analysis_data) + add_log("info", "system", f"💾 Saved analysis for {symbol} to database") + + # Auto-update daily recommendation summary (counts, top_picks, stocks_to_avoid) + db.update_daily_recommendation_summary(date) + add_log("info", "system", f"📊 Updated daily recommendation summary for {date}") + + # Auto-trigger backtest calculation for this stock + try: + import backtest_service as bt + bt_result = bt.calculate_and_save_backtest(date, symbol, analysis_data["decision"], analysis_data.get("hold_days")) + if bt_result: + add_log("info", "system", f"📈 Backtest calculated for {symbol}: correct={bt_result.get('prediction_correct')}") + else: + add_log("info", "system", 
f"📈 Backtest not available yet for {symbol} (future date or no price data)") + except Exception as bt_err: + add_log("warning", "system", f"⚠️ Backtest calculation skipped for {symbol}: {bt_err}") running_analyses[symbol] = { "status": "completed", @@ -198,9 +280,16 @@ def run_analysis_task(symbol: str, date: str, analysis_config: dict = None): "progress": f"Analysis complete: {decision}", "decision": decision } + # Clear per-symbol step progress after completion + try: + from tradingagents.log_utils import symbol_progress + symbol_progress.clear(symbol) + except Exception: + pass except Exception as e: error_msg = str(e) if str(e) else f"{type(e).__name__}: No details provided" + add_log("error", "system", f"❌ Error analyzing {symbol}: {error_msg}") running_analyses[symbol] = { "status": "error", "error": error_msg, @@ -295,6 +384,60 @@ async def health_check(): return {"status": "healthy", "database": "connected"} +# ============== Live Log Streaming Endpoint ============== + +@app.get("/stream/logs") +async def stream_logs(): + """Server-Sent Events endpoint for streaming analysis logs.""" + import queue + + # Create a queue for this subscriber + subscriber_queue = queue.Queue(maxsize=100) + + with log_lock: + log_subscribers.append(subscriber_queue) + + async def event_generator(): + try: + # Send initial connection message + yield f"data: {json.dumps({'type': 'info', 'source': 'system', 'message': 'Connected to log stream', 'timestamp': datetime.now().isoformat()})}\n\n" + + # Send any recent logs from buffer + with log_lock: + recent_logs = list(analysis_logs)[-50:] # Last 50 logs + for log in recent_logs: + yield f"data: {json.dumps(log)}\n\n" + + # Stream new logs as they arrive + while True: + try: + # Check for new logs with timeout + log_entry = await asyncio.get_event_loop().run_in_executor( + None, lambda: subscriber_queue.get(timeout=5) + ) + yield f"data: {json.dumps(log_entry)}\n\n" + except queue.Empty: + # Send heartbeat to keep connection alive + 
yield f"data: {json.dumps({'type': 'heartbeat', 'timestamp': datetime.now().isoformat()})}\n\n" + except Exception: + break + finally: + # Remove subscriber on disconnect + with log_lock: + if subscriber_queue in log_subscribers: + log_subscribers.remove(subscriber_queue) + + return StreamingResponse( + event_generator(), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "Access-Control-Allow-Origin": "*", + } + ) + + # ============== Pipeline Data Endpoints ============== @app.get("/recommendations/{date}/{symbol}/pipeline") @@ -395,14 +538,15 @@ async def save_pipeline_data(request: SavePipelineDataRequest): # Track bulk analysis state bulk_analysis_state = { - "status": "idle", # idle, running, completed, error + "status": "idle", # idle, running, completed, error, cancelled "total": 0, "completed": 0, "failed": 0, "current_symbol": None, "started_at": None, "completed_at": None, - "results": {} + "results": {}, + "cancelled": False # Flag to signal cancellation } # List of Nifty 50 stocks @@ -423,11 +567,12 @@ class BulkAnalysisRequest(BaseModel): provider: Optional[str] = "claude_subscription" api_key: Optional[str] = None max_debate_rounds: Optional[int] = 1 + parallel_workers: Optional[int] = 3 @app.post("/analyze/all") async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date: Optional[str] = None): - """Trigger analysis for all Nifty 50 stocks. Runs in background.""" + """Trigger analysis for all Nifty 50 stocks. 
Runs in background with parallel processing.""" global bulk_analysis_state # Check if bulk analysis is already running @@ -443,6 +588,7 @@ async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date: # Build analysis config from request analysis_config = {} + parallel_workers = 3 if request: analysis_config = { "deep_think_model": request.deep_think_model, @@ -451,57 +597,129 @@ async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date: "api_key": request.api_key, "max_debate_rounds": request.max_debate_rounds } + if request.parallel_workers is not None: + parallel_workers = max(1, min(5, request.parallel_workers)) + + # Resume support: skip stocks already analyzed for this date + already_analyzed = set(db.get_analyzed_symbols_for_date(date)) + symbols_to_analyze = [s for s in NIFTY_50_SYMBOLS if s not in already_analyzed] + skipped_count = len(already_analyzed) + + # If all stocks are already analyzed, return immediately + if not symbols_to_analyze: + bulk_analysis_state = { + "status": "completed", + "total": 0, + "total_all": len(NIFTY_50_SYMBOLS), + "skipped": skipped_count, + "completed": 0, + "failed": 0, + "current_symbols": [], + "started_at": datetime.now().isoformat(), + "completed_at": datetime.now().isoformat(), + "results": {}, + "parallel_workers": parallel_workers, + "cancelled": False + } + return { + "message": f"All {skipped_count} stocks already analyzed for {date}", + "date": date, + "total_stocks": 0, + "skipped": skipped_count, + "parallel_workers": parallel_workers, + "status": "completed" + } + + def analyze_single_stock(symbol: str, analysis_date: str, config: dict) -> tuple: + """Analyze a single stock and return (symbol, status, error).""" + try: + # Check if cancelled before starting + if bulk_analysis_state.get("cancelled"): + return (symbol, "cancelled", "Bulk analysis was cancelled") + + run_analysis_task(symbol, analysis_date, config) + + # Wait for completion with timeout + import time + 
max_wait = 600 # 10 minute timeout per stock + waited = 0 + while waited < max_wait: + # Check for cancellation during wait + if bulk_analysis_state.get("cancelled"): + return (symbol, "cancelled", "Bulk analysis was cancelled") + + if symbol not in running_analyses: + return (symbol, "unknown", None) + status = running_analyses[symbol].get("status") + if status != "running" and status != "initializing": + return (symbol, status, None) + time.sleep(2) + waited += 2 + + return (symbol, "timeout", "Analysis timed out after 10 minutes") + + except Exception as e: + return (symbol, "error", str(e)) # Start bulk analysis in background thread - def run_bulk(): + def run_bulk_parallel(): global bulk_analysis_state bulk_analysis_state = { "status": "running", - "total": len(NIFTY_50_SYMBOLS), + "total": len(symbols_to_analyze), + "total_all": len(NIFTY_50_SYMBOLS), + "skipped": skipped_count, "completed": 0, "failed": 0, - "current_symbol": None, + "current_symbols": [], "started_at": datetime.now().isoformat(), "completed_at": None, - "results": {} + "results": {}, + "parallel_workers": parallel_workers, + "cancelled": False } - for symbol in NIFTY_50_SYMBOLS: - try: - bulk_analysis_state["current_symbol"] = symbol - run_analysis_task(symbol, date, analysis_config) + with ThreadPoolExecutor(max_workers=parallel_workers) as executor: + future_to_symbol = { + executor.submit(analyze_single_stock, symbol, date, analysis_config): symbol + for symbol in symbols_to_analyze + } - # Wait for completion - import time - while symbol in running_analyses and running_analyses[symbol].get("status") == "running": - time.sleep(2) + bulk_analysis_state["current_symbols"] = list(symbols_to_analyze[:parallel_workers]) + + for future in as_completed(future_to_symbol): + symbol = future_to_symbol[future] + try: + symbol, status, error = future.result() + bulk_analysis_state["results"][symbol] = status if not error else f"error: {error}" - if symbol in running_analyses: - status = 
running_analyses[symbol].get("status", "unknown") - bulk_analysis_state["results"][symbol] = status if status == "completed": bulk_analysis_state["completed"] += 1 else: bulk_analysis_state["failed"] += 1 - else: - bulk_analysis_state["results"][symbol] = "unknown" + + remaining = [s for s in symbols_to_analyze + if s not in bulk_analysis_state["results"]] + bulk_analysis_state["current_symbols"] = remaining[:parallel_workers] + + except Exception as e: + bulk_analysis_state["results"][symbol] = f"error: {str(e)}" bulk_analysis_state["failed"] += 1 - except Exception as e: - bulk_analysis_state["results"][symbol] = f"error: {str(e)}" - bulk_analysis_state["failed"] += 1 - bulk_analysis_state["status"] = "completed" - bulk_analysis_state["current_symbol"] = None + bulk_analysis_state["current_symbols"] = [] bulk_analysis_state["completed_at"] = datetime.now().isoformat() - thread = threading.Thread(target=run_bulk) + thread = threading.Thread(target=run_bulk_parallel) thread.start() + skipped_msg = f", {skipped_count} already done" if skipped_count > 0 else "" return { - "message": "Bulk analysis started for all Nifty 50 stocks", + "message": f"Bulk analysis started for {len(symbols_to_analyze)} stocks ({parallel_workers} parallel workers{skipped_msg})", "date": date, - "total_stocks": len(NIFTY_50_SYMBOLS), + "total_stocks": len(symbols_to_analyze), + "skipped": skipped_count, + "parallel_workers": parallel_workers, "status": "started" } @@ -509,7 +727,47 @@ async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date: @app.get("/analyze/all/status") async def get_bulk_analysis_status(): """Get the status of bulk analysis.""" - return bulk_analysis_state + # Add backward compatibility for current_symbol (old format) + result = dict(bulk_analysis_state) + if "current_symbols" in result: + result["current_symbol"] = result["current_symbols"][0] if result["current_symbols"] else None + + # Include per-stock step progress for currently-analyzing 
stocks + if result.get("status") == "running" and result.get("current_symbols"): + try: + from tradingagents.log_utils import symbol_progress + stock_progress = {} + for sym in result["current_symbols"]: + stock_progress[sym] = symbol_progress.get(sym) + result["stock_progress"] = stock_progress + except Exception: + pass + + return result + + +@app.post("/analyze/all/cancel") +async def cancel_bulk_analysis(): + """Cancel the running bulk analysis.""" + global bulk_analysis_state + + if bulk_analysis_state.get("status") != "running": + return { + "message": "No bulk analysis is running", + "status": bulk_analysis_state.get("status") + } + + # Set the cancelled flag + bulk_analysis_state["cancelled"] = True + bulk_analysis_state["status"] = "cancelled" + bulk_analysis_state["completed_at"] = datetime.now().isoformat() + + return { + "message": "Bulk analysis cancellation requested", + "completed": bulk_analysis_state.get("completed", 0), + "total": bulk_analysis_state.get("total", 0), + "status": "cancelled" + } @app.get("/analyze/running") @@ -571,7 +829,7 @@ async def run_analysis(symbol: str, background_tasks: BackgroundTasks, request: @app.get("/analyze/{symbol}/status") async def get_analysis_status(symbol: str): - """Get the status of a running or completed analysis.""" + """Get the status of a running or completed analysis, including live pipeline step progress.""" symbol = symbol.upper() if symbol not in running_analyses: @@ -581,11 +839,234 @@ async def get_analysis_status(symbol: str): "message": "No analysis has been run for this stock" } - return { + result = { "symbol": symbol, **running_analyses[symbol] } + # Include live pipeline step progress from step_timer when analysis is running + if running_analyses[symbol].get("status") == "running": + try: + from tradingagents.log_utils import step_timer + + steps = step_timer.get_steps() + if steps: + # Build a live progress summary + STEP_NAMES = { + "market_analyst": "Market Analysis", + 
"social_media_analyst": "Social Media Analysis", + "news_analyst": "News Analysis", + "fundamentals_analyst": "Fundamental Analysis", + "bull_researcher": "Bull Research", + "bear_researcher": "Bear Research", + "research_manager": "Research Manager", + "trader": "Trader Decision", + "aggressive_analyst": "Aggressive Analysis", + "conservative_analyst": "Conservative Analysis", + "neutral_analyst": "Neutral Analysis", + "risk_manager": "Risk Manager", + } + + completed = [k for k, v in steps.items() if v.get("status") == "completed"] + running = [k for k, v in steps.items() if v.get("status") == "running"] + total = 12 + + # Build progress message from live step data + if running: + current_step = STEP_NAMES.get(running[0], running[0]) + result["progress"] = f"Step {len(completed)+1}/{total}: {current_step}..." + elif completed: + last_step = STEP_NAMES.get(completed[-1], completed[-1]) + result["progress"] = f"Step {len(completed)}/{total}: {last_step} done" + + result["steps_completed"] = len(completed) + result["steps_running"] = [STEP_NAMES.get(s, s) for s in running] + result["steps_total"] = total + result["pipeline_steps"] = { + k: {"status": v.get("status"), "duration_ms": v.get("duration_ms")} + for k, v in steps.items() + } + except Exception: + pass # Don't fail status endpoint if step_timer unavailable + + return result + + +@app.post("/analyze/{symbol}/cancel") +async def cancel_analysis(symbol: str): + """Cancel a running analysis for a stock.""" + symbol = symbol.upper() + + if symbol not in running_analyses: + return {"message": f"No analysis found for {symbol}", "status": "not_found"} + + current_status = running_analyses[symbol].get("status") + if current_status not in ("running", "initializing"): + return {"message": f"Analysis for {symbol} is not running (status: {current_status})", "status": current_status} + + # Set cancellation flag — the background thread checks this + running_analyses[symbol]["cancelled"] = True + 
running_analyses[symbol]["status"] = "cancelled" + running_analyses[symbol]["progress"] = "Cancellation requested..." + running_analyses[symbol]["completed_at"] = datetime.now().isoformat() + + add_log("info", "system", f"🛑 Cancellation requested for {symbol}") + + return { + "message": f"Cancellation requested for {symbol}", + "symbol": symbol, + "status": "cancelled" + } + + +# ============== Backtest Endpoints ============== +# NOTE: Static routes must come BEFORE parameterized routes to avoid +# "accuracy" being matched as a {date} parameter. + +@app.get("/backtest/accuracy") +async def get_accuracy_metrics(): + """Get overall backtest accuracy metrics.""" + metrics = db.calculate_accuracy_metrics() + return metrics + + +@app.get("/backtest/{date}/{symbol}") +async def get_backtest_result(date: str, symbol: str): + """Get backtest result for a specific stock and date. + + Returns pre-calculated results only (no on-demand yfinance fetching) + to avoid blocking the event loop. + """ + result = db.get_backtest_result(date, symbol.upper()) + if not result: + return {'available': False, 'reason': 'Backtest not yet calculated'} + + return { + 'available': True, + 'prediction_correct': result['prediction_correct'], + 'actual_return_1d': result['return_1d'], + 'actual_return_1w': result['return_1w'], + 'actual_return_1m': result['return_1m'], + 'price_at_prediction': result['price_at_prediction'], + 'current_price': result.get('price_1m_later') or result.get('price_1w_later'), + 'hold_days': result.get('hold_days'), + } + + +@app.get("/backtest/{date}") +async def get_backtest_results_for_date(date: str): + """Get all backtest results for a specific date.""" + results = db.get_backtest_results_by_date(date) + return {"date": date, "results": results} + + +@app.post("/backtest/{date}/calculate") +async def calculate_backtest_for_date(date: str): + """Calculate backtest for all recommendations on a date (runs in background thread).""" + import backtest_service as bt + + 
# Run calculation in a separate thread to avoid blocking the event loop + def run_backtest(): + try: + bt.backtest_all_recommendations_for_date(date) + except Exception as e: + print(f"Backtest calculation error for {date}: {e}") + + thread = threading.Thread(target=run_backtest) + thread.start() + return {"status": "started", "date": date, "message": "Backtest calculation started in background"} + + +# ============== Stock Price History Endpoint ============== + +@app.get("/stocks/{symbol}/prices") +async def get_stock_price_history(symbol: str, days: int = 90): + """Get real historical closing prices for a stock from yfinance.""" + try: + import yfinance as yf + from datetime import timedelta + + yf_symbol = symbol if '.' in symbol else f"{symbol}.NS" + ticker = yf.Ticker(yf_symbol) + + end_date = datetime.now() + start_date = end_date - timedelta(days=days) + + hist = ticker.history(start=start_date.strftime('%Y-%m-%d'), + end=end_date.strftime('%Y-%m-%d')) + + if hist.empty: + return {"symbol": symbol, "prices": [], "error": "No price data found"} + + prices = [ + {"date": idx.strftime('%Y-%m-%d'), "price": round(float(row['Close']), 2)} + for idx, row in hist.iterrows() + ] + + return {"symbol": symbol, "prices": prices} + except ImportError: + return {"symbol": symbol, "prices": [], "error": "yfinance not installed"} + except Exception as e: + return {"symbol": symbol, "prices": [], "error": str(e)} + + +# ============== Nifty50 Index Endpoint ============== + +@app.get("/nifty50/history") +async def get_nifty50_history(): + """Get Nifty50 index closing prices for recommendation date range.""" + try: + import yfinance as yf + from datetime import timedelta + + # Get the date range from our recommendations + dates = db.get_all_dates() + if not dates: + return {"dates": [], "prices": {}} + + # Get date range with buffer for daily return calculation + start_date = (datetime.strptime(min(dates), "%Y-%m-%d") - timedelta(days=7)).strftime("%Y-%m-%d") + end_date = 
(datetime.strptime(max(dates), "%Y-%m-%d") + timedelta(days=7)).strftime("%Y-%m-%d") + + # Fetch ^NSEI data + nifty = yf.Ticker("^NSEI") + hist = nifty.history(start=start_date, end=end_date, interval="1d") + + prices = {} + for idx, row in hist.iterrows(): + date_str = idx.strftime("%Y-%m-%d") + prices[date_str] = round(float(row['Close']), 2) + + return {"dates": sorted(prices.keys()), "prices": prices} + except ImportError: + return {"dates": [], "prices": {}, "error": "yfinance not installed"} + except Exception as e: + return {"dates": [], "prices": {}, "error": str(e)} + + +@app.on_event("startup") +async def startup_event(): + """Rebuild daily_recommendations and trigger backtest calculations at startup.""" + db.rebuild_all_daily_recommendations() + + # Trigger backtest calculation for all dates in background + def startup_backtest(): + import backtest_service as bt + dates = db.get_all_dates() + for date in dates: + existing = db.get_backtest_results_by_date(date) + rec = db.get_recommendation_by_date(date) + expected_count = len(rec.get('analysis', {})) if rec else 0 + if len(existing) < expected_count: + print(f"[Backtest] Calculating for {date} ({len(existing)}/{expected_count} done)...") + try: + bt.backtest_all_recommendations_for_date(date) + except Exception as e: + print(f"[Backtest] Error for {date}: {e}") + + thread = threading.Thread(target=startup_backtest, daemon=True) + thread.start() + if __name__ == "__main__": import uvicorn diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index bef80a5b..e5771e51 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -1,9 +1,11 @@ import { Routes, Route } from 'react-router-dom'; import { ThemeProvider } from './contexts/ThemeContext'; import { SettingsProvider } from './contexts/SettingsContext'; +import { NotificationProvider } from './contexts/NotificationContext'; import Header from './components/Header'; import Footer from './components/Footer'; import SettingsModal from 
'./components/SettingsModal'; +import ToastContainer from './components/Toast'; import Dashboard from './pages/Dashboard'; import History from './pages/History'; import StockDetail from './pages/StockDetail'; @@ -13,19 +15,22 @@ function App() { return ( -
-
-
- - } /> - } /> - } /> - } /> - -
-
- -
+ +
+
+
+ + } /> + } /> + } /> + } /> + +
+
+
); diff --git a/frontend/src/components/AccuracyTrendChart.tsx b/frontend/src/components/AccuracyTrendChart.tsx index ab56a20b..ccdc22cb 100644 --- a/frontend/src/components/AccuracyTrendChart.tsx +++ b/frontend/src/components/AccuracyTrendChart.tsx @@ -1,13 +1,23 @@ import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, Legend } from 'recharts'; import { getAccuracyTrend } from '../data/recommendations'; +export interface AccuracyTrendPoint { + date: string; + overall: number; + buy: number; + sell: number; + hold: number; +} + interface AccuracyTrendChartProps { height?: number; className?: string; + data?: AccuracyTrendPoint[]; // Optional prop for real data } -export default function AccuracyTrendChart({ height = 200, className = '' }: AccuracyTrendChartProps) { - const data = getAccuracyTrend(); +export default function AccuracyTrendChart({ height = 200, className = '', data: propData }: AccuracyTrendChartProps) { + // Use provided data or fall back to mock data + const data = propData || getAccuracyTrend(); if (data.length === 0) { return ( diff --git a/frontend/src/components/CumulativeReturnChart.tsx b/frontend/src/components/CumulativeReturnChart.tsx index d5b22c9c..5f90e388 100644 --- a/frontend/src/components/CumulativeReturnChart.tsx +++ b/frontend/src/components/CumulativeReturnChart.tsx @@ -1,13 +1,16 @@ import { AreaChart, Area, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, ReferenceLine } from 'recharts'; import { getCumulativeReturns } from '../data/recommendations'; +import type { CumulativeReturnPoint } from '../types'; interface CumulativeReturnChartProps { height?: number; className?: string; + data?: CumulativeReturnPoint[]; // Optional prop for real data } -export default function CumulativeReturnChart({ height = 160, className = '' }: CumulativeReturnChartProps) { - const data = getCumulativeReturns(); +export default function CumulativeReturnChart({ height = 160, className = '', data: propData }: 
CumulativeReturnChartProps) { + // Use provided data or fall back to mock data + const data = propData || getCumulativeReturns(); if (data.length === 0) { return ( diff --git a/frontend/src/components/IndexComparisonChart.tsx b/frontend/src/components/IndexComparisonChart.tsx index 59de0ab6..cbd2d5d5 100644 --- a/frontend/src/components/IndexComparisonChart.tsx +++ b/frontend/src/components/IndexComparisonChart.tsx @@ -1,14 +1,17 @@ import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, Legend, ReferenceLine } from 'recharts'; import { TrendingUp, TrendingDown } from 'lucide-react'; import { getCumulativeReturns } from '../data/recommendations'; +import type { CumulativeReturnPoint } from '../types'; -interface IndexComparisonChartProps { +export interface IndexComparisonChartProps { height?: number; className?: string; + data?: CumulativeReturnPoint[]; // Optional prop for real data } -export default function IndexComparisonChart({ height = 220, className = '' }: IndexComparisonChartProps) { - const data = getCumulativeReturns(); +export default function IndexComparisonChart({ height = 220, className = '', data: propData }: IndexComparisonChartProps) { + // Use provided data or fall back to mock data + const data = propData || getCumulativeReturns(); if (data.length === 0) { return ( diff --git a/frontend/src/components/InfoModal.tsx b/frontend/src/components/InfoModal.tsx new file mode 100644 index 00000000..9fe2b746 --- /dev/null +++ b/frontend/src/components/InfoModal.tsx @@ -0,0 +1,82 @@ +import { X, Info } from 'lucide-react'; +import type { ReactNode } from 'react'; + +interface InfoModalProps { + isOpen: boolean; + onClose: () => void; + title: string; + children: ReactNode; + icon?: ReactNode; +} + +export default function InfoModal({ isOpen, onClose, title, children, icon }: InfoModalProps) { + if (!isOpen) return null; + + return ( +
+ {/* Backdrop */} +
+ + {/* Modal */} +
+
+ {/* Header */} +
+
+ {icon || } +

{title}

+
+ +
+ + {/* Content */} +
+ {children} +
+ + {/* Footer */} +
+ +
+
+
+
+ ); +} + +// Reusable info button component +interface InfoButtonProps { + onClick: () => void; + className?: string; + size?: 'sm' | 'md'; +} + +export function InfoButton({ onClick, className = '', size = 'sm' }: InfoButtonProps) { + const sizeClasses = size === 'sm' ? 'w-3.5 h-3.5' : 'w-4 h-4'; + + return ( + + ); +} diff --git a/frontend/src/components/OverallReturnModal.tsx b/frontend/src/components/OverallReturnModal.tsx index 4eddfe67..95b99724 100644 --- a/frontend/src/components/OverallReturnModal.tsx +++ b/frontend/src/components/OverallReturnModal.tsx @@ -1,16 +1,27 @@ import { X, Activity } from 'lucide-react'; import { getOverallReturnBreakdown } from '../data/recommendations'; import CumulativeReturnChart from './CumulativeReturnChart'; +import type { CumulativeReturnPoint } from '../types'; + +export interface OverallReturnBreakdown { + dailyReturns: { date: string; return: number; multiplier: number; cumulative: number }[]; + finalMultiplier: number; + finalReturn: number; + formula: string; +} interface OverallReturnModalProps { isOpen: boolean; onClose: () => void; + breakdown?: OverallReturnBreakdown; // Optional prop for real data + cumulativeData?: CumulativeReturnPoint[]; // Optional prop for chart data } -export default function OverallReturnModal({ isOpen, onClose }: OverallReturnModalProps) { +export default function OverallReturnModal({ isOpen, onClose, breakdown: propBreakdown, cumulativeData }: OverallReturnModalProps) { if (!isOpen) return null; - const breakdown = getOverallReturnBreakdown(); + // Use provided breakdown or fall back to mock data + const breakdown = propBreakdown || getOverallReturnBreakdown(); return (
@@ -55,7 +66,7 @@ export default function OverallReturnModal({ isOpen, onClose }: OverallReturnMod

Portfolio Growth

- +
diff --git a/frontend/src/components/PortfolioSimulator.tsx b/frontend/src/components/PortfolioSimulator.tsx index 285ca059..d928209d 100644 --- a/frontend/src/components/PortfolioSimulator.tsx +++ b/frontend/src/components/PortfolioSimulator.tsx @@ -1,39 +1,271 @@ import { useState, useMemo } from 'react'; -import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, ReferenceLine } from 'recharts'; -import { Calculator, ChevronDown, ChevronUp, IndianRupee } from 'lucide-react'; -import { getOverallReturnBreakdown } from '../data/recommendations'; +import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, ReferenceLine, Legend, BarChart, Bar, Cell, LabelList } from 'recharts'; +import { Calculator, ChevronDown, ChevronUp, IndianRupee, Settings2, BarChart3, Info, TrendingUp, TrendingDown, ArrowRightLeft, Wallet, PiggyBank, Receipt, HelpCircle, AlertCircle } from 'lucide-react'; +import { sampleRecommendations, getNifty50IndexHistory, getBacktestResult } from '../data/recommendations'; +import { calculateBrokerage, formatINR, type BrokerageBreakdown } from '../utils/brokerageCalculator'; +import InfoModal, { InfoButton } from './InfoModal'; +import type { Decision, DailyRecommendation } from '../types'; interface PortfolioSimulatorProps { className?: string; + recommendations?: DailyRecommendation[]; + isUsingMockData?: boolean; + nifty50Prices?: Record; + allBacktestData?: Record>; } -export default function PortfolioSimulator({ className = '' }: PortfolioSimulatorProps) { +export type InvestmentMode = 'all50' | 'topPicks'; + +interface TradeRecord { + symbol: string; + entryDate: string; + entryPrice: number; + exitDate: string; + exitPrice: number; + quantity: number; + brokerage: BrokerageBreakdown; + profitLoss: number; +} + +interface TradeStats { + totalTrades: number; + buyTrades: number; + sellTrades: number; + brokerageBreakdown: BrokerageBreakdown; + trades: TradeRecord[]; +} + +// Smart trade counting 
logic using Zerodha brokerage for Equity Delivery +function calculateSmartTrades( + recommendations: typeof sampleRecommendations, + mode: InvestmentMode, + startingAmount: number, + nifty50Prices?: Record, + allBacktestData?: Record> +): { + portfolioData: Array<{ date: string; rawDate: string; value: number; niftyValue: number; return: number; cumulative: number }>; + stats: TradeStats; + openPositions: Record; +} { + const hasRealNifty = nifty50Prices && Object.keys(nifty50Prices).length > 0; + const niftyHistory = hasRealNifty ? null : getNifty50IndexHistory(); + const sortedRecs = [...recommendations].sort((a, b) => new Date(a.date).getTime() - new Date(b.date).getTime()); + + // Precompute real Nifty start price for comparison + const sortedNiftyDates = hasRealNifty ? Object.keys(nifty50Prices).sort() : []; + const niftyStartPrice = hasRealNifty && sortedNiftyDates.length > 0 + ? nifty50Prices[sortedNiftyDates[0]] + : null; + + // Track open positions per stock + const openPositions: Record = {}; + const completedTrades: TradeRecord[] = []; + let buyTrades = 0; + let sellTrades = 0; + + const getStocksToTrack = (rec: typeof recommendations[0]) => { + if (mode === 'topPicks') { + return rec.top_picks.map(p => p.symbol); + } + return Object.keys(rec.analysis); + }; + + const stockCount = mode === 'topPicks' ? 
3 : 50; + const investmentPerStock = startingAmount / stockCount; + + let portfolioValue = startingAmount; + let niftyValue = startingAmount; + const niftyStartValue = niftyHistory?.[0]?.value || 21500; + + const portfolioData = sortedRecs.map((rec) => { + const stocks = getStocksToTrack(rec); + let dayReturn = 0; + let stocksTracked = 0; + + stocks.forEach(symbol => { + const analysis = rec.analysis[symbol]; + if (!analysis || !analysis.decision) return; + + const decision = analysis.decision; + const prevPosition = openPositions[symbol]; + + const backtest = getBacktestResult(symbol); + const currentPrice = backtest?.current_price || 1000; + const quantity = Math.floor(investmentPerStock / currentPrice); + + if (decision === 'BUY') { + if (!prevPosition) { + openPositions[symbol] = { entryDate: rec.date, entryPrice: currentPrice, decision }; + buyTrades++; + } else if (prevPosition.decision === 'SELL') { + buyTrades++; + openPositions[symbol] = { entryDate: rec.date, entryPrice: currentPrice, decision }; + } else { + openPositions[symbol].decision = decision; + } + // Use real backtest return if available, otherwise 0 (neutral) + const realBuyReturn = allBacktestData?.[rec.date]?.[symbol]; + dayReturn += realBuyReturn !== undefined ? realBuyReturn : 0; + stocksTracked++; + } else if (decision === 'HOLD') { + if (prevPosition) { + openPositions[symbol].decision = decision; + } + // Use real backtest return if available, otherwise 0 (neutral) + const realHoldReturn = allBacktestData?.[rec.date]?.[symbol]; + dayReturn += realHoldReturn !== undefined ? realHoldReturn : 0; + stocksTracked++; + } else if (decision === 'SELL') { + if (prevPosition && (prevPosition.decision === 'BUY' || prevPosition.decision === 'HOLD')) { + sellTrades++; + + // Use real backtest return for exit price if available, otherwise break-even + const realSellReturn = allBacktestData?.[rec.date]?.[symbol]; + const exitPrice = realSellReturn !== undefined + ? 
currentPrice * (1 + realSellReturn / 100) + : currentPrice; + const brokerage = calculateBrokerage({ + buyPrice: prevPosition.entryPrice, + sellPrice: exitPrice, + quantity, + tradeType: 'delivery', + }); + + const grossProfit = (exitPrice - prevPosition.entryPrice) * quantity; + const profitLoss = grossProfit - brokerage.totalCharges; + + completedTrades.push({ + symbol, + entryDate: prevPosition.entryDate, + entryPrice: prevPosition.entryPrice, + exitDate: rec.date, + exitPrice, + quantity, + brokerage, + profitLoss, + }); + + delete openPositions[symbol]; + } + stocksTracked++; + } + }); + + const avgDayReturn = stocksTracked > 0 ? dayReturn / stocksTracked : 0; + portfolioValue = portfolioValue * (1 + avgDayReturn / 100); + + // Use real Nifty50 prices if available, otherwise use mock history + if (hasRealNifty && niftyStartPrice) { + const closestDate = sortedNiftyDates.find(d => d >= rec.date) || sortedNiftyDates[sortedNiftyDates.length - 1]; + if (closestDate && nifty50Prices[closestDate]) { + niftyValue = startingAmount * (nifty50Prices[closestDate] / niftyStartPrice); + } + } else if (niftyHistory) { + const niftyPoint = niftyHistory.find(n => n.date === rec.date); + if (niftyPoint) { + niftyValue = startingAmount * (niftyPoint.value / niftyStartValue); + } + } + + return { + date: new Date(rec.date).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' }), + rawDate: rec.date, + value: Math.round(portfolioValue), + niftyValue: Math.round(niftyValue), + return: avgDayReturn, + cumulative: ((portfolioValue - startingAmount) / startingAmount) * 100, + }; + }); + + const totalBrokerage = completedTrades.reduce( + (acc, trade) => ({ + brokerage: acc.brokerage + trade.brokerage.brokerage, + stt: acc.stt + trade.brokerage.stt, + exchangeCharges: acc.exchangeCharges + trade.brokerage.exchangeCharges, + sebiCharges: acc.sebiCharges + trade.brokerage.sebiCharges, + gst: acc.gst + trade.brokerage.gst, + stampDuty: acc.stampDuty + trade.brokerage.stampDuty, + 
totalCharges: acc.totalCharges + trade.brokerage.totalCharges, + netProfit: acc.netProfit + trade.brokerage.netProfit, + turnover: acc.turnover + trade.brokerage.turnover, + }), + { brokerage: 0, stt: 0, exchangeCharges: 0, sebiCharges: 0, gst: 0, stampDuty: 0, totalCharges: 0, netProfit: 0, turnover: 0 } + ); + + return { + portfolioData, + stats: { + totalTrades: buyTrades + sellTrades, + buyTrades, + sellTrades, + brokerageBreakdown: totalBrokerage, + trades: completedTrades, + }, + openPositions, + }; +} + +// Helper for consistent positive/negative color classes +function getValueColorClass(value: number): string { + return value >= 0 + ? 'text-green-600 dark:text-green-400' + : 'text-red-600 dark:text-red-400'; +} + +export default function PortfolioSimulator({ + className = '', + recommendations = sampleRecommendations, + isUsingMockData = true, // Default to true since this uses simulated returns + nifty50Prices, + allBacktestData, +}: PortfolioSimulatorProps) { const [startingAmount, setStartingAmount] = useState(100000); const [showBreakdown, setShowBreakdown] = useState(false); + const [showSettings, setShowSettings] = useState(false); + const [showBrokerageDetails, setShowBrokerageDetails] = useState(false); + const [showTradeWaterfall, setShowTradeWaterfall] = useState(false); + const [investmentMode, setInvestmentMode] = useState('all50'); + const [includeBrokerage, setIncludeBrokerage] = useState(true); - const breakdown = useMemo(() => getOverallReturnBreakdown(), []); + // Modal state - single state for all modals instead of 7 separate booleans + type ModalType = 'totalTrades' | 'buyTrades' | 'sellTrades' | 'portfolioValue' | 'profitLoss' | 'comparison' | null; + const [activeModal, setActiveModal] = useState(null); - // Calculate portfolio values over time - const portfolioData = useMemo(() => { - let value = startingAmount; - return breakdown.dailyReturns.map(day => { - value = value * day.multiplier; - return { - date: new 
Date(day.date).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' }), - value: Math.round(value), - return: day.return, - cumulative: day.cumulative, - }; - }); - }, [breakdown.dailyReturns, startingAmount]); + const { portfolioData, stats, openPositions } = useMemo(() => { + return calculateSmartTrades( + recommendations, + investmentMode, + startingAmount, + nifty50Prices, + allBacktestData + ); + }, [recommendations, investmentMode, startingAmount, nifty50Prices, allBacktestData]); - const currentValue = portfolioData.length > 0 - ? portfolioData[portfolioData.length - 1].value - : startingAmount; - const totalReturn = ((currentValue - startingAmount) / startingAmount) * 100; - const profitLoss = currentValue - startingAmount; + const lastDataPoint = portfolioData[portfolioData.length - 1]; + const currentValue = lastDataPoint?.value ?? startingAmount; + const niftyValue = lastDataPoint?.niftyValue ?? startingAmount; + + const totalCharges = includeBrokerage ? stats.brokerageBreakdown.totalCharges : 0; + const finalValue = currentValue - totalCharges; + const totalReturn = ((finalValue - startingAmount) / startingAmount) * 100; + const profitLoss = finalValue - startingAmount; const isPositive = profitLoss >= 0; + const niftyReturn = ((niftyValue - startingAmount) / startingAmount) * 100; + const outperformance = totalReturn - niftyReturn; + + // Calculate Y-axis domain with padding + const yAxisDomain = useMemo(() => { + if (portfolioData.length === 0) return [0, startingAmount * 1.2]; + + const allValues = portfolioData.flatMap(d => [d.value, d.niftyValue]); + const minValue = Math.min(...allValues); + const maxValue = Math.max(...allValues); + const padding = (maxValue - minValue) * 0.1; + + return [Math.floor((minValue - padding) / 1000) * 1000, Math.ceil((maxValue + padding) / 1000) * 1000]; + }, [portfolioData, startingAmount]); + const handleAmountChange = (e: React.ChangeEvent) => { const value = parseInt(e.target.value.replace(/,/g, ''), 10); 
if (!isNaN(value) && value >= 0) { @@ -41,21 +273,73 @@ export default function PortfolioSimulator({ className = '' }: PortfolioSimulato } }; - const formatCurrency = (value: number) => { - return new Intl.NumberFormat('en-IN', { - style: 'currency', - currency: 'INR', - maximumFractionDigits: 0, - }).format(value); - }; + const openPositionsCount = Object.keys(openPositions).length; return (
-
- -

Portfolio Simulator

+
+
+ +

Portfolio Simulator

+
+
+ {/* Settings Panel */} + {showSettings && ( +
+
+ +
+ + +
+
+ +
+ +
+
+ )} + {/* Input Section */}
@@ -89,24 +373,158 @@ export default function PortfolioSimulator({ className = '' }: PortfolioSimulato {/* Results Section */}
-
-
Current Value
-
- {formatCurrency(currentValue)} +
+
+ Final Portfolio Value + setActiveModal('portfolioValue')} /> +
+
+ {formatINR(finalValue, 0)}
-
Profit/Loss
-
- {isPositive ? '+' : ''}{formatCurrency(profitLoss)} +
+ Net Profit/Loss + setActiveModal('profitLoss')} /> +
+
+ {isPositive ? '+' : ''}{formatINR(profitLoss, 0)} ({isPositive ? '+' : ''}{totalReturn.toFixed(1)}%)
- {/* Chart */} + {/* Trade Stats with Info Buttons */} +
+
setActiveModal('totalTrades')} + > +
{stats.totalTrades}
+
+ Total Trades +
+
+
setActiveModal('buyTrades')} + > +
{stats.buyTrades}
+
+ Buy Trades +
+
+
setActiveModal('sellTrades')} + > +
{stats.sellTrades}
+
+ Sell Trades +
+
+
setShowBrokerageDetails(!showBrokerageDetails)} + title="Click for detailed breakdown" + > +
{formatINR(totalCharges, 0)}
+
+ Total Charges +
+
+
+ + {/* Open Positions Badge */} + {openPositionsCount > 0 && ( +
+
+ + + Open Positions (not yet sold) + + {openPositionsCount} stocks +
+
+ )} + + {/* Brokerage Breakdown */} + {showBrokerageDetails && includeBrokerage && ( +
+
+ + Zerodha Equity Delivery Charges +
+
+
+ Brokerage: + {formatINR(stats.brokerageBreakdown.brokerage)} +
+
+ STT: + {formatINR(stats.brokerageBreakdown.stt)} +
+
+ Exchange Charges: + {formatINR(stats.brokerageBreakdown.exchangeCharges)} +
+
+ SEBI Charges: + {formatINR(stats.brokerageBreakdown.sebiCharges)} +
+
+ GST (18%): + {formatINR(stats.brokerageBreakdown.gst)} +
+
+ Stamp Duty: + {formatINR(stats.brokerageBreakdown.stampDuty)} +
+
+
+ Total Turnover: + {formatINR(stats.brokerageBreakdown.turnover, 0)} +
+
+ )} + + {/* Comparison with Nifty */} +
setActiveModal('comparison')} + > +
+
+ + vs Nifty 50 Index +
+ +
+
+
+
+ {totalReturn >= 0 ? '+' : ''}{totalReturn.toFixed(1)}% +
+
AI Strategy
+
+
+
+ {niftyReturn >= 0 ? '+' : ''}{niftyReturn.toFixed(1)}% +
+
Nifty 50
+
+
+
= 0 ? 'text-nifty-600 dark:text-nifty-400' : 'text-red-600 dark:text-red-400'}`}> + {outperformance >= 0 ? '+' : ''}{outperformance.toFixed(1)}% +
+
Outperformance
+
+
+
+ + {/* Chart with Nifty Comparison - Fixed Y-axis */} {portfolioData.length > 0 && ( -
+
@@ -117,9 +535,10 @@ export default function PortfolioSimulator({ className = '' }: PortfolioSimulato /> formatCurrency(v).replace('₹', '')} + tickFormatter={(v) => formatINR(v, 0).replace('₹', '')} className="text-gray-500 dark:text-gray-400" width={60} + domain={yAxisDomain} /> [formatCurrency(value as number), 'Value']} + formatter={(value, name) => [ + formatINR(Number(value) || 0, 0), + name === 'value' ? 'AI Strategy' : 'Nifty 50' + ]} + /> + value === 'value' ? 'AI Strategy' : 'Nifty 50'} /> +
)} + {/* Trade Waterfall Toggle */} + + + {/* Trade Waterfall Chart */} + {showTradeWaterfall && stats.trades.length > 0 && ( +
+
+ Each bar represents a trade from buy to sell. Green = Profit, Red = Loss. +
+
+
+ + ({ + ...t, + idx: i, + displayName: `${t.symbol}`, + duration: `${new Date(t.entryDate).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' })} → ${new Date(t.exitDate).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' })}`, + }))} + layout="vertical" + margin={{ top: 5, right: 60, bottom: 5, left: 70 }} + > + + formatINR(v, 0)} + domain={['dataMin', 'dataMax']} + /> + + [formatINR(Number(value) || 0, 2), 'P/L']} + labelFormatter={(_, payload) => { + if (payload && payload[0]) { + const d = payload[0].payload; + return `${d.symbol}: ${d.duration}`; + } + return ''; + }} + /> + + {stats.trades.map((trade, index) => ( + = 0 ? '#22c55e' : '#ef4444'} + /> + ))} + formatINR(Number(v) || 0, 0)} + style={{ fontSize: 9, fill: '#6b7280' }} + /> + + + +
+
+
+ )} + {/* Daily Breakdown (Collapsible) */}
)} + {/* Demo Data Notice */} + {isUsingMockData && ( +
+ + + Simulation uses demo data. Results are illustrative only. + +
+ )} +

- Simulated returns based on AI recommendation performance. Past performance does not guarantee future results. + Simulated using Zerodha Equity Delivery rates (0% brokerage, STT 0.1%, Exchange 0.00345%, SEBI 0.0001%, Stamp 0.015%). + {investmentMode === 'topPicks' ? ' Investing in Top Picks only.' : ' Investing in all 50 stocks.'} + {includeBrokerage ? ` Total Charges: ${formatINR(totalCharges, 0)}` : ''}

+ + {/* Info Modals */} + setActiveModal(null)} + title="Total Trades" + icon={} + > +
+

Total Trades represents the sum of all buy and sell transactions executed during the simulation period.

+
+
Calculation:
+ Total Trades = Buy Trades + Sell Trades +
= {stats.buyTrades} + {stats.sellTrades} = {stats.totalTrades}
+
+

Note: A complete round-trip trade (buy then sell) counts as 2 trades.

+
+
+ + setActiveModal(null)} + title="Buy Trades" + icon={} + > +
+

Buy Trades counts when a new position is opened based on AI's BUY recommendation.

+
+
When is a Buy Trade counted?
+
    +
  • When AI recommends BUY and no position exists
  • +
  • When AI recommends BUY after a previous SELL
  • +
+
+

Note: If AI recommends BUY while already holding (from previous BUY or HOLD), no new buy trade is counted - the position is simply carried forward.

+
+
+ + setActiveModal(null)} + title="Sell Trades" + icon={} + > +
+

Sell Trades counts when a position is closed based on AI's SELL recommendation.

+
+
When is a Sell Trade counted?
+
    +
  • When AI recommends SELL while holding a position
  • +
  • Position must have been opened via BUY or carried via HOLD
  • +
+
+

Note: Brokerage is calculated when a sell trade completes a round-trip transaction.

+
+
+ + setActiveModal(null)} + title="Final Portfolio Value" + icon={} + > +
+

Final Portfolio Value is the total worth of your investments at the end of the simulation period.

+
+
Calculation:
+ Final Value = Portfolio Value - Total Charges +
+ = {formatINR(currentValue, 0)} - {formatINR(totalCharges, 0)} = {formatINR(finalValue, 0)} +
+
+

This includes all realized gains/losses from completed trades and deducts Zerodha brokerage charges.

+
+
+ + setActiveModal(null)} + title="Net Profit/Loss" + icon={} + > +
+

Net Profit/Loss shows your actual earnings or losses after all charges.

+
+
Calculation:
+ Net P/L = Final Value - Starting Investment +
+ = {formatINR(finalValue, 0)} - {formatINR(startingAmount, 0)} = = 0 ? 'text-green-600' : 'text-red-600'}>{formatINR(profitLoss, 0)} +
+
+ Return = ({formatINR(profitLoss, 0)} / {formatINR(startingAmount, 0)}) × 100 = {totalReturn.toFixed(2)}% +
+
+
+
+ + setActiveModal(null)} + title="vs Nifty 50 Index" + icon={} + > +
+

This compares the AI strategy's performance against simply investing in the Nifty 50 index.

+
+
+ AI Strategy Return: + = 0 ? 'text-green-600' : 'text-red-600'}>{totalReturn.toFixed(2)}% +
+
+ Nifty 50 Return: + = 0 ? 'text-green-600' : 'text-red-600'}>{niftyReturn.toFixed(2)}% +
+
+ Outperformance (Alpha): + = 0 ? 'text-nifty-600' : 'text-red-600'}>{outperformance.toFixed(2)}% +
+
+

+ {outperformance >= 0 + ? `The AI strategy beat the Nifty 50 index by ${outperformance.toFixed(2)} percentage points.` + : `The AI strategy underperformed the Nifty 50 index by ${Math.abs(outperformance).toFixed(2)} percentage points.` + } +

+
+
); } + +// Export the type for use in other components +export { type InvestmentMode as PortfolioInvestmentMode }; diff --git a/frontend/src/components/ReturnDistributionChart.tsx b/frontend/src/components/ReturnDistributionChart.tsx index 2276d30b..cca1aea4 100644 --- a/frontend/src/components/ReturnDistributionChart.tsx +++ b/frontend/src/components/ReturnDistributionChart.tsx @@ -2,15 +2,18 @@ import { useState } from 'react'; import { BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer } from 'recharts'; import { X } from 'lucide-react'; import { getReturnDistribution } from '../data/recommendations'; +import type { ReturnBucket } from '../types'; -interface ReturnDistributionChartProps { +export interface ReturnDistributionChartProps { height?: number; className?: string; + data?: ReturnBucket[]; // Optional prop for real data } -export default function ReturnDistributionChart({ height = 200, className = '' }: ReturnDistributionChartProps) { +export default function ReturnDistributionChart({ height = 200, className = '', data: propData }: ReturnDistributionChartProps) { const [selectedBucket, setSelectedBucket] = useState<{ range: string; stocks: string[] } | null>(null); - const data = getReturnDistribution(); + // Use provided data or fall back to mock data + const data = propData || getReturnDistribution(); if (data.every(d => d.count === 0)) { return ( diff --git a/frontend/src/components/RiskMetricsCard.tsx b/frontend/src/components/RiskMetricsCard.tsx index 238c0799..0e93b557 100644 --- a/frontend/src/components/RiskMetricsCard.tsx +++ b/frontend/src/components/RiskMetricsCard.tsx @@ -1,36 +1,50 @@ -import { HelpCircle, TrendingUp, TrendingDown, Activity, Target } from 'lucide-react'; +import { TrendingUp, TrendingDown, Activity, Target } from 'lucide-react'; import { calculateRiskMetrics } from '../data/recommendations'; import { useState } from 'react'; +import InfoModal, { InfoButton } from './InfoModal'; +import type { 
RiskMetrics } from '../types'; -interface RiskMetricsCardProps { +export interface RiskMetricsCardProps { className?: string; + metrics?: RiskMetrics; // Optional prop for real data } -export default function RiskMetricsCard({ className = '' }: RiskMetricsCardProps) { - const [showTooltip, setShowTooltip] = useState(null); - const metrics = calculateRiskMetrics(); +type MetricModal = 'sharpe' | 'drawdown' | 'winloss' | 'winrate' | null; - const tooltips: Record = { - sharpe: 'Sharpe Ratio measures risk-adjusted returns. Higher is better (>1 is good, >2 is excellent).', - drawdown: 'Maximum Drawdown shows the largest peak-to-trough decline. Lower is better.', - winloss: 'Win/Loss Ratio compares average winning trade to average losing trade. Higher means bigger wins than losses.', - winrate: 'Win Rate is the percentage of predictions that were correct.', - }; +export default function RiskMetricsCard({ className = '', metrics: propMetrics }: RiskMetricsCardProps) { + const [activeModal, setActiveModal] = useState(null); + // Use provided metrics or fall back to mock data + const metrics = propMetrics || calculateRiskMetrics(); - const getColor = (metric: string, value: number) => { - switch (metric) { - case 'sharpe': - return value >= 1 ? 'text-green-600 dark:text-green-400' : value >= 0 ? 'text-amber-600 dark:text-amber-400' : 'text-red-600 dark:text-red-400'; - case 'drawdown': - return value <= 5 ? 'text-green-600 dark:text-green-400' : value <= 15 ? 'text-amber-600 dark:text-amber-400' : 'text-red-600 dark:text-red-400'; - case 'winloss': - return value >= 1.5 ? 'text-green-600 dark:text-green-400' : value >= 1 ? 'text-amber-600 dark:text-amber-400' : 'text-red-600 dark:text-red-400'; - case 'winrate': - return value >= 70 ? 'text-green-600 dark:text-green-400' : value >= 50 ? 
'text-amber-600 dark:text-amber-400' : 'text-red-600 dark:text-red-400'; - default: - return 'text-gray-700 dark:text-gray-300'; + // Color classes for metric values + const COLOR_GOOD = 'text-green-600 dark:text-green-400'; + const COLOR_NEUTRAL = 'text-amber-600 dark:text-amber-400'; + const COLOR_BAD = 'text-red-600 dark:text-red-400'; + + function getColor(metric: string, value: number): string { + // Thresholds for each metric: [good, neutral] - values below neutral are bad + const thresholds: Record = { + sharpe: { good: 1, neutral: 0 }, + drawdown: { good: 5, neutral: 15, inverted: true }, // Lower is better + winloss: { good: 1.5, neutral: 1 }, + winrate: { good: 70, neutral: 50 }, + }; + + const config = thresholds[metric]; + if (!config) return 'text-gray-700 dark:text-gray-300'; + + if (config.inverted) { + // For drawdown: lower is better + if (value <= config.good) return COLOR_GOOD; + if (value <= config.neutral) return COLOR_NEUTRAL; + return COLOR_BAD; } - }; + + // For other metrics: higher is better + if (value >= config.good) return COLOR_GOOD; + if (value >= config.neutral) return COLOR_NEUTRAL; + return COLOR_BAD; + } const cards = [ { @@ -64,38 +78,282 @@ export default function RiskMetricsCard({ className = '' }: RiskMetricsCardProps ]; return ( -
- {cards.map((card) => { - const Icon = card.icon; - return ( -
-
- - {card.value} + <> +
+ {cards.map((card) => { + const Icon = card.icon; + return ( +
+
+ + {card.value} +
+
+ {card.label} + setActiveModal(card.id as MetricModal)} /> +
-
- {card.label} - + ); + })} +
+ + {/* Sharpe Ratio Modal */} + setActiveModal(null)} + title="Sharpe Ratio" + icon={} + > +
+

+ The Sharpe Ratio measures risk-adjusted returns + by comparing the excess return of an investment to its standard deviation (volatility). +

+ + {/* Current Value Display */} +
+
Current Sharpe Ratio
+
{metrics.sharpeRatio.toFixed(2)}
+
+ + {/* Formula and Calculation */} +
+
+

Formula:

+
+ Sharpe Ratio = (R̄ − Rf) / σ +
+

+ Where R̄ = Mean Return, Rf = Risk-Free Rate, σ = Standard Deviation +

- {/* Tooltip */} - {showTooltip === card.id && ( -
- {tooltips[card.id]} -
+ {metrics.meanReturn !== undefined && ( +
+

Your Values:

+
+

• Mean Daily Return (R̄) = {metrics.meanReturn}%

+

• Risk-Free Rate (Rf) = {metrics.riskFreeRate}% (daily)

+

• Volatility (σ) = {metrics.volatility}%

+
+ +

Calculation:

+
+

= ({metrics.meanReturn} − {metrics.riskFreeRate}) / {metrics.volatility}

+

= {(metrics.meanReturn - (metrics.riskFreeRate || 0)).toFixed(2)} / {metrics.volatility}

+

= {metrics.sharpeRatio.toFixed(2)}

+
)}
- ); - })} -
+ +
+

Interpretation:

+
    +
  • > 1.0: Good risk-adjusted returns
  • +
  • > 2.0: Excellent performance
  • +
  • 0 - 1.0: Acceptable but not optimal
  • +
  • < 0: Returns below risk-free rate
  • +
+
+

+ Higher Sharpe Ratio indicates better compensation for the risk taken. +

+
+ + + {/* Max Drawdown Modal */} + setActiveModal(null)} + title="Maximum Drawdown" + icon={} + > +
+

+ Maximum Drawdown (MDD) measures the largest + peak-to-trough decline in portfolio value before a new peak is reached. +

+ + {/* Current Value Display */} +
+
Maximum Drawdown
+
{metrics.maxDrawdown.toFixed(1)}%
+
+ + {/* Formula and Calculation */} +
+
+

Formula:

+
+ MDD = (Vpeak − Vtrough) / Vpeak × 100% +
+

+ Where Vpeak = Peak Portfolio Value, Vtrough = Lowest Value after Peak +

+
+ + {metrics.peakValue !== undefined && metrics.troughValue !== undefined && ( +
+

Your Values:

+
+

• Peak Value (Vpeak) = ₹{metrics.peakValue.toFixed(2)} (normalized from ₹100)

+

• Trough Value (Vtrough) = ₹{metrics.troughValue.toFixed(2)}

+
+ +

Calculation:

+
+

= ({metrics.peakValue.toFixed(2)} − {metrics.troughValue.toFixed(2)}) / {metrics.peakValue.toFixed(2)} × 100

+

= {(metrics.peakValue - metrics.troughValue).toFixed(2)} / {metrics.peakValue.toFixed(2)} × 100

+

= {metrics.maxDrawdown.toFixed(1)}%

+
+
+ )} +
+ +
+

Interpretation:

+
    +
  • < 5%: Very low risk
  • +
  • 5% - 15%: Moderate risk
  • +
  • > 15%: Higher risk exposure
  • +
+
+

+ Lower drawdown indicates better capital preservation during market downturns. +

+
+
+ + {/* Win/Loss Ratio Modal */} + setActiveModal(null)} + title="Win/Loss Ratio" + icon={} + > +
+

+ The Win/Loss Ratio compares the average + profit from winning trades to the average loss from losing trades. +

+ + {/* Current Value Display */} +
+
Win/Loss Ratio
+
{metrics.winLossRatio.toFixed(2)}
+
+ + {/* Formula and Calculation */} +
+
+

Formula:

+
+ Win/Loss Ratio = R̄w / |R̄l| +
+

+ Where R̄w = Avg Winning Return, R̄l = Avg Losing Return (absolute value) +

+
+ + {metrics.winningTrades !== undefined && metrics.losingTrades !== undefined && ( +
+

Your Values:

+
+

• Winning Predictions = {metrics.winningTrades} days

+

• Losing Predictions = {metrics.losingTrades} days

+

• Avg Winning Return (R̄w) = +{metrics.avgWinReturn?.toFixed(2)}%

+

• Avg Losing Return (R̄l) = −{metrics.avgLossReturn?.toFixed(2)}%

+
+ +

Calculation:

+
+

= {metrics.avgWinReturn?.toFixed(2)} / {metrics.avgLossReturn?.toFixed(2)}

+

= {metrics.winLossRatio.toFixed(2)}

+
+
+ )} +
+ +
+

Interpretation:

+
    +
  • > 1.5: Strong profit potential
  • +
  • 1.0 - 1.5: Balanced trades
  • +
  • < 1.0: Losses exceed wins on average
  • +
+
+

+ A ratio above 1.0 means your winning trades are larger than your losing ones on average. +

+
+
+ + {/* Win Rate Modal */} + setActiveModal(null)} + title="Win Rate" + icon={} + > +
+

+ Win Rate is the percentage of predictions + that were correct (BUY/HOLD with positive return, or SELL with negative return). +

+ + {/* Current Value Display */} +
+
Win Rate
+
{metrics.winRate}%
+
+ + {/* Formula and Calculation */} +
+
+

Formula:

+
+ Win Rate = (Ncorrect / Ntotal) × 100% +
+

+ Where Ncorrect = Correct Predictions, Ntotal = Total Predictions +

+
+ + {metrics.winningTrades !== undefined && ( +
+

Your Values:

+
+

• Correct Predictions (Ncorrect) = {metrics.winningTrades}

+

• Total Predictions (Ntotal) = {metrics.totalTrades}

+
+ +

Calculation:

+
+

= ({metrics.winningTrades} / {metrics.totalTrades}) × 100

+

= {(metrics.winningTrades / metrics.totalTrades).toFixed(4)} × 100

+

= {metrics.winRate}%

+
+
+ )} +
+ +
+

Interpretation:

+
    +
  • > 70%: Excellent accuracy
  • +
  • 50% - 70%: Above average
  • +
  • < 50%: Below random chance
  • +
+
+

+ Note: Win rate alone doesn't determine profitability. A 40% win rate can still be profitable with a high Win/Loss ratio. +

+
+
+ ); } diff --git a/frontend/src/components/SettingsModal.tsx b/frontend/src/components/SettingsModal.tsx index 6e743bc3..93cf32da 100644 --- a/frontend/src/components/SettingsModal.tsx +++ b/frontend/src/components/SettingsModal.tsx @@ -272,6 +272,29 @@ export default function SettingsModal() { 5 (More thorough)
+ + {/* Parallel Workers */} +
+ + updateSettings({ parallelWorkers: parseInt(e.target.value) })} + className="w-full h-2 bg-gray-200 dark:bg-slate-700 rounded-lg appearance-none cursor-pointer accent-nifty-600" + /> +
+ 1 (Conservative) + 5 (Aggressive) +
+

+ Number of stocks to analyze simultaneously during Analyze All +

+
diff --git a/frontend/src/components/StockCard.tsx b/frontend/src/components/StockCard.tsx index bb90ec47..b02ae4e2 100644 --- a/frontend/src/components/StockCard.tsx +++ b/frontend/src/components/StockCard.tsx @@ -1,5 +1,5 @@ import { Link } from 'react-router-dom'; -import { TrendingUp, TrendingDown, Minus, ChevronRight } from 'lucide-react'; +import { TrendingUp, TrendingDown, Minus, ChevronRight, Clock } from 'lucide-react'; import type { StockAnalysis, Decision } from '../types'; interface StockCardProps { @@ -29,7 +29,9 @@ export function DecisionBadge({ decision, size = 'default' }: { decision: Decisi }, }; - const { bg, text, icon: Icon } = config[decision]; + const entry = config[decision]; + if (!entry) return null; + const { bg, text, icon: Icon } = entry; const sizeClasses = size === 'small' ? 'px-2 py-0.5 text-xs gap-1' : 'px-2.5 py-0.5 text-xs gap-1'; @@ -75,6 +77,19 @@ export function RiskBadge({ risk }: { risk?: string }) { ); } +export function HoldDaysBadge({ holdDays, decision }: { holdDays?: number | null; decision?: Decision | null }) { + if (!holdDays || decision === 'SELL') return null; + + const label = holdDays === 1 ? '1 day' : `${holdDays}d`; + + return ( + + + Hold {label} + + ); +} + export default function StockCard({ stock, showDetails = true, compact = false }: StockCardProps) { if (compact) { return ( @@ -116,6 +131,7 @@ export default function StockCard({ stock, showDetails = true, compact = false }
+
)}
diff --git a/frontend/src/components/TerminalModal.tsx b/frontend/src/components/TerminalModal.tsx new file mode 100644 index 00000000..cb780c29 --- /dev/null +++ b/frontend/src/components/TerminalModal.tsx @@ -0,0 +1,412 @@ +import { useState, useEffect, useRef, useCallback } from 'react'; +import { X, Terminal, Trash2, Download, Pause, Play, ChevronDown, Plus, Minus } from 'lucide-react'; + +interface LogEntry { + timestamp: string; + type: 'info' | 'success' | 'error' | 'warning' | 'llm' | 'agent' | 'data'; + source: string; + message: string; +} + +interface TerminalModalProps { + isOpen: boolean; + onClose: () => void; + isAnalyzing: boolean; +} + +export default function TerminalModal({ isOpen, onClose, isAnalyzing }: TerminalModalProps) { + const [logs, setLogs] = useState([]); + const [isPaused, setIsPaused] = useState(false); + const [autoScroll, setAutoScroll] = useState(true); + const [filter, setFilter] = useState('all'); + const [connectionStatus, setConnectionStatus] = useState<'connecting' | 'connected' | 'error'>('connecting'); + const [fontSize, setFontSize] = useState(12); // Font size in px + const terminalRef = useRef(null); + const eventSourceRef = useRef(null); + const isPausedRef = useRef(isPaused); + const firstLogTimeRef = useRef(null); + + // Keep isPausedRef in sync with isPaused state + useEffect(() => { + isPausedRef.current = isPaused; + }, [isPaused]); + + // Connect to SSE stream when modal opens + useEffect(() => { + if (!isOpen) return; + + setConnectionStatus('connecting'); + + // Connect to the backend SSE endpoint + const connectToStream = () => { + if (eventSourceRef.current) { + eventSourceRef.current.close(); + } + + // Use the same hostname as the current page, but with the backend port + const backendHost = window.location.hostname; + const sseUrl = `http://${backendHost}:8001/stream/logs`; + console.log('[Terminal] Connecting to SSE stream at:', sseUrl); + const eventSource = new EventSource(sseUrl); + 
eventSourceRef.current = eventSource; + + eventSource.onopen = () => { + console.log('[Terminal] SSE connection opened'); + setConnectionStatus('connected'); + }; + + eventSource.onmessage = (event) => { + if (isPausedRef.current) return; + + try { + const data = JSON.parse(event.data); + + if (data.type === 'heartbeat') return; // Ignore heartbeats + + // Skip the initial "Connected to log stream" message - it's not a real log + if (data.message === 'Connected to log stream') return; + + const logEntry: LogEntry = { + timestamp: data.timestamp || new Date().toISOString(), + type: data.type || 'info', + source: data.source || 'system', + message: data.message || '' + }; + + // Update the earliest timestamp reference for elapsed time + const logTime = new Date(logEntry.timestamp).getTime(); + if (firstLogTimeRef.current === null || logTime < firstLogTimeRef.current) { + firstLogTimeRef.current = logTime; + } + + setLogs(prev => [...prev.slice(-500), logEntry]); // Keep last 500 logs + } catch (e) { + // Handle non-JSON messages + console.log('[Terminal] Non-JSON message:', event.data); + setLogs(prev => [...prev.slice(-500), { + timestamp: new Date().toISOString(), + type: 'info', + source: 'stream', + message: event.data + }]); + } + }; + + eventSource.onerror = (err) => { + console.error('[Terminal] SSE connection error:', err); + setConnectionStatus('error'); + // Reconnect after a delay + setTimeout(() => { + if (isOpen && eventSourceRef.current === eventSource) { + console.log('[Terminal] Attempting to reconnect...'); + connectToStream(); + } + }, 3000); + }; + }; + + connectToStream(); + + return () => { + console.log('[Terminal] Closing SSE connection'); + if (eventSourceRef.current) { + eventSourceRef.current.close(); + eventSourceRef.current = null; + } + }; + }, [isOpen]); + + // Auto-scroll to bottom when new logs arrive + useEffect(() => { + if (autoScroll && terminalRef.current) { + terminalRef.current.scrollTop = terminalRef.current.scrollHeight; + } + 
}, [logs, autoScroll]); + + // Handle scroll to detect manual scrolling + const handleScroll = useCallback(() => { + if (!terminalRef.current) return; + const { scrollTop, scrollHeight, clientHeight } = terminalRef.current; + const isAtBottom = scrollHeight - scrollTop - clientHeight < 50; + setAutoScroll(isAtBottom); + }, []); + + const clearLogs = () => { + setLogs([]); + firstLogTimeRef.current = null; + }; + + const downloadLogs = () => { + const content = logs.map(log => { + const d = new Date(log.timestamp); + const dateStr = formatDate(d); + const timeStr = formatTime(d); + return `[${dateStr} ${timeStr}] [${log.type.toUpperCase()}] [${log.source}] ${log.message}`; + }).join('\n'); + + const blob = new Blob([content], { type: 'text/plain' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `analysis-logs-${new Date().toISOString().split('T')[0]}.txt`; + a.click(); + URL.revokeObjectURL(url); + }; + + const scrollToBottom = () => { + if (terminalRef.current) { + terminalRef.current.scrollTop = terminalRef.current.scrollHeight; + setAutoScroll(true); + } + }; + + // Format date as DD/MM/YYYY + const formatDate = (d: Date) => { + const day = d.getDate().toString().padStart(2, '0'); + const month = (d.getMonth() + 1).toString().padStart(2, '0'); + const year = d.getFullYear(); + return `${day}/${month}/${year}`; + }; + + // Format time as HH:MM:SS + const formatTime = (d: Date) => { + return d.toLocaleTimeString('en-US', { hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit' }); + }; + + // Calculate elapsed time from first log + const getElapsed = (timestamp: string) => { + if (!firstLogTimeRef.current) return ''; + const logTime = new Date(timestamp).getTime(); + const elapsed = Math.max(0, (logTime - firstLogTimeRef.current) / 1000); + if (elapsed < 60) return `+${elapsed.toFixed(0)}s`; + const mins = Math.floor(elapsed / 60); + const secs = Math.floor(elapsed % 60); + return 
`+${mins}m${secs.toString().padStart(2, '0')}s`; + }; + + const getTypeColor = (type: string) => { + switch (type) { + case 'success': return 'text-green-400'; + case 'error': return 'text-red-400'; + case 'warning': return 'text-yellow-400'; + case 'llm': return 'text-purple-400'; + case 'agent': return 'text-cyan-400'; + case 'data': return 'text-blue-400'; + default: return 'text-gray-300'; + } + }; + + const getSourceBadge = (source: string) => { + const colors: Record = { + 'bull_researcher': 'bg-green-900/50 text-green-400 border-green-700', + 'bear_researcher': 'bg-red-900/50 text-red-400 border-red-700', + 'market_analyst': 'bg-blue-900/50 text-blue-400 border-blue-700', + 'news_analyst': 'bg-teal-900/50 text-teal-400 border-teal-700', + 'social_analyst': 'bg-pink-900/50 text-pink-400 border-pink-700', + 'fundamentals': 'bg-emerald-900/50 text-emerald-400 border-emerald-700', + 'risk_manager': 'bg-amber-900/50 text-amber-400 border-amber-700', + 'research_mgr': 'bg-violet-900/50 text-violet-400 border-violet-700', + 'trader': 'bg-purple-900/50 text-purple-400 border-purple-700', + 'aggressive': 'bg-orange-900/50 text-orange-400 border-orange-700', + 'conservative': 'bg-sky-900/50 text-sky-400 border-sky-700', + 'neutral': 'bg-gray-700/50 text-gray-300 border-gray-500', + 'debate': 'bg-cyan-900/50 text-cyan-400 border-cyan-700', + 'data_fetch': 'bg-indigo-900/50 text-indigo-400 border-indigo-700', + 'system': 'bg-gray-800/50 text-gray-400 border-gray-600', + }; + + return colors[source] || 'bg-gray-800/50 text-gray-400 border-gray-600'; + }; + + const filteredLogs = filter === 'all' + ? logs + : logs.filter(log => log.type === filter || log.source === filter); + + if (!isOpen) return null; + + return ( +
+ {/* Backdrop */} +
+ + {/* Modal */} +
+ {/* Header */} +
+ {/* Title row */} +
+
+ +

Terminal

+
+ {isAnalyzing && ( + + + LIVE + + )} + {/* Close button - visible on mobile in title row */} + +
+ + {/* Controls row */} +
+ {/* Filter dropdown */} + + + {/* Font size controls */} +
+ + {fontSize} + +
+ + {/* Pause/Resume */} + + + {/* Download */} + + + {/* Clear */} + + + {/* Close - hidden on mobile, shown on desktop */} + +
+
+ + {/* Terminal Content */} +
+ {filteredLogs.length === 0 ? ( +
+ +

+ {connectionStatus === 'connecting' && 'Connecting to log stream...'} + {connectionStatus === 'error' && 'Connection error. Retrying...'} + {connectionStatus === 'connected' && (isAnalyzing + ? 'Waiting for analysis logs...' + : 'Start an analysis to see live updates here')} +

+

+ {connectionStatus === 'connected' + ? 'Logs will appear in real-time as the AI analyzes stocks' + : 'Establishing connection to backend...'} +

+
+ ) : ( +
+ {filteredLogs.map((log, index) => { + const d = new Date(log.timestamp); + const dateStr = formatDate(d); + const timeStr = formatTime(d); + const elapsed = getElapsed(log.timestamp); + return ( +
+ {/* Date + Time */} + + {dateStr} {timeStr} + + {/* Elapsed time */} + + {elapsed} + + {/* Source badge */} + + {log.source.length > 14 ? log.source.slice(0, 12) + '..' : log.source} + + {/* Message */} + + {log.message} + +
+ ); + })} +
+ )} +
+ + {/* Footer with scroll indicator */} + {!autoScroll && ( + + )} + + {/* Status Bar */} +
+ {filteredLogs.length} logs | Font: {fontSize}px + + {isPaused ? 'PAUSED' : autoScroll ? 'AUTO' : 'MANUAL'} + +
+
+
+ ); +} diff --git a/frontend/src/components/Toast.tsx b/frontend/src/components/Toast.tsx new file mode 100644 index 00000000..2c4f9dd9 --- /dev/null +++ b/frontend/src/components/Toast.tsx @@ -0,0 +1,85 @@ +import { X, CheckCircle, AlertCircle, AlertTriangle, Info } from 'lucide-react'; +import { useNotification } from '../contexts/NotificationContext'; +import type { NotificationType } from '../contexts/NotificationContext'; + +const iconMap: Record = { + success: CheckCircle, + error: AlertCircle, + warning: AlertTriangle, + info: Info, +}; + +const colorMap: Record = { + success: { + bg: 'bg-green-50 dark:bg-green-900/30', + border: 'border-green-200 dark:border-green-800', + icon: 'text-green-500 dark:text-green-400', + title: 'text-green-800 dark:text-green-200', + }, + error: { + bg: 'bg-red-50 dark:bg-red-900/30', + border: 'border-red-200 dark:border-red-800', + icon: 'text-red-500 dark:text-red-400', + title: 'text-red-800 dark:text-red-200', + }, + warning: { + bg: 'bg-amber-50 dark:bg-amber-900/30', + border: 'border-amber-200 dark:border-amber-800', + icon: 'text-amber-500 dark:text-amber-400', + title: 'text-amber-800 dark:text-amber-200', + }, + info: { + bg: 'bg-blue-50 dark:bg-blue-900/30', + border: 'border-blue-200 dark:border-blue-800', + icon: 'text-blue-500 dark:text-blue-400', + title: 'text-blue-800 dark:text-blue-200', + }, +}; + +export default function ToastContainer() { + const { notifications, removeNotification } = useNotification(); + + if (notifications.length === 0) return null; + + return ( +
+ {notifications.map(notification => { + const Icon = iconMap[notification.type]; + const colors = colorMap[notification.type]; + + return ( +
+ +
+

+ {notification.title} +

+ {notification.message && ( +

+ {notification.message} +

+ )} +
+ +
+ ); + })} +
+ ); +} diff --git a/frontend/src/components/pipeline/DataSourcesPanel.tsx b/frontend/src/components/pipeline/DataSourcesPanel.tsx index 37985d6c..29470d73 100644 --- a/frontend/src/components/pipeline/DataSourcesPanel.tsx +++ b/frontend/src/components/pipeline/DataSourcesPanel.tsx @@ -1,7 +1,7 @@ import { useState } from 'react'; import { Database, ChevronDown, ChevronUp, CheckCircle, - XCircle, Clock, Server + XCircle, Clock, Server, Copy, Check, Maximize2, Minimize2 } from 'lucide-react'; import type { DataSourceLog } from '../../types/pipeline'; @@ -19,6 +19,83 @@ const SOURCE_TYPE_COLORS: Record = { default: { bg: 'bg-slate-100 dark:bg-slate-800', text: 'text-slate-700 dark:text-slate-300' } }; +// Raw data viewer with copy, expand/collapse, and formatted display +function RawDataViewer({ data, error }: { data: unknown; error?: string | null }) { + const [isFullHeight, setIsFullHeight] = useState(false); + const [copied, setCopied] = useState(false); + + if (error) { + return ( +
+
+

+ Error: {error} +

+
+
+ ); + } + + if (!data) { + return ( +
+

No data details available

+
+ ); + } + + const rawText = typeof data === 'string' ? data : JSON.stringify(data, null, 2); + const dataSize = rawText.length; + + const handleCopy = () => { + navigator.clipboard.writeText(rawText); + setCopied(true); + setTimeout(() => setCopied(false), 2000); + }; + + return ( +
+ {/* Toolbar */} +
+
+ + Raw Data + + + {dataSize > 1000 ? `${(dataSize / 1000).toFixed(1)}KB` : `${dataSize} chars`} + +
+
+ + +
+
+ + {/* Raw data content */} +
+
+          {rawText}
+        
+
+
+ ); +} + + export function DataSourcesPanel({ dataSources, isLoading }: DataSourcesPanelProps) { const [isExpanded, setIsExpanded] = useState(false); const [expandedSources, setExpandedSources] = useState>(new Set()); @@ -124,6 +201,18 @@ export function DataSourcesPanel({ dataSources, isLoading }: DataSourcesPanelPro {source.source_name}
+ {source.method && ( +
+ + {source.method}() + + {source.args && ( + + {source.args} + + )} +
+ )}
{formatTimestamp(source.fetch_timestamp)} @@ -145,32 +234,12 @@ export function DataSourcesPanel({ dataSources, isLoading }: DataSourcesPanelPro
- {/* Source details (expanded) */} + {/* Source details (expanded) — full raw data viewer */} {isSourceExpanded && ( -
- {source.error_message ? ( -
-

- Error: {source.error_message} -

-
- ) : source.data_fetched ? ( -
-

- Data Summary: -

-
-                            {typeof source.data_fetched === 'string'
-                              ? source.data_fetched.slice(0, 500) + (source.data_fetched.length > 500 ? '...' : '')
-                              : JSON.stringify(source.data_fetched, null, 2).slice(0, 500)}
-                          
-
- ) : ( -

- No data details available -

- )} -
+ )}
); diff --git a/frontend/src/components/pipeline/FlowchartConnector.tsx b/frontend/src/components/pipeline/FlowchartConnector.tsx new file mode 100644 index 00000000..193d0a9f --- /dev/null +++ b/frontend/src/components/pipeline/FlowchartConnector.tsx @@ -0,0 +1,28 @@ +interface FlowchartConnectorProps { + completed: boolean; + isPhase?: boolean; +} + +export function FlowchartConnector({ completed, isPhase = false }: FlowchartConnectorProps) { + const height = isPhase ? 32 : 20; + const color = completed ? '#22c55e' : '#475569'; + + return ( +
+ + + + +
+ ); +} diff --git a/frontend/src/components/pipeline/FlowchartNode.tsx b/frontend/src/components/pipeline/FlowchartNode.tsx new file mode 100644 index 00000000..17e64e6c --- /dev/null +++ b/frontend/src/components/pipeline/FlowchartNode.tsx @@ -0,0 +1,178 @@ +import { + TrendingUp, TrendingDown, Users, Newspaper, FileText, + Scale, Target, Zap, Shield, ShieldCheck, + Clock, Loader2, CheckCircle, AlertCircle, ChevronDown, ChevronUp +} from 'lucide-react'; +import { useState } from 'react'; +import type { FlowchartNodeData, PipelineStepStatus } from '../../types/pipeline'; + +interface FlowchartNodeProps { + node: FlowchartNodeData; + isSelected: boolean; + onClick: () => void; +} + +const ICON_MAP: Record = { + TrendingUp, TrendingDown, Users, Newspaper, FileText, + Scale, Target, Zap, Shield, ShieldCheck, +}; + +const STATUS_CONFIG: Record = { + pending: { + bg: 'bg-slate-50 dark:bg-slate-800/60', + border: 'border-slate-200 dark:border-slate-700', + text: 'text-slate-400 dark:text-slate-500', + badge: 'bg-slate-100 dark:bg-slate-700', + badgeText: 'text-slate-500 dark:text-slate-400', + StatusIcon: Clock, + statusLabel: 'Pending', + }, + running: { + bg: 'bg-blue-50/80 dark:bg-blue-900/20', + border: 'border-blue-300 dark:border-blue-600', + text: 'text-blue-600 dark:text-blue-400', + badge: 'bg-blue-100 dark:bg-blue-900/40', + badgeText: 'text-blue-600 dark:text-blue-400', + StatusIcon: Loader2, + statusLabel: 'Running', + }, + completed: { + bg: 'bg-green-50/60 dark:bg-green-900/15', + border: 'border-green-300 dark:border-green-700', + text: 'text-green-600 dark:text-green-400', + badge: 'bg-green-100 dark:bg-green-900/30', + badgeText: 'text-green-600 dark:text-green-400', + StatusIcon: CheckCircle, + statusLabel: 'Completed', + }, + error: { + bg: 'bg-red-50/60 dark:bg-red-900/15', + border: 'border-red-300 dark:border-red-700', + text: 'text-red-600 dark:text-red-400', + badge: 'bg-red-100 dark:bg-red-900/30', + badgeText: 'text-red-600 dark:text-red-400', 
+ StatusIcon: AlertCircle, + statusLabel: 'Error', + }, +}; + +const NODE_COLORS: Record = { + blue: { iconBg: 'bg-blue-100 dark:bg-blue-900/40', iconText: 'text-blue-600 dark:text-blue-400' }, + pink: { iconBg: 'bg-pink-100 dark:bg-pink-900/40', iconText: 'text-pink-600 dark:text-pink-400' }, + purple: { iconBg: 'bg-purple-100 dark:bg-purple-900/40', iconText: 'text-purple-600 dark:text-purple-400' }, + emerald: { iconBg: 'bg-emerald-100 dark:bg-emerald-900/40', iconText: 'text-emerald-600 dark:text-emerald-400' }, + green: { iconBg: 'bg-green-100 dark:bg-green-900/40', iconText: 'text-green-600 dark:text-green-400' }, + red: { iconBg: 'bg-red-100 dark:bg-red-900/40', iconText: 'text-red-600 dark:text-red-400' }, + violet: { iconBg: 'bg-violet-100 dark:bg-violet-900/40', iconText: 'text-violet-600 dark:text-violet-400' }, + amber: { iconBg: 'bg-amber-100 dark:bg-amber-900/40', iconText: 'text-amber-600 dark:text-amber-400' }, + orange: { iconBg: 'bg-orange-100 dark:bg-orange-900/40', iconText: 'text-orange-600 dark:text-orange-400' }, + sky: { iconBg: 'bg-sky-100 dark:bg-sky-900/40', iconText: 'text-sky-600 dark:text-sky-400' }, + slate: { iconBg: 'bg-slate-200 dark:bg-slate-700', iconText: 'text-slate-600 dark:text-slate-400' }, + indigo: { iconBg: 'bg-indigo-100 dark:bg-indigo-900/40', iconText: 'text-indigo-600 dark:text-indigo-400' }, +}; + +function formatDuration(ms: number): string { + if (ms < 1000) return `${ms}ms`; + const secs = ms / 1000; + if (secs < 60) return `${secs.toFixed(1)}s`; + const mins = Math.floor(secs / 60); + const remSecs = Math.floor(secs % 60); + return `${mins}m ${remSecs}s`; +} + +function formatTimestamp(iso: string): string { + const d = new Date(iso); + const day = d.getDate().toString().padStart(2, '0'); + const month = (d.getMonth() + 1).toString().padStart(2, '0'); + const year = d.getFullYear(); + const time = d.toLocaleTimeString('en-US', { hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit' }); + return 
`${day}/${month}/${year} ${time}`; +} + +export function FlowchartNode({ node, isSelected, onClick }: FlowchartNodeProps) { + const [isExpanded, setIsExpanded] = useState(false); + const status = STATUS_CONFIG[node.status]; + const colors = NODE_COLORS[node.color] || NODE_COLORS.blue; + const Icon = ICON_MAP[node.icon] || Target; + + const hasPreview = !!(node.output_summary || node.agentReport?.report_content || node.debateContent); + const previewText = node.output_summary || node.agentReport?.report_content || node.debateContent || ''; + + return ( +
+
{ if (e.key === 'Enter' || e.key === ' ') { e.preventDefault(); onClick(); } }} + className={` + w-full flex items-center gap-2 sm:gap-3 p-2.5 sm:p-3 rounded-xl border-2 transition-all text-left cursor-pointer + ${status.bg} ${status.border} + ${isSelected ? 'ring-2 ring-nifty-500 ring-offset-1 dark:ring-offset-slate-900 shadow-lg' : 'hover:shadow-md'} + ${node.status === 'running' ? 'animate-pulse' : ''} + `} + > + {/* Icon */} +
+ +
+ + {/* Name & Step # */} +
+
+ + {node.label} + + + #{node.number} + +
+ {/* Timestamp */} + {node.completed_at && ( +
+ {formatTimestamp(node.completed_at)} +
+ )} + {node.status === 'running' && node.started_at && ( +
+ Started {formatTimestamp(node.started_at)} +
+ )} +
+ + {/* Status + Duration */} +
+ {node.duration_ms != null && node.status === 'completed' && ( + + {formatDuration(node.duration_ms)} + + )} +
+ + {status.statusLabel} +
+
+ + {/* Expand toggle */} + {hasPreview && node.status === 'completed' && ( + + )} +
+ + {/* Inline preview */} + {isExpanded && hasPreview && ( +
+ {previewText.slice(0, 500)}{previewText.length > 500 ? '...' : ''} +
+ )} +
+ ); +} diff --git a/frontend/src/components/pipeline/FlowchartPhaseGroup.tsx b/frontend/src/components/pipeline/FlowchartPhaseGroup.tsx new file mode 100644 index 00000000..e4691d6b --- /dev/null +++ b/frontend/src/components/pipeline/FlowchartPhaseGroup.tsx @@ -0,0 +1,49 @@ +import { CheckCircle, Loader2, Clock } from 'lucide-react'; +import type { FlowchartPhase } from '../../types/pipeline'; +import { PHASE_META } from '../../types/pipeline'; + +interface FlowchartPhaseGroupProps { + phase: FlowchartPhase; + totalSteps: number; + completedSteps: number; + isActive: boolean; + children: React.ReactNode; +} + +export function FlowchartPhaseGroup({ phase, totalSteps, completedSteps, isActive, children }: FlowchartPhaseGroupProps) { + const meta = PHASE_META[phase]; + const isComplete = completedSteps === totalSteps && totalSteps > 0; + + return ( +
+ {/* Phase header */} +
+
+ + Phase {meta.number} + + + {meta.label} + +
+
+ + {completedSteps}/{totalSteps} + + {isComplete ? ( + + ) : isActive ? ( + + ) : ( + + )} +
+
+ + {/* Phase nodes */} +
+ {children} +
+
+ ); +} diff --git a/frontend/src/components/pipeline/NodeDetailDrawer.tsx b/frontend/src/components/pipeline/NodeDetailDrawer.tsx new file mode 100644 index 00000000..61c9d211 --- /dev/null +++ b/frontend/src/components/pipeline/NodeDetailDrawer.tsx @@ -0,0 +1,262 @@ +import { useState } from 'react'; +import { X, Clock, Timer, CheckCircle, AlertCircle, FileText, MessageSquare, ChevronDown, ChevronRight, Terminal, Bot, User, Wrench } from 'lucide-react'; +import type { FlowchartNodeData, FullPipelineData, StepDetails } from '../../types/pipeline'; + +interface NodeDetailDrawerProps { + node: FlowchartNodeData; + pipelineData: FullPipelineData | null; + onClose: () => void; +} + +function formatTimestamp(iso: string): string { + const d = new Date(iso); + const day = d.getDate().toString().padStart(2, '0'); + const month = (d.getMonth() + 1).toString().padStart(2, '0'); + const year = d.getFullYear(); + const time = d.toLocaleTimeString('en-US', { hour12: false, hour: '2-digit', minute: '2-digit', second: '2-digit' }); + return `${day}/${month}/${year} ${time}`; +} + +function formatDuration(ms: number): string { + if (ms < 1000) return `${ms}ms`; + const secs = ms / 1000; + if (secs < 60) return `${secs.toFixed(1)}s`; + const mins = Math.floor(secs / 60); + const remSecs = Math.floor(secs % 60); + return `${mins}m ${remSecs}s`; +} + +function getFallbackContent(node: FlowchartNodeData, data: FullPipelineData | null): string { + if (node.agentReport?.report_content) return node.agentReport.report_content; + if (node.debateContent) return node.debateContent; + if (node.output_summary) return node.output_summary; + if (!data) return ''; + + if (node.debateType === 'investment' && data.debates?.investment) { + const d = data.debates.investment; + if (node.debateRole === 'bull') return d.bull_arguments || ''; + if (node.debateRole === 'bear') return d.bear_arguments || ''; + if (node.debateRole === 'judge') return d.judge_decision || ''; + } + if (node.debateType === 
'risk' && data.debates?.risk) { + const d = data.debates.risk; + if (node.debateRole === 'risky') return d.risky_arguments || ''; + if (node.debateRole === 'safe') return d.safe_arguments || ''; + if (node.debateRole === 'neutral') return d.neutral_arguments || ''; + if (node.debateRole === 'judge') return d.judge_decision || ''; + } + return ''; +} + +/** Collapsible section component */ +function Section({ title, icon: Icon, iconColor, defaultOpen, children, badge }: { + title: string; + icon: React.ElementType; + iconColor: string; + defaultOpen?: boolean; + children: React.ReactNode; + badge?: string; +}) { + const [open, setOpen] = useState(defaultOpen ?? false); + + return ( +
+ + {open && ( +
+ {children} +
+ )} +
+ ); +} + +/** Code block with monospace text */ +function CodeBlock({ content, maxHeight = 'max-h-64' }: { content: string; maxHeight?: string }) { + return ( +
+
+        {content}
+      
+
+ ); +} + +export function NodeDetailDrawer({ node, pipelineData, onClose }: NodeDetailDrawerProps) { + const details: StepDetails | undefined = node.step_details; + const fallbackContent = getFallbackContent(node, pipelineData); + + // Determine if we have structured data or just fallback + const hasStructuredData = details && (details.system_prompt || details.user_prompt || details.response); + + return ( +
+ {/* Header */} +
+
+ + + {node.label} + + #{node.number} + {node.status === 'completed' && ( + + completed + + )} +
+ +
+ + {/* Timing info bar */} +
+ {node.started_at && ( +
+ + Started: {formatTimestamp(node.started_at)} +
+ )} + {node.completed_at && ( +
+ + Completed: {formatTimestamp(node.completed_at)} +
+ )} + {node.duration_ms != null && ( +
+ + {formatDuration(node.duration_ms)} +
+ )} + {node.status === 'error' && ( +
+ + Failed +
+ )} +
+ + {/* Content sections */} +
+ {hasStructuredData ? ( + <> + {/* System Prompt */} + {details.system_prompt && ( +
+ +
+ )} + + {/* User Prompt / Input */} + {details.user_prompt && ( +
+ +
+ )} + + {/* Tool Calls */} + {details.tool_calls && details.tool_calls.length > 0 && ( +
+
+ {details.tool_calls.map((tc, i) => ( +
+
+ + {tc.name}() + + {tc.args && ( + + {tc.args} + + )} +
+ {tc.result_preview && ( +
+ {tc.result_preview} +
+ )} +
+ ))} +
+
+ )} + + {/* LLM Response */} + {details.response && ( +
+ +
+ )} + + ) : fallbackContent ? ( + /* Fallback: show the old-style content */ + <> +
+ +
+ + ) : node.status === 'pending' ? ( +
+ +

This step hasn't run yet

+

Run an analysis to see results here

+
+ ) : node.status === 'running' ? ( +
+
+

Processing...

+
+ ) : ( +
+ +

No output data available

+
+ )} +
+
+ ); +} diff --git a/frontend/src/components/pipeline/PipelineFlowchart.tsx b/frontend/src/components/pipeline/PipelineFlowchart.tsx new file mode 100644 index 00000000..e5da8679 --- /dev/null +++ b/frontend/src/components/pipeline/PipelineFlowchart.tsx @@ -0,0 +1,153 @@ +import { useState, useMemo } from 'react'; +import { Layers, Loader2 } from 'lucide-react'; +import type { FullPipelineData, FlowchartPhase, FlowchartNodeData } from '../../types/pipeline'; +import { mapPipelineToFlowchart } from '../../types/pipeline'; +import { FlowchartNode } from './FlowchartNode'; +import { FlowchartConnector } from './FlowchartConnector'; +import { FlowchartPhaseGroup } from './FlowchartPhaseGroup'; +import { NodeDetailDrawer } from './NodeDetailDrawer'; + +interface PipelineFlowchartProps { + pipelineData: FullPipelineData | null; + isAnalyzing: boolean; + isLoading: boolean; +} + +// Group flowchart steps by phase +const PHASE_ORDER: FlowchartPhase[] = ['data_analysis', 'investment_debate', 'trading', 'risk_debate']; + +function groupByPhase(nodes: FlowchartNodeData[]): Record { + const groups: Record = { + data_analysis: [], + investment_debate: [], + trading: [], + risk_debate: [], + }; + for (const node of nodes) { + groups[node.phase].push(node); + } + return groups; +} + +export function PipelineFlowchart({ pipelineData, isAnalyzing, isLoading }: PipelineFlowchartProps) { + const [selectedNodeId, setSelectedNodeId] = useState(null); + + const nodes = useMemo(() => mapPipelineToFlowchart(pipelineData), [pipelineData]); + const groups = useMemo(() => groupByPhase(nodes), [nodes]); + + const completedCount = nodes.filter(n => n.status === 'completed').length; + const totalSteps = nodes.length; + const progress = totalSteps > 0 ? Math.round((completedCount / totalSteps) * 100) : 0; + + const selectedNode = selectedNodeId ? nodes.find(n => n.id === selectedNodeId) : null; + + const handleNodeClick = (nodeId: string) => { + setSelectedNodeId(prev => prev === nodeId ? 
null : nodeId); + }; + + if (isLoading) { + return ( +
+ +

Loading pipeline data...

+
+ ); + } + + return ( +
+ {/* Header with progress */} +
+
+
+ +

+ Analysis Pipeline +

+ {isAnalyzing && ( + + + LIVE + + )} +
+ + {completedCount}/{totalSteps} steps + +
+ + {/* Progress bar */} +
+
+
+
+ + {progress}% + +
+
+ + {/* Flowchart */} +
+ {PHASE_ORDER.map((phase, phaseIndex) => { + const phaseNodes = groups[phase]; + const phaseCompleted = phaseNodes.filter(n => n.status === 'completed').length; + const phaseActive = phaseNodes.some(n => n.status === 'running'); + + return ( +
+ {/* Phase connector (between phases) */} + {phaseIndex > 0 && ( + n.status === 'completed')} + isPhase + /> + )} + + + {phaseNodes.map((node, nodeIndex) => ( +
+ {/* Node connector (within phase) */} + {nodeIndex > 0 && ( + + )} + handleNodeClick(node.id)} + /> +
+ ))} +
+
+ ); + })} +
+ + {/* Detail drawer */} + {selectedNode && ( + setSelectedNodeId(null)} + /> + )} +
+ ); +} diff --git a/frontend/src/components/pipeline/PipelineOverview.tsx b/frontend/src/components/pipeline/PipelineOverview.tsx index 791b27c0..73c2c859 100644 --- a/frontend/src/components/pipeline/PipelineOverview.tsx +++ b/frontend/src/components/pipeline/PipelineOverview.tsx @@ -101,26 +101,44 @@ export function PipelineOverview({ steps, onStepClick, compact = false }: Pipeli } return ( -
- {/* Progress bar */} -
-
-
+
+ {/* Compact Progress Header */} +
+
+
+ {displaySteps.map((step) => ( +
+ ))} +
+ + {completedCount}/{totalSteps} steps + +
+
+
+
+
+ {progress}%
- - {completedCount}/{totalSteps} -
- {/* Pipeline steps */} -
+ {/* Compact Pipeline Steps Grid */} +
{displaySteps.map((step) => { const StepIcon = STEP_ICONS[step.step_name] || Database; const styles = STATUS_STYLES[step.status]; - const StatusIcon = styles.icon; const label = STEP_LABELS[step.step_name] || step.step_name; return ( @@ -128,24 +146,22 @@ export function PipelineOverview({ steps, onStepClick, compact = false }: Pipeli key={step.step_number} onClick={() => onStepClick?.(step)} className={` - flex items-center gap-2 px-3 py-2 rounded-lg border-2 transition-all + relative flex flex-col items-center gap-1 p-2 rounded-lg border transition-all ${styles.bg} ${styles.border} ${styles.text} - hover:scale-105 hover:shadow-md + hover:shadow-sm ${onStepClick ? 'cursor-pointer' : 'cursor-default'} `} + title={`${label}: ${step.status}${step.duration_ms ? ` (${(step.duration_ms / 1000).toFixed(1)}s)` : ''}`} >
- {StatusIcon && step.status === 'running' && ( - + {step.status === 'running' && ( + )}
- {label} - {step.duration_ms && ( - - {(step.duration_ms / 1000).toFixed(1)}s - - )} + + {label.split(' ')[0]} + ); })} diff --git a/frontend/src/components/pipeline/index.ts b/frontend/src/components/pipeline/index.ts index e33595ab..f68c36ce 100644 --- a/frontend/src/components/pipeline/index.ts +++ b/frontend/src/components/pipeline/index.ts @@ -1,4 +1,5 @@ export { PipelineOverview } from './PipelineOverview'; +export { PipelineFlowchart } from './PipelineFlowchart'; export { AgentReportCard } from './AgentReportCard'; export { DebateViewer } from './DebateViewer'; export { RiskDebateViewer } from './RiskDebateViewer'; diff --git a/frontend/src/contexts/NotificationContext.tsx b/frontend/src/contexts/NotificationContext.tsx new file mode 100644 index 00000000..bec5a84e --- /dev/null +++ b/frontend/src/contexts/NotificationContext.tsx @@ -0,0 +1,72 @@ +import { createContext, useContext, useState, useCallback, useMemo } from 'react'; +import type { ReactNode } from 'react'; + +export type NotificationType = 'success' | 'error' | 'warning' | 'info'; + +export interface Notification { + id: string; + type: NotificationType; + title: string; + message?: string; + duration?: number; // ms, 0 means persistent +} + +interface NotificationContextType { + notifications: Notification[]; + addNotification: (notification: Omit) => string; + removeNotification: (id: string) => void; + clearAll: () => void; +} + +const NotificationContext = createContext(undefined); + +export function NotificationProvider({ children }: { children: ReactNode }) { + const [notifications, setNotifications] = useState([]); + + const addNotification = useCallback((notification: Omit) => { + const id = `notification-${Date.now()}-${Math.random().toString(36).substring(2, 11)}`; + const newNotification: Notification = { + ...notification, + id, + duration: notification.duration ?? 
5000, // Default 5 seconds + }; + + setNotifications(prev => [...prev, newNotification]); + + // Auto-remove after duration (if not persistent) + if (newNotification.duration && newNotification.duration > 0) { + setTimeout(() => { + setNotifications(prev => prev.filter(n => n.id !== id)); + }, newNotification.duration); + } + + return id; + }, []); + + const removeNotification = useCallback((id: string) => { + setNotifications(prev => prev.filter(n => n.id !== id)); + }, []); + + const clearAll = useCallback(() => { + setNotifications([]); + }, []); + + const value = useMemo( + () => ({ notifications, addNotification, removeNotification, clearAll }), + [notifications, addNotification, removeNotification, clearAll] + ); + + return ( + + {children} + + ); +} + +export function useNotification() { + const context = useContext(NotificationContext); + if (!context) { + throw new Error('useNotification must be used within a NotificationProvider'); + } + return context; +} diff --git a/frontend/src/contexts/SettingsContext.tsx b/frontend/src/contexts/SettingsContext.tsx index 45d55ad7..3767ed3b 100644 --- a/frontend/src/contexts/SettingsContext.tsx +++ b/frontend/src/contexts/SettingsContext.tsx @@ -40,6 +40,7 @@ interface Settings { // Analysis settings maxDebateRounds: number; + parallelWorkers: number; } interface SettingsContextType { @@ -57,6 +58,7 @@ const DEFAULT_SETTINGS: Settings = { provider: 'claude_subscription', anthropicApiKey: '', maxDebateRounds: 1, + parallelWorkers: 3, }; const STORAGE_KEY = 'nifty50ai_settings'; diff --git a/frontend/src/data/recommendations.ts b/frontend/src/data/recommendations.ts index 356b21b2..30bb45a9 100644 --- a/frontend/src/data/recommendations.ts +++ b/frontend/src/data/recommendations.ts @@ -352,7 +352,12 @@ function generatePriceHistory(basePrice: number, trend: 'up' | 'down' | 'flat', return history; } -// Mock backtest results based on decision type - with next-day returns +// FALLBACK backtest results - used when real API 
data is not available +// For accurate backtesting, use the API endpoints: +// - GET /backtest/{date}/{symbol} - Get real backtest for a stock +// - GET /backtest/accuracy - Get real accuracy metrics from database +// The real backtest calculates actual returns from Yahoo Finance price data +// and determines prediction accuracy based on actual price movements export const mockBacktestResults: Record = { 'BAJFINANCE': { prediction_correct: true, @@ -533,18 +538,23 @@ function getSymbolSeed(symbol: string): number { return Math.abs(hash); } +// Helper function to calculate prediction correctness based on decision and return +// LONG-ONLY strategy: BUY/HOLD correct if return > 0, SELL correct if return < 0 +function calculatePredictionCorrect(decision: Decision, return1d: number): boolean { + if (decision === 'BUY' || decision === 'HOLD') { + // BUY and HOLD are correct if stock price went up + return return1d > 0; + } else { + // SELL is correct if stock price went down + return return1d < 0; + } +} + // Get backtest result for a symbol - generates dynamically if not in static data +// NOTE: This is FALLBACK/DEMO data only. 
For real backtest accuracy: +// Use api.getBacktestResult(date, symbol) which fetches from the backend +// The backend calculates real returns using actual Yahoo Finance price data export function getBacktestResult(symbol: string): BacktestResult | undefined { - // Return existing backtest data if available - if (mockBacktestResults[symbol]) { - return mockBacktestResults[symbol]; - } - - // Return cached generated result if available - if (generatedBacktestCache[symbol]) { - return generatedBacktestCache[symbol]; - } - // Get the stock's decision from the latest recommendation const latestRec = sampleRecommendations[0]; const stockAnalysis = latestRec?.analysis[symbol]; @@ -553,45 +563,63 @@ export function getBacktestResult(symbol: string): BacktestResult | undefined { return undefined; } - // Generate backtest result based on decision type with consistent seeding const decision = stockAnalysis.decision; + + // If we have static mock data, use its prices but RECALCULATE prediction_correct + if (mockBacktestResults[symbol]) { + const mockData = mockBacktestResults[symbol]; + // Always recalculate prediction_correct based on actual return and decision + const correctPrediction = calculatePredictionCorrect(decision, mockData.actual_return_1d); + return { + ...mockData, + prediction_correct: correctPrediction, + }; + } + + // Return cached generated result if available (with recalculated correctness) + if (generatedBacktestCache[symbol]) { + const cachedData = generatedBacktestCache[symbol]; + // Always recalculate prediction_correct based on actual return and decision + const correctPrediction = calculatePredictionCorrect(decision, cachedData.actual_return_1d); + return { + ...cachedData, + prediction_correct: correctPrediction, + }; + } + + // Generate backtest result based on decision type with consistent seeding const seed = getSymbolSeed(symbol); const basePrice = 1000 + seededRandom(seed) * 2000; // Consistent base price between 1000-3000 - // Determine trend 
and accuracy based on decision + // First, generate the return randomly (with market-like distribution) + // Return can be positive or negative - this is NOT pre-determined by decision + const randomReturn = (seededRandom(seed + 1) - 0.45) * 10; // -4.5% to +5.5% range (slight positive bias) + const returnMultiplier = 1 + (randomReturn / 100); + + // Determine trend based on actual return let trend: 'up' | 'down' | 'flat'; - let predictionCorrect: boolean; - let returnMultiplier: number; - - // Simulate varied but consistent outcomes based on symbol seed - const randomOutcome = seededRandom(seed + 1); - - if (decision === 'BUY') { - // 75% chance BUY predictions are correct (stock goes up) - predictionCorrect = randomOutcome < 0.75; - trend = predictionCorrect ? 'up' : 'down'; - returnMultiplier = predictionCorrect ? (1 + seededRandom(seed + 2) * 0.08) : (1 - seededRandom(seed + 2) * 0.05); - } else if (decision === 'SELL') { - // 83% chance SELL predictions are correct (stock goes down) - predictionCorrect = randomOutcome < 0.83; - trend = predictionCorrect ? 'down' : 'up'; - returnMultiplier = predictionCorrect ? 
(1 - seededRandom(seed + 2) * 0.08) : (1 + seededRandom(seed + 2) * 0.05); + if (randomReturn > 0.5) { + trend = 'up'; + } else if (randomReturn < -0.5) { + trend = 'down'; } else { - // HOLD - 70% chance it stays relatively flat - predictionCorrect = randomOutcome < 0.70; trend = 'flat'; - returnMultiplier = 1 + (seededRandom(seed + 2) - 0.5) * 0.04; // +/- 2% } + // Calculate actual returns const currentPrice = basePrice * returnMultiplier; const actualReturn1m = ((currentPrice - basePrice) / basePrice) * 100; const actualReturn1w = actualReturn1m * 0.3; // Approximate // Next trading day return - about 15-25% of weekly return with some variance const actualReturn1d = actualReturn1w * (0.4 + seededRandom(seed + 3) * 0.3); + const roundedReturn1d = Math.round(actualReturn1d * 10) / 10; + + // Calculate prediction correctness based on actual return vs decision + const predictionCorrect = calculatePredictionCorrect(decision, roundedReturn1d); const result: BacktestResult = { prediction_correct: predictionCorrect, - actual_return_1d: Math.round(actualReturn1d * 10) / 10, + actual_return_1d: roundedReturn1d, actual_return_1w: Math.round(actualReturn1w * 10) / 10, actual_return_1m: Math.round(actualReturn1m * 10) / 10, price_at_prediction: Math.round(basePrice * 100) / 100, @@ -1025,29 +1053,23 @@ export function getDateStats(date: string): DateStats | null { if (backtest.prediction_correct) { correctCount++; - // For correct predictions: - // - BUY that went up: add the positive return - // - SELL that went down: add the absolute value (we gained by not holding/shorting) - // - HOLD that stayed flat: add the small return (we correctly avoided volatility) - if (decision === 'BUY') { - correctTotalReturn += return1d; // Positive return + // For correct predictions (LONG-ONLY strategy, no short positions): + // - BUY/HOLD that went up: add the positive return (we profited from long position) + // - SELL that went down: add the absolute value (we avoided loss by exiting) 
+ if (decision === 'BUY' || decision === 'HOLD') { + correctTotalReturn += return1d; // Positive return from long position } else if (decision === 'SELL') { - correctTotalReturn += Math.abs(return1d); // We avoided this loss - } else { - correctTotalReturn += Math.abs(return1d) < 2 ? 0.1 : 0; // Small gain for correct hold + correctTotalReturn += Math.abs(return1d); // We avoided this loss by exiting } } else { incorrectCount++; - // For incorrect predictions: - // - BUY that went down: subtract the loss - // - SELL that went up: subtract the missed gain - // - HOLD that moved significantly: subtract the missed opportunity - if (decision === 'BUY') { - incorrectTotalReturn += return1d; // Negative return (loss) + // For incorrect predictions (LONG-ONLY strategy, no short positions): + // - BUY/HOLD that went down: subtract the loss (we lost on long position) + // - SELL that went up: subtract the missed gain (we missed out by exiting) + if (decision === 'BUY' || decision === 'HOLD') { + incorrectTotalReturn += return1d; // Negative return (loss from long position) } else if (decision === 'SELL') { - incorrectTotalReturn += -Math.abs(return1d); // We missed this gain - } else { - incorrectTotalReturn += -Math.abs(return1d); // Missed the move + incorrectTotalReturn += -Math.abs(return1d); // We missed this gain by exiting } } } @@ -1315,6 +1337,8 @@ export function calculateRiskMetrics(): RiskMetrics { // Calculate max drawdown let peak = 100; let maxDrawdown = 0; + let maxDrawdownPeak = 100; + let maxDrawdownTrough = 100; let currentValue = 100; for (const ret of dailyReturns) { currentValue = currentValue * (1 + ret / 100); @@ -1324,6 +1348,8 @@ export function calculateRiskMetrics(): RiskMetrics { const drawdown = ((peak - currentValue) / peak) * 100; if (drawdown > maxDrawdown) { maxDrawdown = drawdown; + maxDrawdownPeak = peak; + maxDrawdownTrough = currentValue; } } @@ -1342,6 +1368,15 @@ export function calculateRiskMetrics(): RiskMetrics { winRate: 
Math.round(winRate), volatility: Math.round(volatility * 100) / 100, totalTrades: totalPredictions, + // Calculation details for showing formulas + meanReturn: Math.round(mean * 100) / 100, + riskFreeRate: riskFreeRate, + winningTrades: totalCorrect, + losingTrades: totalPredictions - totalCorrect, + avgWinReturn: Math.round(avgWin * 100) / 100, + avgLossReturn: Math.round(avgLoss * 100) / 100, + peakValue: Math.round(maxDrawdownPeak * 100) / 100, + troughValue: Math.round(maxDrawdownTrough * 100) / 100, }; } @@ -1468,3 +1503,94 @@ export function getAllSectors(): string[] { } return Array.from(sectors).sort(); } + +// Get stock history with prediction outcomes (1-day return after each prediction) +export interface StockHistoryWithOutcome { + date: string; + decision: Decision; + outcome: { + return1d: number; + predictionCorrect: boolean; + } | null; +} + +export function getStockHistoryWithOutcomes(symbol: string): StockHistoryWithOutcome[] { + const history = getStockHistory(symbol); + + // Map each historical entry with simulated outcomes based on the decision + return history.map((entry, index) => { + // Use seeded random for consistent outcomes per stock/date combination + const seed = getSymbolSeed(symbol) + getSymbolSeed(entry.date) + index; + + // First, generate the return randomly (with market-like distribution) + // Return can be positive or negative - this is NOT pre-determined by decision + const return1d = (seededRandom(seed) - 0.45) * 6; // -2.7% to +3.3% range (slight positive bias) + + // Determine prediction correctness based on actual return vs decision + // This is the CORRECT logic: BUY/HOLD correct if return > 0, SELL correct if return < 0 + let predictionCorrect: boolean; + if (entry.decision === 'BUY' || entry.decision === 'HOLD') { + // BUY and HOLD are correct if stock price went up + predictionCorrect = return1d > 0; + } else { + // SELL is correct if stock price went down + predictionCorrect = return1d < 0; + } + + return { + date: 
entry.date, + decision: entry.decision, + outcome: { + return1d: Math.round(return1d * 10) / 10, + predictionCorrect, + }, + }; + }); +} + +// Get prediction accuracy stats for a specific stock +export function getStockPredictionStats(symbol: string): { + totalPredictions: number; + correctPredictions: number; + accuracy: number; + avgReturn: number; + buyAccuracy: number; + sellAccuracy: number; + holdAccuracy: number; +} { + const history = getStockHistoryWithOutcomes(symbol); + + let correct = 0; + let totalReturn = 0; + let buyTotal = 0, buyCorrect = 0; + let sellTotal = 0, sellCorrect = 0; + let holdTotal = 0, holdCorrect = 0; + + for (const entry of history) { + if (entry.outcome) { + totalReturn += entry.outcome.return1d; + if (entry.outcome.predictionCorrect) correct++; + + if (entry.decision === 'BUY') { + buyTotal++; + if (entry.outcome.predictionCorrect) buyCorrect++; + } else if (entry.decision === 'SELL') { + sellTotal++; + if (entry.outcome.predictionCorrect) sellCorrect++; + } else { + holdTotal++; + if (entry.outcome.predictionCorrect) holdCorrect++; + } + } + } + + return { + totalPredictions: history.length, + correctPredictions: correct, + accuracy: history.length > 0 ? Math.round((correct / history.length) * 100) : 0, + avgReturn: history.length > 0 ? Math.round((totalReturn / history.length) * 10) / 10 : 0, + buyAccuracy: buyTotal > 0 ? Math.round((buyCorrect / buyTotal) * 100) : 0, + sellAccuracy: sellTotal > 0 ? Math.round((sellCorrect / sellTotal) * 100) : 0, + holdAccuracy: holdTotal > 0 ? 
Math.round((holdCorrect / holdTotal) * 100) : 0, + }; +} diff --git a/frontend/src/hooks/usePipelinePolling.ts b/frontend/src/hooks/usePipelinePolling.ts new file mode 100644 index 00000000..5c5b4e1b --- /dev/null +++ b/frontend/src/hooks/usePipelinePolling.ts @@ -0,0 +1,86 @@ +import { useState, useEffect, useRef, useCallback } from 'react'; +import { api } from '../services/api'; +import type { FullPipelineData } from '../types/pipeline'; + +interface UsePipelinePollingResult { + pipelineData: FullPipelineData | null; + isLive: boolean; + lastUpdated: Date | null; + error: string | null; +} + +/** + * Polls pipeline data during active analysis. + * When isAnalyzing is true, fetches fresh data every pollIntervalMs. + * When isAnalyzing is false, stops polling. + */ +export function usePipelinePolling( + symbol: string | undefined, + date: string | undefined, + isAnalyzing: boolean, + initialData: FullPipelineData | null = null, + pollIntervalMs: number = 5000 +): UsePipelinePollingResult { + const [pipelineData, setPipelineData] = useState(initialData); + const [lastUpdated, setLastUpdated] = useState(null); + const [error, setError] = useState(null); + const intervalRef = useRef | null>(null); + + // Update from initial data when it changes externally + useEffect(() => { + if (initialData) { + setPipelineData(initialData); + } + }, [initialData]); + + const fetchData = useCallback(async () => { + if (!symbol || !date) return; + try { + const data = await api.getPipelineData(date, symbol, true); + setPipelineData(data); + setLastUpdated(new Date()); + setError(null); + } catch (err) { + setError(err instanceof Error ? 
err.message : 'Failed to fetch pipeline data'); + } + }, [symbol, date]); + + useEffect(() => { + if (isAnalyzing && symbol && date) { + // Fetch immediately when analysis starts + fetchData(); + + // Then poll at interval + intervalRef.current = setInterval(fetchData, pollIntervalMs); + + return () => { + if (intervalRef.current) { + clearInterval(intervalRef.current); + intervalRef.current = null; + } + }; + } else { + // Stop polling when not analyzing + if (intervalRef.current) { + clearInterval(intervalRef.current); + intervalRef.current = null; + } + } + }, [isAnalyzing, symbol, date, fetchData, pollIntervalMs]); + + // Do a final fetch when analysis completes + useEffect(() => { + if (!isAnalyzing && symbol && date && lastUpdated) { + // Small delay to ensure backend has saved final state + const timeout = setTimeout(fetchData, 1000); + return () => clearTimeout(timeout); + } + }, [isAnalyzing]); + + return { + pipelineData, + isLive: isAnalyzing, + lastUpdated, + error, + }; +} diff --git a/frontend/src/index.css b/frontend/src/index.css index 53f42e27..c6dc4b76 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -166,6 +166,18 @@ } } + /* Shimmer animation for stocks being analyzed */ + .shimmer-effect { + background: linear-gradient(90deg, transparent 0%, rgba(59, 130, 246, 0.08) 50%, transparent 100%); + background-size: 200% 100%; + animation: shimmer 1.5s ease-in-out infinite; + } + + @keyframes shimmer { + 0% { background-position: 200% 0; } + 100% { background-position: -200% 0; } + } + /* Smooth scrollbar styling */ .scroll-smooth { scroll-behavior: smooth; diff --git a/frontend/src/pages/Dashboard.tsx b/frontend/src/pages/Dashboard.tsx index af955021..9ac3da6a 100644 --- a/frontend/src/pages/Dashboard.tsx +++ b/frontend/src/pages/Dashboard.tsx @@ -1,63 +1,178 @@ -import { useState, useMemo, useEffect } from 'react'; +import { useState, useMemo, useEffect, useCallback, useRef } from 'react'; import { Link } from 'react-router-dom'; 
-import { Calendar, RefreshCw, Filter, ChevronRight, TrendingUp, TrendingDown, Minus, History, Search, X, Play, Loader2 } from 'lucide-react'; +import { Calendar, RefreshCw, Filter, ChevronRight, TrendingUp, TrendingDown, Minus, History, Search, X, Play, Loader2, Square, AlertCircle, Terminal } from 'lucide-react'; import TopPicks, { StocksToAvoid } from '../components/TopPicks'; import { DecisionBadge } from '../components/StockCard'; +import TerminalModal from '../components/TerminalModal'; import HowItWorks from '../components/HowItWorks'; import BackgroundSparkline from '../components/BackgroundSparkline'; -import { getLatestRecommendation, getBacktestResult } from '../data/recommendations'; +import { getLatestRecommendation, getBacktestResult as getStaticBacktestResult } from '../data/recommendations'; import { api } from '../services/api'; import { useSettings } from '../contexts/SettingsContext'; -import type { Decision, StockAnalysis } from '../types'; +import { useNotification } from '../contexts/NotificationContext'; +import { NIFTY_50_STOCKS } from '../types'; +import type { Decision, StockAnalysis, DailyRecommendation, NiftyStock } from '../types'; type FilterType = 'ALL' | Decision; export default function Dashboard() { - const recommendation = getLatestRecommendation(); + // State for real API data + const [recommendation, setRecommendation] = useState(null); + const [isLoadingData, setIsLoadingData] = useState(true); + const [isUsingMockData, setIsUsingMockData] = useState(false); + + // Fetch real recommendation from API + const fetchRecommendation = useCallback(async () => { + setIsLoadingData(true); + try { + const data = await api.getLatestRecommendation(); + if (data && data.analysis && Object.keys(data.analysis).length > 0) { + setRecommendation(data); + setIsUsingMockData(false); + } else { + // API returned empty data, fall back to mock + const mockData = getLatestRecommendation(); + setRecommendation(mockData || null); + 
setIsUsingMockData(true); + } + } catch (error) { + console.error('Failed to fetch recommendation from API:', error); + // Fall back to mock data + const mockData = getLatestRecommendation(); + setRecommendation(mockData || null); + setIsUsingMockData(true); + } finally { + setIsLoadingData(false); + } + }, []); + + // Fetch on mount + useEffect(() => { + fetchRecommendation(); + }, [fetchRecommendation]); const [filter, setFilter] = useState('ALL'); const [searchQuery, setSearchQuery] = useState(''); const { settings } = useSettings(); + const { addNotification } = useNotification(); - // Bulk analysis state - const [isAnalyzing, setIsAnalyzing] = useState(false); + // Terminal modal state + const [isTerminalOpen, setIsTerminalOpen] = useState(false); + + // Track completed count to trigger incremental re-fetch + const prevCompletedRef = useRef(0); + + // Bulk analysis state — initialize from localStorage for instant display on refresh + const [isAnalyzing, setIsAnalyzing] = useState(() => { + try { + return localStorage.getItem('bulkAnalysisRunning') === 'true'; + } catch { return false; } + }); + const [isCancelling, setIsCancelling] = useState(false); const [analysisProgress, setAnalysisProgress] = useState<{ status: string; total: number; completed: number; failed: number; + skipped?: number; current_symbol: string | null; - } | null>(null); + current_symbols: string[]; + results: Record; + parallel_workers?: number; + stock_progress?: Record; + } | null>(() => { + try { + const saved = localStorage.getItem('bulkAnalysisProgress'); + return saved ? 
JSON.parse(saved) : null; + } catch { return null; } + }); - // Check for running analysis on mount + // Persist analysis state to localStorage + const updateAnalysisState = useCallback((analyzing: boolean, progress: typeof analysisProgress) => { + setIsAnalyzing(analyzing); + setAnalysisProgress(progress); + try { + if (analyzing) { + localStorage.setItem('bulkAnalysisRunning', 'true'); + if (progress) localStorage.setItem('bulkAnalysisProgress', JSON.stringify(progress)); + } else { + localStorage.removeItem('bulkAnalysisRunning'); + localStorage.removeItem('bulkAnalysisProgress'); + } + } catch { /* localStorage unavailable */ } + }, []); + + // Validate analysis state against backend on mount useEffect(() => { const checkAnalysisStatus = async () => { try { const status = await api.getBulkAnalysisStatus(); if (status.status === 'running') { - setIsAnalyzing(true); - setAnalysisProgress(status); + updateAnalysisState(true, status); + } else if (isAnalyzing) { + // localStorage said running but backend says otherwise (server restarted) + updateAnalysisState(false, null); } } catch (e) { console.error('Failed to check analysis status:', e); } }; checkAnalysisStatus(); + // eslint-disable-next-line react-hooks/exhaustive-deps }, []); - // Poll for analysis progress + // Poll for analysis progress + incremental re-fetch useEffect(() => { if (!isAnalyzing) return; const pollInterval = setInterval(async () => { try { const status = await api.getBulkAnalysisStatus(); - setAnalysisProgress(status); + // Persist progress to localStorage on every poll + updateAnalysisState(true, status); - if (status.status === 'completed' || status.status === 'idle') { - setIsAnalyzing(false); + // Incremental re-fetch: when completed count increases, refresh recommendation data + const newCompleted = status.completed + (status.skipped || 0); + if (newCompleted > prevCompletedRef.current) { + prevCompletedRef.current = newCompleted; + try { + const data = await 
api.getLatestRecommendation(); + if (data && data.analysis && Object.keys(data.analysis).length > 0) { + setRecommendation(data); + setIsUsingMockData(false); + } + } catch (e) { + console.warn('Re-fetch during analysis failed:', e); + } + } + + if (status.status === 'completed') { + updateAnalysisState(false, null); + prevCompletedRef.current = 0; + clearInterval(pollInterval); + // Final re-fetch for complete dataset + fetchRecommendation(); + addNotification({ + type: 'success', + title: 'Analysis Complete', + message: `Successfully analyzed ${status.completed} stocks.${status.skipped ? ` ${status.skipped} already analyzed (skipped).` : ''} ${status.failed > 0 ? `${status.failed} failed.` : ''}`, + duration: 8000, + }); + } else if (status.status === 'cancelled') { + updateAnalysisState(false, null); + prevCompletedRef.current = 0; + setIsCancelling(false); + clearInterval(pollInterval); + fetchRecommendation(); + addNotification({ + type: 'warning', + title: 'Analysis Cancelled', + message: `Cancelled after analyzing ${status.completed} stocks.`, + duration: 5000, + }); + } else if (status.status === 'idle') { + updateAnalysisState(false, null); + prevCompletedRef.current = 0; clearInterval(pollInterval); - // Refresh the page to show updated data - window.location.reload(); } } catch (e) { console.error('Failed to poll analysis status:', e); @@ -65,19 +180,19 @@ export default function Dashboard() { }, 3000); return () => clearInterval(pollInterval); - }, [isAnalyzing]); + }, [isAnalyzing, addNotification, updateAnalysisState, fetchRecommendation]); const handleAnalyzeAll = async () => { if (isAnalyzing) return; - setIsAnalyzing(true); - setAnalysisProgress({ + const initialProgress = { status: 'starting', total: 50, completed: 0, failed: 0, - current_symbol: null - }); + current_symbol: null as string | null + }; + updateAnalysisState(true, initialProgress); try { // Pass settings from context to the API @@ -86,43 +201,187 @@ export default function 
Dashboard() { quick_think_model: settings.quickThinkModel, provider: settings.provider, api_key: settings.provider === 'anthropic_api' ? settings.anthropicApiKey : undefined, - max_debate_rounds: settings.maxDebateRounds + max_debate_rounds: settings.maxDebateRounds, + parallel_workers: settings.parallelWorkers + }); + addNotification({ + type: 'info', + title: 'Analysis Started', + message: 'Running AI analysis for all 50 Nifty stocks...', + duration: 3000, }); } catch (e) { console.error('Failed to start bulk analysis:', e); - setIsAnalyzing(false); - setAnalysisProgress(null); + updateAnalysisState(false, null); + addNotification({ + type: 'error', + title: 'Analysis Failed', + message: 'Failed to start bulk analysis. Please try again.', + duration: 5000, + }); } }; - if (!recommendation) { + const handleCancelAnalysis = async () => { + if (!isAnalyzing || isCancelling) return; + + setIsCancelling(true); + try { + await api.cancelBulkAnalysis(); + addNotification({ + type: 'info', + title: 'Cancelling...', + message: 'Stopping analysis after current stocks complete.', + duration: 3000, + }); + } catch (e) { + console.error('Failed to cancel analysis:', e); + setIsCancelling(false); + addNotification({ + type: 'error', + title: 'Cancel Failed', + message: 'Failed to cancel analysis.', + duration: 3000, + }); + } + }; + + // Live state for each stock in the grid + type StockLiveState = 'completed' | 'analyzing' | 'pending' | 'failed'; + interface StockGridItem { + symbol: string; + company_name: string; + liveState: StockLiveState; + analysis: StockAnalysis | null; + } + + // Build unified stock grid: during analysis show all 50, otherwise only analyzed + const stockGridItems = useMemo((): StockGridItem[] => { + if (!isAnalyzing || !analysisProgress) { + // Normal mode: only show analyzed stocks + return (recommendation ? 
Object.values(recommendation.analysis) : []).map(s => ({ + symbol: s.symbol, + company_name: s.company_name, + liveState: 'completed' as StockLiveState, + analysis: s, + })); + } + + // Analysis mode: show all 50 stocks with live states + const analysisResults = analysisProgress.results || {}; + const currentSymbols = new Set(analysisProgress.current_symbols || []); + const analysisMap = recommendation?.analysis || {}; + + return NIFTY_50_STOCKS.map((niftyStock: NiftyStock): StockGridItem => { + const { symbol } = niftyStock; + const resultStatus = analysisResults[symbol]; + const existingAnalysis = analysisMap[symbol] || null; + + let liveState: StockLiveState; + if (existingAnalysis) { + liveState = 'completed'; + } else if (resultStatus === 'completed') { + liveState = 'completed'; // just completed, data not re-fetched yet + } else if (resultStatus && resultStatus.startsWith('error')) { + liveState = 'failed'; + } else if (currentSymbols.has(symbol)) { + liveState = 'analyzing'; + } else { + liveState = 'pending'; + } + + return { + symbol, + company_name: niftyStock.company_name, + liveState, + analysis: existingAnalysis, + }; + }); + }, [isAnalyzing, analysisProgress, recommendation]); + + // Filter grid items based on filter and search query + const filteredItems = useMemo(() => { + let result = stockGridItems; + if (filter !== 'ALL') { + // During analysis, show matching completed + all non-completed (so progress stays visible) + result = result.filter(item => + item.liveState !== 'completed' || item.analysis?.decision === filter + ); + } + if (searchQuery.trim()) { + const query = searchQuery.toLowerCase(); + result = result.filter(item => + item.symbol.toLowerCase().includes(query) || + item.company_name.toLowerCase().includes(query) + ); + } + return result; + }, [stockGridItems, filter, searchQuery]); + + // Show loading state — but also include analysis progress banner if running + if (isLoadingData || !recommendation) { return ( -
-
- -

Loading recommendations...

+
+ {isAnalyzing && analysisProgress && ( +
+
+
+ + + {isCancelling ? 'Cancelling...' : `Analyzing ${analysisProgress.current_symbols?.length > 0 ? analysisProgress.current_symbols.join(', ') : analysisProgress.current_symbol || 'stocks'}...`} + +
+
+ + {analysisProgress.completed + analysisProgress.failed} / {analysisProgress.total} stocks + {analysisProgress.skipped ? ` (${analysisProgress.skipped} skipped)` : ''} + + +
+
+
+
0 ? ((analysisProgress.completed + analysisProgress.failed) / analysisProgress.total) * 100 : 0}%` }} + /> +
+ {analysisProgress.failed > 0 && ( +

+ {analysisProgress.failed} failed +

+ )} +
+ )} +
+
+ +

Loading recommendations...

+

Fetching data from API...

+
); } - const stocks = Object.values(recommendation.analysis); - const filteredStocks = useMemo(() => { - let result = filter === 'ALL' ? stocks : stocks.filter(s => s.decision === filter); - if (searchQuery.trim()) { - const query = searchQuery.toLowerCase(); - result = result.filter(s => - s.symbol.toLowerCase().includes(query) || - s.company_name.toLowerCase().includes(query) - ); - } - return result; - }, [stocks, filter, searchQuery]); - - const { buy, sell, hold, total } = recommendation.summary; - const buyPct = ((buy / total) * 100).toFixed(0); - const holdPct = ((hold / total) * 100).toFixed(0); - const sellPct = ((sell / total) * 100).toFixed(0); + const { buy, sell, hold, total: analyzedTotal } = recommendation.summary; + const total = isAnalyzing ? 50 : analyzedTotal; + const buyPct = total > 0 ? ((buy / total) * 100).toFixed(0) : '0'; + const holdPct = total > 0 ? ((hold / total) * 100).toFixed(0) : '0'; + const sellPct = total > 0 ? ((sell / total) * 100).toFixed(0) : '0'; return (
@@ -145,13 +404,13 @@ export default function Dashboard() {
{/* Analyze All Button + Inline Stats */} -
+
{/* Analyze All Button */} -
setFilter('BUY')} title="Click to filter Buy stocks"> -
+ {/* Mock Data Indicator */} + {isUsingMockData && ( +
+ + + Using demo data. Run "Analyze All" or start the backend server for real AI recommendations. + +
+ )} + {/* Analysis Progress Banner */} {isAnalyzing && analysisProgress && (
@@ -201,17 +486,43 @@ export default function Dashboard() {
- Analyzing {analysisProgress.current_symbol || 'stocks'}... + {isCancelling ? 'Cancelling...' : ( + <> + Analyzing{' '} + {analysisProgress.current_symbols?.length > 0 + ? analysisProgress.current_symbols.join(', ') + : analysisProgress.current_symbol || 'stocks'} + ... + + )}
- - {analysisProgress.completed + analysisProgress.failed} / {analysisProgress.total} stocks - +
+ + {analysisProgress.completed + analysisProgress.failed} / {analysisProgress.total} stocks + {analysisProgress.skipped ? ` (${analysisProgress.skipped} skipped)` : ''} + + +
0 ? ((analysisProgress.completed + analysisProgress.failed) / analysisProgress.total) * 100 : 0}%` }} />
{analysisProgress.failed > 0 && ( @@ -239,7 +550,9 @@ export default function Dashboard() {
-

All {total} Stocks

+

+ {isAnalyzing ? `All 50 Stocks (${analysisProgress?.completed || 0} analyzed)` : `All ${total} Stocks`} +

+ +
+ ); +} export default function History() { const [selectedDate, setSelectedDate] = useState(null); @@ -22,25 +81,759 @@ export default function History() { const [returnModalDate, setReturnModalDate] = useState(null); const [showOverallModal, setShowOverallModal] = useState(false); - const dates = sampleRecommendations.map(r => r.date); - const accuracyMetrics = calculateAccuracyMetrics(); - const overallStats = useMemo(() => getOverallStats(), []); + // Investment mode for Select Date section + const [dateFilterMode, setDateFilterMode] = useState('all50'); + // Investment mode for Performance Summary + const [summaryMode, setSummaryMode] = useState('all50'); + // Investment mode for Index Comparison Chart + const [indexChartMode, setIndexChartMode] = useState('all50'); + // Investment mode for Return Distribution Chart + const [distributionMode, setDistributionMode] = useState('all50'); - // Pre-calculate date stats for all dates + // Performance Summary modal state - single state instead of 4 booleans + type SummaryModalType = 'daysTracked' | 'avgReturn' | 'buySignals' | 'sellSignals' | null; + const [activeSummaryModal, setActiveSummaryModal] = useState(null); + + // State for real backtest data + const [realBacktestData, setRealBacktestData] = useState>({}); + const [isLoadingBacktest, setIsLoadingBacktest] = useState(false); + + // State for real recommendations from API + const [recommendations, setRecommendations] = useState(sampleRecommendations); + const [isLoadingRecommendations, setIsLoadingRecommendations] = useState(true); + const [isUsingMockData, setIsUsingMockData] = useState(false); + + // State for accuracy trend data (calculated from real backtest API) + const [accuracyTrendData, setAccuracyTrendData] = useState([]); + const [isLoadingAccuracyTrend, setIsLoadingAccuracyTrend] = useState(false); + + // State for risk metrics (calculated from real backtest API) + const [realRiskMetrics, setRealRiskMetrics] = useState(undefined); + const 
[isLoadingRiskMetrics, setIsLoadingRiskMetrics] = useState(false); + + // State for return distribution (calculated from real backtest API) + const [realReturnDistribution, setRealReturnDistribution] = useState(undefined); + const [isLoadingReturnDistribution, setIsLoadingReturnDistribution] = useState(false); + + // State for cumulative returns / index comparison (calculated from real backtest API) + const [realCumulativeReturns, setRealCumulativeReturns] = useState(undefined); + const [isLoadingCumulativeReturns, setIsLoadingCumulativeReturns] = useState(false); + + // State for overall return breakdown (calculated from real backtest API) + const [realOverallBreakdown, setRealOverallBreakdown] = useState(undefined); + + // State for Top Picks mode data + const [topPicksCumulativeReturns, setTopPicksCumulativeReturns] = useState(undefined); + const [topPicksReturnDistribution, setTopPicksReturnDistribution] = useState(undefined); + + // State for real Nifty50 index prices + const [nifty50Prices, setNifty50Prices] = useState>({}); + + // State for per-date weighted returns (for dateStatsMap) + const [realDateReturns, setRealDateReturns] = useState>({}); + + // State for all backtest data by date and symbol (for PortfolioSimulator) + const [allBacktestData, setAllBacktestData] = useState>>({}); + + // Fetch real recommendations from API + useEffect(() => { + const fetchRecommendations = async () => { + setIsLoadingRecommendations(true); + try { + const data = await api.getAllRecommendations(); + if (data && data.recommendations && data.recommendations.length > 0) { + setRecommendations(data.recommendations); + setIsUsingMockData(false); + } else { + // API returned empty data, use mock + setRecommendations(sampleRecommendations); + setIsUsingMockData(true); + } + } catch (error) { + console.error('Failed to fetch recommendations from API:', error); + // Fall back to mock data + setRecommendations(sampleRecommendations); + setIsUsingMockData(true); + } finally { + 
setIsLoadingRecommendations(false); + } + }; + fetchRecommendations(); + }, []); + + // Batch-fetch all backtest results per date (used by both accuracy trend and chart data) + const [batchBacktestByDate, setBatchBacktestByDate] = useState< + Record> + >({}); + const [isBatchLoading, setIsBatchLoading] = useState(false); + + useEffect(() => { + const fetchBatchBacktest = async () => { + if (recommendations.length === 0) return; + setIsBatchLoading(true); + + const sortedDates = [...recommendations] + .sort((a, b) => new Date(a.date).getTime() - new Date(b.date).getTime()) + .map(r => r.date); + + const batchData: typeof batchBacktestByDate = {}; + + // Trigger batch calculation for all dates in parallel, then fetch results + await Promise.all(sortedDates.map(async (date) => { + try { + // First try to get existing results + let response = await api.getBacktestResultsForDate(date); + + // If no results, trigger calculation and wait + if (!response.results || response.results.length === 0) { + try { + await api.calculateBacktest(date); + // Wait a bit for calculation, then re-fetch + await new Promise(r => setTimeout(r, 5000)); + response = await api.getBacktestResultsForDate(date); + } catch { + // Calculation may already be running + } + } + + if (response.results && response.results.length > 0) { + batchData[date] = {}; + for (const r of response.results) { + batchData[date][r.symbol] = { + return_1d: r.return_1d, + return_1w: r.return_1w, + return_1m: r.return_1m, + prediction_correct: r.prediction_correct, + decision: r.decision, + }; + } + } + } catch (err) { + console.warn(`Failed to fetch batch backtest for ${date}:`, err); + } + })); + + setBatchBacktestByDate(batchData); + setIsBatchLoading(false); + }; + + if (!isUsingMockData && !isLoadingRecommendations) { + fetchBatchBacktest(); + } + }, [recommendations, isUsingMockData, isLoadingRecommendations]); + + // Compute accuracy trend from batch backtest data + useEffect(() => { + if (isBatchLoading || 
Object.keys(batchBacktestByDate).length === 0) return; + + setIsLoadingAccuracyTrend(true); + const trendData: AccuracyTrendPoint[] = []; + + const sortedDates = [...recommendations] + .sort((a, b) => new Date(a.date).getTime() - new Date(b.date).getTime()) + .map(r => r.date); + + for (const date of sortedDates) { + const rec = recommendations.find(r => r.date === date); + const dateBacktest = batchBacktestByDate[date]; + if (!rec || !dateBacktest) continue; + + let totalBuy = 0, correctBuy = 0; + let totalSell = 0, correctSell = 0; + let totalHold = 0, correctHold = 0; + + for (const symbol of Object.keys(rec.analysis)) { + const stockAnalysis = rec.analysis[symbol]; + const bt = dateBacktest[symbol]; + if (!stockAnalysis?.decision || !bt || bt.return_1d === undefined || bt.return_1d === null) continue; + + const predictionCorrect = (stockAnalysis.decision === 'BUY' || stockAnalysis.decision === 'HOLD') + ? bt.return_1d > 0 + : bt.return_1d < 0; + + if (stockAnalysis.decision === 'BUY') { totalBuy++; if (predictionCorrect) correctBuy++; } + else if (stockAnalysis.decision === 'SELL') { totalSell++; if (predictionCorrect) correctSell++; } + else { totalHold++; if (predictionCorrect) correctHold++; } + } + + const totalPredictions = totalBuy + totalSell + totalHold; + const totalCorrect = correctBuy + correctSell + correctHold; + + trendData.push({ + date, + overall: totalPredictions > 0 ? Math.round((totalCorrect / totalPredictions) * 100) : 0, + buy: totalBuy > 0 ? Math.round((correctBuy / totalBuy) * 100) : 0, + sell: totalSell > 0 ? Math.round((correctSell / totalSell) * 100) : 0, + hold: totalHold > 0 ? 
Math.round((correctHold / totalHold) * 100) : 0, + }); + } + + setAccuracyTrendData(trendData); + setIsLoadingAccuracyTrend(false); + }, [batchBacktestByDate, isBatchLoading, recommendations]); + + // Compute all chart data from batch backtest results (no individual API calls) + useEffect(() => { + const computeAllChartData = async () => { + if (recommendations.length === 0 || isBatchLoading || Object.keys(batchBacktestByDate).length === 0) return; + + setIsLoadingRiskMetrics(true); + setIsLoadingReturnDistribution(true); + setIsLoadingCumulativeReturns(true); + + // Sort dates chronologically + const sortedDates = [...recommendations] + .sort((a, b) => new Date(a.date).getTime() - new Date(b.date).getTime()) + .map(r => r.date); + + // Data collection for risk metrics calculation + const dailyReturns: number[] = []; + let wins = 0; + let losses = 0; + let totalWinReturn = 0; + let totalLossReturn = 0; + let totalCorrect = 0; + let totalPredictions = 0; + + // Data for return distribution (latest date only) + const returnBuckets: ReturnBucket[] = [ + { range: '< -3%', min: -Infinity, max: -3, count: 0, stocks: [] }, + { range: '-3% to -2%', min: -3, max: -2, count: 0, stocks: [] }, + { range: '-2% to -1%', min: -2, max: -1, count: 0, stocks: [] }, + { range: '-1% to 0%', min: -1, max: 0, count: 0, stocks: [] }, + { range: '0% to 1%', min: 0, max: 1, count: 0, stocks: [] }, + { range: '1% to 2%', min: 1, max: 2, count: 0, stocks: [] }, + { range: '2% to 3%', min: 2, max: 3, count: 0, stocks: [] }, + { range: '> 3%', min: 3, max: Infinity, count: 0, stocks: [] }, + ]; + + // Data for cumulative returns + const cumulativeData: CumulativeReturnPoint[] = []; + let aiMultiplier = 1; + let indexMultiplier = 1; + + // Fetch real Nifty50 index data + let niftyPrices: Record = {}; + try { + const niftyData = await api.getNifty50History(); + if (niftyData.prices && Object.keys(niftyData.prices).length > 0) { + niftyPrices = niftyData.prices; + setNifty50Prices(niftyPrices); + 
} + } catch (err) { + console.warn('Failed to fetch Nifty50 index data'); + } + + // Precompute Nifty daily returns from prices + const sortedNiftyDates = Object.keys(niftyPrices).sort(); + const niftyDailyReturns: Record = {}; + for (let i = 1; i < sortedNiftyDates.length; i++) { + const prevPrice = niftyPrices[sortedNiftyDates[i - 1]]; + const currPrice = niftyPrices[sortedNiftyDates[i]]; + niftyDailyReturns[sortedNiftyDates[i]] = ((currPrice - prevPrice) / prevPrice) * 100; + } + + // Helper to get Nifty return for a date (exact or closest match) + const getNiftyReturn = (date: string): number => { + if (niftyDailyReturns[date] !== undefined) return niftyDailyReturns[date]; + const closestDate = sortedNiftyDates.find(d => d >= date) || sortedNiftyDates[sortedNiftyDates.length - 1]; + return (closestDate && niftyDailyReturns[closestDate] !== undefined) ? niftyDailyReturns[closestDate] : 0; + }; + + // Track per-date returns for dateStatsMap + const dateReturnsMap: Record = {}; + // Track all backtest results for PortfolioSimulator + const allBacktest: Record> = {}; + // Track which date last had valid return data (for return distribution) + let latestDateWithData: string | null = null; + + // Process each date using batch data (no individual API calls) + for (const date of sortedDates) { + const rec = recommendations.find(r => r.date === date); + const dateBacktest = batchBacktestByDate[date]; + if (!rec || !dateBacktest) continue; + + let dateCorrectCount = 0; + let dateTotalCount = 0; + let dateCorrectReturn = 0; + let dateIncorrectReturn = 0; + + for (const symbol of Object.keys(rec.analysis)) { + const stockAnalysis = rec.analysis[symbol]; + const bt = dateBacktest[symbol]; + if (!stockAnalysis?.decision || !bt || bt.return_1d === undefined || bt.return_1d === null) continue; + + const return1d = bt.return_1d; + + // Store for PortfolioSimulator + if (!allBacktest[date]) allBacktest[date] = {}; + allBacktest[date][symbol] = return1d; + + const 
predictionCorrect = (stockAnalysis.decision === 'BUY' || stockAnalysis.decision === 'HOLD') + ? return1d > 0 + : return1d < 0; + + totalPredictions++; + if (predictionCorrect) { + totalCorrect++; + dateCorrectCount++; + if (stockAnalysis.decision === 'BUY' || stockAnalysis.decision === 'HOLD') { + dateCorrectReturn += return1d; + } else { + dateCorrectReturn += Math.abs(return1d); + } + } else { + if (stockAnalysis.decision === 'BUY' || stockAnalysis.decision === 'HOLD') { + dateIncorrectReturn += return1d; + } else { + dateIncorrectReturn += -Math.abs(return1d); + } + } + dateTotalCount++; + } + + if (dateTotalCount > 0) latestDateWithData = date; + + // Calculate weighted daily return for this date + if (dateTotalCount > 0) { + const correctAvg = dateCorrectCount > 0 ? dateCorrectReturn / dateCorrectCount : 0; + const incorrectAvg = (dateTotalCount - dateCorrectCount) > 0 + ? dateIncorrectReturn / (dateTotalCount - dateCorrectCount) : 0; + const weightedReturn = (correctAvg * (dateCorrectCount / dateTotalCount)) + + (incorrectAvg * ((dateTotalCount - dateCorrectCount) / dateTotalCount)); + + dailyReturns.push(weightedReturn); + dateReturnsMap[date] = Math.round(weightedReturn * 10) / 10; + + if (weightedReturn > 0) { wins++; totalWinReturn += weightedReturn; } + else if (weightedReturn < 0) { losses++; totalLossReturn += Math.abs(weightedReturn); } + + aiMultiplier *= (1 + weightedReturn / 100); + const indexDailyReturn = getNiftyReturn(date); + indexMultiplier *= (1 + indexDailyReturn / 100); + + cumulativeData.push({ + date, + value: Math.round(aiMultiplier * 10000) / 100, + aiReturn: Math.round((aiMultiplier - 1) * 1000) / 10, + indexReturn: Math.round((indexMultiplier - 1) * 1000) / 10, + }); + } + } + + // Populate return distribution using the latest date with actual data + if (latestDateWithData) { + const rec = recommendations.find(r => r.date === latestDateWithData); + const dateBacktest = batchBacktestByDate[latestDateWithData]; + if (rec && 
dateBacktest) { + for (const symbol of Object.keys(rec.analysis)) { + const bt = dateBacktest[symbol]; + if (!bt || bt.return_1d === undefined || bt.return_1d === null) continue; + for (const bucket of returnBuckets) { + if (bt.return_1d >= bucket.min && bt.return_1d < bucket.max) { + bucket.count++; + bucket.stocks.push(symbol); + break; + } + } + } + } + } + + // Calculate risk metrics + if (dailyReturns.length > 0) { + const mean = dailyReturns.reduce((a, b) => a + b, 0) / dailyReturns.length; + const variance = dailyReturns.reduce((sum, r) => sum + Math.pow(r - mean, 2), 0) / dailyReturns.length; + const volatility = Math.sqrt(variance); + const riskFreeRate = 0.02; + const sharpeRatio = volatility > 0 ? (mean - riskFreeRate) / volatility : 0; + + let peak = 100, maxDrawdown = 0, maxDrawdownTrough = 100, maxDrawdownPeak = 100, currentValue = 100; + for (const ret of dailyReturns) { + currentValue = currentValue * (1 + ret / 100); + if (currentValue > peak) peak = currentValue; + const drawdown = ((peak - currentValue) / peak) * 100; + if (drawdown > maxDrawdown) { maxDrawdown = drawdown; maxDrawdownPeak = peak; maxDrawdownTrough = currentValue; } + } + + const avgWin = wins > 0 ? totalWinReturn / wins : 0; + const avgLoss = losses > 0 ? totalLossReturn / losses : 1; + + setRealRiskMetrics({ + sharpeRatio: Math.round(sharpeRatio * 100) / 100, + maxDrawdown: Math.round(maxDrawdown * 10) / 10, + winLossRatio: Math.round((avgLoss > 0 ? avgWin / avgLoss : avgWin) * 100) / 100, + winRate: Math.round(totalPredictions > 0 ? 
(totalCorrect / totalPredictions) * 100 : 0), + volatility: Math.round(volatility * 100) / 100, + totalTrades: totalPredictions, + meanReturn: Math.round(mean * 100) / 100, + riskFreeRate, + winningTrades: wins, + losingTrades: losses, + avgWinReturn: Math.round(avgWin * 100) / 100, + avgLossReturn: Math.round(avgLoss * 100) / 100, + peakValue: Math.round(maxDrawdownPeak * 100) / 100, + troughValue: Math.round(maxDrawdownTrough * 100) / 100, + }); + } + + setRealReturnDistribution(returnBuckets); + setRealCumulativeReturns(cumulativeData); + + // Calculate overall return breakdown + if (cumulativeData.length > 0) { + const breakdownDailyReturns: { date: string; return: number; multiplier: number; cumulative: number }[] = []; + let cumulativeMultiplier = 1; + + for (let i = 0; i < cumulativeData.length; i++) { + const point = cumulativeData[i]; + const dailyReturn = i === 0 + ? point.aiReturn + : Math.round((((1 + point.aiReturn / 100) / (1 + cumulativeData[i - 1].aiReturn / 100)) - 1) * 1000) / 10; + const dailyMultiplier = 1 + dailyReturn / 100; + cumulativeMultiplier *= dailyMultiplier; + breakdownDailyReturns.push({ + date: point.date, return: dailyReturn, + multiplier: Math.round(dailyMultiplier * 10000) / 10000, + cumulative: Math.round((cumulativeMultiplier - 1) * 1000) / 10, + }); + } + + const finalMultiplier = 1 + cumulativeData[cumulativeData.length - 1].aiReturn / 100; + setRealOverallBreakdown({ + dailyReturns: breakdownDailyReturns, + finalMultiplier: Math.round(finalMultiplier * 10000) / 10000, + finalReturn: Math.round((finalMultiplier - 1) * 1000) / 10, + formula: '', + }); + } + + // Calculate Top Picks data + const topPicksCumulative: CumulativeReturnPoint[] = []; + const topPicksDistribution: ReturnBucket[] = [ + { range: '< -3%', min: -Infinity, max: -3, count: 0, stocks: [] }, + { range: '-3% to -2%', min: -3, max: -2, count: 0, stocks: [] }, + { range: '-2% to -1%', min: -2, max: -1, count: 0, stocks: [] }, + { range: '-1% to 0%', min: -1, 
max: 0, count: 0, stocks: [] }, + { range: '0% to 1%', min: 0, max: 1, count: 0, stocks: [] }, + { range: '1% to 2%', min: 1, max: 2, count: 0, stocks: [] }, + { range: '2% to 3%', min: 2, max: 3, count: 0, stocks: [] }, + { range: '> 3%', min: 3, max: Infinity, count: 0, stocks: [] }, + ]; + let topPicksMultiplier = 1; + let topPicksIndexMultiplier = 1; + + let latestTopPicksDateWithData: string | null = null; + + for (const date of sortedDates) { + const rec = recommendations.find(r => r.date === date); + const dateBacktest = batchBacktestByDate[date]; + if (!rec || !rec.top_picks || !dateBacktest) continue; + + let dateReturn = 0; + let dateCount = 0; + + for (const pick of rec.top_picks) { + const bt = dateBacktest[pick.symbol]; + if (bt && bt.return_1d !== undefined && bt.return_1d !== null) { + dateReturn += bt.return_1d; + dateCount++; + } + } + + if (dateCount > 0) latestTopPicksDateWithData = date; + + if (dateCount > 0) { + const avgReturn = dateReturn / dateCount; + topPicksMultiplier *= (1 + avgReturn / 100); + const indexDailyReturn = getNiftyReturn(date); + topPicksIndexMultiplier *= (1 + indexDailyReturn / 100); + topPicksCumulative.push({ + date, + value: Math.round(topPicksMultiplier * 10000) / 100, + aiReturn: Math.round((topPicksMultiplier - 1) * 1000) / 10, + indexReturn: Math.round((topPicksIndexMultiplier - 1) * 1000) / 10, + }); + } + } + + // Populate top picks distribution from latest date with data + if (latestTopPicksDateWithData) { + const rec = recommendations.find(r => r.date === latestTopPicksDateWithData); + const dateBacktest = batchBacktestByDate[latestTopPicksDateWithData]; + if (rec && dateBacktest) { + for (const pick of rec.top_picks) { + const bt = dateBacktest[pick.symbol]; + if (bt && bt.return_1d !== undefined && bt.return_1d !== null) { + for (const bucket of topPicksDistribution) { + if (bt.return_1d >= bucket.min && bt.return_1d < bucket.max) { + bucket.count++; + bucket.stocks.push(pick.symbol); + break; + } + } + } + } 
+ } + } + + setTopPicksCumulativeReturns(topPicksCumulative); + setTopPicksReturnDistribution(topPicksDistribution); + setRealDateReturns(dateReturnsMap); + setAllBacktestData(allBacktest); + + setIsLoadingRiskMetrics(false); + setIsLoadingReturnDistribution(false); + setIsLoadingCumulativeReturns(false); + }; + + if (!isUsingMockData && !isLoadingRecommendations) { + computeAllChartData(); + } + }, [batchBacktestByDate, isBatchLoading, recommendations, isUsingMockData, isLoadingRecommendations]); + + const dates = recommendations.map(r => r.date); + + // API-first accuracy metrics with mock fallback + const [apiAccuracyMetrics, setApiAccuracyMetrics] = useState<{ + overall_accuracy: number; + total_predictions: number; + correct_predictions: number; + by_decision: Record; + } | null>(null); + + useEffect(() => { + if (isUsingMockData) return; + const fetchAccuracy = async () => { + try { + const metrics = await api.getAccuracyMetrics(); + if (metrics && metrics.total_predictions > 0) { + setApiAccuracyMetrics(metrics); + } + } catch { + // Will use mock fallback + } + }; + fetchAccuracy(); + }, [isUsingMockData]); + + // Convert API or static accuracy to consistent format + const accuracyMetrics = useMemo(() => { + if (apiAccuracyMetrics && apiAccuracyMetrics.total_predictions > 0) { + return { + total_predictions: apiAccuracyMetrics.total_predictions, + correct_predictions: apiAccuracyMetrics.correct_predictions, + success_rate: apiAccuracyMetrics.overall_accuracy / 100, + buy_accuracy: (apiAccuracyMetrics.by_decision?.BUY?.accuracy || 0) / 100, + sell_accuracy: (apiAccuracyMetrics.by_decision?.SELL?.accuracy || 0) / 100, + hold_accuracy: (apiAccuracyMetrics.by_decision?.HOLD?.accuracy || 0) / 100, + }; + } + // Only fall back to mock when actually using mock data + if (isUsingMockData) { + return calculateStaticAccuracyMetrics(); + } + // Real data mode but no backtest predictions available yet + return { + total_predictions: 0, + correct_predictions: 0, + 
success_rate: 0, + buy_accuracy: 0, + sell_accuracy: 0, + hold_accuracy: 0, + }; + }, [apiAccuracyMetrics, isUsingMockData]); + + // Compute overall stats from real recommendations data (or fallback to static) + const overallStats = useMemo(() => { + if (!isUsingMockData && recommendations.length > 0) { + // Calculate from real cumulative returns data if available + if (realCumulativeReturns && realCumulativeReturns.length > 0) { + const lastPoint = realCumulativeReturns[realCumulativeReturns.length - 1]; + return { + totalDays: recommendations.length, + totalPredictions: accuracyMetrics.total_predictions, + avgDailyReturn: Math.round((lastPoint.aiReturn / realCumulativeReturns.length) * 10) / 10, + avgMonthlyReturn: 0, + overallAccuracy: Math.round(accuracyMetrics.success_rate * 100), + bestDay: null, + worstDay: null, + }; + } + // Real data mode but no backtest data available - show zeros, not mock + return { + totalDays: recommendations.length, + totalPredictions: 0, + avgDailyReturn: 0, + avgMonthlyReturn: 0, + overallAccuracy: 0, + bestDay: null, + worstDay: null, + }; + } + return getStaticOverallStats(); + }, [isUsingMockData, recommendations, realCumulativeReturns, accuracyMetrics]); + + // Fetch real backtest data for selected date + const fetchBacktestForDate = useCallback(async (date: string) => { + const rec = recommendations.find(r => r.date === date); + if (!rec) return; + + setIsLoadingBacktest(true); + const newData: Record = {}; + + const stocks = Object.values(rec.analysis); + for (const stock of stocks) { + if (!stock.symbol || !stock.decision) continue; + + try { + const backtest = await api.getBacktestResult(date, stock.symbol); + + if (backtest.available) { + // Calculate prediction correctness based on 1-day return + // BUY/HOLD correct if return > 0, SELL correct if return < 0 + let predictionCorrect: boolean | null = null; + if (backtest.actual_return_1d !== undefined && backtest.actual_return_1d !== null) { + if (stock.decision === 'BUY' 
|| stock.decision === 'HOLD') { + predictionCorrect = backtest.actual_return_1d > 0; + } else if (stock.decision === 'SELL') { + predictionCorrect = backtest.actual_return_1d < 0; + } + } + + newData[stock.symbol] = { + symbol: stock.symbol, + decision: stock.decision, + return1d: backtest.actual_return_1d ?? null, + return1w: backtest.actual_return_1w ?? null, + predictionCorrect, + priceHistory: backtest.price_history, + }; + } + } catch (err) { + console.error(`Failed to fetch backtest for ${stock.symbol}:`, err); + } + } + + setRealBacktestData(prev => ({ ...prev, ...newData })); + setIsLoadingBacktest(false); + }, [recommendations]); + + // Fetch backtest data when date is selected + useEffect(() => { + if (selectedDate) { + fetchBacktestForDate(selectedDate); + } + }, [selectedDate, fetchBacktestForDate]); + + // Calculate stats based on mode + const filteredStats = useMemo(() => { + if (summaryMode === 'all50') { + // Consolidate three reduce calls into one + const signalTotals = recommendations.reduce( + (acc, r) => ({ + buy: acc.buy + r.summary.buy, + sell: acc.sell + r.summary.sell, + hold: acc.hold + r.summary.hold, + }), + { buy: 0, sell: 0, hold: 0 } + ); + return { + totalDays: dates.length, + avgDailyReturn: overallStats.avgDailyReturn, + buySignals: signalTotals.buy, + sellSignals: signalTotals.sell, + holdSignals: signalTotals.hold, + }; + } + + // Top Picks mode - calculate stats from real data or static fallback + const topPicksData = recommendations.flatMap(rec => + rec.top_picks.map(pick => { + // Try real backtest data first + const realData = realBacktestData[pick.symbol]; + if (realData?.return1d !== null && realData?.return1d !== undefined) { + return realData.return1d; + } + // Only fall back to mock when actually using mock data + return isUsingMockData ? 
getStaticBacktestResult(pick.symbol)?.actual_return_1d : undefined; + }) + ).filter((r): r is number => r !== undefined); + + return { + totalDays: dates.length, + avgDailyReturn: topPicksData.length > 0 + ? topPicksData.reduce((sum, r) => sum + r, 0) / topPicksData.length + : 0, + buySignals: recommendations.reduce((acc, r) => acc + r.top_picks.length, 0), + sellSignals: 0, // Top picks are always BUY recommendations + holdSignals: 0, + }; + }, [summaryMode, dates.length, overallStats.avgDailyReturn, recommendations]); + + // Pre-calculate date stats: from real recommendations data (counts) and real backtest returns const dateStatsMap = useMemo(() => { - const map: Record> = {}; - dates.forEach(date => { - map[date] = getDateStats(date); - }); - return map; - }, [dates]); + return Object.fromEntries(dates.map(date => { + const rec = recommendations.find(r => r.date === date); + if (rec && !isUsingMockData) { + // Use real recommendation data for counts and real backtest returns + const stocks = Object.values(rec.analysis); + return [date, { + date, + avgReturn1d: realDateReturns[date] ?? 0, + avgReturn1m: 0, + totalStocks: stocks.length, + correctPredictions: 0, + accuracy: 0, + buyCount: rec.summary.buy, + sellCount: rec.summary.sell, + holdCount: rec.summary.hold, + }]; + } + return [date, getStaticDateStats(date)]; + })); + }, [dates, recommendations, isUsingMockData, realDateReturns]); const getRecommendation = (date: string) => { - return sampleRecommendations.find(r => r.date === date); + return recommendations.find(r => r.date === date); }; + // Filter stocks based on mode for display + const getFilteredStocks = (date: string) => { + const rec = getRecommendation(date); + if (!rec) return []; + + if (dateFilterMode === 'topPicks') { + return rec.top_picks.map(pick => rec.analysis[pick.symbol]).filter(Boolean); + } + return Object.values(rec.analysis); + }; + + // Show loading state + if (isLoadingRecommendations) { + return ( +
+
+ +

Loading historical data...

+

Fetching recommendations from API...

+
+
+ ); + } + return (
+ {/* Mock Data Indicator */} + {isUsingMockData && ( +
+ + + Using demo data. Start the backend server and run analysis for real AI recommendations. + +
+ )} + {/* Compact Header */}
@@ -109,36 +902,85 @@ export default function History() { {/* Accuracy Trend Chart */}
-
- -

Accuracy Trend

+
+
+ +

Accuracy Trend

+
+ {isLoadingAccuracyTrend && ( +
+ + Loading real data... +
+ )}
- + {/* Pass real data if available, use mock fallback only when in mock mode */} + 0 ? accuracyTrendData : undefined) + : accuracyTrendData + } + />

- Prediction accuracy over the past {dates.length} trading days + {accuracyTrendData.length > 0 ? ( + <>Prediction accuracy from real backtest data over {accuracyTrendData.length} trading days + ) : isUsingMockData ? ( + <>Demo data - Start backend for real accuracy tracking + ) : ( + <>Prediction accuracy over the past {dates.length} trading days + )}

{/* Risk Metrics */}
-
- -

Risk Metrics

+
+
+ +

Risk Metrics

+
+ {isLoadingRiskMetrics && ( +
+ + Loading real data... +
+ )}
- +

- Risk-adjusted performance metrics for the AI trading strategy + {realRiskMetrics ? ( + <>Risk-adjusted performance from real backtest data ({realRiskMetrics.totalTrades} trades) + ) : isUsingMockData ? ( + <>Demo data - Start backend for real risk metrics + ) : ( + <>Risk-adjusted performance metrics for the AI trading strategy + )}

{/* Portfolio Simulator */} - + - {/* Date Selector */} + {/* Date Selector with Mode Toggle */}
-
- -

Select Date

+
+
+ +

Select Date

+
+
+ + +
{dates.map((date) => { @@ -147,6 +989,11 @@ export default function History() { const avgReturn = stats?.avgReturn1d ?? 0; const isPositive = avgReturn >= 0; + // Calculate filtered summary for this date + const filteredSummary = dateFilterMode === 'topPicks' + ? { buy: rec?.top_picks.length || 0, sell: 0, hold: 0 } + : rec?.summary || { buy: 0, sell: 0, hold: 0 }; + return (
{/* Help button for return explanation */} @@ -221,132 +1066,254 @@ export default function History() {
-

- {new Date(selectedDate).toLocaleDateString('en-IN', { - weekday: 'short', - month: 'short', - day: 'numeric', - year: 'numeric', - })} -

+
+

+ {new Date(selectedDate).toLocaleDateString('en-IN', { + weekday: 'short', + month: 'short', + day: 'numeric', + year: 'numeric', + })} +

+ +
- - - {getRecommendation(selectedDate)?.summary.buy} Buy - - - - {getRecommendation(selectedDate)?.summary.sell} Sell - - - - {getRecommendation(selectedDate)?.summary.hold} Hold - + {dateFilterMode === 'all50' ? ( + <> + + + {getRecommendation(selectedDate)?.summary.buy} Buy + + + + {getRecommendation(selectedDate)?.summary.sell} Sell + + + + {getRecommendation(selectedDate)?.summary.hold} Hold + + + ) : ( + + + {getRecommendation(selectedDate)?.top_picks.length} Top Picks (BUY) + + )}
-
- {Object.values(getRecommendation(selectedDate)?.analysis || {}).map((stock: StockAnalysis) => { - const backtest = getBacktestResult(stock.symbol); - // Use next-day return for the display - const nextDayReturn = backtest?.actual_return_1d ?? 0; - const isPositive = nextDayReturn >= 0; + {isLoadingBacktest ? ( +
+ +

Fetching real market data...

+
+ ) : ( +
+ {getFilteredStocks(selectedDate).map((stock: StockAnalysis) => { + const realData = realBacktestData[stock.symbol]; - return ( - -
- {stock.symbol} - {stock.company_name} -
-
- {/* Sparkline */} - {backtest && ( - - )} - {/* Next-Day Return Badge */} - {backtest && ( - - )} - - -
- - ); - })} -
+ let nextDayReturn: number | null; + let priceHistory: Array<{ date: string; price: number }> | undefined; + let predictionCorrect: boolean | null = null; + + if (!isUsingMockData) { + // Real data mode: only use real backtest, no mock fallback + nextDayReturn = realData?.return1d ?? null; + priceHistory = realData?.priceHistory; + if (realData?.predictionCorrect !== undefined) { + predictionCorrect = realData.predictionCorrect; + } + } else { + // Mock data mode: use real if available, fall back to mock + const mockBacktest = getStaticBacktestResult(stock.symbol); + nextDayReturn = realData?.return1d ?? mockBacktest?.actual_return_1d ?? 0; + priceHistory = realData?.priceHistory ?? mockBacktest?.price_history; + if (realData?.predictionCorrect !== undefined) { + predictionCorrect = realData.predictionCorrect; + } else if (mockBacktest && stock.decision) { + if (stock.decision === 'BUY' || stock.decision === 'HOLD') { + predictionCorrect = nextDayReturn > 0; + } else if (stock.decision === 'SELL') { + predictionCorrect = nextDayReturn < 0; + } + } + } + const isPositive = (nextDayReturn ?? 0) >= 0; + + return ( + +
+ {stock.symbol} + {stock.company_name} + {realData && ( + + Real + + )} +
+
+ + {predictionCorrect !== null && ( + + )} + {priceHistory && ( + + )} + +
+ + ); + })} +
+ )}
)} - {/* Performance Summary Cards */} + {/* Performance Summary Cards with Mode Toggle */}
-
- -

Performance Summary

+
+
+ +

Performance Summary

+
+
-
-
{overallStats.totalDays}
-
Days Tracked
-
-
-
= 0 ? 'text-green-600 dark:text-green-400' : 'text-red-600 dark:text-red-400'}`}> - {overallStats.avgDailyReturn >= 0 ? '+' : ''}{overallStats.avgDailyReturn.toFixed(1)}% +
setActiveSummaryModal('daysTracked')} + > +
{filteredStats.totalDays}
+
+ Days Tracked
-
Avg Next-Day Return
-
+
setActiveSummaryModal('avgReturn')} + > +
+ {filteredStats.avgDailyReturn >= 0 ? '+' : ''}{filteredStats.avgDailyReturn.toFixed(1)}% +
+
+ Avg Next-Day Return +
+
+
setActiveSummaryModal('buySignals')} + >
- {sampleRecommendations.reduce((acc, r) => acc + r.summary.buy, 0)} + {filteredStats.buySignals} +
+
+ {summaryMode === 'topPicks' ? 'Top Pick Signals' : 'Buy Signals'}
-
Buy Signals
-
+
setActiveSummaryModal('sellSignals')} + >
- {sampleRecommendations.reduce((acc, r) => acc + r.summary.sell, 0)} + {filteredStats.sellSignals} +
+
+ Sell Signals
-
Sell Signals

- Next-day return = Price change on the trading day after recommendation + {summaryMode === 'topPicks' + ? 'Performance based on Top Picks recommendations only (3 stocks per day)' + : 'Next-day return = Price change on the trading day after recommendation' + }

{/* AI vs Nifty50 Index Comparison */}
-
- -

AI Strategy vs Nifty50 Index

+
+
+ +

AI Strategy vs Nifty50 Index

+
+
+ {isLoadingCumulativeReturns && ( +
+ + Loading... +
+ )} + +
- +

- Comparison of cumulative returns between AI strategy and Nifty50 index + {(indexChartMode === 'topPicks' ? topPicksCumulativeReturns : realCumulativeReturns)?.length ? ( + <> + Cumulative returns for {indexChartMode === 'topPicks' ? 'Top Picks' : 'All 50 stocks'} over{' '} + {(indexChartMode === 'topPicks' ? topPicksCumulativeReturns : realCumulativeReturns)?.length} trading days + + ) : isUsingMockData ? ( + <>Demo data - Start backend for real performance comparison + ) : ( + <>Comparison of cumulative returns between AI strategy and Nifty50 index + )}

{/* Return Distribution */}
-
- -

Return Distribution

+
+
+ +

Return Distribution

+
+
+ {isLoadingReturnDistribution && ( +
+ + Loading... +
+ )} + +
- +

- Distribution of next-day returns across all predictions. Click bars to see stocks. + {(distributionMode === 'topPicks' ? topPicksReturnDistribution : realReturnDistribution) ? ( + <>Distribution of {distributionMode === 'topPicks' ? 'Top Picks' : 'all 50 stocks'} next-day returns. Click bars to see stocks. + ) : isUsingMockData ? ( + <>Demo data - Start backend for real return distribution + ) : ( + <>Distribution of next-day returns across all predictions. Click bars to see stocks. + )}

@@ -361,7 +1328,7 @@ export default function History() { setShowReturnModal(false)} - breakdown={returnModalDate ? getReturnBreakdown(returnModalDate) : null} + breakdown={returnModalDate ? (isUsingMockData ? getStaticReturnBreakdown(returnModalDate) : null) : null} date={returnModalDate || ''} /> @@ -369,7 +1336,122 @@ export default function History() { setShowOverallModal(false)} + breakdown={realOverallBreakdown} + cumulativeData={realCumulativeReturns} /> + + {/* Performance Summary Modals */} + setActiveSummaryModal(null)} + title="Days Tracked" + icon={} + > +
+

Days Tracked shows the total number of trading days where AI recommendations have been recorded and analyzed.

+
+
Current Count:
+
{filteredStats.totalDays} days
+
+

Each day includes analysis for {summaryMode === 'topPicks' ? '3 top picks' : 'all 50 Nifty stocks'}.

+
+
+ + setActiveSummaryModal(null)} + title="Average Next-Day Return" + icon={} + > +
+

Average Next-Day Return measures the mean percentage price change one trading day after each recommendation.

+
+
How it's calculated:
+
    +
  1. Record stock price at recommendation time
  2. +
  3. Record price at next trading day close
  4. +
  5. Calculate: (Next Day Price - Rec Price) / Rec Price × 100
  6. +
  7. Average all these returns
  8. +
+
+
= 0 ? 'bg-green-50 dark:bg-green-900/20' : 'bg-red-50 dark:bg-red-900/20'} rounded-lg`}> +
Current Average:
+
= 0 ? 'text-green-600' : 'text-red-600'}`}> + {filteredStats.avgDailyReturn >= 0 ? '+' : ''}{filteredStats.avgDailyReturn.toFixed(2)}% +
+
+
+
+ + setActiveSummaryModal(null)} + title={summaryMode === 'topPicks' ? 'Top Pick Signals' : 'Buy Signals'} + icon={} + > +
+ {summaryMode === 'topPicks' ? ( + <> +

Top Pick Signals counts all stocks that were selected as "Top Picks" across all tracked days.

+
+
What makes a Top Pick?
+
    +
  • Strong bullish momentum indicators
  • +
  • Positive technical analysis signals
  • +
  • Favorable risk-reward ratio
  • +
  • High confidence BUY recommendation
  • +
+
+ + ) : ( + <> +

Buy Signals counts every BUY recommendation issued by the AI across all tracked days and all 50 stocks.

+
+
When is BUY recommended?
+
    +
  • Technical indicators show bullish momentum
  • +
  • Positive sentiment in news/fundamentals
  • +
  • Expected price appreciation in short term
  • +
+
+ + )} +
+ Total {summaryMode === 'topPicks' ? 'Top Pick' : 'Buy'} Signals: + {filteredStats.buySignals} +
+
+
+ + setActiveSummaryModal(null)} + title="Sell Signals" + icon={} + > +
+

Sell Signals counts every SELL recommendation issued by the AI across all tracked days.

+
+
When is SELL recommended?
+
    +
  • Technical indicators show bearish momentum
  • +
  • Negative sentiment in news/fundamentals
  • +
  • Expected price decline in short term
  • +
  • Risk level exceeds acceptable threshold
  • +
+
+ {summaryMode === 'topPicks' ? ( +
+ Note: Top Picks mode only shows BUY recommendations, so sell signals are 0. +
+ ) : ( +
+ Total Sell Signals: + {filteredStats.sellSignals} +
+ )} +
+
); } diff --git a/frontend/src/services/api.ts b/frontend/src/services/api.ts index 086cce82..e19f3422 100644 --- a/frontend/src/services/api.ts +++ b/frontend/src/services/api.ts @@ -11,6 +11,17 @@ import type { PipelineSummary } from '../types/pipeline'; +// Import types from the centralized types file +import type { + StockAnalysis, + TopPick, + StockToAvoid, + DailyRecommendation, +} from '../types'; + +// Re-export types for consumers who import from api.ts +export type { StockAnalysis, TopPick, StockToAvoid, DailyRecommendation }; + // Use same hostname as the page, just different port for API const getApiBaseUrl = () => { // If env variable is set, use it @@ -24,30 +35,6 @@ const getApiBaseUrl = () => { const API_BASE_URL = getApiBaseUrl(); -export interface StockAnalysis { - symbol: string; - company_name: string; - decision: 'BUY' | 'SELL' | 'HOLD' | null; - confidence?: 'HIGH' | 'MEDIUM' | 'LOW'; - risk?: 'HIGH' | 'MEDIUM' | 'LOW'; - raw_analysis?: string; -} - -export interface TopPick { - rank: number; - symbol: string; - company_name: string; - decision: string; - reason: string; - risk_level: string; -} - -export interface StockToAvoid { - symbol: string; - company_name: string; - reason: string; -} - export interface Summary { total: number; buy: number; @@ -55,19 +42,12 @@ export interface Summary { hold: number; } -export interface DailyRecommendation { - date: string; - analysis: Record; - summary: Summary; - top_picks: TopPick[]; - stocks_to_avoid: StockToAvoid[]; -} - export interface StockHistory { date: string; decision: string; confidence?: string; risk?: string; + hold_days?: number | null; } /** @@ -282,10 +262,25 @@ class ApiService { decision?: string; started_at?: string; completed_at?: string; + steps_completed?: number; + steps_total?: number; + steps_running?: string[]; + pipeline_steps?: Record; }> { return this.fetch(`/analyze/${symbol}/status`, { noCache: true }); } + /** + * Cancel a running analysis for a stock + */ + async 
cancelAnalysis(symbol: string): Promise<{ + message: string; + symbol: string; + status: string; + }> { + return this.fetch(`/analyze/${symbol}/cancel`, { method: 'POST', noCache: true }); + } + /** * Get all running analyses */ @@ -305,10 +300,12 @@ class ApiService { provider?: string; api_key?: string; max_debate_rounds?: number; + parallel_workers?: number; }): Promise<{ message: string; date: string; total_stocks: number; + skipped?: number; status: string; }> { const url = date ? `/analyze/all?date=${date}` : '/analyze/all'; @@ -327,13 +324,119 @@ class ApiService { total: number; completed: number; failed: number; + skipped?: number; current_symbol: string | null; + current_symbols: string[]; started_at: string | null; completed_at: string | null; results: Record; + parallel_workers?: number; + stock_progress?: Record; + cancelled?: boolean; }> { return this.fetch('/analyze/all/status', { noCache: true }); } + + /** + * Cancel bulk analysis + */ + async cancelBulkAnalysis(): Promise<{ + message: string; + completed: number; + total: number; + status: string; + }> { + return this.fetch('/analyze/all/cancel', { method: 'POST', noCache: true }); + } + + // ============== Stock Price History Methods ============== + + /** + * Get real historical closing prices for a stock from yfinance + */ + async getStockPriceHistory(symbol: string, days: number = 90): Promise<{ + symbol: string; + prices: Array<{ date: string; price: number }>; + error?: string; + }> { + return this.fetch(`/stocks/${symbol}/prices?days=${days}`); + } + + // ============== Nifty50 Index Methods ============== + + /** + * Get Nifty50 index closing prices for recommendation date range + */ + async getNifty50History(): Promise<{ + dates: string[]; + prices: Record; + error?: string; + }> { + return this.fetch('/nifty50/history'); + } + + // ============== Backtest Methods ============== + + /** + * Get backtest result for a specific stock and date + */ + async getBacktestResult(date: string, 
symbol: string): Promise<{ + available: boolean; + reason?: string; + prediction_correct?: boolean; + actual_return_1d?: number; + actual_return_1w?: number; + actual_return_1m?: number; + price_at_prediction?: number; + current_price?: number; + price_history?: Array<{ date: string; price: number }>; + hold_days?: number | null; + return_at_hold?: number | null; + }> { + return this.fetch(`/backtest/${date}/${symbol}`, { noCache: true }); + } + + /** + * Get all backtest results for a specific date + */ + async getBacktestResultsForDate(date: string): Promise<{ + date: string; + results: Array<{ + symbol: string; + decision: string; + price_at_prediction: number; + return_1d?: number; + return_1w?: number; + return_1m?: number; + prediction_correct?: boolean; + }>; + }> { + return this.fetch(`/backtest/${date}`); + } + + /** + * Calculate backtest for all recommendations on a date + */ + async calculateBacktest(date: string): Promise<{ + status: string; + date: string; + message: string; + }> { + return this.fetch(`/backtest/${date}/calculate`, { method: 'POST' }); + } + + /** + * Get overall accuracy metrics from backtest results + */ + async getAccuracyMetrics(): Promise<{ + overall_accuracy: number; + total_predictions: number; + correct_predictions: number; + by_decision: Record; + by_confidence: Record; + }> { + return this.fetch('/backtest/accuracy', { noCache: true }); + } } export const api = new ApiService(); diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 20ad7dd4..941f7bf3 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -57,6 +57,7 @@ export interface StockAnalysis { confidence?: Confidence; risk?: Risk; raw_analysis?: string; + hold_days?: number | null; error?: string | null; } @@ -85,7 +86,7 @@ export interface StockToAvoid { export interface DailyRecommendation { date: string; analysis: Record; - ranking: RankingResult; + ranking?: RankingResult; // Optional since API may not return it 
summary: { total: number; buy: number; @@ -103,6 +104,7 @@ export interface HistoricalEntry { decision: Decision; confidence?: Confidence; risk?: Risk; + hold_days?: number | null; } export interface StockHistory { @@ -139,6 +141,15 @@ export interface RiskMetrics { winRate: number; // % of winning predictions volatility: number; // std dev of returns totalTrades: number; + // Additional calculation details for showing formulas + meanReturn?: number; + riskFreeRate?: number; + winningTrades?: number; + losingTrades?: number; + avgWinReturn?: number; + avgLossReturn?: number; + peakValue?: number; + troughValue?: number; } // Return distribution bucket @@ -168,6 +179,14 @@ export interface AccuracyTrendPoint { hold: number; } +// Cumulative return data point for index comparison chart +export interface CumulativeReturnPoint { + date: string; + value: number; + aiReturn: number; + indexReturn: number; +} + export const NIFTY_50_STOCKS: NiftyStock[] = [ { symbol: 'RELIANCE', company_name: 'Reliance Industries Ltd', sector: 'Energy' }, { symbol: 'TCS', company_name: 'Tata Consultancy Services Ltd', sector: 'IT' }, diff --git a/frontend/src/types/pipeline.ts b/frontend/src/types/pipeline.ts index d59727b6..126ac75b 100644 --- a/frontend/src/types/pipeline.ts +++ b/frontend/src/types/pipeline.ts @@ -57,6 +57,16 @@ export interface DebatesMap { risk?: DebateHistory; } +/** + * Structured details for a pipeline step (prompt, response, tool calls) + */ +export interface StepDetails { + system_prompt?: string; + user_prompt?: string; + response?: string; + tool_calls?: Array<{ name: string; args?: string; result_preview?: string }>; +} + /** * Single step in the analysis pipeline */ @@ -68,6 +78,7 @@ export interface PipelineStep { completed_at?: string; duration_ms?: number; output_summary?: string; + step_details?: StepDetails; } /** @@ -76,6 +87,8 @@ export interface PipelineStep { export interface DataSourceLog { source_type: string; source_name: string; + method?: 
string; + args?: string; data_fetched?: Record | string; fetch_timestamp?: string; success: boolean; @@ -197,3 +210,130 @@ export const DEBATE_ROLES = { judge: { label: 'Risk Manager', color: 'blue', icon: 'ShieldCheck' } } } as const; + +// ============================================================ +// Flowchart types for the 12-step visual pipeline debug view +// ============================================================ + +export type FlowchartPhase = 'data_analysis' | 'investment_debate' | 'trading' | 'risk_debate'; + +export interface FlowchartStepDef { + number: number; + id: string; + label: string; + icon: string; + phase: FlowchartPhase; + phaseLabel: string; + agentType?: AgentType; + debateType?: DebateType; + debateRole?: string; + color: string; +} + +export const FLOWCHART_STEPS: FlowchartStepDef[] = [ + { number: 1, id: 'market_analyst', label: 'Market Analyst', icon: 'TrendingUp', phase: 'data_analysis', phaseLabel: 'Data & Analysis', agentType: 'market', color: 'blue' }, + { number: 2, id: 'social_analyst', label: 'Social Media Analyst', icon: 'Users', phase: 'data_analysis', phaseLabel: 'Data & Analysis', agentType: 'social_media', color: 'pink' }, + { number: 3, id: 'news_analyst', label: 'News Analyst', icon: 'Newspaper', phase: 'data_analysis', phaseLabel: 'Data & Analysis', agentType: 'news', color: 'purple' }, + { number: 4, id: 'fundamentals_analyst', label: 'Fundamentals Analyst', icon: 'FileText', phase: 'data_analysis', phaseLabel: 'Data & Analysis', agentType: 'fundamentals', color: 'emerald' }, + { number: 5, id: 'bull_researcher', label: 'Bull Researcher', icon: 'TrendingUp', phase: 'investment_debate', phaseLabel: 'Investment Debate', debateType: 'investment', debateRole: 'bull', color: 'green' }, + { number: 6, id: 'bear_researcher', label: 'Bear Researcher', icon: 'TrendingDown', phase: 'investment_debate', phaseLabel: 'Investment Debate', debateType: 'investment', debateRole: 'bear', color: 'red' }, + { number: 7, id: 
'research_manager', label: 'Research Manager', icon: 'Scale', phase: 'investment_debate', phaseLabel: 'Investment Debate', debateType: 'investment', debateRole: 'judge', color: 'violet' }, + { number: 8, id: 'trader', label: 'Trader', icon: 'Target', phase: 'trading', phaseLabel: 'Trading', color: 'amber' }, + { number: 9, id: 'aggressive_analyst', label: 'Aggressive Analyst', icon: 'Zap', phase: 'risk_debate', phaseLabel: 'Risk Debate', debateType: 'risk', debateRole: 'risky', color: 'orange' }, + { number: 10, id: 'conservative_analyst', label: 'Conservative Analyst', icon: 'Shield', phase: 'risk_debate', phaseLabel: 'Risk Debate', debateType: 'risk', debateRole: 'safe', color: 'sky' }, + { number: 11, id: 'neutral_analyst', label: 'Neutral Analyst', icon: 'Scale', phase: 'risk_debate', phaseLabel: 'Risk Debate', debateType: 'risk', debateRole: 'neutral', color: 'slate' }, + { number: 12, id: 'risk_manager', label: 'Risk Manager', icon: 'ShieldCheck', phase: 'risk_debate', phaseLabel: 'Risk Debate', debateType: 'risk', debateRole: 'judge', color: 'indigo' }, +]; + +export interface FlowchartNodeData extends FlowchartStepDef { + status: PipelineStepStatus; + started_at?: string; + completed_at?: string; + duration_ms?: number; + output_summary?: string; + step_details?: StepDetails; + agentReport?: AgentReport; + debateContent?: string; +} + +/** Phase metadata for UI rendering */ +export const PHASE_META: Record = { + data_analysis: { label: 'Data & Analysis', number: 1, color: 'blue', borderColor: 'border-l-blue-500', bgColor: 'bg-blue-500/5', textColor: 'text-blue-600 dark:text-blue-400' }, + investment_debate: { label: 'Investment Debate', number: 2, color: 'violet', borderColor: 'border-l-violet-500', bgColor: 'bg-violet-500/5', textColor: 'text-violet-600 dark:text-violet-400' }, + trading: { label: 'Trading', number: 3, color: 'amber', borderColor: 'border-l-amber-500', bgColor: 'bg-amber-500/5', textColor: 'text-amber-600 dark:text-amber-400' }, + 
risk_debate: { label: 'Risk Assessment', number: 4, color: 'red', borderColor: 'border-l-red-500', bgColor: 'bg-red-500/5', textColor: 'text-red-600 dark:text-red-400' }, +}; + +/** + * Map backend FullPipelineData (9 grouped steps) to 12 individual FlowchartNodeData entries. + * When the backend provides 12 granular steps, they map directly by step name. + */ +export function mapPipelineToFlowchart(data: FullPipelineData | null): FlowchartNodeData[] { + if (!data) { + return FLOWCHART_STEPS.map(step => ({ ...step, status: 'pending' as PipelineStepStatus })); + } + + const stepsByName: Record = {}; + for (const ps of data.pipeline_steps || []) { + stepsByName[ps.step_name] = ps; + } + + // Mapping from flowchart step id to possible backend step names + const STEP_NAME_MAP: Record = { + market_analyst: ['market_analyst', 'market_analysis'], + social_analyst: ['social_analyst', 'social_analysis', 'social_media_analyst'], + news_analyst: ['news_analyst', 'news_analysis'], + fundamentals_analyst: ['fundamentals_analyst', 'fundamental_analysis', 'fundamentals_analysis'], + bull_researcher: ['bull_researcher', 'bull_research', 'investment_debate'], + bear_researcher: ['bear_researcher', 'bear_research', 'investment_debate'], + research_manager: ['research_manager', 'investment_debate'], + trader: ['trader', 'trader_decision'], + aggressive_analyst: ['aggressive_analyst', 'aggressive_analysis', 'risk_debate'], + conservative_analyst: ['conservative_analyst', 'conservative_analysis', 'risk_debate'], + neutral_analyst: ['neutral_analyst', 'neutral_analysis', 'risk_debate'], + risk_manager: ['risk_manager', 'final_decision'], + }; + + return FLOWCHART_STEPS.map(step => { + // Find matching backend step + const candidates = STEP_NAME_MAP[step.id] || [step.id]; + let matchedStep: PipelineStep | undefined; + for (const name of candidates) { + if (stepsByName[name]) { + matchedStep = stepsByName[name]; + break; + } + } + + // Get linked content + let agentReport: AgentReport | 
undefined; + if (step.agentType && data.agent_reports) { + agentReport = data.agent_reports[step.agentType]; + } + + let debateContent: string | undefined; + if (step.debateType && step.debateRole && data.debates) { + const debate = data.debates[step.debateType]; + if (debate) { + if (step.debateRole === 'bull') debateContent = debate.bull_arguments; + else if (step.debateRole === 'bear') debateContent = debate.bear_arguments; + else if (step.debateRole === 'risky') debateContent = debate.risky_arguments; + else if (step.debateRole === 'safe') debateContent = debate.safe_arguments; + else if (step.debateRole === 'neutral') debateContent = debate.neutral_arguments; + else if (step.debateRole === 'judge') debateContent = debate.judge_decision; + } + } + + return { + ...step, + status: matchedStep?.status || 'pending', + started_at: matchedStep?.started_at, + completed_at: matchedStep?.completed_at, + duration_ms: matchedStep?.duration_ms, + output_summary: matchedStep?.output_summary, + step_details: matchedStep?.step_details, + agentReport, + debateContent, + }; + }); +} diff --git a/frontend/src/utils/brokerageCalculator.ts b/frontend/src/utils/brokerageCalculator.ts new file mode 100644 index 00000000..157f0c1d --- /dev/null +++ b/frontend/src/utils/brokerageCalculator.ts @@ -0,0 +1,234 @@ +/** + * Zerodha Brokerage Calculator + * Based on https://github.com/hemangjoshi37a/Zerodha-Brokerage-Calculator + * + * Implements accurate brokerage calculation for Indian equity markets + */ + +export interface BrokerageBreakdown { + brokerage: number; + stt: number; + exchangeCharges: number; + sebiCharges: number; + gst: number; + stampDuty: number; + totalCharges: number; + netProfit: number; + turnover: number; +} + +export interface TradeDetails { + buyPrice: number; + sellPrice: number; + quantity: number; + tradeType: 'delivery' | 'intraday'; +} + +// Zerodha charge rates +const RATES = { + // Equity Delivery + delivery: { + brokerage: 0, // Zero brokerage for 
delivery + sttBuy: 0.001, // 0.1% on buy + sttSell: 0.001, // 0.1% on sell + exchangeCharges: 0.0000345, // 0.00345% + sebiCharges: 0.000001, // 0.0001% + gst: 0.18, // 18% + stampDuty: 0.00015, // 0.015% on buy side + }, + // Equity Intraday + intraday: { + brokerageRate: 0.0003, // 0.03% + brokerageCap: 20, // Max Rs. 20 per side + sttSell: 0.00025, // 0.025% on sell side only + exchangeCharges: 0.0000345, // 0.00345% + sebiCharges: 0.000001, // 0.0001% + gst: 0.18, // 18% + stampDuty: 0.00003, // 0.003% on buy side + }, +}; + +/** + * Calculate brokerage for a single trade + */ +export function calculateBrokerage(trade: TradeDetails): BrokerageBreakdown { + const { buyPrice, sellPrice, quantity, tradeType } = trade; + const buyValue = buyPrice * quantity; + const sellValue = sellPrice * quantity; + const turnover = buyValue + sellValue; + + if (tradeType === 'delivery') { + return calculateDeliveryBrokerage(buyValue, sellValue, turnover); + } else { + return calculateIntradayBrokerage(buyValue, sellValue, turnover); + } +} + +function calculateDeliveryBrokerage( + buyValue: number, + sellValue: number, + turnover: number +): BrokerageBreakdown { + const rates = RATES.delivery; + + // Brokerage is zero for delivery + const brokerage = 0; + + // STT on both buy and sell + const stt = (buyValue * rates.sttBuy) + (sellValue * rates.sttSell); + + // Exchange transaction charges on turnover + const exchangeCharges = turnover * rates.exchangeCharges; + + // SEBI charges on turnover + const sebiCharges = turnover * rates.sebiCharges; + + // GST on brokerage + exchange charges + const gst = (brokerage + exchangeCharges) * rates.gst; + + // Stamp duty on buy side only + const stampDuty = buyValue * rates.stampDuty; + + const totalCharges = brokerage + stt + exchangeCharges + sebiCharges + gst + stampDuty; + const netProfit = sellValue - buyValue - totalCharges; + + return { + brokerage, + stt, + exchangeCharges, + sebiCharges, + gst, + stampDuty, + totalCharges, + 
netProfit, + turnover, + }; +} + +function calculateIntradayBrokerage( + buyValue: number, + sellValue: number, + turnover: number +): BrokerageBreakdown { + const rates = RATES.intraday; + + // Brokerage: min(0.03% * value, Rs. 20) per side + const buyBrokerage = Math.min(buyValue * rates.brokerageRate, rates.brokerageCap); + const sellBrokerage = Math.min(sellValue * rates.brokerageRate, rates.brokerageCap); + const brokerage = buyBrokerage + sellBrokerage; + + // STT on sell side only for intraday + const stt = sellValue * rates.sttSell; + + // Exchange transaction charges on turnover + const exchangeCharges = turnover * rates.exchangeCharges; + + // SEBI charges on turnover + const sebiCharges = turnover * rates.sebiCharges; + + // GST on brokerage + exchange charges + const gst = (brokerage + exchangeCharges) * rates.gst; + + // Stamp duty on buy side only + const stampDuty = buyValue * rates.stampDuty; + + const totalCharges = brokerage + stt + exchangeCharges + sebiCharges + gst + stampDuty; + const netProfit = sellValue - buyValue - totalCharges; + + return { + brokerage, + stt, + exchangeCharges, + sebiCharges, + gst, + stampDuty, + totalCharges, + netProfit, + turnover, + }; +} + +/** + * Calculate total brokerage for multiple trades + */ +export function calculateTotalBrokerage( + trades: TradeDetails[] +): { + breakdown: BrokerageBreakdown; + tradeCount: number; +} { + const totals: BrokerageBreakdown = { + brokerage: 0, + stt: 0, + exchangeCharges: 0, + sebiCharges: 0, + gst: 0, + stampDuty: 0, + totalCharges: 0, + netProfit: 0, + turnover: 0, + }; + + for (const trade of trades) { + const result = calculateBrokerage(trade); + totals.brokerage += result.brokerage; + totals.stt += result.stt; + totals.exchangeCharges += result.exchangeCharges; + totals.sebiCharges += result.sebiCharges; + totals.gst += result.gst; + totals.stampDuty += result.stampDuty; + totals.totalCharges += result.totalCharges; + totals.netProfit += result.netProfit; + 
totals.turnover += result.turnover; + } + + return { + breakdown: totals, + tradeCount: trades.length, + }; +} + +/** + * Quick estimate for a round-trip delivery trade + * (buy and later sell the same quantity) + */ +export function estimateDeliveryCharges( + buyPrice: number, + sellPrice: number, + quantity: number +): BrokerageBreakdown { + return calculateBrokerage({ + buyPrice, + sellPrice, + quantity, + tradeType: 'delivery', + }); +} + +/** + * Quick estimate for intraday trade + */ +export function estimateIntradayCharges( + buyPrice: number, + sellPrice: number, + quantity: number +): BrokerageBreakdown { + return calculateBrokerage({ + buyPrice, + sellPrice, + quantity, + tradeType: 'intraday', + }); +} + +/** + * Format currency in Indian format + */ +export function formatINR(value: number, decimals: number = 2): string { + return new Intl.NumberFormat('en-IN', { + style: 'currency', + currency: 'INR', + minimumFractionDigits: decimals, + maximumFractionDigits: decimals, + }).format(value); +} diff --git a/hdfcbank-chart-fixed.png b/hdfcbank-chart-fixed.png new file mode 100644 index 00000000..12cd7733 Binary files /dev/null and b/hdfcbank-chart-fixed.png differ diff --git a/history-after-fixes.png b/history-after-fixes.png new file mode 100644 index 00000000..b09bee62 Binary files /dev/null and b/history-after-fixes.png differ diff --git a/history-final-check.png b/history-final-check.png new file mode 100644 index 00000000..0e62af17 Binary files /dev/null and b/history-final-check.png differ diff --git a/history-page-after-fixes.png b/history-page-after-fixes.png new file mode 100644 index 00000000..2622b20b Binary files /dev/null and b/history-page-after-fixes.png differ diff --git a/history-page-final.png b/history-page-final.png new file mode 100644 index 00000000..7bfe6df9 Binary files /dev/null and b/history-page-final.png differ diff --git a/pipeline-bottom.png b/pipeline-bottom.png new file mode 100644 index 00000000..6486230d Binary files 
/dev/null and b/pipeline-bottom.png differ diff --git a/pipeline-current-state.png b/pipeline-current-state.png new file mode 100644 index 00000000..ccf55967 Binary files /dev/null and b/pipeline-current-state.png differ diff --git a/pipeline-detail-drawer.png b/pipeline-detail-drawer.png new file mode 100644 index 00000000..6538b46e Binary files /dev/null and b/pipeline-detail-drawer.png differ diff --git a/pipeline-flowchart-clicked.png b/pipeline-flowchart-clicked.png new file mode 100644 index 00000000..6c333640 Binary files /dev/null and b/pipeline-flowchart-clicked.png differ diff --git a/pipeline-flowchart-full.png b/pipeline-flowchart-full.png new file mode 100644 index 00000000..b84e5837 Binary files /dev/null and b/pipeline-flowchart-full.png differ diff --git a/pipeline-full.png b/pipeline-full.png new file mode 100644 index 00000000..b84e5837 Binary files /dev/null and b/pipeline-full.png differ diff --git a/pipeline-node-clicked.png b/pipeline-node-clicked.png new file mode 100644 index 00000000..ce7e2908 Binary files /dev/null and b/pipeline-node-clicked.png differ diff --git a/pipeline-tab.png b/pipeline-tab.png new file mode 100644 index 00000000..7d68e052 Binary files /dev/null and b/pipeline-tab.png differ diff --git a/reliance-chart-real.png b/reliance-chart-real.png new file mode 100644 index 00000000..8eed7e61 Binary files /dev/null and b/reliance-chart-real.png differ diff --git a/sbin-chart-fixed.png b/sbin-chart-fixed.png new file mode 100644 index 00000000..a5712b49 Binary files /dev/null and b/sbin-chart-fixed.png differ diff --git a/sbin-chart-real.png b/sbin-chart-real.png new file mode 100644 index 00000000..feaff013 Binary files /dev/null and b/sbin-chart-real.png differ diff --git a/settings-parallel-workers-5.png b/settings-parallel-workers-5.png new file mode 100644 index 00000000..8586951d Binary files /dev/null and b/settings-parallel-workers-5.png differ diff --git a/settings-parallel-workers-scrolled.png 
b/settings-parallel-workers-scrolled.png new file mode 100644 index 00000000..78442019 Binary files /dev/null and b/settings-parallel-workers-scrolled.png differ diff --git a/settings-parallel-workers.png b/settings-parallel-workers.png new file mode 100644 index 00000000..3cd5e9b2 Binary files /dev/null and b/settings-parallel-workers.png differ diff --git a/stock-chart-scrolled.png b/stock-chart-scrolled.png new file mode 100644 index 00000000..c371b885 Binary files /dev/null and b/stock-chart-scrolled.png differ diff --git a/stock-page-final.png b/stock-page-final.png new file mode 100644 index 00000000..e31640fc Binary files /dev/null and b/stock-page-final.png differ diff --git a/stock-price-chart-real.png b/stock-price-chart-real.png new file mode 100644 index 00000000..b2a36f5a Binary files /dev/null and b/stock-price-chart-real.png differ diff --git a/terminal-modal-updated.png b/terminal-modal-updated.png new file mode 100644 index 00000000..1c3f0dfd Binary files /dev/null and b/terminal-modal-updated.png differ diff --git a/test-dashboard-full.png b/test-dashboard-full.png new file mode 100644 index 00000000..8cc0c855 Binary files /dev/null and b/test-dashboard-full.png differ diff --git a/test-history-page.png b/test-history-page.png new file mode 100644 index 00000000..a7efe3eb Binary files /dev/null and b/test-history-page.png differ diff --git a/test-light-theme.png b/test-light-theme.png new file mode 100644 index 00000000..11ff7113 Binary files /dev/null and b/test-light-theme.png differ diff --git a/test-mobile-dashboard.png b/test-mobile-dashboard.png new file mode 100644 index 00000000..672cd4b9 Binary files /dev/null and b/test-mobile-dashboard.png differ diff --git a/test-mobile-stock-detail.png b/test-mobile-stock-detail.png new file mode 100644 index 00000000..66c6ba53 Binary files /dev/null and b/test-mobile-stock-detail.png differ diff --git a/test-pipeline-fixed.png b/test-pipeline-fixed.png new file mode 100644 index 00000000..74e4c87b 
Binary files /dev/null and b/test-pipeline-fixed.png differ diff --git a/test-pipeline-tab.png b/test-pipeline-tab.png new file mode 100644 index 00000000..d6436f01 Binary files /dev/null and b/test-pipeline-tab.png differ diff --git a/test-stock-detail.png b/test-stock-detail.png new file mode 100644 index 00000000..e91dd062 Binary files /dev/null and b/test-stock-detail.png differ diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..8486ae46 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +# Tests for TradingAgents diff --git a/tests/test_data_leakage.py b/tests/test_data_leakage.py new file mode 100644 index 00000000..865f2476 --- /dev/null +++ b/tests/test_data_leakage.py @@ -0,0 +1,161 @@ +""" +Tests to verify no future data leakage in backtesting. + +These tests ensure that when running backtests for a historical date, +the system only uses data that would have been available at that time. +""" +import pytest +from datetime import datetime, timedelta +import pandas as pd + + +class TestStockstatsDataLeakage: + """Test that stockstats_utils.py doesn't leak future data.""" + + def test_stockstats_uses_curr_date_not_today(self): + """Verify stockstats filters data up to curr_date, not today.""" + from tradingagents.dataflows.stockstats_utils import StockstatsUtils + + # Test with a historical date + historical_date = "2024-06-15" + + # This should only use data up to 2024-06-15 + # If the function works correctly, we shouldn't get data beyond this date + try: + result = StockstatsUtils.get_stock_stats("AAPL", "close", historical_date) + # Result should be a value, not an error + assert result is not None + except Exception as e: + # If local data not available, that's expected + if "Yahoo Finance data not fetched yet" not in str(e): + raise + + def test_end_date_equals_curr_date(self): + """Verify that yfinance download uses curr_date as end_date.""" + # This is a structural check - the code should use curr_date_dt as 
end_date_dt + from tradingagents.dataflows.stockstats_utils import StockstatsUtils + import inspect + + source = inspect.getsource(StockstatsUtils.get_stock_stats) + + # Check that the code uses curr_date for end_date calculation + assert "end_date_dt = curr_date_dt" in source or "end=end_date" in source, \ + "stockstats should use curr_date as end_date to prevent future data leakage" + + +class TestAlphaVantageDataLeakage: + """Test that alpha_vantage_stock.py doesn't leak future data.""" + + def test_outputsize_uses_end_date_not_now(self): + """Verify outputsize calculation uses end_date, not datetime.now().""" + from tradingagents.dataflows.alpha_vantage_stock import get_stock + import inspect + + source = inspect.getsource(get_stock) + + # Should NOT contain datetime.now() for outputsize calculation + assert "datetime.now()" not in source, \ + "alpha_vantage_stock should not use datetime.now() - use end_date instead" + + # Should use end_date for the calculation + assert "end_dt" in source or "end_date" in source, \ + "Should use end_date for outputsize calculation" + + +class TestFundamentalsDataLeakage: + """Test that fundamentals data respects publication delays.""" + + def test_publication_delay_is_conservative(self): + """Verify publication delay is at least 60 days (conservative estimate).""" + from tradingagents.dataflows.y_finance import _filter_fundamentals_by_date + import inspect + + source = inspect.getsource(_filter_fundamentals_by_date) + + # Check that publication delay is at least 60 days + assert "publication_delay_days = 60" in source or "publication_delay_days = 90" in source, \ + "Publication delay should be at least 60 days for conservative backtesting" + + def test_fundamentals_filtered_by_publish_date(self): + """Verify fundamentals are filtered by estimated publish date, not report date.""" + from tradingagents.dataflows.y_finance import _filter_fundamentals_by_date + import pandas as pd + + # Create mock fundamentals data with future 
dates + curr_date = "2024-06-15" + # Report from Q1 2024 (dated 2024-03-31) should be visible + # Report from Q2 2024 (dated 2024-06-30) should NOT be visible + + mock_data = pd.DataFrame({ + "2024-03-31": [100, 200], + "2024-06-30": [150, 250], # This shouldn't be visible in June + "2024-09-30": [175, 275], # This definitely shouldn't be visible + }, index=["Revenue", "Profit"]) + + filtered = _filter_fundamentals_by_date(mock_data, curr_date) + + # With 60-day delay, only 2024-03-31 should be visible on 2024-06-15 + # because 2024-03-31 + 60 days = 2024-05-30, which is before 2024-06-15 + assert "2024-03-31" in filtered.columns, \ + "Q1 2024 report (dated 2024-03-31) should be visible on 2024-06-15" + + assert "2024-06-30" not in filtered.columns, \ + "Q2 2024 report (dated 2024-06-30) should NOT be visible on 2024-06-15" + + assert "2024-09-30" not in filtered.columns, \ + "Q3 2024 report should definitely NOT be visible on 2024-06-15" + + +class TestLocalDataLeakage: + """Test that local data files are properly filtered.""" + + def test_local_data_has_date_filtering(self): + """Verify local data reading includes date filtering.""" + from tradingagents.dataflows.local import get_stock_data + import inspect + + source = inspect.getsource(get_stock_data) + + # Should filter data by date range + assert "start_date" in source and "end_date" in source, \ + "Local data should be filtered by date range" + + +class TestBacktestIntegrity: + """Test overall backtest integrity.""" + + def test_no_future_imports_in_dataflows(self): + """Verify dataflows don't use any patterns that could leak future data.""" + import os + import re + + dataflows_dir = "tradingagents/dataflows" + + dangerous_patterns = [ + # Using today's date when historical date should be used + r"pd\.Timestamp\.today\(\)", + r"datetime\.today\(\)", + r"date\.today\(\)", + # Unfiltered data access (should always filter by date) + r"\.history\([^)]*\)(?![^)]*end=)", # history() without end parameter + ] + + 
issues = [] + + for root, dirs, files in os.walk(dataflows_dir): + for file in files: + if file.endswith(".py"): + filepath = os.path.join(root, file) + with open(filepath, "r") as f: + content = f.read() + for pattern in dangerous_patterns: + matches = re.findall(pattern, content) + if matches: + issues.append(f"{filepath}: Found dangerous pattern {pattern}") + + assert len(issues) == 0, \ + f"Found potential data leakage patterns:\n" + "\n".join(issues) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tradingagents/agents/analysts/fundamentals_analyst.py b/tradingagents/agents/analysts/fundamentals_analyst.py index e20139cb..0fe41a40 100644 --- a/tradingagents/agents/analysts/fundamentals_analyst.py +++ b/tradingagents/agents/analysts/fundamentals_analyst.py @@ -4,6 +4,16 @@ import json from tradingagents.agents.utils.agent_utils import get_fundamentals, get_balance_sheet, get_cashflow, get_income_statement, get_insider_sentiment, get_insider_transactions from tradingagents.dataflows.config import get_config +from tradingagents.log_utils import add_log, step_timer, symbol_progress + +ANALYST_RESPONSE_FORMAT = """ + +RESPONSE FORMAT RULES: +- Keep your analysis concise: maximum 3000 characters total +- Use a compact markdown table to organize key findings +- Do NOT repeat raw data values verbatim — summarize trends and insights +- Complete your ENTIRE analysis in a SINGLE response — do not split across multiple messages""" + def create_fundamentals_analyst(llm): def fundamentals_analyst_node(state): @@ -21,7 +31,8 @@ def create_fundamentals_analyst(llm): system_message = ( "You are a researcher tasked with analyzing fundamental information over the past week about a company. Please write a comprehensive report of the company's fundamental information such as financial documents, company profile, basic company financials, and company financial history to gain a full view of the company's fundamental information to inform traders. 
Make sure to include as much detail as possible. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." + " Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read." - + " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements.", + + " Use the available tools: `get_fundamentals` for comprehensive company analysis, `get_balance_sheet`, `get_cashflow`, and `get_income_statement` for specific financial statements." + + ANALYST_RESPONSE_FORMAT, ) prompt = ChatPromptTemplate.from_messages( @@ -48,12 +59,34 @@ def create_fundamentals_analyst(llm): chain = prompt | llm.bind_tools(tools) + step_timer.start_step("fundamentals_analyst") + add_log("agent", "fundamentals", f"📈 Fundamentals Analyst calling LLM for {ticker}...") + t0 = time.time() result = chain.invoke(state["messages"]) + elapsed = time.time() - t0 report = "" if len(result.tool_calls) == 0: report = result.content + add_log("llm", "fundamentals", f"LLM responded in {elapsed:.1f}s ({len(report)} chars)") + add_log("agent", "fundamentals", f"✅ Fundamentals report ready: {report[:300]}...") + step_timer.end_step("fundamentals_analyst", "completed", report[:200]) + symbol_progress.step_done(ticker, "fundamentals_analyst") + step_timer.update_details("fundamentals_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze fundamentals for {ticker} on {current_date}", + "response": report[:3000], + }) + else: + tool_call_info = [{"name": tc["name"], "args": str(tc.get("args", {}))[:200]} for tc in result.tool_calls] + step_timer.set_details("fundamentals_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze fundamentals for {ticker} on {current_date}", + "response": "(Pending - tool calls in 
progress)", + "tool_calls": tool_call_info, + }) + add_log("data", "fundamentals", f"LLM requested {len(result.tool_calls)} tool calls in {elapsed:.1f}s: {', '.join(tc['name'] for tc in result.tool_calls)}") return { "messages": [result], diff --git a/tradingagents/agents/analysts/market_analyst.py b/tradingagents/agents/analysts/market_analyst.py index c955dd76..318293cd 100644 --- a/tradingagents/agents/analysts/market_analyst.py +++ b/tradingagents/agents/analysts/market_analyst.py @@ -4,6 +4,17 @@ import json from tradingagents.agents.utils.agent_utils import get_stock_data, get_indicators from tradingagents.dataflows.config import get_config +from tradingagents.log_utils import add_log, step_timer, symbol_progress + +# Verbosity format appended to analyst prompts +ANALYST_RESPONSE_FORMAT = """ + +RESPONSE FORMAT RULES: +- Keep your analysis concise: maximum 3000 characters total +- Use a compact markdown table to organize key findings +- Do NOT repeat raw data values verbatim — summarize trends and insights +- Complete your ENTIRE analysis in a SINGLE response — do not split across multiple messages""" + def create_market_analyst(llm): @@ -44,6 +55,7 @@ Volume-Based Indicators: - Select indicators that provide diverse and complementary information. Avoid redundancy (e.g., do not select both rsi and stochrsi). Also briefly explain why they are suitable for the given market context. When you tool call, please use the exact name of the indicators provided above as they are defined parameters, otherwise your call will fail. Please make sure to call get_stock_data first to retrieve the CSV that is needed to generate indicators. Then use get_indicators with the specific indicator names. Write a very detailed and nuanced report of the trends you observe. 
Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions.""" + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" + + ANALYST_RESPONSE_FORMAT ) prompt = ChatPromptTemplate.from_messages( @@ -70,13 +82,36 @@ Volume-Based Indicators: chain = prompt | llm.bind_tools(tools) + step_timer.start_step("market_analyst") + add_log("agent", "market_analyst", f"📊 Market Analyst calling LLM for {ticker}...") + t0 = time.time() result = chain.invoke(state["messages"]) + elapsed = time.time() - t0 report = "" if len(result.tool_calls) == 0: report = result.content - + add_log("llm", "market_analyst", f"LLM responded in {elapsed:.1f}s ({len(report)} chars)") + add_log("agent", "market_analyst", f"✅ Market report ready: {report[:300]}...") + step_timer.end_step("market_analyst", "completed", report[:200]) + symbol_progress.step_done(ticker, "market_analyst") + # Use update_details to preserve tool_calls from previous invocation + step_timer.update_details("market_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze {ticker} on {current_date} using technical indicators", + "response": report[:3000], + }) + else: + tool_call_info = [{"name": tc["name"], "args": str(tc.get("args", {}))[:200]} for tc in result.tool_calls] + step_timer.set_details("market_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze {ticker} on {current_date} using technical indicators", + "response": "(Pending - tool calls in progress)", + "tool_calls": tool_call_info, + }) + add_log("data", "market_analyst", f"LLM requested {len(result.tool_calls)} tool calls in {elapsed:.1f}s: {', '.join(tc['name'] for tc in result.tool_calls)}") + return { "messages": [result], "market_report": report, diff --git a/tradingagents/agents/analysts/news_analyst.py b/tradingagents/agents/analysts/news_analyst.py 
index 03b4fae4..ac8a4003 100644 --- a/tradingagents/agents/analysts/news_analyst.py +++ b/tradingagents/agents/analysts/news_analyst.py @@ -4,6 +4,16 @@ import json from tradingagents.agents.utils.agent_utils import get_news, get_global_news from tradingagents.dataflows.config import get_config +from tradingagents.log_utils import add_log, step_timer, symbol_progress + +ANALYST_RESPONSE_FORMAT = """ + +RESPONSE FORMAT RULES: +- Keep your analysis concise: maximum 3000 characters total +- Use a compact markdown table to organize key findings +- Do NOT repeat raw data values verbatim — summarize trends and insights +- Complete your ENTIRE analysis in a SINGLE response — do not split across multiple messages""" + def create_news_analyst(llm): def news_analyst_node(state): @@ -18,6 +28,7 @@ def create_news_analyst(llm): system_message = ( "You are a news researcher tasked with analyzing recent news and trends over the past week. Please write a comprehensive report of the current state of the world that is relevant for trading and macroeconomics. Use the available tools: get_news(query, start_date, end_date) for company-specific or targeted news searches, and get_global_news(curr_date, look_back_days, limit) for broader macroeconomic news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." 
+ """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" + + ANALYST_RESPONSE_FORMAT ) prompt = ChatPromptTemplate.from_messages( @@ -43,12 +54,35 @@ def create_news_analyst(llm): prompt = prompt.partial(ticker=ticker) chain = prompt | llm.bind_tools(tools) + + step_timer.start_step("news_analyst") + add_log("agent", "news_analyst", f"📰 News Analyst calling LLM for {ticker}...") + t0 = time.time() result = chain.invoke(state["messages"]) + elapsed = time.time() - t0 report = "" if len(result.tool_calls) == 0: report = result.content + add_log("llm", "news_analyst", f"LLM responded in {elapsed:.1f}s ({len(report)} chars)") + add_log("agent", "news_analyst", f"✅ News report ready: {report[:300]}...") + step_timer.end_step("news_analyst", "completed", report[:200]) + symbol_progress.step_done(ticker, "news_analyst") + step_timer.update_details("news_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze news and macro trends for {ticker} on {current_date}", + "response": report[:3000], + }) + else: + tool_call_info = [{"name": tc["name"], "args": str(tc.get("args", {}))[:200]} for tc in result.tool_calls] + step_timer.set_details("news_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze news and macro trends for {ticker} on {current_date}", + "response": "(Pending - tool calls in progress)", + "tool_calls": tool_call_info, + }) + add_log("data", "news_analyst", f"LLM requested {len(result.tool_calls)} tool calls in {elapsed:.1f}s: {', '.join(tc['name'] for tc in result.tool_calls)}") return { "messages": [result], diff --git a/tradingagents/agents/analysts/social_media_analyst.py b/tradingagents/agents/analysts/social_media_analyst.py index b25712d7..fadd2e0c 100644 --- a/tradingagents/agents/analysts/social_media_analyst.py +++ b/tradingagents/agents/analysts/social_media_analyst.py @@ -4,6 +4,16 @@ import json from 
tradingagents.agents.utils.agent_utils import get_news from tradingagents.dataflows.config import get_config +from tradingagents.log_utils import add_log, step_timer, symbol_progress + +ANALYST_RESPONSE_FORMAT = """ + +RESPONSE FORMAT RULES: +- Keep your analysis concise: maximum 3000 characters total +- Use a compact markdown table to organize key findings +- Do NOT repeat raw data values verbatim — summarize trends and insights +- Complete your ENTIRE analysis in a SINGLE response — do not split across multiple messages""" + def create_social_media_analyst(llm): def social_media_analyst_node(state): @@ -17,7 +27,8 @@ def create_social_media_analyst(llm): system_message = ( "You are a social media and company specific news researcher/analyst tasked with analyzing social media posts, recent company news, and public sentiment for a specific company over the past week. You will be given a company's name your objective is to write a comprehensive long report detailing your analysis, insights, and implications for traders and investors on this company's current state after looking at social media and what people are saying about that company, analyzing sentiment data of what people feel each day about the company, and looking at recent company news. Use the get_news(query, start_date, end_date) tool to search for company-specific news and social media discussions. Try to look at all sources possible from social media to sentiment to news. Do not simply state the trends are mixed, provide detailed and finegrained analysis and insights that may help traders make decisions." 
- + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""", + + """ Make sure to append a Markdown table at the end of the report to organize key points in the report, organized and easy to read.""" + + ANALYST_RESPONSE_FORMAT, ) prompt = ChatPromptTemplate.from_messages( @@ -44,12 +55,34 @@ def create_social_media_analyst(llm): chain = prompt | llm.bind_tools(tools) + step_timer.start_step("social_media_analyst") + add_log("agent", "social_analyst", f"💬 Social Media Analyst calling LLM for {ticker}...") + t0 = time.time() result = chain.invoke(state["messages"]) + elapsed = time.time() - t0 report = "" if len(result.tool_calls) == 0: report = result.content + add_log("llm", "social_analyst", f"LLM responded in {elapsed:.1f}s ({len(report)} chars)") + add_log("agent", "social_analyst", f"✅ Sentiment report ready: {report[:300]}...") + step_timer.end_step("social_media_analyst", "completed", report[:200]) + symbol_progress.step_done(ticker, "social_media_analyst") + step_timer.update_details("social_media_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze social media sentiment for {ticker} on {current_date}", + "response": report[:3000], + }) + else: + tool_call_info = [{"name": tc["name"], "args": str(tc.get("args", {}))[:200]} for tc in result.tool_calls] + step_timer.set_details("social_media_analyst", { + "system_prompt": system_message[:2000], + "user_prompt": f"Analyze social media sentiment for {ticker} on {current_date}", + "response": "(Pending - tool calls in progress)", + "tool_calls": tool_call_info, + }) + add_log("data", "social_analyst", f"LLM requested {len(result.tool_calls)} tool calls in {elapsed:.1f}s: {', '.join(tc['name'] for tc in result.tool_calls)}") return { "messages": [result], diff --git a/tradingagents/agents/managers/research_manager.py b/tradingagents/agents/managers/research_manager.py index c537fa2f..e1492c04 100644 --- 
a/tradingagents/agents/managers/research_manager.py +++ b/tradingagents/agents/managers/research_manager.py @@ -1,5 +1,8 @@ import time import json +from langchain_core.messages import SystemMessage, HumanMessage + +from tradingagents.log_utils import add_log, step_timer, symbol_progress def create_research_manager(llm, memory): @@ -13,30 +16,63 @@ def create_research_manager(llm, memory): investment_debate_state = state["investment_debate_state"] curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" - past_memories = memory.get_memories(curr_situation, n_matches=2) past_memory_str = "" - for i, rec in enumerate(past_memories, 1): - past_memory_str += rec["recommendation"] + "\n\n" + if memory is not None: + past_memories = memory.get_memories(curr_situation, n_matches=2) + for i, rec in enumerate(past_memories, 1): + past_memory_str += rec["recommendation"] + "\n\n" - prompt = f"""As the portfolio manager and debate facilitator, your role is to critically evaluate this round of debate and make a definitive decision: align with the bear analyst, the bull analyst, or choose Hold only if it is strongly justified based on the arguments presented. + system_prompt = """You are a Research Manager at a financial research firm. You MUST stay in character as a financial professional at all times. -Summarize the key points from both sides concisely, focusing on the most compelling evidence or reasoning. Your recommendation—Buy, Sell, or Hold—must be clear and actionable. Avoid defaulting to Hold simply because both sides have valid points; commit to a stance grounded in the debate's strongest arguments. 
+CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and make a decision based on available information -Additionally, develop a detailed investment plan for the trader. This should include: +Your task: Review the Bull vs Bear arguments and provide a clear investment recommendation. -Your Recommendation: A decisive stance supported by the most convincing arguments. -Rationale: An explanation of why these arguments lead to your conclusion. -Strategic Actions: Concrete steps for implementing the recommendation. -Take into account your past mistakes on similar situations. Use these insights to refine your decision-making and ensure you are learning and improving. Present your analysis conversationally, as if speaking naturally, without special formatting. +Your response must include: +1. RECOMMENDATION: BUY, SELL, or HOLD +2. RATIONALE: Why this recommendation based on the strongest arguments +3. KEY FACTORS: The most compelling evidence from the debate -Here are your past reflections on mistakes: -\"{past_memory_str}\" +RESPONSE FORMAT: +- Maximum 1500 characters. Lead with your recommendation, then key rationale. +- Complete your ENTIRE response in a SINGLE message. -Here is the debate: -Debate History: -{history}""" - response = llm.invoke(prompt) +Respond only with your analysis and recommendation. 
No disclaimers or meta-commentary.""" + + user_prompt = f"""Review this investment debate and provide your recommendation: + +DEBATE HISTORY: +{history} + +PAST LEARNINGS: +{past_memory_str if past_memory_str else "None"} + +Based on the bull and bear arguments above, what is your investment recommendation?""" + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("research_manager") + add_log("agent", "research_mgr", f"⚖️ Research Manager evaluating debate...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "research_mgr", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "research_mgr", f"✅ Investment decision: {response.content[:300]}...") + step_timer.end_step("research_manager", "completed", response.content[:200]) + symbol_progress.step_done(state.get("company_of_interest", ""), "research_manager") + step_timer.set_details("research_manager", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) new_investment_debate_state = { "judge_decision": response.content, diff --git a/tradingagents/agents/managers/risk_manager.py b/tradingagents/agents/managers/risk_manager.py index fba763d6..667530ec 100644 --- a/tradingagents/agents/managers/risk_manager.py +++ b/tradingagents/agents/managers/risk_manager.py @@ -1,5 +1,8 @@ import time import json +from langchain_core.messages import SystemMessage, HumanMessage + +from tradingagents.log_utils import add_log, step_timer, symbol_progress def create_risk_manager(llm, memory): @@ -16,34 +19,69 @@ def create_risk_manager(llm, memory): trader_plan = state["investment_plan"] curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" - past_memories = memory.get_memories(curr_situation, n_matches=2) past_memory_str = "" - for i, rec in 
enumerate(past_memories, 1): - past_memory_str += rec["recommendation"] + "\n\n" + if memory is not None: + past_memories = memory.get_memories(curr_situation, n_matches=2) + for i, rec in enumerate(past_memories, 1): + past_memory_str += rec["recommendation"] + "\n\n" - prompt = f"""As the Risk Management Judge and Debate Facilitator, your goal is to evaluate the debate between three risk analysts—Risky, Neutral, and Safe/Conservative—and determine the best course of action for the trader. Your decision must result in a clear recommendation: Buy, Sell, or Hold. Choose Hold only if strongly justified by specific arguments, not as a fallback when all sides seem valid. Strive for clarity and decisiveness. + system_prompt = """You are a Risk Manager at a financial advisory firm making the final investment decision. You MUST stay in character as a financial professional at all times. -Guidelines for Decision-Making: -1. **Summarize Key Arguments**: Extract the strongest points from each analyst, focusing on relevance to the context. -2. **Provide Rationale**: Support your recommendation with direct quotes and counterarguments from the debate. -3. **Refine the Trader's Plan**: Start with the trader's original plan, **{trader_plan}**, and adjust it based on the analysts' insights. -4. **Learn from Past Mistakes**: Use lessons from **{past_memory_str}** to address prior misjudgments and improve the decision you are making now to make sure you don't make a wrong BUY/SELL/HOLD call that loses money. +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and make a decision based on available information -Deliverables: -- A clear and actionable recommendation: Buy, Sell, or Hold. 
-- Detailed reasoning anchored in the debate and past reflections. +Your task: Evaluate the risk debate between Aggressive, Neutral, and Conservative analysts. ---- +Your response must include: +1. FINAL DECISION: BUY, SELL, or HOLD +2. HOLD_DAYS: Number of trading days to hold the position before exiting (for BUY/HOLD only, write N/A for SELL) +3. RISK ASSESSMENT: Summary of key risks identified +4. RATIONALE: Why this decision balances risk and reward appropriately -**Analysts Debate History:** +RESPONSE FORMAT: +- Maximum 1500 characters. Lead with your decision, then key rationale. +- Complete your ENTIRE response in a SINGLE message. + +Respond only with your analysis and decision. No disclaimers or meta-commentary.""" + + user_prompt = f"""Make the final risk-adjusted investment decision: + +COMPANY: {company_name} + +ORIGINAL TRADER PLAN: +{trader_plan} + +RISK ANALYSTS DEBATE: {history} ---- +PAST LEARNINGS: +{past_memory_str if past_memory_str else "None"} -Focus on actionable insights and continuous improvement. 
Build on past lessons, critically evaluate all perspectives, and ensure each decision advances better outcomes.""" +Based on the risk analysis above, what is your final investment decision?""" - response = llm.invoke(prompt) + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("risk_manager") + add_log("agent", "risk_manager", f"🛡️ Risk Manager making final decision for {company_name}...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "risk_manager", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "risk_manager", f"✅ Final decision: {response.content[:300]}...") + step_timer.end_step("risk_manager", "completed", response.content[:200]) + symbol_progress.step_done(company_name, "risk_manager") + step_timer.set_details("risk_manager", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) new_risk_debate_state = { "judge_decision": response.content, diff --git a/tradingagents/agents/researchers/bear_researcher.py b/tradingagents/agents/researchers/bear_researcher.py index 6634490a..f4cb73bd 100644 --- a/tradingagents/agents/researchers/bear_researcher.py +++ b/tradingagents/agents/researchers/bear_researcher.py @@ -1,7 +1,9 @@ -from langchain_core.messages import AIMessage +from langchain_core.messages import AIMessage, SystemMessage, HumanMessage import time import json +from tradingagents.log_utils import add_log, step_timer, symbol_progress + def create_bear_researcher(llm, memory): def bear_node(state) -> dict: @@ -16,35 +18,74 @@ def create_bear_researcher(llm, memory): fundamentals_report = state["fundamentals_report"] curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" - past_memories = memory.get_memories(curr_situation, n_matches=2) past_memory_str = "" - for i, rec 
in enumerate(past_memories, 1): - past_memory_str += rec["recommendation"] + "\n\n" + if memory is not None: + past_memories = memory.get_memories(curr_situation, n_matches=2) + for i, rec in enumerate(past_memories, 1): + past_memory_str += rec["recommendation"] + "\n\n" - prompt = f"""You are a Bear Analyst making the case against investing in the stock. Your goal is to present a well-reasoned argument emphasizing risks, challenges, and negative indicators. Leverage the provided research and data to highlight potential downsides and counter bullish arguments effectively. + system_prompt = """You are a Bear Analyst at a financial research firm. You MUST stay in character as a financial analyst at all times. -Key points to focus on: +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and focus your analysis on the data that IS available -- Risks and Challenges: Highlight factors like market saturation, financial instability, or macroeconomic threats that could hinder the stock's performance. -- Competitive Weaknesses: Emphasize vulnerabilities such as weaker market positioning, declining innovation, or threats from competitors. -- Negative Indicators: Use evidence from financial data, market trends, or recent adverse news to support your position. -- Bull Counterpoints: Critically analyze the bull argument with specific data and sound reasoning, exposing weaknesses or over-optimistic assumptions. -- Engagement: Present your argument in a conversational style, directly engaging with the bull analyst's points and debating effectively rather than simply listing facts. +Your role: Present a case AGAINST investing in this stock by highlighting risks, challenges, and negative indicators. 
+Focus on: downside risks, competitive weaknesses, negative market signals, valuation concerns, macro headwinds. -Resources available: +RESPONSE FORMAT: +- Maximum 2000 characters. Focus on the 3-5 strongest bearish points. +- Complete your ENTIRE argument in a SINGLE response. -Market research report: {market_research_report} -Social media sentiment report: {sentiment_report} -Latest world affairs news: {news_report} -Company fundamentals report: {fundamentals_report} -Conversation history of the debate: {history} -Last bull argument: {current_response} -Reflections from similar situations and lessons learned: {past_memory_str} -Use this information to deliver a compelling bear argument, refute the bull's claims, and engage in a dynamic debate that demonstrates the risks and weaknesses of investing in the stock. You must also address reflections and learn from lessons and mistakes you made in the past. -""" +Respond only with your bearish financial analysis. No disclaimers or meta-commentary.""" - response = llm.invoke(prompt) + user_prompt = f"""Analyze this stock from a bearish perspective: + +MARKET DATA: +{market_research_report} + +SENTIMENT: +{sentiment_report} + +NEWS: +{news_report} + +FUNDAMENTALS: +{fundamentals_report} + +DEBATE HISTORY: +{history} + +BULL'S LAST ARGUMENT: +{current_response} + +PAST LEARNINGS: +{past_memory_str if past_memory_str else "None"} + +Provide your bearish case highlighting risks and concerns.""" + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("bear_researcher") + add_log("agent", "bear_researcher", f"🐻 Bear Analyst calling LLM...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "bear_researcher", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "bear_researcher", f"✅ Bear argument ready: {response.content[:300]}...") + step_timer.end_step("bear_researcher", 
"completed", response.content[:200]) + symbol_progress.step_done(state["company_of_interest"], "bear_researcher") + step_timer.set_details("bear_researcher", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) argument = f"Bear Analyst: {response.content}" diff --git a/tradingagents/agents/researchers/bull_researcher.py b/tradingagents/agents/researchers/bull_researcher.py index b03ef755..c4b02c4c 100644 --- a/tradingagents/agents/researchers/bull_researcher.py +++ b/tradingagents/agents/researchers/bull_researcher.py @@ -1,7 +1,9 @@ -from langchain_core.messages import AIMessage +from langchain_core.messages import AIMessage, SystemMessage, HumanMessage import time import json +from tradingagents.log_utils import add_log, step_timer, symbol_progress + def create_bull_researcher(llm, memory): def bull_node(state) -> dict: @@ -16,33 +18,74 @@ def create_bull_researcher(llm, memory): fundamentals_report = state["fundamentals_report"] curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" - past_memories = memory.get_memories(curr_situation, n_matches=2) past_memory_str = "" - for i, rec in enumerate(past_memories, 1): - past_memory_str += rec["recommendation"] + "\n\n" + if memory is not None: + past_memories = memory.get_memories(curr_situation, n_matches=2) + for i, rec in enumerate(past_memories, 1): + past_memory_str += rec["recommendation"] + "\n\n" - prompt = f"""You are a Bull Analyst advocating for investing in the stock. Your task is to build a strong, evidence-based case emphasizing growth potential, competitive advantages, and positive market indicators. Leverage the provided research and data to address concerns and counter bearish arguments effectively. + system_prompt = """You are a Bull Analyst at a financial research firm. You MUST stay in character as a financial analyst at all times. 
-Key points to focus on: -- Growth Potential: Highlight the company's market opportunities, revenue projections, and scalability. -- Competitive Advantages: Emphasize factors like unique products, strong branding, or dominant market positioning. -- Positive Indicators: Use financial health, industry trends, and recent positive news as evidence. -- Bear Counterpoints: Critically analyze the bear argument with specific data and sound reasoning, addressing concerns thoroughly and showing why the bull perspective holds stronger merit. -- Engagement: Present your argument in a conversational style, engaging directly with the bear analyst's points and debating effectively rather than just listing data. +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and focus your analysis on the data that IS available -Resources available: -Market research report: {market_research_report} -Social media sentiment report: {sentiment_report} -Latest world affairs news: {news_report} -Company fundamentals report: {fundamentals_report} -Conversation history of the debate: {history} -Last bear argument: {current_response} -Reflections from similar situations and lessons learned: {past_memory_str} -Use this information to deliver a compelling bull argument, refute the bear's concerns, and engage in a dynamic debate that demonstrates the strengths of the bull position. You must also address reflections and learn from lessons and mistakes you made in the past. -""" +Your role: Advocate for investing in this stock with evidence-based bullish arguments. +Focus on: growth potential, competitive advantages, positive market indicators, upside catalysts. 
- response = llm.invoke(prompt) +RESPONSE FORMAT: +- Maximum 2000 characters. Focus on the 3-5 strongest bullish points. +- Complete your ENTIRE argument in a SINGLE response. + +Respond only with your bullish financial analysis. No disclaimers or meta-commentary.""" + + user_prompt = f"""Analyze this stock from a bullish perspective: + +MARKET DATA: +{market_research_report} + +SENTIMENT: +{sentiment_report} + +NEWS: +{news_report} + +FUNDAMENTALS: +{fundamentals_report} + +DEBATE HISTORY: +{history} + +BEAR'S LAST ARGUMENT: +{current_response} + +PAST LEARNINGS: +{past_memory_str if past_memory_str else "None"} + +Provide your bullish case for this investment.""" + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("bull_researcher") + add_log("agent", "bull_researcher", f"🐂 Bull Analyst calling LLM...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "bull_researcher", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "bull_researcher", f"✅ Bull argument ready: {response.content[:300]}...") + step_timer.end_step("bull_researcher", "completed", response.content[:200]) + symbol_progress.step_done(state["company_of_interest"], "bull_researcher") + step_timer.set_details("bull_researcher", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) argument = f"Bull Analyst: {response.content}" diff --git a/tradingagents/agents/risk_mgmt/aggresive_debator.py b/tradingagents/agents/risk_mgmt/aggresive_debator.py index 7e2b4937..45e32642 100644 --- a/tradingagents/agents/risk_mgmt/aggresive_debator.py +++ b/tradingagents/agents/risk_mgmt/aggresive_debator.py @@ -1,5 +1,8 @@ import time import json +from langchain_core.messages import SystemMessage, HumanMessage + +from tradingagents.log_utils import add_log, step_timer, symbol_progress def 
create_risky_debator(llm): @@ -18,21 +21,71 @@ def create_risky_debator(llm): trader_decision = state["trader_investment_plan"] - prompt = f"""As the Risky Risk Analyst, your role is to actively champion high-reward, high-risk opportunities, emphasizing bold strategies and competitive advantages. When evaluating the trader's decision or plan, focus intently on the potential upside, growth potential, and innovative benefits—even when these come with elevated risk. Use the provided market data and sentiment analysis to strengthen your arguments and challenge the opposing views. Specifically, respond directly to each point made by the conservative and neutral analysts, countering with data-driven rebuttals and persuasive reasoning. Highlight where their caution might miss critical opportunities or where their assumptions may be overly conservative. Here is the trader's decision: + system_prompt = """You are an Aggressive Risk Analyst at a financial advisory firm. You MUST stay in character as a financial analyst at all times. +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and focus your analysis on the data that IS available + +Your role: Advocate for growth-oriented, higher-risk investment strategies that maximize potential returns. +Focus on: growth opportunities, upside potential, momentum signals, and why bolder strategies are justified. +Counter conservative arguments with data-driven rebuttals. + +RESPONSE FORMAT: +- Maximum 2000 characters. Focus on the 3-5 strongest growth-oriented points. +- Complete your ENTIRE argument in a SINGLE response. + +Respond only with your aggressive financial analysis. 
No disclaimers or meta-commentary.""" + + user_prompt = f"""Provide the aggressive/growth-oriented perspective on this investment: + +TRADER'S DECISION: {trader_decision} -Your task is to create a compelling case for the trader's decision by questioning and critiquing the conservative and neutral stances to demonstrate why your high-reward perspective offers the best path forward. Incorporate insights from the following sources into your arguments: +MARKET DATA: +{market_research_report} -Market Research Report: {market_research_report} -Social Media Sentiment Report: {sentiment_report} -Latest World Affairs Report: {news_report} -Company Fundamentals Report: {fundamentals_report} -Here is the current conversation history: {history} Here are the last arguments from the conservative analyst: {current_safe_response} Here are the last arguments from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. +SENTIMENT: +{sentiment_report} -Engage actively by addressing any specific concerns raised, refuting the weaknesses in their logic, and asserting the benefits of risk-taking to outpace market norms. Maintain a focus on debating and persuading, not just presenting data. Challenge each counterpoint to underscore why a high-risk approach is optimal. 
Output conversationally as if you are speaking without any special formatting.""" +NEWS: +{news_report} - response = llm.invoke(prompt) +FUNDAMENTALS: +{fundamentals_report} + +DEBATE HISTORY: +{history} + +CONSERVATIVE ANALYST'S ARGUMENT: +{current_safe_response if current_safe_response else "None yet"} + +NEUTRAL ANALYST'S ARGUMENT: +{current_neutral_response if current_neutral_response else "None yet"} + +Present your aggressive/growth-oriented case.""" + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("aggressive_analyst") + add_log("agent", "aggressive", f"🔥 Aggressive Analyst calling LLM...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "aggressive", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "aggressive", f"✅ Aggressive argument ready: {response.content[:300]}...") + step_timer.end_step("aggressive_analyst", "completed", response.content[:200]) + symbol_progress.step_done(state["company_of_interest"], "aggressive_analyst") + step_timer.set_details("aggressive_analyst", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) argument = f"Risky Analyst: {response.content}" diff --git a/tradingagents/agents/risk_mgmt/conservative_debator.py b/tradingagents/agents/risk_mgmt/conservative_debator.py index c56e16ad..b0b2cafb 100644 --- a/tradingagents/agents/risk_mgmt/conservative_debator.py +++ b/tradingagents/agents/risk_mgmt/conservative_debator.py @@ -1,7 +1,9 @@ -from langchain_core.messages import AIMessage +from langchain_core.messages import AIMessage, SystemMessage, HumanMessage import time import json +from tradingagents.log_utils import add_log, step_timer, symbol_progress + def create_safe_debator(llm): def safe_node(state) -> dict: @@ -19,21 +21,71 @@ def create_safe_debator(llm): trader_decision = 
state["trader_investment_plan"] - prompt = f"""As the Safe/Conservative Risk Analyst, your primary objective is to protect assets, minimize volatility, and ensure steady, reliable growth. You prioritize stability, security, and risk mitigation, carefully assessing potential losses, economic downturns, and market volatility. When evaluating the trader's decision or plan, critically examine high-risk elements, pointing out where the decision may expose the firm to undue risk and where more cautious alternatives could secure long-term gains. Here is the trader's decision: + system_prompt = """You are a Conservative Risk Analyst at a financial advisory firm. You MUST stay in character as a financial analyst at all times. +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and focus your analysis on the data that IS available + +Your role: Protect capital, minimize volatility, and advocate for steady, reliable growth strategies. +Focus on: downside risks, capital preservation, volatility concerns, drawdown scenarios. +Counter aggressive arguments by highlighting overlooked risks. + +RESPONSE FORMAT: +- Maximum 2000 characters. Focus on the 3-5 most critical risk factors. +- Complete your ENTIRE argument in a SINGLE response. + +Respond only with your conservative financial analysis. No disclaimers or meta-commentary.""" + + user_prompt = f"""Provide the conservative/risk-averse perspective on this investment: + +TRADER'S DECISION: {trader_decision} -Your task is to actively counter the arguments of the Risky and Neutral Analysts, highlighting where their views may overlook potential threats or fail to prioritize sustainability. 
Respond directly to their points, drawing from the following data sources to build a convincing case for a low-risk approach adjustment to the trader's decision: +MARKET DATA: +{market_research_report} -Market Research Report: {market_research_report} -Social Media Sentiment Report: {sentiment_report} -Latest World Affairs Report: {news_report} -Company Fundamentals Report: {fundamentals_report} -Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the neutral analyst: {current_neutral_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. +SENTIMENT: +{sentiment_report} -Engage by questioning their optimism and emphasizing the potential downsides they may have overlooked. Address each of their counterpoints to showcase why a conservative stance is ultimately the safest path for the firm's assets. Focus on debating and critiquing their arguments to demonstrate the strength of a low-risk strategy over their approaches. 
Output conversationally as if you are speaking without any special formatting.""" +NEWS: +{news_report} - response = llm.invoke(prompt) +FUNDAMENTALS: +{fundamentals_report} + +DEBATE HISTORY: +{history} + +AGGRESSIVE ANALYST'S ARGUMENT: +{current_risky_response if current_risky_response else "None yet"} + +NEUTRAL ANALYST'S ARGUMENT: +{current_neutral_response if current_neutral_response else "None yet"} + +Present your conservative/risk-averse case.""" + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("conservative_analyst") + add_log("agent", "conservative", f"🛡️ Conservative Analyst calling LLM...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "conservative", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "conservative", f"✅ Conservative argument ready: {response.content[:300]}...") + step_timer.end_step("conservative_analyst", "completed", response.content[:200]) + symbol_progress.step_done(state["company_of_interest"], "conservative_analyst") + step_timer.set_details("conservative_analyst", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) argument = f"Safe Analyst: {response.content}" diff --git a/tradingagents/agents/risk_mgmt/neutral_debator.py b/tradingagents/agents/risk_mgmt/neutral_debator.py index a6d2ef5c..5324990a 100644 --- a/tradingagents/agents/risk_mgmt/neutral_debator.py +++ b/tradingagents/agents/risk_mgmt/neutral_debator.py @@ -1,5 +1,8 @@ import time import json +from langchain_core.messages import SystemMessage, HumanMessage + +from tradingagents.log_utils import add_log, step_timer, symbol_progress def create_neutral_debator(llm): @@ -18,21 +21,70 @@ def create_neutral_debator(llm): trader_decision = state["trader_investment_plan"] - prompt = f"""As the Neutral Risk Analyst, your role is to 
provide a balanced perspective, weighing both the potential benefits and risks of the trader's decision or plan. You prioritize a well-rounded approach, evaluating the upsides and downsides while factoring in broader market trends, potential economic shifts, and diversification strategies.Here is the trader's decision: + system_prompt = """You are a Neutral Risk Analyst at a financial advisory firm. You MUST stay in character as a financial analyst at all times. +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and focus your analysis on the data that IS available + +Your role: Provide a balanced perspective, weighing both potential benefits and risks objectively. +Focus on: balanced risk/reward assessment, moderate strategies, where both aggressive and conservative views may be flawed. + +RESPONSE FORMAT: +- Maximum 2000 characters. Focus on 3-5 key balanced observations. +- Complete your ENTIRE argument in a SINGLE response. + +Respond only with your balanced financial analysis. No disclaimers or meta-commentary.""" + + user_prompt = f"""Provide the balanced/neutral perspective on this investment: + +TRADER'S DECISION: {trader_decision} -Your task is to challenge both the Risky and Safe Analysts, pointing out where each perspective may be overly optimistic or overly cautious. 
Use insights from the following data sources to support a moderate, sustainable strategy to adjust the trader's decision: +MARKET DATA: +{market_research_report} -Market Research Report: {market_research_report} -Social Media Sentiment Report: {sentiment_report} -Latest World Affairs Report: {news_report} -Company Fundamentals Report: {fundamentals_report} -Here is the current conversation history: {history} Here is the last response from the risky analyst: {current_risky_response} Here is the last response from the safe analyst: {current_safe_response}. If there are no responses from the other viewpoints, do not halluncinate and just present your point. +SENTIMENT: +{sentiment_report} -Engage actively by analyzing both sides critically, addressing weaknesses in the risky and conservative arguments to advocate for a more balanced approach. Challenge each of their points to illustrate why a moderate risk strategy might offer the best of both worlds, providing growth potential while safeguarding against extreme volatility. Focus on debating rather than simply presenting data, aiming to show that a balanced view can lead to the most reliable outcomes. 
Output conversationally as if you are speaking without any special formatting.""" +NEWS: +{news_report} - response = llm.invoke(prompt) +FUNDAMENTALS: +{fundamentals_report} + +DEBATE HISTORY: +{history} + +AGGRESSIVE ANALYST'S ARGUMENT: +{current_risky_response if current_risky_response else "None yet"} + +CONSERVATIVE ANALYST'S ARGUMENT: +{current_safe_response if current_safe_response else "None yet"} + +Present your balanced/neutral case.""" + + messages = [ + SystemMessage(content=system_prompt), + HumanMessage(content=user_prompt) + ] + step_timer.start_step("neutral_analyst") + add_log("agent", "neutral", f"⚖️ Neutral Analyst calling LLM...") + t0 = time.time() + response = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "neutral", f"LLM responded in {elapsed:.1f}s ({len(response.content)} chars)") + add_log("agent", "neutral", f"✅ Neutral argument ready: {response.content[:300]}...") + step_timer.end_step("neutral_analyst", "completed", response.content[:200]) + symbol_progress.step_done(state["company_of_interest"], "neutral_analyst") + step_timer.set_details("neutral_analyst", { + "system_prompt": system_prompt, + "user_prompt": user_prompt[:3000], + "response": response.content[:3000], + "tool_calls": [], + }) argument = f"Neutral Analyst: {response.content}" diff --git a/tradingagents/agents/trader/trader.py b/tradingagents/agents/trader/trader.py index 1b05c35d..08daec1d 100644 --- a/tradingagents/agents/trader/trader.py +++ b/tradingagents/agents/trader/trader.py @@ -1,6 +1,9 @@ import functools import time import json +from langchain_core.messages import SystemMessage, HumanMessage + +from tradingagents.log_utils import add_log, step_timer, symbol_progress def create_trader(llm, memory): @@ -13,29 +16,65 @@ def create_trader(llm, memory): fundamentals_report = state["fundamentals_report"] curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}" - past_memories = 
memory.get_memories(curr_situation, n_matches=2) past_memory_str = "" + past_memories = None + if memory is not None: + past_memories = memory.get_memories(curr_situation, n_matches=2) if past_memories: for i, rec in enumerate(past_memories, 1): past_memory_str += rec["recommendation"] + "\n\n" else: past_memory_str = "No past memories found." - context = { - "role": "user", - "content": f"Based on a comprehensive analysis by a team of analysts, here is an investment plan tailored for {company_name}. This plan incorporates insights from current technical market trends, macroeconomic indicators, and social media sentiment. Use this plan as a foundation for evaluating your next trading decision.\n\nProposed Investment Plan: {investment_plan}\n\nLeverage these insights to make an informed and strategic decision.", - } + system_content = """You are a Trader at a financial trading desk. You MUST stay in character as a financial trader at all times. + +CRITICAL RULES: +- NEVER mention that you are an AI, Claude, a language model, or an assistant +- NEVER offer to help with code, software, or implementation tasks +- NEVER say "I don't have access to" or "I can't see the data" — analyze whatever data is provided below +- If data sections are empty, state that data is unavailable and make a recommendation based on available information + +Your task: Review the investment plan and market data, then provide a clear trading recommendation. + +Respond with your trading analysis and conclude with: FINAL TRANSACTION PROPOSAL: **BUY**, **HOLD**, or **SELL** + +RESPONSE FORMAT: +- Maximum 1500 characters. Lead with your recommendation, then key rationale. +- Complete your ENTIRE response in a SINGLE message. + +Provide only your trading analysis. 
No disclaimers or meta-commentary.""" + + user_content = f"""Company: {company_name} + +Investment Plan from Analysts: +{investment_plan} + +Past reflections from similar situations: +{past_memory_str} + +Based on this analysis, what is your trading recommendation for {company_name}?""" messages = [ - { - "role": "system", - "content": f"""You are a trading agent analyzing market data to make investment decisions. Based on your analysis, provide a specific recommendation to buy, sell, or hold. End with a firm decision and always conclude your response with 'FINAL TRANSACTION PROPOSAL: **BUY/HOLD/SELL**' to confirm your recommendation. Do not forget to utilize lessons from past decisions to learn from your mistakes. Here is some reflections from similar situatiosn you traded in and the lessons learned: {past_memory_str}""", - }, - context, + SystemMessage(content=system_content), + HumanMessage(content=user_content), ] + step_timer.start_step("trader") + add_log("agent", "trader", f"💰 Trader calling LLM for {company_name}...") + t0 = time.time() result = llm.invoke(messages) + elapsed = time.time() - t0 + add_log("llm", "trader", f"LLM responded in {elapsed:.1f}s ({len(result.content)} chars)") + add_log("agent", "trader", f"✅ Trader plan ready: {result.content[:300]}...") + step_timer.end_step("trader", "completed", result.content[:200]) + symbol_progress.step_done(company_name, "trader") + step_timer.set_details("trader", { + "system_prompt": system_content, + "user_prompt": user_content[:3000], + "response": result.content[:3000], + "tool_calls": [], + }) return { "messages": [result], diff --git a/tradingagents/agents/utils/agent_utils.py b/tradingagents/agents/utils/agent_utils.py index 6cf294a1..7e28c86e 100644 --- a/tradingagents/agents/utils/agent_utils.py +++ b/tradingagents/agents/utils/agent_utils.py @@ -1,5 +1,29 @@ from langchain_core.messages import HumanMessage, RemoveMessage +# 
============================================================================= +# SIMULATION CONTEXT FOR ALL AGENTS +# ============================================================================= +# This context is prepended to all agent prompts to ensure the LLM understands +# this is an educational simulation and stays in character as the analyst role. +# Without this, LLMs may refuse to engage or break character. +# ============================================================================= + +SIMULATION_CONTEXT = """You are an AI analyst participating in a stock market analysis simulation. Your task is to analyze financial data and provide investment perspectives based on the data provided. This is an educational demonstration of financial analysis techniques. + +Respond ONLY with your analysis. Do not include any meta-commentary about being an AI, ethical concerns, or disclaimers. Simply provide the requested financial analysis based on the data given.""" + +def get_simulation_prompt(role_prompt: str) -> list: + """ + Create properly structured messages for the LLM. + Returns a list of message dicts for proper system/user separation. 
+ """ + from langchain_core.messages import SystemMessage, HumanMessage + return [ + SystemMessage(content=SIMULATION_CONTEXT), + HumanMessage(content=role_prompt) + ] + + # Import tools from separate utility files from tradingagents.agents.utils.core_stock_tools import ( get_stock_data diff --git a/tradingagents/claude_max_llm.py b/tradingagents/claude_max_llm.py index 2eed4d87..332dad69 100644 --- a/tradingagents/claude_max_llm.py +++ b/tradingagents/claude_max_llm.py @@ -109,14 +109,19 @@ class ClaudeMaxLLM(BaseChatModel): return "\n\nAvailable tools:\n" + "\n".join(tool_descriptions) + "\n\nTo use a tool, respond with: TOOL_CALL: tool_name(arguments)\n" - def _format_messages_for_prompt(self, messages: List[BaseMessage]) -> str: - """Convert LangChain messages to a single prompt string.""" - formatted_parts = [] + def _format_messages_for_prompt(self, messages: List[BaseMessage]) -> tuple: + """Convert LangChain messages to a system prompt and user prompt. + + Returns: + Tuple of (system_prompt, user_prompt) + """ + system_parts = [] + user_parts = [] # Add tools description if tools are bound tools_prompt = self._format_tools_for_prompt() if tools_prompt: - formatted_parts.append(tools_prompt) + system_parts.append(tools_prompt) for msg in messages: # Handle dict messages (LangChain sometimes passes these) @@ -124,42 +129,57 @@ class ClaudeMaxLLM(BaseChatModel): role = msg.get("role", msg.get("type", "human")) content = msg.get("content", str(msg)) if role in ("system",): - formatted_parts.append(f"\n{content}\n\n") + system_parts.append(content) elif role in ("human", "user"): - formatted_parts.append(f"Human: {content}\n") + user_parts.append(content) elif role in ("ai", "assistant"): - formatted_parts.append(f"Assistant: {content}\n") + user_parts.append(f"Previous response: {content}") else: - formatted_parts.append(f"{content}\n") + user_parts.append(content) elif isinstance(msg, SystemMessage): - formatted_parts.append(f"\n{msg.content}\n\n") + 
system_parts.append(msg.content) elif isinstance(msg, HumanMessage): - formatted_parts.append(f"Human: {msg.content}\n") + user_parts.append(msg.content) elif isinstance(msg, AIMessage): - formatted_parts.append(f"Assistant: {msg.content}\n") + user_parts.append(f"Previous response: {msg.content}") elif isinstance(msg, ToolMessage): - formatted_parts.append(f"Tool Result ({msg.name}): {msg.content}\n") + user_parts.append(f"Tool Result ({msg.name}): {msg.content}") elif hasattr(msg, 'content'): - formatted_parts.append(f"{msg.content}\n") + user_parts.append(msg.content) else: - formatted_parts.append(f"{str(msg)}\n") + user_parts.append(str(msg)) - return "\n".join(formatted_parts) + system_prompt = "\n\n".join(system_parts) if system_parts else "" + user_prompt = "\n\n".join(user_parts) if user_parts else "" - def _call_claude_cli(self, prompt: str) -> str: - """Call the Claude CLI and return the response.""" + return system_prompt, user_prompt + + def _call_claude_cli(self, system_prompt: str, user_prompt: str) -> str: + """Call the Claude CLI and return the response. 
+ + Args: + system_prompt: The system prompt to use (overrides Claude Code defaults) + user_prompt: The user prompt/query + """ # Create environment without ANTHROPIC_API_KEY to force subscription auth env = os.environ.copy() env.pop("ANTHROPIC_API_KEY", None) - # Build the command - use --prompt flag with stdin for long prompts + # Build the command with --system-prompt to override Claude Code's default behavior cmd = [ self.claude_cli_path, "--print", # Non-interactive mode "--model", self.model, - "-p", prompt # Use -p flag for prompt + "--tools", "", # Disable all Claude Code tools - we're just doing analysis ] + # Add system prompt if provided (this overrides Claude Code's default system prompt) + if system_prompt: + cmd.extend(["--system-prompt", system_prompt]) + + # Add the user prompt + cmd.extend(["-p", user_prompt]) + try: result = subprocess.run( cmd, @@ -192,8 +212,8 @@ class ClaudeMaxLLM(BaseChatModel): **kwargs: Any, ) -> ChatResult: """Generate a response from the Claude CLI.""" - prompt = self._format_messages_for_prompt(messages) - response_text = self._call_claude_cli(prompt) + system_prompt, user_prompt = self._format_messages_for_prompt(messages) + response_text = self._call_claude_cli(system_prompt, user_prompt) # Apply stop sequences if provided if stop: diff --git a/tradingagents/dataflows/alpha_vantage_stock.py b/tradingagents/dataflows/alpha_vantage_stock.py index ffd3570b..a332c4bd 100644 --- a/tradingagents/dataflows/alpha_vantage_stock.py +++ b/tradingagents/dataflows/alpha_vantage_stock.py @@ -20,12 +20,13 @@ def get_stock( """ # Parse dates to determine the range start_dt = datetime.strptime(start_date, "%Y-%m-%d") - today = datetime.now() + end_dt = datetime.strptime(end_date, "%Y-%m-%d") - # Choose outputsize based on whether the requested range is within the latest 100 days - # Compact returns latest 100 data points, so check if start_date is recent enough - days_from_today_to_start = (today - start_dt).days - outputsize = 
"compact" if days_from_today_to_start < 100 else "full" + # FIXED: Use end_date instead of today to determine outputsize + # This ensures consistent behavior regardless of when backtest is run + # Compact returns latest 100 data points from Alpha Vantage's perspective + days_in_range = (end_dt - start_dt).days + outputsize = "compact" if days_in_range < 100 else "full" params = { "symbol": symbol, diff --git a/tradingagents/dataflows/interface.py b/tradingagents/dataflows/interface.py index 67d0604a..3bf5a98f 100644 --- a/tradingagents/dataflows/interface.py +++ b/tradingagents/dataflows/interface.py @@ -1,4 +1,44 @@ from typing import Annotated +from functools import lru_cache +import hashlib +import json +import os +import time as _time + +from tradingagents.log_utils import add_log, raw_data_store + +# Debug mode - set to False to reduce logging verbosity +DEBUG_MODE = os.environ.get("TRADING_AGENTS_DEBUG", "false").lower() == "true" + +def _debug_print(*args, **kwargs): + """Print only when debug mode is enabled.""" + if DEBUG_MODE: + print(*args, **kwargs) + +# Simple in-memory cache for data requests within same analysis session +_request_cache = {} + + +def _make_cache_key(method: str, args: tuple, kwargs: dict) -> str: + """Generate a cache key from method name and arguments.""" + # Convert args to a hashable form + key_parts = [method] + for arg in args: + if isinstance(arg, (str, int, float, bool, type(None))): + key_parts.append(str(arg)) + else: + key_parts.append(str(type(arg).__name__)) + for k, v in sorted(kwargs.items()): + if isinstance(v, (str, int, float, bool, type(None))): + key_parts.append(f"{k}={v}") + return "|".join(key_parts) + + +def clear_request_cache(): + """Clear the request cache. 
Call this between different stock analyses.""" + global _request_cache + _request_cache.clear() + # Import from vendor-specific modules from .local import get_YFin_data, get_finnhub_news, get_finnhub_company_insider_sentiment, get_finnhub_company_insider_transactions, get_simfin_balance_sheet, get_simfin_cashflow, get_simfin_income_statements, get_reddit_global_news, get_reddit_company_news @@ -171,6 +211,16 @@ def get_vendor(category: str, method: str = None, symbol: str = None) -> str: def route_to_vendor(method: str, *args, **kwargs): """Route method calls to appropriate vendor implementation with fallback support.""" + # Check cache first to avoid redundant API calls + cache_key = _make_cache_key(method, args, kwargs) + if cache_key in _request_cache: + add_log("data", "data_fetch", f"📦 {method}({', '.join(str(a) for a in args)}) — cached") + return _request_cache[cache_key] + + fetch_start = _time.time() + args_str = ', '.join(str(a) for a in args) + add_log("data", "data_fetch", f"🔄 Fetching {method}({args_str})...") + category = get_category_for_method(method) # Extract symbol from args/kwargs for market-aware routing @@ -206,7 +256,7 @@ def route_to_vendor(method: str, *args, **kwargs): # Debug: Print fallback ordering primary_str = " → ".join(primary_vendors) fallback_str = " → ".join(fallback_vendors) - print(f"DEBUG: {method} - Primary: [{primary_str}] | Full fallback order: [{fallback_str}]") + _debug_print(f"DEBUG: {method} - Primary: [{primary_str}] | Full fallback order: [{fallback_str}]") # Track results and execution state results = [] @@ -217,7 +267,7 @@ def route_to_vendor(method: str, *args, **kwargs): for vendor in fallback_vendors: if vendor not in VENDOR_METHODS[method]: if vendor in primary_vendors: - print(f"INFO: Vendor '{vendor}' not supported for method '{method}', falling back to next vendor") + _debug_print(f"INFO: Vendor '{vendor}' not supported for method '{method}', falling back to next vendor") continue vendor_impl = 
VENDOR_METHODS[method][vendor] @@ -230,12 +280,12 @@ def route_to_vendor(method: str, *args, **kwargs): # Debug: Print current attempt vendor_type = "PRIMARY" if is_primary_vendor else "FALLBACK" - print(f"DEBUG: Attempting {vendor_type} vendor '{vendor}' for {method} (attempt #{vendor_attempt_count})") + _debug_print(f"DEBUG: Attempting {vendor_type} vendor '{vendor}' for {method} (attempt #{vendor_attempt_count})") # Handle list of methods for a vendor if isinstance(vendor_impl, list): vendor_methods = [(impl, vendor) for impl in vendor_impl] - print(f"DEBUG: Vendor '{vendor}' has multiple implementations: {len(vendor_methods)} functions") + _debug_print(f"DEBUG: Vendor '{vendor}' has multiple implementations: {len(vendor_methods)} functions") else: vendor_methods = [(vendor_impl, vendor)] @@ -243,20 +293,20 @@ def route_to_vendor(method: str, *args, **kwargs): vendor_results = [] for impl_func, vendor_name in vendor_methods: try: - print(f"DEBUG: Calling {impl_func.__name__} from vendor '{vendor_name}'...") + _debug_print(f"DEBUG: Calling {impl_func.__name__} from vendor '{vendor_name}'...") result = impl_func(*args, **kwargs) vendor_results.append(result) - print(f"SUCCESS: {impl_func.__name__} from vendor '{vendor_name}' completed successfully") + _debug_print(f"SUCCESS: {impl_func.__name__} from vendor '{vendor_name}' completed successfully") except AlphaVantageRateLimitError as e: if vendor == "alpha_vantage": - print(f"RATE_LIMIT: Alpha Vantage rate limit exceeded, falling back to next available vendor") - print(f"DEBUG: Rate limit details: {e}") + _debug_print(f"RATE_LIMIT: Alpha Vantage rate limit exceeded, falling back to next available vendor") + _debug_print(f"DEBUG: Rate limit details: {e}") # Continue to next vendor for fallback continue except Exception as e: # Log error but continue with other implementations - print(f"FAILED: {impl_func.__name__} from vendor '{vendor_name}' failed: {e}") + _debug_print(f"FAILED: {impl_func.__name__} from vendor 
'{vendor_name}' failed: {e}") continue # Add this vendor's results @@ -264,26 +314,46 @@ def route_to_vendor(method: str, *args, **kwargs): results.extend(vendor_results) successful_vendor = vendor result_summary = f"Got {len(vendor_results)} result(s)" - print(f"SUCCESS: Vendor '{vendor}' succeeded - {result_summary}") + _debug_print(f"SUCCESS: Vendor '{vendor}' succeeded - {result_summary}") # Stopping logic: Stop after first successful vendor for single-vendor configs # Multiple vendor configs (comma-separated) may want to collect from multiple sources if len(primary_vendors) == 1: - print(f"DEBUG: Stopping after successful vendor '{vendor}' (single-vendor config)") + _debug_print(f"DEBUG: Stopping after successful vendor '{vendor}' (single-vendor config)") break else: - print(f"FAILED: Vendor '{vendor}' produced no results") + _debug_print(f"FAILED: Vendor '{vendor}' produced no results") # Final result summary if not results: - print(f"FAILURE: All {vendor_attempt_count} vendor attempts failed for method '{method}'") + _debug_print(f"FAILURE: All {vendor_attempt_count} vendor attempts failed for method '{method}'") raise RuntimeError(f"All vendor implementations failed for method '{method}'") else: - print(f"FINAL: Method '{method}' completed with {len(results)} result(s) from {vendor_attempt_count} vendor attempt(s)") + _debug_print(f"FINAL: Method '{method}' completed with {len(results)} result(s) from {vendor_attempt_count} vendor attempt(s)") # Return single result if only one, otherwise concatenate as string if len(results) == 1: - return results[0] + final_result = results[0] else: # Convert all results to strings and concatenate - return '\n'.join(str(result) for result in results) \ No newline at end of file + final_result = '\n'.join(str(result) for result in results) + + # Cache the result for subsequent calls + _request_cache[cache_key] = final_result + + fetch_elapsed = _time.time() - fetch_start + result_len = len(str(final_result)) + 
result_preview = str(final_result)[:200].replace('\n', ' ') + add_log("data", "data_fetch", f"✅ {method}({args_str}) → {result_len} chars in {fetch_elapsed:.1f}s via {successful_vendor} | {result_preview}...") + + # Capture raw data for frontend debugging + raw_data_store.log_fetch( + method=method, + symbol=str(symbol) if symbol else "", + vendor=str(successful_vendor) if successful_vendor else "unknown", + raw_data=str(final_result), + args_str=args_str, + duration_s=fetch_elapsed, + ) + + return final_result \ No newline at end of file diff --git a/tradingagents/dataflows/stockstats_utils.py b/tradingagents/dataflows/stockstats_utils.py index e81684e0..a40f1aca 100644 --- a/tradingagents/dataflows/stockstats_utils.py +++ b/tradingagents/dataflows/stockstats_utils.py @@ -24,6 +24,12 @@ class StockstatsUtils: df = None data = None + # CRITICAL: Use curr_date as end date to prevent future data leakage + # This ensures backtest doesn't see data beyond the analysis date + curr_date_dt = pd.to_datetime(curr_date) + end_date_dt = curr_date_dt + start_date_dt = curr_date_dt - pd.DateOffset(years=2) # Reduced from 15 years for faster fetching + if not online: try: data = pd.read_csv( @@ -32,22 +38,20 @@ class StockstatsUtils: f"{symbol}-YFin-data-2015-01-01-2025-03-25.csv", ) ) + # CRITICAL: Filter local data to prevent future data leakage + data["Date"] = pd.to_datetime(data["Date"]) + data = data[data["Date"] <= curr_date_dt] df = wrap(data) except FileNotFoundError: raise Exception("Stockstats fail: Yahoo Finance data not fetched yet!") else: - # Get today's date as YYYY-mm-dd to add to cache - today_date = pd.Timestamp.today() - curr_date = pd.to_datetime(curr_date) - - end_date = today_date - start_date = today_date - pd.DateOffset(years=15) - start_date = start_date.strftime("%Y-%m-%d") - end_date = end_date.strftime("%Y-%m-%d") + start_date = start_date_dt.strftime("%Y-%m-%d") + end_date = end_date_dt.strftime("%Y-%m-%d") # Get config and ensure cache directory 
exists os.makedirs(config["data_cache_dir"], exist_ok=True) + # Cache file now uses curr_date (end_date), not today's date data_file = os.path.join( config["data_cache_dir"], f"{symbol}-YFin-data-{start_date}-{end_date}.csv", @@ -70,7 +74,7 @@ class StockstatsUtils: df = wrap(data) df["Date"] = df["Date"].dt.strftime("%Y-%m-%d") - curr_date = curr_date.strftime("%Y-%m-%d") + curr_date = curr_date_dt.strftime("%Y-%m-%d") df[indicator] # trigger stockstats to calculate the indicator matching_rows = df[df["Date"].str.startswith(curr_date)] diff --git a/tradingagents/dataflows/y_finance.py b/tradingagents/dataflows/y_finance.py index 6e5b1d5e..cbe6490a 100644 --- a/tradingagents/dataflows/y_finance.py +++ b/tradingagents/dataflows/y_finance.py @@ -225,7 +225,7 @@ def _get_stock_stats_bulk( curr_date_dt = pd.to_datetime(curr_date) end_date = curr_date_dt # Use backtest date, NOT today's date - start_date = curr_date_dt - pd.DateOffset(years=15) + start_date = curr_date_dt - pd.DateOffset(years=2) # Reduced from 15 years for faster fetching start_date_str = start_date.strftime("%Y-%m-%d") end_date_str = end_date.strftime("%Y-%m-%d") @@ -314,16 +314,17 @@ def _filter_fundamentals_by_date(data, curr_date): try: curr_date_dt = pd.to_datetime(curr_date) - # Financial reports are typically published ~45 days after the report date - # So for a report dated 2024-03-31, it would be available around mid-May - publication_delay_days = 45 + # Financial reports have SEC deadlines (10-K: 60-90 days, 10-Q: 40-45 days) + # However, many companies file later and data vendors need processing time + # Using 60 days as conservative estimate to prevent future data leakage + publication_delay_days = 60 # Filter columns (report dates) to only include those available at curr_date valid_columns = [] for col in data.columns: try: report_date = pd.to_datetime(col) - # Report would have been published ~45 days after report_date + # Report would have been published ~60 days after report_date 
estimated_publish_date = report_date + pd.Timedelta(days=publication_delay_days) if estimated_publish_date <= curr_date_dt: valid_columns.append(col) diff --git a/tradingagents/graph/reflection.py b/tradingagents/graph/reflection.py index 33303231..e4e7c1f0 100644 --- a/tradingagents/graph/reflection.py +++ b/tradingagents/graph/reflection.py @@ -72,6 +72,8 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur def reflect_bull_researcher(self, current_state, returns_losses, bull_memory): """Reflect on bull researcher's analysis and update memory.""" + if bull_memory is None: + return situation = self._extract_current_situation(current_state) bull_debate_history = current_state["investment_debate_state"]["bull_history"] @@ -82,6 +84,8 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur def reflect_bear_researcher(self, current_state, returns_losses, bear_memory): """Reflect on bear researcher's analysis and update memory.""" + if bear_memory is None: + return situation = self._extract_current_situation(current_state) bear_debate_history = current_state["investment_debate_state"]["bear_history"] @@ -92,6 +96,8 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur def reflect_trader(self, current_state, returns_losses, trader_memory): """Reflect on trader's decision and update memory.""" + if trader_memory is None: + return situation = self._extract_current_situation(current_state) trader_decision = current_state["trader_investment_plan"] @@ -102,6 +108,8 @@ Adhere strictly to these instructions, and ensure your output is detailed, accur def reflect_invest_judge(self, current_state, returns_losses, invest_judge_memory): """Reflect on investment judge's decision and update memory.""" + if invest_judge_memory is None: + return situation = self._extract_current_situation(current_state) judge_decision = current_state["investment_debate_state"]["judge_decision"] @@ -112,6 +120,8 @@ 
Adhere strictly to these instructions, and ensure your output is detailed, accur def reflect_risk_manager(self, current_state, returns_losses, risk_manager_memory): """Reflect on risk manager's decision and update memory.""" + if risk_manager_memory is None: + return situation = self._extract_current_situation(current_state) judge_decision = current_state["risk_debate_state"]["judge_decision"] diff --git a/tradingagents/graph/signal_processing.py b/tradingagents/graph/signal_processing.py index 903e8529..593c1b38 100644 --- a/tradingagents/graph/signal_processing.py +++ b/tradingagents/graph/signal_processing.py @@ -10,22 +10,64 @@ class SignalProcessor: """Initialize with an LLM for processing.""" self.quick_thinking_llm = quick_thinking_llm - def process_signal(self, full_signal: str) -> str: + def process_signal(self, full_signal: str) -> dict: """ - Process a full trading signal to extract the core decision. + Process a full trading signal to extract the core decision and hold_days. Args: full_signal: Complete trading signal text Returns: - Extracted decision (BUY, SELL, or HOLD) + Dict with 'decision' (BUY/SELL/HOLD) and 'hold_days' (int or None) """ messages = [ ( "system", - "You are an efficient assistant designed to analyze paragraphs or financial reports provided by a group of analysts. Your task is to extract the investment decision: SELL, BUY, or HOLD. Provide only the extracted decision (SELL, BUY, or HOLD) as your output, without adding any additional text or information.", + "You are an efficient assistant designed to analyze paragraphs or financial reports " + "provided by a group of analysts. Extract two pieces of information:\n" + "1. The investment decision: SELL, BUY, or HOLD\n" + "2. 
The recommended holding period in trading days (only for BUY or HOLD decisions)\n\n" + "Respond in exactly this format (nothing else):\n" + "DECISION: \n" + "HOLD_DAYS: \n\n" + "For SELL decisions, always use HOLD_DAYS: N/A\n" + "For BUY or HOLD decisions, extract the number of days if mentioned, otherwise default to 5.", ), ("human", full_signal), ] - return self.quick_thinking_llm.invoke(messages).content + response = self.quick_thinking_llm.invoke(messages).content + return self._parse_signal_response(response) + + def _parse_signal_response(self, response: str) -> dict: + """Parse the structured LLM response into decision and hold_days.""" + decision = "HOLD" + hold_days = None + + for line in response.strip().split("\n"): + line = line.strip() + upper = line.upper() + if upper.startswith("DECISION:"): + raw = upper.split(":", 1)[1].strip() + # Strip markdown bold markers + raw = raw.replace("*", "").strip() + if raw in ("BUY", "SELL", "HOLD"): + decision = raw + elif upper.startswith("HOLD_DAYS:"): + raw = upper.split(":", 1)[1].strip() + raw = raw.replace("*", "").strip() + if raw not in ("N/A", "NA", "NONE", "-", ""): + try: + hold_days = int(raw) + # Clamp to reasonable range + hold_days = max(1, min(90, hold_days)) + except (ValueError, TypeError): + hold_days = None + + # Enforce: SELL never has hold_days; BUY/HOLD default to 5 if missing + if decision == "SELL": + hold_days = None + elif hold_days is None: + hold_days = 5 # Default hold period + + return {"decision": decision, "hold_days": hold_days} diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index 2cdab4f6..9892fa9f 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -12,6 +12,9 @@ FRONTEND_BACKEND_PATH = Path(__file__).parent.parent.parent / "frontend" / "back if str(FRONTEND_BACKEND_PATH) not in sys.path: sys.path.insert(0, str(FRONTEND_BACKEND_PATH)) +# Import shared logging +from tradingagents.log_utils import 
add_log, step_timer + from langchain_openai import ChatOpenAI from langchain_anthropic import ChatAnthropic from langchain_google_genai import ChatGoogleGenerativeAI @@ -92,12 +95,22 @@ class TradingAgentsGraph: else: raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}") - # Initialize memories - self.bull_memory = FinancialSituationMemory("bull_memory", self.config) - self.bear_memory = FinancialSituationMemory("bear_memory", self.config) - self.trader_memory = FinancialSituationMemory("trader_memory", self.config) - self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config) - self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config) + # Initialize memories with graceful error handling for ChromaDB race conditions + try: + self.bull_memory = FinancialSituationMemory("bull_memory", self.config) + self.bear_memory = FinancialSituationMemory("bear_memory", self.config) + self.trader_memory = FinancialSituationMemory("trader_memory", self.config) + self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config) + self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config) + except Exception as e: + # ChromaDB can fail with race conditions in parallel execution + # Fall back to None memories - agents will work without memory-based recommendations + add_log("warning", "system", f"ChromaDB memory initialization failed: {str(e)[:100]}. 
Continuing without memory.") + self.bull_memory = None + self.bear_memory = None + self.trader_memory = None + self.invest_judge_memory = None + self.risk_manager_memory = None # Create tool nodes self.tool_nodes = self._create_tool_nodes() @@ -167,10 +180,15 @@ class TradingAgentsGraph: def propagate(self, company_name, trade_date): """Run the trading agents graph for a company on a specific date.""" + import time as _time self.ticker = company_name + pipeline_start = _time.time() + step_timer.clear() # Reset per-agent timings for this run + add_log("info", "system", f"🚀 Starting analysis for {company_name} on {trade_date}") # Initialize state + add_log("info", "system", "Initializing agent state...") init_agent_state = self.propagator.create_initial_state( company_name, trade_date ) @@ -178,6 +196,7 @@ class TradingAgentsGraph: if self.debug: # Debug mode with tracing + add_log("info", "system", "Running in debug mode with tracing...") trace = [] for chunk in self.graph.stream(init_agent_state, **args): if len(chunk["messages"]) == 0: @@ -188,20 +207,59 @@ class TradingAgentsGraph: final_state = trace[-1] else: - # Standard mode without tracing + # Standard mode - log key stages + add_log("info", "system", f"Running full analysis pipeline for {company_name} (deep={self.config.get('deep_think_llm','?')}, quick={self.config.get('quick_think_llm','?')})...") + add_log("info", "system", "Pipeline: Data Fetch → Analysts → Bull/Bear Debate → Trader → Risk Debate → Final Decision") + + # Run the full graph (all agents log their own timing) + graph_start = _time.time() final_state = self.graph.invoke(init_agent_state, **args) + graph_elapsed = _time.time() - graph_start + add_log("info", "system", f"Graph execution completed in {graph_elapsed:.1f}s") + + # Log completions with report sizes + if final_state.get("market_report"): + add_log("success", "market_analyst", f"✅ Market report: {len(final_state['market_report'])} chars") + if final_state.get("news_report"): + 
add_log("success", "news_analyst", f"✅ News report: {len(final_state['news_report'])} chars") + if final_state.get("sentiment_report"): + add_log("success", "social_analyst", f"✅ Sentiment report: {len(final_state['sentiment_report'])} chars") + if final_state.get("fundamentals_report"): + add_log("success", "fundamentals", f"✅ Fundamentals report: {len(final_state['fundamentals_report'])} chars") + + # Log debate results + invest_debate = final_state.get("investment_debate_state", {}) + if invest_debate.get("judge_decision"): + add_log("success", "debate", f"✅ Investment debate decided: {invest_debate['judge_decision'][:100]}...") + if final_state.get("trader_investment_plan"): + add_log("success", "trader", f"✅ Trader plan: {final_state['trader_investment_plan'][:100]}...") + risk_debate = final_state.get("risk_debate_state", {}) + if risk_debate.get("judge_decision"): + add_log("success", "risk_manager", f"✅ Risk decision: {risk_debate['judge_decision'][:100]}...") # Store current state for reflection self.curr_state = final_state + add_log("info", "system", "Storing analysis results...") # Log state self._log_state(trade_date, final_state) # Save to frontend database for UI display + add_log("info", "system", "Saving pipeline data to database...") + t0 = _time.time() self._save_to_frontend_db(trade_date, final_state) + add_log("info", "system", f"Database save completed in {_time.time() - t0:.1f}s") - # Return decision and processed signal - return final_state, self.process_signal(final_state["final_trade_decision"]) + # Extract and log the final decision + hold_days + signal_result = self.process_signal(final_state["final_trade_decision"]) + final_decision = signal_result["decision"] + hold_days = signal_result.get("hold_days") + total_elapsed = _time.time() - pipeline_start + hold_info = f", hold {hold_days}d" if hold_days else "" + add_log("success", "system", f"✅ Analysis complete for {company_name}: {final_decision}{hold_info} (total: 
{total_elapsed:.0f}s)") + + # Return decision, hold_days, and processed signal + return final_state, final_decision, hold_days def _log_state(self, trade_date, final_state): """Log the final state to a JSON file.""" @@ -312,20 +370,93 @@ class TradingAgentsGraph: full_history=risk_debate.get("history", "") ) - # 4. Save pipeline steps (tracking the stages) - pipeline_steps = [ - {"step_number": 1, "step_name": "initialize", "status": "completed", "started_at": now, "completed_at": now, "output_summary": "Pipeline initialized"}, - {"step_number": 2, "step_name": "market_analysis", "status": "completed", "started_at": now, "completed_at": now, "output_summary": "Market analysis complete" if final_state.get("market_report") else "Skipped"}, - {"step_number": 3, "step_name": "news_analysis", "status": "completed", "started_at": now, "completed_at": now, "output_summary": "News analysis complete" if final_state.get("news_report") else "Skipped"}, - {"step_number": 4, "step_name": "social_analysis", "status": "completed", "started_at": now, "completed_at": now, "output_summary": "Social analysis complete" if final_state.get("sentiment_report") else "Skipped"}, - {"step_number": 5, "step_name": "fundamental_analysis", "status": "completed", "started_at": now, "completed_at": now, "output_summary": "Fundamental analysis complete" if final_state.get("fundamentals_report") else "Skipped"}, - {"step_number": 6, "step_name": "investment_debate", "status": "completed", "started_at": now, "completed_at": now, "output_summary": invest_debate.get("judge_decision", "")[:100] if invest_debate else "Skipped"}, - {"step_number": 7, "step_name": "trader_decision", "status": "completed", "started_at": now, "completed_at": now, "output_summary": final_state.get("trader_investment_plan", "")[:100] if final_state.get("trader_investment_plan") else "Skipped"}, - {"step_number": 8, "step_name": "risk_debate", "status": "completed", "started_at": now, "completed_at": now, "output_summary": 
risk_debate.get("judge_decision", "")[:100] if risk_debate else "Skipped"}, - {"step_number": 9, "step_name": "final_decision", "status": "completed", "started_at": now, "completed_at": now, "output_summary": final_state.get("final_trade_decision", "")[:100] if final_state.get("final_trade_decision") else "Pending"}, + # 4. Save pipeline steps — 12 granular steps with per-agent timing + step_timings = step_timer.get_steps() + + # Define the 12 steps with their IDs, names, and fallback output summaries + step_defs = [ + (1, "market_analyst", "market_analysis", final_state.get("market_report", "")[:200]), + (2, "social_media_analyst", "social_analysis", final_state.get("sentiment_report", "")[:200]), + (3, "news_analyst", "news_analysis", final_state.get("news_report", "")[:200]), + (4, "fundamentals_analyst", "fundamental_analysis", final_state.get("fundamentals_report", "")[:200]), + (5, "bull_researcher", "bull_research", invest_debate.get("bull_history", "")[:200]), + (6, "bear_researcher", "bear_research", invest_debate.get("bear_history", "")[:200]), + (7, "research_manager", "research_manager", invest_debate.get("judge_decision", "")[:200]), + (8, "trader", "trader_decision", final_state.get("trader_investment_plan", "")[:200]), + (9, "aggressive_analyst", "aggressive_analysis", risk_debate.get("risky_history", "")[:200]), + (10, "conservative_analyst", "conservative_analysis", risk_debate.get("safe_history", "")[:200]), + (11, "neutral_analyst", "neutral_analysis", risk_debate.get("neutral_history", "")[:200]), + (12, "risk_manager", "risk_manager", risk_debate.get("judge_decision", "")[:200]), ] + + pipeline_steps = [] + for step_num, timer_id, step_name, fallback_summary in step_defs: + timing = step_timings.get(timer_id, {}) + # Force status to "completed" — we only reach this save code + # after the graph has fully executed, so all steps must be done. + # The step_timer may show "running" if end_step() wasn't called + # due to an exception in the agent. 
+ pipeline_steps.append({ + "step_number": step_num, + "step_name": step_name, + "status": "completed", + "started_at": timing.get("started_at", now), + "completed_at": timing.get("completed_at", now), + "duration_ms": timing.get("duration_ms"), + "output_summary": timing.get("output_summary") or fallback_summary or "Completed", + "step_details": timing.get("details"), + }) + # 5. Save raw data source logs from the data fetch store + from tradingagents.log_utils import raw_data_store + + METHOD_TO_SOURCE = { + "get_stock_data": ("market_data", "Yahoo Finance"), + "get_YFin_data": ("market_data", "Yahoo Finance"), + "get_stock_stats": ("indicators", "Technical Indicators"), + "get_stock_stats_indicators": ("indicators", "Technical Indicators"), + "get_fundamentals": ("fundamentals", "Financial Data"), + "get_balance_sheet": ("fundamentals", "Balance Sheet"), + "get_income_statement": ("fundamentals", "Income Statement"), + "get_cashflow": ("fundamentals", "Cash Flow"), + "get_news": ("news", "Google News"), + "get_global_news": ("news", "Global News"), + "get_reddit_posts": ("social_media", "Reddit"), + } + + raw_entries = raw_data_store.get_entries() + + # Enrich pipeline step tool_calls with result_preview from raw data + if raw_entries: + for step in pipeline_steps: + details = step.get("step_details") + if details and details.get("tool_calls"): + for tc in details["tool_calls"]: + for entry in raw_entries: + if entry["method"] == tc.get("name"): + tc["result_preview"] = str(entry["raw_data"])[:500] + break + save_pipeline_steps_bulk(trade_date, symbol, pipeline_steps) + if raw_entries: + data_source_logs = [] + for entry in raw_entries: + source_type, source_name = METHOD_TO_SOURCE.get( + entry["method"], ("other", entry["method"]) + ) + data_source_logs.append({ + "source_type": source_type, + "source_name": source_name, + "method": entry["method"], + "args": entry.get("args", ""), + "data_fetched": entry["raw_data"], + "fetch_timestamp": entry["timestamp"], + 
"""Shared logging module for analysis pipeline.

This module provides a thread-safe logging system that can broadcast
logs to SSE subscribers. Both the server and agent files import from here,
avoiding circular import issues.
"""
import threading
import time
from collections import deque
from datetime import datetime

# Ring buffer of recent log entries; bounded so long runs can't grow unboundedly.
analysis_logs = deque(maxlen=1000)
log_lock = threading.Lock()
log_subscribers = []  # List of subscriber queues (one per connected SSE client)


def add_log(log_type: str, source: str, message: str):
    """Add a log entry to the buffer and notify SSE subscribers.

    Args:
        log_type: One of 'info', 'success', 'error', 'warning', 'llm',
            'agent', 'data'.
        source: The source component (e.g. 'system', 'bull_researcher',
            'trader').
        message: The log message.
    """
    log_entry = {
        "timestamp": datetime.now().isoformat(),
        "type": log_type,
        "source": source,
        "message": message,
    }
    with log_lock:
        analysis_logs.append(log_entry)
        # Fan out to all subscribers; a full or broken queue must never
        # block or crash the logging path, so failures are best-effort.
        for queue in log_subscribers:
            try:
                queue.put_nowait(log_entry)
            except Exception:
                pass  # Queue full, skip


class StepTimer:
    """Tracks start/end times for individual pipeline steps.

    Thread-safe timing tracker. Each agent calls start_step() / end_step()
    and _save_to_frontend_db reads the results via get_steps().
    """

    def __init__(self):
        self._lock = threading.Lock()
        # step_id -> {started_at, completed_at, duration_ms, status,
        #             output_summary, details?, _start_time (internal)}
        self._steps = {}

    def start_step(self, step_id: str):
        """Record the start of a pipeline step.

        Only sets the start time on the first call while the step is
        running (multi-turn agents may call this repeatedly). Any
        `details` already recorded via set_details()/update_details()
        are preserved — previously they were clobbered by the fresh
        entry dict, losing tool-call data.
        """
        with self._lock:
            existing = self._steps.get(step_id)
            if existing and existing.get("_start_time"):
                # Already started and still running; don't reset the clock.
                return
            entry = {
                "started_at": datetime.now().isoformat(),
                "completed_at": None,
                "duration_ms": None,
                "status": "running",
                "output_summary": None,
                "_start_time": time.time(),
            }
            # Fix: carry over details recorded before the step started.
            if existing and "details" in existing:
                entry["details"] = existing["details"]
            self._steps[step_id] = entry

    def end_step(self, step_id: str, status: str = "completed", output_summary: str = None):
        """Record the completion of a pipeline step.

        Args:
            step_id: The step identifier.
            status: Final status string (defaults to "completed").
            output_summary: Optional short summary of the step's output;
                only stored when truthy.
        """
        with self._lock:
            if step_id not in self._steps:
                # Step was never started — create a zero-duration entry
                # so downstream consumers still see a record for it.
                self._steps[step_id] = {
                    "started_at": datetime.now().isoformat(),
                    "completed_at": datetime.now().isoformat(),
                    "duration_ms": 0,
                    "status": status,
                    "output_summary": output_summary,
                }
                return

            entry = self._steps[step_id]
            entry["completed_at"] = datetime.now().isoformat()
            entry["status"] = status
            if output_summary:
                entry["output_summary"] = output_summary
            # Consume the internal monotonic-ish start marker so a later
            # start_step() call knows the step is no longer running.
            start_time = entry.pop("_start_time", None)
            if start_time:
                entry["duration_ms"] = int((time.time() - start_time) * 1000)

    def set_details(self, step_id: str, details: dict):
        """Store structured details for a step (prompt, response, tool_calls).

        Args:
            step_id: The step identifier.
            details: Dict with keys like system_prompt, user_prompt,
                response, tool_calls. Replaces any existing details.
        """
        with self._lock:
            if step_id not in self._steps:
                self._steps[step_id] = {}
            self._steps[step_id]["details"] = details

    def update_details(self, step_id: str, updates: dict):
        """Merge updates into existing step details (preserves unmodified fields).

        Use this instead of set_details() when you want to update specific
        fields (e.g., set response) without losing previously-stored fields
        (e.g., tool_calls).
        """
        with self._lock:
            if step_id not in self._steps:
                self._steps[step_id] = {}
            existing = self._steps[step_id].get("details", {})
            existing.update(updates)
            self._steps[step_id]["details"] = existing

    def get_steps(self) -> dict:
        """Get all recorded step timings, without internal bookkeeping keys."""
        with self._lock:
            # Shallow-copy each entry, dropping internal fields like _start_time.
            return {
                step_id: {k: v for k, v in entry.items() if not k.startswith("_")}
                for step_id, entry in self._steps.items()
            }

    def clear(self):
        """Reset all step timings."""
        with self._lock:
            self._steps.clear()


# Global step timer instance, shared across all agents in one analysis run
step_timer = StepTimer()


class SymbolProgress:
    """Thread-safe per-symbol step progress tracker for parallel analysis.

    Unlike StepTimer (global, gets cleared per-stock), this tracks progress
    per symbol so the frontend can show "N/12 steps" on each analyzing card.
    """

    TOTAL_STEPS = 12

    def __init__(self):
        self._lock = threading.Lock()
        self._data = {}  # {symbol: {"done": N, "current": "step_name"}}

    def step_done(self, symbol: str, step_id: str):
        """Record that a step completed for a symbol."""
        with self._lock:
            if symbol not in self._data:
                self._data[symbol] = {"done": 0, "current": step_id}
            self._data[symbol]["done"] += 1
            self._data[symbol]["current"] = step_id

    def get(self, symbol: str) -> dict:
        """Get progress for a symbol: {done, total, current}."""
        with self._lock:
            entry = self._data.get(symbol, {"done": 0, "current": None})
            return {
                "done": entry["done"],
                "total": self.TOTAL_STEPS,
                "current": entry["current"],
            }

    def get_all(self) -> dict:
        """Get progress for all symbols, keyed by symbol."""
        with self._lock:
            return {
                sym: {"done": e["done"], "total": self.TOTAL_STEPS, "current": e["current"]}
                for sym, e in self._data.items()
            }

    def clear(self, symbol: str):
        """Clear progress for a symbol (no-op if the symbol is unknown)."""
        with self._lock:
            self._data.pop(symbol, None)


symbol_progress = SymbolProgress()


class RawDataStore:
    """Thread-safe store for raw data fetched during analysis pipeline.

    Captures the actual data returned by vendor APIs (OHLCV, news,
    fundamentals) so it can be saved to the frontend database for
    debugging/inspection.
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._entries = []  # list of {method, symbol, vendor, raw_data, ...}

    def log_fetch(self, method: str, symbol: str, vendor: str, raw_data: str,
                  args_str: str = "", duration_s: float = 0):
        """Log a raw data fetch result.

        Args:
            method: Data-fetch method name (e.g. 'get_news').
            symbol: Ticker symbol the fetch was for.
            vendor: Data vendor name.
            raw_data: The raw payload returned by the vendor.
            args_str: Stringified call arguments, for display.
            duration_s: Fetch wall-clock duration in seconds.
        """
        with self._lock:
            self._entries.append({
                "method": method,
                "symbol": symbol,
                "vendor": vendor,
                "raw_data": raw_data,
                "args": args_str,
                "duration_s": round(duration_s, 2),
                "timestamp": datetime.now().isoformat(),
                "data_size": len(raw_data) if raw_data else 0,
            })

    def get_entries(self) -> list:
        """Get a snapshot copy of all captured raw data entries."""
        with self._lock:
            return list(self._entries)

    def clear(self):
        """Reset all captured data."""
        with self._lock:
            self._entries.clear()


# Global raw data store, shared across all data fetches in one analysis run
raw_data_store = RawDataStore()