diff --git a/.playwright-mcp/01-dashboard.png b/.playwright-mcp/01-dashboard.png
deleted file mode 100644
index 43f80757..00000000
Binary files a/.playwright-mcp/01-dashboard.png and /dev/null differ
diff --git a/.playwright-mcp/02-settings-modal.png b/.playwright-mcp/02-settings-modal.png
deleted file mode 100644
index 3b67507e..00000000
Binary files a/.playwright-mcp/02-settings-modal.png and /dev/null differ
diff --git a/.playwright-mcp/03-stock-detail-overview.png b/.playwright-mcp/03-stock-detail-overview.png
deleted file mode 100644
index 07f7fb7f..00000000
Binary files a/.playwright-mcp/03-stock-detail-overview.png and /dev/null differ
diff --git a/.playwright-mcp/04-analysis-pipeline.png b/.playwright-mcp/04-analysis-pipeline.png
deleted file mode 100644
index ff13ced7..00000000
Binary files a/.playwright-mcp/04-analysis-pipeline.png and /dev/null differ
diff --git a/.playwright-mcp/05-debates-tab.png b/.playwright-mcp/05-debates-tab.png
deleted file mode 100644
index 45c40c5d..00000000
Binary files a/.playwright-mcp/05-debates-tab.png and /dev/null differ
diff --git a/.playwright-mcp/06-investment-debate-expanded.png b/.playwright-mcp/06-investment-debate-expanded.png
deleted file mode 100644
index 02bc602f..00000000
Binary files a/.playwright-mcp/06-investment-debate-expanded.png and /dev/null differ
diff --git a/.playwright-mcp/07-data-sources-tab.png b/.playwright-mcp/07-data-sources-tab.png
deleted file mode 100644
index 2df93b64..00000000
Binary files a/.playwright-mcp/07-data-sources-tab.png and /dev/null differ
diff --git a/.playwright-mcp/08-dashboard-dark-mode.png b/.playwright-mcp/08-dashboard-dark-mode.png
deleted file mode 100644
index 36680cd3..00000000
Binary files a/.playwright-mcp/08-dashboard-dark-mode.png and /dev/null differ
diff --git a/.playwright-mcp/09-how-it-works.png b/.playwright-mcp/09-how-it-works.png
deleted file mode 100644
index 95140dae..00000000
Binary files a/.playwright-mcp/09-how-it-works.png and /dev/null differ
diff --git a/.playwright-mcp/10-history-page.png b/.playwright-mcp/10-history-page.png
deleted file mode 100644
index 0a5d01c4..00000000
Binary files a/.playwright-mcp/10-history-page.png and /dev/null differ
diff --git a/.playwright-mcp/analysis-in-progress.png b/.playwright-mcp/analysis-in-progress.png
deleted file mode 100644
index e9314a9f..00000000
Binary files a/.playwright-mcp/analysis-in-progress.png and /dev/null differ
diff --git a/.playwright-mcp/analysis-running.png b/.playwright-mcp/analysis-running.png
deleted file mode 100644
index 4f6a5a9e..00000000
Binary files a/.playwright-mcp/analysis-running.png and /dev/null differ
diff --git a/.playwright-mcp/analysis-working-network.png b/.playwright-mcp/analysis-working-network.png
deleted file mode 100644
index 2a8d1247..00000000
Binary files a/.playwright-mcp/analysis-working-network.png and /dev/null differ
diff --git a/.playwright-mcp/analysis-working-tcs.png b/.playwright-mcp/analysis-working-tcs.png
deleted file mode 100644
index 7e6c7dfa..00000000
Binary files a/.playwright-mcp/analysis-working-tcs.png and /dev/null differ
diff --git a/.playwright-mcp/chrome-headless-test.png b/.playwright-mcp/chrome-headless-test.png
deleted file mode 100644
index 1bd12bc4..00000000
Binary files a/.playwright-mcp/chrome-headless-test.png and /dev/null differ
diff --git a/.playwright-mcp/current-state.png b/.playwright-mcp/current-state.png
deleted file mode 100644
index 5ed48249..00000000
Binary files a/.playwright-mcp/current-state.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-analyze-all.png b/.playwright-mcp/dashboard-analyze-all.png
deleted file mode 100644
index 93a8a4f8..00000000
Binary files a/.playwright-mcp/dashboard-analyze-all.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-before.png b/.playwright-mcp/dashboard-before.png
deleted file mode 100644
index f45dc554..00000000
Binary files a/.playwright-mcp/dashboard-before.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-buy-filter-active.png b/.playwright-mcp/dashboard-buy-filter-active.png
deleted file mode 100644
index c42e1fdb..00000000
Binary files a/.playwright-mcp/dashboard-buy-filter-active.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-compact.png b/.playwright-mcp/dashboard-compact.png
deleted file mode 100644
index 5a6e3049..00000000
Binary files a/.playwright-mcp/dashboard-compact.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-hold-filter-final.png b/.playwright-mcp/dashboard-hold-filter-final.png
deleted file mode 100644
index b9dbea78..00000000
Binary files a/.playwright-mcp/dashboard-hold-filter-final.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-scrolled.png b/.playwright-mcp/dashboard-scrolled.png
deleted file mode 100644
index 367aae9b..00000000
Binary files a/.playwright-mcp/dashboard-scrolled.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-search-visible.png b/.playwright-mcp/dashboard-search-visible.png
deleted file mode 100644
index 1efea958..00000000
Binary files a/.playwright-mcp/dashboard-search-visible.png and /dev/null differ
diff --git a/.playwright-mcp/dashboard-with-search.png b/.playwright-mcp/dashboard-with-search.png
deleted file mode 100644
index 25855731..00000000
Binary files a/.playwright-mcp/dashboard-with-search.png and /dev/null differ
diff --git a/.playwright-mcp/history-compact.png b/.playwright-mcp/history-compact.png
deleted file mode 100644
index 9a2afb97..00000000
Binary files a/.playwright-mcp/history-compact.png and /dev/null differ
diff --git a/.playwright-mcp/history-new-calc.png b/.playwright-mcp/history-new-calc.png
deleted file mode 100644
index 95f70d68..00000000
Binary files a/.playwright-mcp/history-new-calc.png and /dev/null differ
diff --git a/.playwright-mcp/history-page-current.png b/.playwright-mcp/history-page-current.png
deleted file mode 100644
index 39990d98..00000000
Binary files a/.playwright-mcp/history-page-current.png and /dev/null differ
diff --git a/.playwright-mcp/history-page-updated.png b/.playwright-mcp/history-page-updated.png
deleted file mode 100644
index 608254ca..00000000
Binary files a/.playwright-mcp/history-page-updated.png and /dev/null differ
diff --git a/.playwright-mcp/history-sparklines-2.png b/.playwright-mcp/history-sparklines-2.png
deleted file mode 100644
index 7d9c5af3..00000000
Binary files a/.playwright-mcp/history-sparklines-2.png and /dev/null differ
diff --git a/.playwright-mcp/history-sparklines-more.png b/.playwright-mcp/history-sparklines-more.png
deleted file mode 100644
index 71225674..00000000
Binary files a/.playwright-mcp/history-sparklines-more.png and /dev/null differ
diff --git a/.playwright-mcp/history-sparklines-normalized.png b/.playwright-mcp/history-sparklines-normalized.png
deleted file mode 100644
index 0d5ce53b..00000000
Binary files a/.playwright-mcp/history-sparklines-normalized.png and /dev/null differ
diff --git a/.playwright-mcp/history-sparklines-scrolled.png b/.playwright-mcp/history-sparklines-scrolled.png
deleted file mode 100644
index 33129dd4..00000000
Binary files a/.playwright-mcp/history-sparklines-scrolled.png and /dev/null differ
diff --git a/.playwright-mcp/history-sparklines.png b/.playwright-mcp/history-sparklines.png
deleted file mode 100644
index 7d9c5af3..00000000
Binary files a/.playwright-mcp/history-sparklines.png and /dev/null differ
diff --git a/.playwright-mcp/history-stock-list.png b/.playwright-mcp/history-stock-list.png
deleted file mode 100644
index a3c88907..00000000
Binary files a/.playwright-mcp/history-stock-list.png and /dev/null differ
diff --git a/.playwright-mcp/mobile-view.png b/.playwright-mcp/mobile-view.png
deleted file mode 100644
index eae6f0b6..00000000
Binary files a/.playwright-mcp/mobile-view.png and /dev/null differ
diff --git a/.playwright-mcp/overall-modal-fixed.png b/.playwright-mcp/overall-modal-fixed.png
deleted file mode 100644
index 3a06d7f0..00000000
Binary files a/.playwright-mcp/overall-modal-fixed.png and /dev/null differ
diff --git a/.playwright-mcp/overall-modal-table.png b/.playwright-mcp/overall-modal-table.png
deleted file mode 100644
index cdcbcf36..00000000
Binary files a/.playwright-mcp/overall-modal-table.png and /dev/null differ
diff --git a/.playwright-mcp/overall-modal.png b/.playwright-mcp/overall-modal.png
deleted file mode 100644
index 3a06d7f0..00000000
Binary files a/.playwright-mcp/overall-modal.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-39-38-424Z.png b/.playwright-mcp/page-2026-01-31T10-39-38-424Z.png
deleted file mode 100644
index cd6f8045..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-39-38-424Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-41-56-205Z.png b/.playwright-mcp/page-2026-01-31T10-41-56-205Z.png
deleted file mode 100644
index e0df99eb..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-41-56-205Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-42-07-250Z.png b/.playwright-mcp/page-2026-01-31T10-42-07-250Z.png
deleted file mode 100644
index 96ab48c6..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-42-07-250Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-42-21-398Z.png b/.playwright-mcp/page-2026-01-31T10-42-21-398Z.png
deleted file mode 100644
index 71be46f7..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-42-21-398Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-43-02-673Z.png b/.playwright-mcp/page-2026-01-31T10-43-02-673Z.png
deleted file mode 100644
index 5970ecb6..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-43-02-673Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-43-38-177Z.png b/.playwright-mcp/page-2026-01-31T10-43-38-177Z.png
deleted file mode 100644
index ad8898b0..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-43-38-177Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-44-36-104Z.png b/.playwright-mcp/page-2026-01-31T10-44-36-104Z.png
deleted file mode 100644
index fc31ccb0..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-44-36-104Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-44-56-012Z.png b/.playwright-mcp/page-2026-01-31T10-44-56-012Z.png
deleted file mode 100644
index 8bb9d2ae..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-44-56-012Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-45-15-489Z.png b/.playwright-mcp/page-2026-01-31T10-45-15-489Z.png
deleted file mode 100644
index 916b50bf..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-45-15-489Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-45-42-676Z.png b/.playwright-mcp/page-2026-01-31T10-45-42-676Z.png
deleted file mode 100644
index 4d1b71e4..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-45-42-676Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-45-58-686Z.png b/.playwright-mcp/page-2026-01-31T10-45-58-686Z.png
deleted file mode 100644
index c50f2026..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-45-58-686Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-46-33-307Z.png b/.playwright-mcp/page-2026-01-31T10-46-33-307Z.png
deleted file mode 100644
index ca2e0763..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-46-33-307Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-47-05-151Z.png b/.playwright-mcp/page-2026-01-31T10-47-05-151Z.png
deleted file mode 100644
index 40448610..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-47-05-151Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-47-42-171Z.png b/.playwright-mcp/page-2026-01-31T10-47-42-171Z.png
deleted file mode 100644
index 742cb46f..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-47-42-171Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-49-11-278Z.png b/.playwright-mcp/page-2026-01-31T10-49-11-278Z.png
deleted file mode 100644
index 425d3016..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-49-11-278Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-49-27-614Z.png b/.playwright-mcp/page-2026-01-31T10-49-27-614Z.png
deleted file mode 100644
index 4c04faae..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-49-27-614Z.png and /dev/null differ
diff --git a/.playwright-mcp/page-2026-01-31T10-49-46-409Z.png b/.playwright-mcp/page-2026-01-31T10-49-46-409Z.png
deleted file mode 100644
index 68827663..00000000
Binary files a/.playwright-mcp/page-2026-01-31T10-49-46-409Z.png and /dev/null differ
diff --git a/.playwright-mcp/return-modal-formula.png b/.playwright-mcp/return-modal-formula.png
deleted file mode 100644
index bd655874..00000000
Binary files a/.playwright-mcp/return-modal-formula.png and /dev/null differ
diff --git a/.playwright-mcp/return-modal-scrolled.png b/.playwright-mcp/return-modal-scrolled.png
deleted file mode 100644
index 468e1059..00000000
Binary files a/.playwright-mcp/return-modal-scrolled.png and /dev/null differ
diff --git a/.playwright-mcp/return-modal.png b/.playwright-mcp/return-modal.png
deleted file mode 100644
index 57878255..00000000
Binary files a/.playwright-mcp/return-modal.png and /dev/null differ
diff --git a/.playwright-mcp/settings-api-key.png b/.playwright-mcp/settings-api-key.png
deleted file mode 100644
index 3b67507e..00000000
Binary files a/.playwright-mcp/settings-api-key.png and /dev/null differ
diff --git a/.playwright-mcp/settings-modal.png b/.playwright-mcp/settings-modal.png
deleted file mode 100644
index dc1b3608..00000000
Binary files a/.playwright-mcp/settings-modal.png and /dev/null differ
diff --git a/.playwright-mcp/stock-detail-compact.png b/.playwright-mcp/stock-detail-compact.png
deleted file mode 100644
index 1d5665a6..00000000
Binary files a/.playwright-mcp/stock-detail-compact.png and /dev/null differ
diff --git a/.playwright-mcp/stocks-page-compact.png b/.playwright-mcp/stocks-page-compact.png
deleted file mode 100644
index 3cbec6f0..00000000
Binary files a/.playwright-mcp/stocks-page-compact.png and /dev/null differ
diff --git a/analysis-cancelled.png b/analysis-cancelled.png
new file mode 100644
index 00000000..1dea274f
Binary files /dev/null and b/analysis-cancelled.png differ
diff --git a/analysis-live-fullpage.png b/analysis-live-fullpage.png
new file mode 100644
index 00000000..296d7dda
Binary files /dev/null and b/analysis-live-fullpage.png differ
diff --git a/analysis-live-progress.png b/analysis-live-progress.png
new file mode 100644
index 00000000..1ad5969e
Binary files /dev/null and b/analysis-live-progress.png differ
diff --git a/analysis-running-with-cancel.png b/analysis-running-with-cancel.png
new file mode 100644
index 00000000..3638ce15
Binary files /dev/null and b/analysis-running-with-cancel.png differ
diff --git a/analyze-all-skipped.png b/analyze-all-skipped.png
new file mode 100644
index 00000000..b8f95d46
Binary files /dev/null and b/analyze-all-skipped.png differ
diff --git a/current-state.png b/current-state.png
new file mode 100644
index 00000000..737cd6e9
Binary files /dev/null and b/current-state.png differ
diff --git a/data-source-raw-content.png b/data-source-raw-content.png
new file mode 100644
index 00000000..88e5274f
Binary files /dev/null and b/data-source-raw-content.png differ
diff --git a/data-source-raw-viewer.png b/data-source-raw-viewer.png
new file mode 100644
index 00000000..667cb45f
Binary files /dev/null and b/data-source-raw-viewer.png differ
diff --git a/data-sources-all.png b/data-sources-all.png
new file mode 100644
index 00000000..986e62f3
Binary files /dev/null and b/data-sources-all.png differ
diff --git a/data-sources-fixed.png b/data-sources-fixed.png
new file mode 100644
index 00000000..9a3c5960
Binary files /dev/null and b/data-sources-fixed.png differ
diff --git a/detail-drawer-bottom.png b/detail-drawer-bottom.png
new file mode 100644
index 00000000..9b7f986a
Binary files /dev/null and b/detail-drawer-bottom.png differ
diff --git a/detail-drawer-test.png b/detail-drawer-test.png
new file mode 100644
index 00000000..ce7e2908
Binary files /dev/null and b/detail-drawer-test.png differ
diff --git a/drawer-header.png b/drawer-header.png
new file mode 100644
index 00000000..7d2ac1b6
Binary files /dev/null and b/drawer-header.png differ
diff --git a/frontend/backend/backtest_service.py b/frontend/backend/backtest_service.py
new file mode 100644
index 00000000..4df17615
--- /dev/null
+++ b/frontend/backend/backtest_service.py
@@ -0,0 +1,237 @@
+"""Backtest service for calculating real prediction accuracy."""
+import yfinance as yf
+import pandas as pd
+from datetime import datetime, timedelta
+from typing import Optional
+import database as db
+
+
+def get_trading_day_price(ticker: yf.Ticker, target_date: datetime,
+ direction: str = 'forward', max_days: int = 7) -> Optional[float]:
+ """
+ Get the closing price for a trading day near the target date.
+
+ Args:
+ ticker: yfinance Ticker object
+        target_date: The date we want the price for
+        direction: 'forward' to look for the next trading day, 'backward' for the previous one
+        max_days: Maximum number of days to search
+
+ Returns:
+ Closing price or None if not found
+ """
+ for i in range(max_days):
+ if direction == 'forward':
+ check_date = target_date + timedelta(days=i)
+ else:
+ check_date = target_date - timedelta(days=i)
+
+ start = check_date
+ end = check_date + timedelta(days=1)
+
+ hist = ticker.history(start=start.strftime('%Y-%m-%d'),
+ end=end.strftime('%Y-%m-%d'))
+ if not hist.empty:
+ return hist['Close'].iloc[0]
+
+ return None
+
+
+def calculate_backtest_for_recommendation(date: str, symbol: str, decision: str,
+ hold_days: int = None) -> Optional[dict]:
+ """
+ Calculate backtest results for a single recommendation.
+
+ Args:
+ date: Prediction date (YYYY-MM-DD)
+ symbol: Stock symbol (NSE format like RELIANCE.NS)
+ decision: BUY, SELL, or HOLD
+ hold_days: Recommended holding period in days (for BUY/HOLD)
+
+ Returns:
+ Dict with backtest results or None if calculation failed
+ """
+ try:
+ # Convert date
+ pred_date = datetime.strptime(date, '%Y-%m-%d')
+
+ # For Indian stocks, append .NS suffix if not present
+ yf_symbol = symbol if '.' in symbol else f"{symbol}.NS"
+
+ ticker = yf.Ticker(yf_symbol)
+
+ # Get price at prediction date (or next trading day)
+ price_at_pred = get_trading_day_price(ticker, pred_date, 'forward')
+ if price_at_pred is None:
+ return None
+
+ # Get prices for 1 day, 1 week, 1 month later
+ date_1d = pred_date + timedelta(days=1)
+ date_1w = pred_date + timedelta(weeks=1)
+ date_1m = pred_date + timedelta(days=30)
+
+ price_1d = get_trading_day_price(ticker, date_1d, 'forward')
+ price_1w = get_trading_day_price(ticker, date_1w, 'forward')
+ price_1m = get_trading_day_price(ticker, date_1m, 'forward')
+
+ # Calculate returns
+ return_1d = ((price_1d - price_at_pred) / price_at_pred * 100) if price_1d else None
+ return_1w = ((price_1w - price_at_pred) / price_at_pred * 100) if price_1w else None
+ return_1m = ((price_1m - price_at_pred) / price_at_pred * 100) if price_1m else None
+
+ # Calculate return at hold_days horizon if specified
+ return_at_hold = None
+ if hold_days and hold_days > 0:
+ date_hold = pred_date + timedelta(days=hold_days)
+ price_at_hold = get_trading_day_price(ticker, date_hold, 'forward')
+ if price_at_hold:
+ return_at_hold = round(((price_at_hold - price_at_pred) / price_at_pred * 100), 2)
+
+ # Determine if prediction was correct
+ # Use hold_days return when available, fall back to 1-week return
+ prediction_correct = None
+ check_return = return_at_hold if return_at_hold is not None else return_1w
+ if check_return is not None:
+ if decision == 'BUY' or decision == 'HOLD':
+ prediction_correct = check_return > 0
+ elif decision == 'SELL':
+ prediction_correct = check_return < 0
+
+ return {
+ 'date': date,
+ 'symbol': symbol,
+ 'decision': decision,
+ 'price_at_prediction': round(price_at_pred, 2),
+ 'price_1d_later': round(price_1d, 2) if price_1d else None,
+ 'price_1w_later': round(price_1w, 2) if price_1w else None,
+ 'price_1m_later': round(price_1m, 2) if price_1m else None,
+ 'return_1d': round(return_1d, 2) if return_1d is not None else None,
+ 'return_1w': round(return_1w, 2) if return_1w is not None else None,
+ 'return_1m': round(return_1m, 2) if return_1m is not None else None,
+ 'return_at_hold': return_at_hold,
+ 'hold_days': hold_days,
+ 'prediction_correct': prediction_correct
+ }
+
+ except Exception as e:
+ print(f"Error calculating backtest for {symbol} on {date}: {e}")
+ return None
+
+
+def calculate_and_save_backtest(date: str, symbol: str, decision: str,
+ hold_days: int = None) -> Optional[dict]:
+ """Calculate backtest and save to database."""
+ result = calculate_backtest_for_recommendation(date, symbol, decision, hold_days)
+
+ if result:
+ db.save_backtest_result(
+ date=result['date'],
+ symbol=result['symbol'],
+ decision=result['decision'],
+ price_at_prediction=result['price_at_prediction'],
+ price_1d_later=result['price_1d_later'],
+ price_1w_later=result['price_1w_later'],
+ price_1m_later=result['price_1m_later'],
+ return_1d=result['return_1d'],
+ return_1w=result['return_1w'],
+ return_1m=result['return_1m'],
+ prediction_correct=result['prediction_correct'],
+ hold_days=result.get('hold_days')
+ )
+
+ return result
+
+
+def backtest_all_recommendations_for_date(date: str) -> dict:
+ """
+ Calculate backtest for all recommendations on a given date.
+
+ Returns summary statistics.
+ """
+ rec = db.get_recommendation_by_date(date)
+ if not rec or 'analysis' not in rec:
+ return {'error': 'No recommendations found for date', 'date': date}
+
+ analysis = rec['analysis'] # Dict keyed by symbol
+ results = []
+ errors = []
+
+ for symbol, stock_data in analysis.items():
+ decision = stock_data['decision']
+ hold_days = stock_data.get('hold_days')
+
+ # Check if we already have a backtest result
+ existing = db.get_backtest_result(date, symbol)
+ if existing:
+ results.append(existing)
+ continue
+
+ # Calculate new backtest
+ result = calculate_and_save_backtest(date, symbol, decision, hold_days)
+ if result:
+ results.append(result)
+ else:
+ errors.append(symbol)
+
+ # Calculate summary
+ correct = sum(1 for r in results if r.get('prediction_correct'))
+ total_with_result = sum(1 for r in results if r.get('prediction_correct') is not None)
+
+ return {
+ 'date': date,
+ 'total_stocks': len(analysis),
+ 'calculated': len(results),
+ 'errors': errors,
+ 'correct_predictions': correct,
+ 'total_with_result': total_with_result,
+ 'accuracy': round(correct / total_with_result * 100, 1) if total_with_result > 0 else 0
+ }
+
+
+def get_backtest_data_for_frontend(date: str, symbol: str) -> dict:
+ """
+ Get backtest data formatted for frontend display.
+ Includes price history for charts.
+ """
+ result = db.get_backtest_result(date, symbol)
+
+ if not result:
+ # Try to calculate it
+ rec = db.get_recommendation_by_date(date)
+ if rec and 'analysis' in rec:
+ stock_data = rec['analysis'].get(symbol)
+ if stock_data:
+ result = calculate_and_save_backtest(date, symbol, stock_data['decision'], stock_data.get('hold_days'))
+
+ if not result:
+ return {'available': False, 'reason': 'Could not calculate backtest'}
+
+ # Get price history for chart
+ try:
+ pred_date = datetime.strptime(date, '%Y-%m-%d')
+ yf_symbol = symbol if '.' in symbol else f"{symbol}.NS"
+ ticker = yf.Ticker(yf_symbol)
+
+ # Get 30 days of history starting from prediction date
+ end_date = pred_date + timedelta(days=35)
+ hist = ticker.history(start=pred_date.strftime('%Y-%m-%d'),
+ end=end_date.strftime('%Y-%m-%d'))
+
+ price_history = [
+ {'date': idx.strftime('%Y-%m-%d'), 'price': round(row['Close'], 2)}
+ for idx, row in hist.iterrows()
+ ][:30] # Limit to 30 data points
+
+ except Exception:
+ price_history = []
+
+ return {
+ 'available': True,
+ 'prediction_correct': result['prediction_correct'],
+ 'actual_return_1d': result['return_1d'],
+ 'actual_return_1w': result['return_1w'],
+ 'actual_return_1m': result['return_1m'],
+ 'price_at_prediction': result['price_at_prediction'],
+ 'current_price': result.get('price_1m_later') or result.get('price_1w_later'),
+ 'price_history': price_history
+ }
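For reference, a minimal usage sketch of the new service (hypothetical date and symbol; assumes it runs from frontend/backend so the module's `import database as db` resolves, and that price data exists for the dates involved):

import backtest_service as bt

# Backtest one stored recommendation (date/symbol here are placeholders).
single = bt.calculate_backtest_for_recommendation(
    date="2026-01-02", symbol="RELIANCE.NS", decision="BUY", hold_days=14)
if single:
    print(single["return_1w"], single["return_at_hold"], single["prediction_correct"])

# Backtest every recommendation stored for a date and print summary accuracy.
summary = bt.backtest_all_recommendations_for_date("2026-01-02")
print(summary.get("accuracy"), summary.get("errors"))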
diff --git a/frontend/backend/database.py b/frontend/backend/database.py
index da3a72a2..fbdf5154 100644
--- a/frontend/backend/database.py
+++ b/frontend/backend/database.py
@@ -105,10 +105,17 @@ def init_db():
completed_at TEXT,
duration_ms INTEGER,
output_summary TEXT,
+ step_details TEXT,
UNIQUE(date, symbol, step_number)
)
""")
+ # Add step_details column if it doesn't exist (migration for existing DBs)
+ try:
+ cursor.execute("ALTER TABLE pipeline_steps ADD COLUMN step_details TEXT")
+ except sqlite3.OperationalError:
+ pass # Column already exists
+
# Create data_source_logs table (stores what raw data was fetched)
cursor.execute("""
CREATE TABLE IF NOT EXISTS data_source_logs (
@@ -117,6 +124,8 @@ def init_db():
symbol TEXT NOT NULL,
source_type TEXT,
source_name TEXT,
+ method TEXT,
+ args TEXT,
data_fetched TEXT,
fetch_timestamp TEXT,
success INTEGER DEFAULT 1,
@@ -124,6 +133,46 @@ def init_db():
)
""")
+ # Migrate: add method/args columns if missing (existing databases)
+ try:
+ cursor.execute("ALTER TABLE data_source_logs ADD COLUMN method TEXT")
+ except Exception:
+ pass # Column already exists
+ try:
+ cursor.execute("ALTER TABLE data_source_logs ADD COLUMN args TEXT")
+ except Exception:
+ pass # Column already exists
+
+ # Create backtest_results table (stores calculated backtest accuracy)
+ cursor.execute("""
+ CREATE TABLE IF NOT EXISTS backtest_results (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ date TEXT NOT NULL,
+ symbol TEXT NOT NULL,
+ decision TEXT,
+ price_at_prediction REAL,
+ price_1d_later REAL,
+ price_1w_later REAL,
+ price_1m_later REAL,
+ return_1d REAL,
+ return_1w REAL,
+ return_1m REAL,
+ prediction_correct INTEGER,
+ calculated_at TEXT DEFAULT CURRENT_TIMESTAMP,
+ UNIQUE(date, symbol)
+ )
+ """)
+
+ # Add hold_days column if it doesn't exist (migration for existing DBs)
+ try:
+ cursor.execute("ALTER TABLE stock_analysis ADD COLUMN hold_days INTEGER")
+ except sqlite3.OperationalError:
+ pass # Column already exists
+ try:
+ cursor.execute("ALTER TABLE backtest_results ADD COLUMN hold_days INTEGER")
+ except sqlite3.OperationalError:
+ pass # Column already exists
+
# Create indexes for new tables
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_agent_reports_date_symbol ON agent_reports(date, symbol)
@@ -137,6 +186,9 @@ def init_db():
cursor.execute("""
CREATE INDEX IF NOT EXISTS idx_data_source_logs_date_symbol ON data_source_logs(date, symbol)
""")
+ cursor.execute("""
+ CREATE INDEX IF NOT EXISTS idx_backtest_results_date ON backtest_results(date)
+ """)
conn.commit()
conn.close()
@@ -168,8 +220,8 @@ def save_recommendation(date: str, analysis_data: dict, summary: dict,
for symbol, analysis in analysis_data.items():
cursor.execute("""
INSERT OR REPLACE INTO stock_analysis
- (date, symbol, company_name, decision, confidence, risk, raw_analysis)
- VALUES (?, ?, ?, ?, ?, ?, ?)
+ (date, symbol, company_name, decision, confidence, risk, raw_analysis, hold_days)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""", (
date,
symbol,
@@ -177,7 +229,8 @@ def save_recommendation(date: str, analysis_data: dict, summary: dict,
analysis.get('decision'),
analysis.get('confidence'),
analysis.get('risk'),
- analysis.get('raw_analysis', '')
+ analysis.get('raw_analysis', ''),
+ analysis.get('hold_days')
))
conn.commit()
@@ -185,6 +238,52 @@ def save_recommendation(date: str, analysis_data: dict, summary: dict,
conn.close()
+def save_single_stock_analysis(date: str, symbol: str, analysis: dict):
+ """Save analysis for a single stock.
+
+ Args:
+ date: Date string (YYYY-MM-DD)
+ symbol: Stock symbol
+ analysis: Dict with keys: company_name, decision, confidence, risk, raw_analysis, hold_days
+ """
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("""
+ INSERT OR REPLACE INTO stock_analysis
+ (date, symbol, company_name, decision, confidence, risk, raw_analysis, hold_days)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+ """, (
+ date,
+ symbol,
+ analysis.get('company_name', symbol),
+ analysis.get('decision', 'HOLD'),
+ analysis.get('confidence', 'MEDIUM'),
+ analysis.get('risk', 'MEDIUM'),
+ analysis.get('raw_analysis', ''),
+ analysis.get('hold_days')
+ ))
+ conn.commit()
+ finally:
+ conn.close()
+
+
+def get_analyzed_symbols_for_date(date: str) -> list:
+ """Get list of symbols that already have analysis for a given date.
+
+ Used by bulk analysis to skip already-completed stocks when resuming.
+ """
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("SELECT symbol FROM stock_analysis WHERE date = ?", (date,))
+ return [row['symbol'] for row in cursor.fetchall()]
+ finally:
+ conn.close()
+
+
def get_recommendation_by_date(date: str) -> Optional[dict]:
"""Get recommendation for a specific date."""
conn = get_connection()
@@ -197,37 +296,60 @@ def get_recommendation_by_date(date: str) -> Optional[dict]:
""", (date,))
row = cursor.fetchone()
- if not row:
- return None
-
# Get stock analysis for this date
cursor.execute("""
SELECT * FROM stock_analysis WHERE date = ?
""", (date,))
analysis_rows = cursor.fetchall()
+ # If no daily_recommendations AND no stock_analysis, return None
+ if not row and not analysis_rows:
+ return None
+
analysis = {}
for a in analysis_rows:
+ decision = (a['decision'] or '').strip().upper()
+ if decision not in ('BUY', 'SELL', 'HOLD'):
+ decision = 'HOLD'
analysis[a['symbol']] = {
'symbol': a['symbol'],
'company_name': a['company_name'],
- 'decision': a['decision'],
- 'confidence': a['confidence'],
- 'risk': a['risk'],
- 'raw_analysis': a['raw_analysis']
+ 'decision': decision,
+ 'confidence': a['confidence'] or 'MEDIUM',
+ 'risk': a['risk'] or 'MEDIUM',
+ 'raw_analysis': a['raw_analysis'],
+ 'hold_days': a['hold_days'] if 'hold_days' in a.keys() else None
}
+ if row:
+ return {
+ 'date': row['date'],
+ 'analysis': analysis,
+ 'summary': {
+ 'total': row['summary_total'],
+ 'buy': row['summary_buy'],
+ 'sell': row['summary_sell'],
+ 'hold': row['summary_hold']
+ },
+ 'top_picks': json.loads(row['top_picks']) if row['top_picks'] else [],
+ 'stocks_to_avoid': json.loads(row['stocks_to_avoid']) if row['stocks_to_avoid'] else []
+ }
+
+ # Fallback: build summary from stock_analysis when daily_recommendations is missing
+ buy_count = sum(1 for a in analysis.values() if a['decision'] == 'BUY')
+ sell_count = sum(1 for a in analysis.values() if a['decision'] == 'SELL')
+ hold_count = sum(1 for a in analysis.values() if a['decision'] == 'HOLD')
return {
- 'date': row['date'],
+ 'date': date,
'analysis': analysis,
'summary': {
- 'total': row['summary_total'],
- 'buy': row['summary_buy'],
- 'sell': row['summary_sell'],
- 'hold': row['summary_hold']
+ 'total': len(analysis),
+ 'buy': buy_count,
+ 'sell': sell_count,
+ 'hold': hold_count
},
- 'top_picks': json.loads(row['top_picks']) if row['top_picks'] else [],
- 'stocks_to_avoid': json.loads(row['stocks_to_avoid']) if row['stocks_to_avoid'] else []
+ 'top_picks': [],
+ 'stocks_to_avoid': []
}
finally:
conn.close()
@@ -253,13 +375,17 @@ def get_latest_recommendation() -> Optional[dict]:
def get_all_dates() -> list:
- """Get all available dates."""
+ """Get all available dates (union of daily_recommendations and stock_analysis)."""
conn = get_connection()
cursor = conn.cursor()
try:
cursor.execute("""
- SELECT date FROM daily_recommendations ORDER BY date DESC
+ SELECT DISTINCT date FROM (
+ SELECT date FROM daily_recommendations
+ UNION
+ SELECT date FROM stock_analysis
+ ) ORDER BY date DESC
""")
return [row['date'] for row in cursor.fetchall()]
finally:
@@ -273,21 +399,26 @@ def get_stock_history(symbol: str) -> list:
try:
cursor.execute("""
- SELECT date, decision, confidence, risk
+ SELECT date, decision, confidence, risk, hold_days
FROM stock_analysis
WHERE symbol = ?
ORDER BY date DESC
""", (symbol,))
- return [
- {
+ results = []
+ for row in cursor.fetchall():
+ decision = (row['decision'] or '').strip().upper()
+ # Sanitize: only allow BUY/SELL/HOLD
+ if decision not in ('BUY', 'SELL', 'HOLD'):
+ decision = 'HOLD'
+ results.append({
'date': row['date'],
- 'decision': row['decision'],
- 'confidence': row['confidence'],
- 'risk': row['risk']
- }
- for row in cursor.fetchall()
- ]
+ 'decision': decision,
+ 'confidence': row['confidence'] or 'MEDIUM',
+ 'risk': row['risk'] or 'MEDIUM',
+ 'hold_days': row['hold_days'] if 'hold_days' in row.keys() else None
+ })
+ return results
finally:
conn.close()
@@ -467,11 +598,14 @@ def save_pipeline_steps_bulk(date: str, symbol: str, steps: list):
try:
for step in steps:
+ step_details = step.get('step_details')
+ if step_details and not isinstance(step_details, str):
+ step_details = json.dumps(step_details)
cursor.execute("""
INSERT OR REPLACE INTO pipeline_steps
(date, symbol, step_number, step_name, status,
- started_at, completed_at, duration_ms, output_summary)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+ started_at, completed_at, duration_ms, output_summary, step_details)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
date, symbol,
step.get('step_number'),
@@ -480,7 +614,8 @@ def save_pipeline_steps_bulk(date: str, symbol: str, steps: list):
step.get('started_at'),
step.get('completed_at'),
step.get('duration_ms'),
- step.get('output_summary')
+ step.get('output_summary'),
+ step_details
))
conn.commit()
finally:
@@ -499,18 +634,26 @@ def get_pipeline_steps(date: str, symbol: str) -> list:
ORDER BY step_number
""", (date, symbol))
- return [
- {
+ results = []
+ for row in cursor.fetchall():
+ step_details = None
+ raw_details = row['step_details'] if 'step_details' in row.keys() else None
+ if raw_details:
+ try:
+ step_details = json.loads(raw_details)
+ except (json.JSONDecodeError, TypeError):
+ step_details = None
+ results.append({
'step_number': row['step_number'],
'step_name': row['step_name'],
'status': row['status'],
'started_at': row['started_at'],
'completed_at': row['completed_at'],
'duration_ms': row['duration_ms'],
- 'output_summary': row['output_summary']
- }
- for row in cursor.fetchall()
- ]
+ 'output_summary': row['output_summary'],
+ 'step_details': step_details,
+ })
+ return results
finally:
conn.close()
@@ -550,13 +693,15 @@ def save_data_source_logs_bulk(date: str, symbol: str, logs: list):
for log in logs:
cursor.execute("""
INSERT INTO data_source_logs
- (date, symbol, source_type, source_name, data_fetched,
+ (date, symbol, source_type, source_name, method, args, data_fetched,
fetch_timestamp, success, error_message)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
date, symbol,
log.get('source_type'),
log.get('source_name'),
+ log.get('method'),
+ log.get('args'),
json.dumps(log.get('data_fetched')) if log.get('data_fetched') else None,
log.get('fetch_timestamp') or datetime.now().isoformat(),
1 if log.get('success', True) else 0,
@@ -568,7 +713,8 @@ def save_data_source_logs_bulk(date: str, symbol: str, logs: list):
def get_data_source_logs(date: str, symbol: str) -> list:
- """Get all data source logs for a stock on a date."""
+ """Get all data source logs for a stock on a date.
+ Falls back to generating entries from agent_reports if no explicit logs exist."""
conn = get_connection()
cursor = conn.cursor()
@@ -579,10 +725,12 @@ def get_data_source_logs(date: str, symbol: str) -> list:
ORDER BY fetch_timestamp
""", (date, symbol))
- return [
+ logs = [
{
'source_type': row['source_type'],
'source_name': row['source_name'],
+ 'method': row['method'] if 'method' in row.keys() else None,
+ 'args': row['args'] if 'args' in row.keys() else None,
'data_fetched': json.loads(row['data_fetched']) if row['data_fetched'] else None,
'fetch_timestamp': row['fetch_timestamp'],
'success': bool(row['success']),
@@ -590,6 +738,39 @@ def get_data_source_logs(date: str, symbol: str) -> list:
}
for row in cursor.fetchall()
]
+
+ if logs:
+ return logs
+
+ # No explicit logs — generate from agent_reports with full raw content
+ AGENT_TO_SOURCE = {
+ 'market': ('market_data', 'Yahoo Finance'),
+ 'news': ('news', 'Google News'),
+ 'social_media': ('social_media', 'Social Sentiment'),
+ 'fundamentals': ('fundamentals', 'Financial Data'),
+ }
+
+ cursor.execute("""
+ SELECT agent_type, report_content, created_at
+ FROM agent_reports
+ WHERE date = ? AND symbol = ?
+ """, (date, symbol))
+
+ generated = []
+ for row in cursor.fetchall():
+ source_type, source_name = AGENT_TO_SOURCE.get(
+ row['agent_type'], ('other', row['agent_type'])
+ )
+ generated.append({
+ 'source_type': source_type,
+ 'source_name': source_name,
+ 'data_fetched': row['report_content'],
+ 'fetch_timestamp': row['created_at'],
+ 'success': True,
+ 'error_message': None
+ })
+
+ return generated
finally:
conn.close()
@@ -698,5 +879,283 @@ def get_pipeline_summary_for_date(date: str) -> list:
conn.close()
+def save_backtest_result(date: str, symbol: str, decision: str,
+ price_at_prediction: float, price_1d_later: float = None,
+ price_1w_later: float = None, price_1m_later: float = None,
+ return_1d: float = None, return_1w: float = None,
+ return_1m: float = None, prediction_correct: bool = None,
+ hold_days: int = None):
+ """Save a backtest result for a stock recommendation."""
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("""
+ INSERT OR REPLACE INTO backtest_results
+ (date, symbol, decision, price_at_prediction,
+ price_1d_later, price_1w_later, price_1m_later,
+ return_1d, return_1w, return_1m, prediction_correct, hold_days)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+ """, (
+ date, symbol, decision, price_at_prediction,
+ price_1d_later, price_1w_later, price_1m_later,
+ return_1d, return_1w, return_1m,
+ 1 if prediction_correct else 0 if prediction_correct is not None else None,
+ hold_days
+ ))
+ conn.commit()
+ finally:
+ conn.close()
+
+
+def get_backtest_result(date: str, symbol: str) -> Optional[dict]:
+ """Get backtest result for a specific stock and date."""
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("""
+ SELECT * FROM backtest_results WHERE date = ? AND symbol = ?
+ """, (date, symbol))
+ row = cursor.fetchone()
+
+ if row:
+ return {
+ 'date': row['date'],
+ 'symbol': row['symbol'],
+ 'decision': row['decision'],
+ 'price_at_prediction': row['price_at_prediction'],
+ 'price_1d_later': row['price_1d_later'],
+ 'price_1w_later': row['price_1w_later'],
+ 'price_1m_later': row['price_1m_later'],
+ 'return_1d': row['return_1d'],
+ 'return_1w': row['return_1w'],
+ 'return_1m': row['return_1m'],
+ 'prediction_correct': bool(row['prediction_correct']) if row['prediction_correct'] is not None else None,
+ 'hold_days': row['hold_days'] if 'hold_days' in row.keys() else None,
+ 'calculated_at': row['calculated_at']
+ }
+ return None
+ finally:
+ conn.close()
+
+
+def get_backtest_results_by_date(date: str) -> list:
+ """Get all backtest results for a specific date."""
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("""
+ SELECT * FROM backtest_results WHERE date = ?
+ """, (date,))
+
+ return [
+ {
+ 'symbol': row['symbol'],
+ 'decision': row['decision'],
+ 'price_at_prediction': row['price_at_prediction'],
+ 'price_1d_later': row['price_1d_later'],
+ 'price_1w_later': row['price_1w_later'],
+ 'price_1m_later': row['price_1m_later'],
+ 'return_1d': row['return_1d'],
+ 'return_1w': row['return_1w'],
+ 'return_1m': row['return_1m'],
+ 'prediction_correct': bool(row['prediction_correct']) if row['prediction_correct'] is not None else None,
+ 'hold_days': row['hold_days'] if 'hold_days' in row.keys() else None
+ }
+ for row in cursor.fetchall()
+ ]
+ finally:
+ conn.close()
+
+
+def get_all_backtest_results() -> list:
+ """Get all backtest results for accuracy calculation."""
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("""
+ SELECT br.*, sa.confidence, sa.risk
+ FROM backtest_results br
+ LEFT JOIN stock_analysis sa ON br.date = sa.date AND br.symbol = sa.symbol
+ WHERE br.prediction_correct IS NOT NULL
+ ORDER BY br.date DESC
+ """)
+
+ return [
+ {
+ 'date': row['date'],
+ 'symbol': row['symbol'],
+ 'decision': row['decision'],
+ 'confidence': row['confidence'],
+ 'risk': row['risk'],
+ 'price_at_prediction': row['price_at_prediction'],
+ 'return_1d': row['return_1d'],
+ 'return_1w': row['return_1w'],
+ 'return_1m': row['return_1m'],
+ 'prediction_correct': bool(row['prediction_correct'])
+ }
+ for row in cursor.fetchall()
+ ]
+ finally:
+ conn.close()
+
+
+def calculate_accuracy_metrics() -> dict:
+ """Calculate overall backtest accuracy metrics."""
+ results = get_all_backtest_results()
+
+ if not results:
+ return {
+ 'overall_accuracy': 0,
+ 'total_predictions': 0,
+ 'correct_predictions': 0,
+ 'by_decision': {'BUY': {'accuracy': 0, 'total': 0}, 'SELL': {'accuracy': 0, 'total': 0}, 'HOLD': {'accuracy': 0, 'total': 0}},
+ 'by_confidence': {'High': {'accuracy': 0, 'total': 0}, 'Medium': {'accuracy': 0, 'total': 0}, 'Low': {'accuracy': 0, 'total': 0}}
+ }
+
+ total = len(results)
+ correct = sum(1 for r in results if r['prediction_correct'])
+
+ # By decision type
+ by_decision = {}
+ for decision in ['BUY', 'SELL', 'HOLD']:
+ decision_results = [r for r in results if r['decision'] == decision]
+ if decision_results:
+ decision_correct = sum(1 for r in decision_results if r['prediction_correct'])
+ by_decision[decision] = {
+ 'accuracy': round(decision_correct / len(decision_results) * 100, 1),
+ 'total': len(decision_results),
+ 'correct': decision_correct
+ }
+ else:
+ by_decision[decision] = {'accuracy': 0, 'total': 0, 'correct': 0}
+
+ # By confidence level
+ by_confidence = {}
+ for conf in ['High', 'Medium', 'Low']:
+        # Compare case-insensitively: stored confidence values are typically uppercase (e.g. 'MEDIUM')
+        conf_results = [r for r in results if (r.get('confidence') or '').upper() == conf.upper()]
+ if conf_results:
+ conf_correct = sum(1 for r in conf_results if r['prediction_correct'])
+ by_confidence[conf] = {
+ 'accuracy': round(conf_correct / len(conf_results) * 100, 1),
+ 'total': len(conf_results),
+ 'correct': conf_correct
+ }
+ else:
+ by_confidence[conf] = {'accuracy': 0, 'total': 0, 'correct': 0}
+
+ return {
+ 'overall_accuracy': round(correct / total * 100, 1) if total > 0 else 0,
+ 'total_predictions': total,
+ 'correct_predictions': correct,
+ 'by_decision': by_decision,
+ 'by_confidence': by_confidence
+ }
+
+
+def update_daily_recommendation_summary(date: str):
+ """Auto-create/update daily_recommendations from stock_analysis for a date.
+
+ Counts BUY/SELL/HOLD decisions, generates top_picks and stocks_to_avoid,
+ and upserts the daily_recommendations row.
+ """
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ # Get all stock analyses for this date
+ cursor.execute("""
+ SELECT symbol, company_name, decision, confidence, risk, raw_analysis
+ FROM stock_analysis WHERE date = ?
+ """, (date,))
+ rows = cursor.fetchall()
+
+ if not rows:
+ return
+
+ buy_count = 0
+ sell_count = 0
+ hold_count = 0
+ buy_stocks = []
+ sell_stocks = []
+
+ for row in rows:
+ decision = (row['decision'] or '').upper()
+ if decision == 'BUY':
+ buy_count += 1
+ buy_stocks.append({
+ 'symbol': row['symbol'],
+ 'company_name': row['company_name'] or row['symbol'],
+ 'decision': 'BUY',
+ 'confidence': row['confidence'] or 'MEDIUM',
+ 'reason': (row['raw_analysis'] or '')[:200]
+ })
+ elif decision == 'SELL':
+ sell_count += 1
+ sell_stocks.append({
+ 'symbol': row['symbol'],
+ 'company_name': row['company_name'] or row['symbol'],
+ 'decision': 'SELL',
+ 'confidence': row['confidence'] or 'MEDIUM',
+ 'reason': (row['raw_analysis'] or '')[:200]
+ })
+ else:
+ hold_count += 1
+
+ total = buy_count + sell_count + hold_count
+
+ # Top picks: up to 5 BUY stocks
+ top_picks = [
+ {'symbol': s['symbol'], 'company_name': s['company_name'],
+ 'confidence': s['confidence'], 'reason': s['reason']}
+ for s in buy_stocks[:5]
+ ]
+
+ # Stocks to avoid: up to 5 SELL stocks
+ stocks_to_avoid = [
+ {'symbol': s['symbol'], 'company_name': s['company_name'],
+ 'confidence': s['confidence'], 'reason': s['reason']}
+ for s in sell_stocks[:5]
+ ]
+
+ cursor.execute("""
+ INSERT OR REPLACE INTO daily_recommendations
+ (date, summary_total, summary_buy, summary_sell, summary_hold, top_picks, stocks_to_avoid)
+ VALUES (?, ?, ?, ?, ?, ?, ?)
+ """, (
+ date, total, buy_count, sell_count, hold_count,
+ json.dumps(top_picks),
+ json.dumps(stocks_to_avoid)
+ ))
+ conn.commit()
+ finally:
+ conn.close()
+
+
+def rebuild_all_daily_recommendations():
+ """Rebuild daily_recommendations for all dates that have stock_analysis data.
+
+ This ensures dates with stock_analysis but missing daily_recommendations
+ entries become visible to the API.
+ """
+ conn = get_connection()
+ cursor = conn.cursor()
+
+ try:
+ cursor.execute("SELECT DISTINCT date FROM stock_analysis")
+ dates = [row['date'] for row in cursor.fetchall()]
+ finally:
+ conn.close()
+
+ for date in dates:
+ update_daily_recommendation_summary(date)
+
+ if dates:
+ print(f"[DB] Rebuilt daily_recommendations for {len(dates)} dates: {sorted(dates)}")
+
+
# Initialize database on module import
init_db()
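A short sketch of how the new database helpers fit together (hypothetical date and symbol; assumes it runs from frontend/backend, the same way server.py imports `database as db`):

import database as db

# Save one stock's analysis, then fold it into the daily_recommendations summary row.
db.save_single_stock_analysis("2026-01-02", "TCS.NS", {
    "company_name": "Tata Consultancy Services",
    "decision": "BUY", "confidence": "HIGH", "risk": "LOW",
    "raw_analysis": "short rationale text", "hold_days": 10,
})
db.update_daily_recommendation_summary("2026-01-02")

# Resume support: /analyze/all skips symbols already analysed for the date.
already_done = set(db.get_analyzed_symbols_for_date("2026-01-02"))

# Aggregate accuracy across all stored backtest results.
print(db.calculate_accuracy_metrics()["overall_accuracy"])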
diff --git a/frontend/backend/recommendations.db b/frontend/backend/recommendations.db
index 0ec5c455..9d729e07 100644
Binary files a/frontend/backend/recommendations.db and b/frontend/backend/recommendations.db differ
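The server.py changes below add a Server-Sent Events endpoint at /stream/logs for live analysis logs. A minimal client sketch (the host/port and the exact keys on log entries are assumptions, and `requests` is assumed to be available):

import json
import requests

# Read the SSE stream line by line, skipping heartbeats.
with requests.get("http://localhost:8000/stream/logs", stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data: "):
            event = json.loads(line[len("data: "):])
            if event.get("type") != "heartbeat":
                print(event.get("source"), event.get("message"))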
diff --git a/frontend/backend/server.py b/frontend/backend/server.py
index 110b4fd3..38b537b4 100644
--- a/frontend/backend/server.py
+++ b/frontend/backend/server.py
@@ -1,6 +1,7 @@
"""FastAPI server for Nifty50 AI recommendations."""
from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import Optional
import database as db
@@ -9,11 +10,18 @@ import os
from pathlib import Path
from datetime import datetime
import threading
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import asyncio
+import json
+import time
# Add parent directories to path for importing trading agents
PROJECT_ROOT = Path(__file__).parent.parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
+# Import shared logging system
+from tradingagents.log_utils import add_log, analysis_logs, log_lock, log_subscribers
+
# Track running analyses
# NOTE: This is not thread-safe for production multi-worker deployments.
# For production, use Redis or a database-backed job queue instead.
@@ -145,6 +153,11 @@ class RunAnalysisRequest(BaseModel):
config: Optional[AnalysisConfig] = None
+def _is_cancelled(symbol: str) -> bool:
+ """Check if an analysis has been cancelled."""
+ return running_analyses.get(symbol, {}).get("cancelled", False)
+
+
def run_analysis_task(symbol: str, date: str, analysis_config: dict = None):
"""Background task to run trading analysis for a stock."""
global running_analyses
@@ -163,14 +176,20 @@ def run_analysis_task(symbol: str, date: str, analysis_config: dict = None):
running_analyses[symbol] = {
"status": "initializing",
"started_at": datetime.now().isoformat(),
- "progress": "Loading trading agents..."
+ "progress": "Loading trading agents...",
+ "cancelled": False,
}
+ add_log("info", "system", f"🚀 Starting analysis for {symbol} on {date}")
+ add_log("info", "system", f"Config: deep_think={deep_think_model}, quick_think={quick_think_model}")
+
# Import trading agents
+ add_log("info", "system", "Loading TradingAgentsGraph module...")
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
running_analyses[symbol]["progress"] = "Initializing analysis pipeline..."
+ add_log("info", "system", "Initializing analysis pipeline...")
# Create config from user settings
config = DEFAULT_CONFIG.copy()
@@ -183,14 +202,77 @@ def run_analysis_task(symbol: str, date: str, analysis_config: dict = None):
if provider == "anthropic_api" and api_key:
os.environ["ANTHROPIC_API_KEY"] = api_key
+ # Check cancellation before starting
+ if _is_cancelled(symbol):
+ add_log("info", "system", f"Analysis for {symbol} was cancelled before starting")
+ running_analyses[symbol]["status"] = "cancelled"
+ running_analyses[symbol]["progress"] = "Analysis cancelled"
+ return
+
running_analyses[symbol]["status"] = "running"
running_analyses[symbol]["progress"] = f"Running market analysis (model: {deep_think_model})..."
+ add_log("agent", "system", f"Creating TradingAgentsGraph for {symbol}...")
+
# Initialize and run
ta = TradingAgentsGraph(debug=False, config=config)
+ # Check cancellation before graph execution
+ if _is_cancelled(symbol):
+ add_log("info", "system", f"Analysis for {symbol} was cancelled before graph execution")
+ running_analyses[symbol]["status"] = "cancelled"
+ running_analyses[symbol]["progress"] = "Analysis cancelled"
+ return
+
running_analyses[symbol]["progress"] = f"Analyzing {symbol}..."
- final_state, decision = ta.propagate(symbol, date)
+ add_log("agent", "system", f"Starting propagation for {symbol}...")
+ add_log("data", "data_fetch", f"Fetching market data for {symbol}...")
+
+ final_state, decision, hold_days = ta.propagate(symbol, date)
+
+ # Check cancellation after graph execution (skip saving results)
+ if _is_cancelled(symbol):
+ add_log("info", "system", f"Analysis for {symbol} was cancelled after completion — results discarded")
+ running_analyses[symbol]["status"] = "cancelled"
+ running_analyses[symbol]["progress"] = "Analysis cancelled (results discarded)"
+ return
+
+ add_log("success", "system", f"✅ Analysis complete for {symbol}: {decision}")
+
+ # Extract raw analysis from final_state if available
+ raw_analysis = ""
+ if final_state:
+ if "final_trade_decision" in final_state:
+ raw_analysis = final_state.get("final_trade_decision", "")
+ elif "risk_debate_state" in final_state:
+ raw_analysis = final_state.get("risk_debate_state", {}).get("judge_decision", "")
+
+ # Save the analysis result to the database
+ analysis_data = {
+ "company_name": symbol,
+ "decision": decision.upper() if decision else "HOLD",
+ "confidence": "MEDIUM",
+ "risk": "MEDIUM",
+ "raw_analysis": raw_analysis,
+ "hold_days": hold_days
+ }
+ db.save_single_stock_analysis(date, symbol, analysis_data)
+ add_log("info", "system", f"💾 Saved analysis for {symbol} to database")
+
+ # Auto-update daily recommendation summary (counts, top_picks, stocks_to_avoid)
+ db.update_daily_recommendation_summary(date)
+ add_log("info", "system", f"📊 Updated daily recommendation summary for {date}")
+
+ # Auto-trigger backtest calculation for this stock
+ try:
+ import backtest_service as bt
+ bt_result = bt.calculate_and_save_backtest(date, symbol, analysis_data["decision"], analysis_data.get("hold_days"))
+ if bt_result:
+ add_log("info", "system", f"📈 Backtest calculated for {symbol}: correct={bt_result.get('prediction_correct')}")
+ else:
+ add_log("info", "system", f"📈 Backtest not available yet for {symbol} (future date or no price data)")
+ except Exception as bt_err:
+ add_log("warning", "system", f"⚠️ Backtest calculation skipped for {symbol}: {bt_err}")
running_analyses[symbol] = {
"status": "completed",
@@ -198,9 +280,16 @@ def run_analysis_task(symbol: str, date: str, analysis_config: dict = None):
"progress": f"Analysis complete: {decision}",
"decision": decision
}
+ # Clear per-symbol step progress after completion
+ try:
+ from tradingagents.log_utils import symbol_progress
+ symbol_progress.clear(symbol)
+ except Exception:
+ pass
except Exception as e:
error_msg = str(e) if str(e) else f"{type(e).__name__}: No details provided"
+ add_log("error", "system", f"❌ Error analyzing {symbol}: {error_msg}")
running_analyses[symbol] = {
"status": "error",
"error": error_msg,
@@ -295,6 +384,60 @@ async def health_check():
return {"status": "healthy", "database": "connected"}
+# ============== Live Log Streaming Endpoint ==============
+
+@app.get("/stream/logs")
+async def stream_logs():
+ """Server-Sent Events endpoint for streaming analysis logs."""
+ import queue
+
+ # Create a queue for this subscriber
+ subscriber_queue = queue.Queue(maxsize=100)
+
+ with log_lock:
+ log_subscribers.append(subscriber_queue)
+
+ async def event_generator():
+ try:
+ # Send initial connection message
+ yield f"data: {json.dumps({'type': 'info', 'source': 'system', 'message': 'Connected to log stream', 'timestamp': datetime.now().isoformat()})}\n\n"
+
+ # Send any recent logs from buffer
+ with log_lock:
+ recent_logs = list(analysis_logs)[-50:] # Last 50 logs
+ for log in recent_logs:
+ yield f"data: {json.dumps(log)}\n\n"
+
+ # Stream new logs as they arrive
+ while True:
+ try:
+ # Check for new logs with timeout
+ log_entry = await asyncio.get_event_loop().run_in_executor(
+ None, lambda: subscriber_queue.get(timeout=5)
+ )
+ yield f"data: {json.dumps(log_entry)}\n\n"
+ except queue.Empty:
+ # Send heartbeat to keep connection alive
+ yield f"data: {json.dumps({'type': 'heartbeat', 'timestamp': datetime.now().isoformat()})}\n\n"
+ except Exception:
+ break
+ finally:
+ # Remove subscriber on disconnect
+ with log_lock:
+ if subscriber_queue in log_subscribers:
+ log_subscribers.remove(subscriber_queue)
+
+ return StreamingResponse(
+ event_generator(),
+ media_type="text/event-stream",
+ headers={
+ "Cache-Control": "no-cache",
+ "Connection": "keep-alive",
+ "Access-Control-Allow-Origin": "*",
+ }
+ )
+
+
# ============== Pipeline Data Endpoints ==============
@app.get("/recommendations/{date}/{symbol}/pipeline")
@@ -395,14 +538,15 @@ async def save_pipeline_data(request: SavePipelineDataRequest):
# Track bulk analysis state
bulk_analysis_state = {
- "status": "idle", # idle, running, completed, error
+ "status": "idle", # idle, running, completed, error, cancelled
"total": 0,
"completed": 0,
"failed": 0,
"current_symbol": None,
"started_at": None,
"completed_at": None,
- "results": {}
+ "results": {},
+ "cancelled": False # Flag to signal cancellation
}
# List of Nifty 50 stocks
@@ -423,11 +567,12 @@ class BulkAnalysisRequest(BaseModel):
provider: Optional[str] = "claude_subscription"
api_key: Optional[str] = None
max_debate_rounds: Optional[int] = 1
+ parallel_workers: Optional[int] = 3
@app.post("/analyze/all")
async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date: Optional[str] = None):
- """Trigger analysis for all Nifty 50 stocks. Runs in background."""
+ """Trigger analysis for all Nifty 50 stocks. Runs in background with parallel processing."""
global bulk_analysis_state
# Check if bulk analysis is already running
@@ -443,6 +588,7 @@ async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date:
# Build analysis config from request
analysis_config = {}
+ parallel_workers = 3
if request:
analysis_config = {
"deep_think_model": request.deep_think_model,
@@ -451,57 +597,129 @@ async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date:
"api_key": request.api_key,
"max_debate_rounds": request.max_debate_rounds
}
+ if request.parallel_workers is not None:
+ parallel_workers = max(1, min(5, request.parallel_workers))
+
+ # Resume support: skip stocks already analyzed for this date
+ already_analyzed = set(db.get_analyzed_symbols_for_date(date))
+ symbols_to_analyze = [s for s in NIFTY_50_SYMBOLS if s not in already_analyzed]
+ skipped_count = len(already_analyzed)
+
+ # If all stocks are already analyzed, return immediately
+ if not symbols_to_analyze:
+ bulk_analysis_state = {
+ "status": "completed",
+ "total": 0,
+ "total_all": len(NIFTY_50_SYMBOLS),
+ "skipped": skipped_count,
+ "completed": 0,
+ "failed": 0,
+ "current_symbols": [],
+ "started_at": datetime.now().isoformat(),
+ "completed_at": datetime.now().isoformat(),
+ "results": {},
+ "parallel_workers": parallel_workers,
+ "cancelled": False
+ }
+ return {
+ "message": f"All {skipped_count} stocks already analyzed for {date}",
+ "date": date,
+ "total_stocks": 0,
+ "skipped": skipped_count,
+ "parallel_workers": parallel_workers,
+ "status": "completed"
+ }
+
+ def analyze_single_stock(symbol: str, analysis_date: str, config: dict) -> tuple:
+ """Analyze a single stock and return (symbol, status, error)."""
+ try:
+ # Check if cancelled before starting
+ if bulk_analysis_state.get("cancelled"):
+ return (symbol, "cancelled", "Bulk analysis was cancelled")
+
+ run_analysis_task(symbol, analysis_date, config)
+
+ # Wait for completion with timeout
+ import time
+ max_wait = 600 # 10 minute timeout per stock
+ waited = 0
+ while waited < max_wait:
+ # Check for cancellation during wait
+ if bulk_analysis_state.get("cancelled"):
+ return (symbol, "cancelled", "Bulk analysis was cancelled")
+
+ if symbol not in running_analyses:
+ return (symbol, "unknown", None)
+ status = running_analyses[symbol].get("status")
+ if status != "running" and status != "initializing":
+ return (symbol, status, None)
+ time.sleep(2)
+ waited += 2
+
+ return (symbol, "timeout", "Analysis timed out after 10 minutes")
+
+ except Exception as e:
+ return (symbol, "error", str(e))
# Start bulk analysis in background thread
- def run_bulk():
+ def run_bulk_parallel():
global bulk_analysis_state
bulk_analysis_state = {
"status": "running",
- "total": len(NIFTY_50_SYMBOLS),
+ "total": len(symbols_to_analyze),
+ "total_all": len(NIFTY_50_SYMBOLS),
+ "skipped": skipped_count,
"completed": 0,
"failed": 0,
- "current_symbol": None,
+ "current_symbols": [],
"started_at": datetime.now().isoformat(),
"completed_at": None,
- "results": {}
+ "results": {},
+ "parallel_workers": parallel_workers,
+ "cancelled": False
}
- for symbol in NIFTY_50_SYMBOLS:
- try:
- bulk_analysis_state["current_symbol"] = symbol
- run_analysis_task(symbol, date, analysis_config)
+ with ThreadPoolExecutor(max_workers=parallel_workers) as executor:
+ future_to_symbol = {
+ executor.submit(analyze_single_stock, symbol, date, analysis_config): symbol
+ for symbol in symbols_to_analyze
+ }
- # Wait for completion
- import time
- while symbol in running_analyses and running_analyses[symbol].get("status") == "running":
- time.sleep(2)
+ bulk_analysis_state["current_symbols"] = list(symbols_to_analyze[:parallel_workers])
+
+ for future in as_completed(future_to_symbol):
+ symbol = future_to_symbol[future]
+ try:
+ symbol, status, error = future.result()
+ bulk_analysis_state["results"][symbol] = status if not error else f"error: {error}"
- if symbol in running_analyses:
- status = running_analyses[symbol].get("status", "unknown")
- bulk_analysis_state["results"][symbol] = status
if status == "completed":
bulk_analysis_state["completed"] += 1
else:
bulk_analysis_state["failed"] += 1
- else:
- bulk_analysis_state["results"][symbol] = "unknown"
+
+ remaining = [s for s in symbols_to_analyze
+ if s not in bulk_analysis_state["results"]]
+ bulk_analysis_state["current_symbols"] = remaining[:parallel_workers]
+
+ except Exception as e:
+ bulk_analysis_state["results"][symbol] = f"error: {str(e)}"
bulk_analysis_state["failed"] += 1
- except Exception as e:
- bulk_analysis_state["results"][symbol] = f"error: {str(e)}"
- bulk_analysis_state["failed"] += 1
-
bulk_analysis_state["status"] = "completed"
- bulk_analysis_state["current_symbol"] = None
+ bulk_analysis_state["current_symbols"] = []
bulk_analysis_state["completed_at"] = datetime.now().isoformat()
- thread = threading.Thread(target=run_bulk)
+ thread = threading.Thread(target=run_bulk_parallel)
thread.start()
+ skipped_msg = f", {skipped_count} already done" if skipped_count > 0 else ""
return {
- "message": "Bulk analysis started for all Nifty 50 stocks",
+ "message": f"Bulk analysis started for {len(symbols_to_analyze)} stocks ({parallel_workers} parallel workers{skipped_msg})",
"date": date,
- "total_stocks": len(NIFTY_50_SYMBOLS),
+ "total_stocks": len(symbols_to_analyze),
+ "skipped": skipped_count,
+ "parallel_workers": parallel_workers,
"status": "started"
}
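run_bulk_parallel above is a standard ThreadPoolExecutor fan-out: one future per remaining symbol, drained with as_completed so shared state updates as each analysis finishes. A stripped-down sketch of that pattern, with placeholder work standing in for run_analysis_task:

# Sketch of the fan-out/drain pattern used above (placeholder work, not the real analysis).
from concurrent.futures import ThreadPoolExecutor, as_completed
import time

def fake_analysis(symbol: str) -> tuple[str, str]:
    time.sleep(0.1)          # stand-in for a long-running analysis
    return symbol, "completed"

def run_parallel(symbols: list[str], workers: int = 3) -> dict[str, str]:
    results: dict[str, str] = {}
    with ThreadPoolExecutor(max_workers=workers) as executor:
        futures = {executor.submit(fake_analysis, s): s for s in symbols}
        for future in as_completed(futures):     # yields futures as they finish
            symbol = futures[future]
            try:
                _, status = future.result()
                results[symbol] = status
            except Exception as exc:             # one failed worker should not stop the batch
                results[symbol] = f"error: {exc}"
    return results

print(run_parallel(["RELIANCE", "TCS", "INFY"], workers=2))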
@@ -509,7 +727,47 @@ async def run_bulk_analysis(request: Optional[BulkAnalysisRequest] = None, date:
@app.get("/analyze/all/status")
async def get_bulk_analysis_status():
"""Get the status of bulk analysis."""
- return bulk_analysis_state
+ # Add backward compatibility for current_symbol (old format)
+ result = dict(bulk_analysis_state)
+ if "current_symbols" in result:
+ result["current_symbol"] = result["current_symbols"][0] if result["current_symbols"] else None
+
+ # Include per-stock step progress for currently-analyzing stocks
+ if result.get("status") == "running" and result.get("current_symbols"):
+ try:
+ from tradingagents.log_utils import symbol_progress
+ stock_progress = {}
+ for sym in result["current_symbols"]:
+ stock_progress[sym] = symbol_progress.get(sym)
+ result["stock_progress"] = stock_progress
+ except Exception:
+ pass
+
+ return result
+
+
+@app.post("/analyze/all/cancel")
+async def cancel_bulk_analysis():
+ """Cancel the running bulk analysis."""
+ global bulk_analysis_state
+
+ if bulk_analysis_state.get("status") != "running":
+ return {
+ "message": "No bulk analysis is running",
+ "status": bulk_analysis_state.get("status")
+ }
+
+ # Set the cancelled flag
+ bulk_analysis_state["cancelled"] = True
+ bulk_analysis_state["status"] = "cancelled"
+ bulk_analysis_state["completed_at"] = datetime.now().isoformat()
+
+ return {
+ "message": "Bulk analysis cancellation requested",
+ "completed": bulk_analysis_state.get("completed", 0),
+ "total": bulk_analysis_state.get("total", 0),
+ "status": "cancelled"
+ }
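Together, POST /analyze/all, GET /analyze/all/status and POST /analyze/all/cancel form a start/poll/cancel protocol. One possible client loop is sketched below; the base URL, polling interval and cancel-on-Ctrl+C behaviour are assumptions, not part of this change.

# Hedged client sketch for the bulk-analysis endpoints (base URL assumed).
import time
import requests

BASE = "http://localhost:8000"

def run_bulk_and_wait(poll_seconds: int = 10) -> dict:
    requests.post(f"{BASE}/analyze/all", json={"parallel_workers": 3}).raise_for_status()
    try:
        while True:
            status = requests.get(f"{BASE}/analyze/all/status").json()
            print(f"{status.get('completed', 0)}/{status.get('total', 0)} done, "
                  f"current: {status.get('current_symbols', [])}")
            if status.get("status") in ("completed", "cancelled", "error"):
                return status
            time.sleep(poll_seconds)
    except KeyboardInterrupt:
        # Ctrl+C requests cooperative cancellation instead of silently abandoning the job
        return requests.post(f"{BASE}/analyze/all/cancel").json()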
@app.get("/analyze/running")
@@ -571,7 +829,7 @@ async def run_analysis(symbol: str, background_tasks: BackgroundTasks, request:
@app.get("/analyze/{symbol}/status")
async def get_analysis_status(symbol: str):
- """Get the status of a running or completed analysis."""
+ """Get the status of a running or completed analysis, including live pipeline step progress."""
symbol = symbol.upper()
if symbol not in running_analyses:
@@ -581,11 +839,234 @@ async def get_analysis_status(symbol: str):
"message": "No analysis has been run for this stock"
}
- return {
+ result = {
"symbol": symbol,
**running_analyses[symbol]
}
+ # Include live pipeline step progress from step_timer when analysis is running
+ if running_analyses[symbol].get("status") == "running":
+ try:
+ from tradingagents.log_utils import step_timer
+
+ steps = step_timer.get_steps()
+ if steps:
+ # Build a live progress summary
+ STEP_NAMES = {
+ "market_analyst": "Market Analysis",
+ "social_media_analyst": "Social Media Analysis",
+ "news_analyst": "News Analysis",
+ "fundamentals_analyst": "Fundamental Analysis",
+ "bull_researcher": "Bull Research",
+ "bear_researcher": "Bear Research",
+ "research_manager": "Research Manager",
+ "trader": "Trader Decision",
+ "aggressive_analyst": "Aggressive Analysis",
+ "conservative_analyst": "Conservative Analysis",
+ "neutral_analyst": "Neutral Analysis",
+ "risk_manager": "Risk Manager",
+ }
+
+ completed = [k for k, v in steps.items() if v.get("status") == "completed"]
+ running = [k for k, v in steps.items() if v.get("status") == "running"]
+ total = 12
+
+ # Build progress message from live step data
+ if running:
+ current_step = STEP_NAMES.get(running[0], running[0])
+ result["progress"] = f"Step {len(completed)+1}/{total}: {current_step}..."
+ elif completed:
+ last_step = STEP_NAMES.get(completed[-1], completed[-1])
+ result["progress"] = f"Step {len(completed)}/{total}: {last_step} done"
+
+ result["steps_completed"] = len(completed)
+ result["steps_running"] = [STEP_NAMES.get(s, s) for s in running]
+ result["steps_total"] = total
+ result["pipeline_steps"] = {
+ k: {"status": v.get("status"), "duration_ms": v.get("duration_ms")}
+ for k, v in steps.items()
+ }
+ except Exception:
+ pass # Don't fail status endpoint if step_timer unavailable
+
+ return result
+
+
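With the enrichment above, a client polling GET /analyze/{symbol}/status during a run receives the step fields added here. The sketch below shows an illustrative payload (values invented, field names from the code above) and one way a caller might turn it into a text progress bar.

# Illustrative status payload; values are invented, field names follow the code above.
example_status = {
    "symbol": "TCS",
    "status": "running",
    "progress": "Step 5/12: Bull Research...",
    "steps_completed": 4,
    "steps_running": ["Bull Research"],
    "steps_total": 12,
    "pipeline_steps": {
        "market_analyst": {"status": "completed", "duration_ms": 41230},
        "bull_researcher": {"status": "running", "duration_ms": None},
    },
}

def progress_bar(status: dict, width: int = 24) -> str:
    # Render steps_completed / steps_total as a fixed-width text bar.
    done = status.get("steps_completed", 0)
    total = status.get("steps_total", 0) or 1
    filled = int(width * done / total)
    return f"[{'#' * filled}{'.' * (width - filled)}] {done}/{total}"

print(progress_bar(example_status))   # [########................] 4/12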
+@app.post("/analyze/{symbol}/cancel")
+async def cancel_analysis(symbol: str):
+ """Cancel a running analysis for a stock."""
+ symbol = symbol.upper()
+
+ if symbol not in running_analyses:
+ return {"message": f"No analysis found for {symbol}", "status": "not_found"}
+
+ current_status = running_analyses[symbol].get("status")
+ if current_status not in ("running", "initializing"):
+ return {"message": f"Analysis for {symbol} is not running (status: {current_status})", "status": current_status}
+
+ # Set cancellation flag — the background thread checks this
+ running_analyses[symbol]["cancelled"] = True
+ running_analyses[symbol]["status"] = "cancelled"
+ running_analyses[symbol]["progress"] = "Cancellation requested..."
+ running_analyses[symbol]["completed_at"] = datetime.now().isoformat()
+
+ add_log("info", "system", f"🛑 Cancellation requested for {symbol}")
+
+ return {
+ "message": f"Cancellation requested for {symbol}",
+ "symbol": symbol,
+ "status": "cancelled"
+ }
+
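The endpoint above only flips the cancelled flag; the analysis thread is expected to notice it between steps. run_analysis_task is defined elsewhere in this file, so the following is only an assumed sketch of what such a cooperative check could look like, not the actual implementation.

# Hypothetical worker-side check (run_analysis_task's real body is not shown in this diff).
class AnalysisCancelled(Exception):
    """Raised inside the worker to unwind cleanly after a cancel request."""

def check_cancelled(running_analyses: dict, symbol: str) -> None:
    # Called between pipeline steps; raising keeps partial results out of the final report.
    if running_analyses.get(symbol, {}).get("cancelled"):
        raise AnalysisCancelled(f"{symbol} cancelled by user")

def run_pipeline(running_analyses: dict, symbol: str, steps: list) -> None:
    try:
        for step in steps:
            check_cancelled(running_analyses, symbol)   # cheap flag read, no locking needed
            step()
    except AnalysisCancelled:
        running_analyses[symbol]["status"] = "cancelled"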
+
+# ============== Backtest Endpoints ==============
+# NOTE: Static routes must come BEFORE parameterized routes to avoid
+# "accuracy" being matched as a {date} parameter.
+
+@app.get("/backtest/accuracy")
+async def get_accuracy_metrics():
+ """Get overall backtest accuracy metrics."""
+ metrics = db.calculate_accuracy_metrics()
+ return metrics
+
+
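The ordering note above holds because FastAPI (via Starlette) matches routes in registration order, so the first declared path wins. A minimal, self-contained demonstration of the pitfall:

# Minimal demonstration of static-before-parameterized route ordering in FastAPI.
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()

@app.get("/backtest/accuracy")        # registered first, so it is matched first
async def accuracy():
    return {"route": "accuracy"}

@app.get("/backtest/{date}")          # would otherwise swallow "accuracy" as a date
async def by_date(date: str):
    return {"route": "by_date", "date": date}

client = TestClient(app)
assert client.get("/backtest/accuracy").json() == {"route": "accuracy"}
assert client.get("/backtest/2024-01-05").json()["date"] == "2024-01-05"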
+@app.get("/backtest/{date}/{symbol}")
+async def get_backtest_result(date: str, symbol: str):
+ """Get backtest result for a specific stock and date.
+
+ Returns pre-calculated results only (no on-demand yfinance fetching)
+ to avoid blocking the event loop.
+ """
+ result = db.get_backtest_result(date, symbol.upper())
+ if not result:
+ return {'available': False, 'reason': 'Backtest not yet calculated'}
+
+ return {
+ 'available': True,
+ 'prediction_correct': result['prediction_correct'],
+ 'actual_return_1d': result['return_1d'],
+ 'actual_return_1w': result['return_1w'],
+ 'actual_return_1m': result['return_1m'],
+ 'price_at_prediction': result['price_at_prediction'],
+ 'current_price': result.get('price_1m_later') or result.get('price_1w_later'),
+ 'hold_days': result.get('hold_days'),
+ }
+
+
+@app.get("/backtest/{date}")
+async def get_backtest_results_for_date(date: str):
+ """Get all backtest results for a specific date."""
+ results = db.get_backtest_results_by_date(date)
+ return {"date": date, "results": results}
+
+
+@app.post("/backtest/{date}/calculate")
+async def calculate_backtest_for_date(date: str):
+ """Calculate backtest for all recommendations on a date (runs in background thread)."""
+ import backtest_service as bt
+
+ # Run calculation in a separate thread to avoid blocking the event loop
+ def run_backtest():
+ try:
+ bt.backtest_all_recommendations_for_date(date)
+ except Exception as e:
+ print(f"Backtest calculation error for {date}: {e}")
+
+ thread = threading.Thread(target=run_backtest)
+ thread.start()
+ return {"status": "started", "date": date, "message": "Backtest calculation started in background"}
+
+
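Because the calculate endpoint returns immediately and does its work in a thread, a caller has to poll for results afterwards. A small client sketch (base URL and retry budget are assumptions):

# Sketch: trigger the calculation, then poll for rows written by the background thread.
import time
import requests

BASE = "http://localhost:8000"   # assumed base URL

def backtest_and_fetch(date: str, attempts: int = 30, delay: float = 2.0):
    requests.post(f"{BASE}/backtest/{date}/calculate").raise_for_status()
    for _ in range(attempts):
        payload = requests.get(f"{BASE}/backtest/{date}").json()
        if payload.get("results"):           # empty until the worker has persisted rows
            return payload["results"]
        time.sleep(delay)
    return None                              # still running, or no recommendations for that date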
+# ============== Stock Price History Endpoint ==============
+
+@app.get("/stocks/{symbol}/prices")
+async def get_stock_price_history(symbol: str, days: int = 90):
+ """Get real historical closing prices for a stock from yfinance."""
+ try:
+ import yfinance as yf
+ from datetime import timedelta
+
+ yf_symbol = symbol if '.' in symbol else f"{symbol}.NS"
+ ticker = yf.Ticker(yf_symbol)
+
+ end_date = datetime.now()
+ start_date = end_date - timedelta(days=days)
+
+ hist = ticker.history(start=start_date.strftime('%Y-%m-%d'),
+ end=end_date.strftime('%Y-%m-%d'))
+
+ if hist.empty:
+ return {"symbol": symbol, "prices": [], "error": "No price data found"}
+
+ prices = [
+ {"date": idx.strftime('%Y-%m-%d'), "price": round(float(row['Close']), 2)}
+ for idx, row in hist.iterrows()
+ ]
+
+ return {"symbol": symbol, "prices": prices}
+ except ImportError:
+ return {"symbol": symbol, "prices": [], "error": "yfinance not installed"}
+ except Exception as e:
+ return {"symbol": symbol, "prices": [], "error": str(e)}
+
+
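The price-history endpoint returns a flat list of {date, price} points, which keeps downstream arithmetic simple. For example, a caller could compute the simple return over the requested window (base URL assumed):

# Sketch: consume /stocks/{symbol}/prices and compute the simple period return.
import requests

BASE = "http://localhost:8000"

def period_return(symbol: str, days: int = 30) -> float | None:
    data = requests.get(f"{BASE}/stocks/{symbol}/prices", params={"days": days}).json()
    prices = [p["price"] for p in data.get("prices", [])]
    if len(prices) < 2:
        return None                # not enough data (bad symbol, market holidays, ...)
    return (prices[-1] - prices[0]) / prices[0] * 100

print(period_return("RELIANCE"))   # e.g. 3.42 (percent) when data is available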
+# ============== Nifty50 Index Endpoint ==============
+
+@app.get("/nifty50/history")
+async def get_nifty50_history():
+ """Get Nifty50 index closing prices for recommendation date range."""
+ try:
+ import yfinance as yf
+ from datetime import timedelta
+
+ # Get the date range from our recommendations
+ dates = db.get_all_dates()
+ if not dates:
+ return {"dates": [], "prices": {}}
+
+ # Get date range with buffer for daily return calculation
+ start_date = (datetime.strptime(min(dates), "%Y-%m-%d") - timedelta(days=7)).strftime("%Y-%m-%d")
+ end_date = (datetime.strptime(max(dates), "%Y-%m-%d") + timedelta(days=7)).strftime("%Y-%m-%d")
+
+ # Fetch ^NSEI data
+ nifty = yf.Ticker("^NSEI")
+ hist = nifty.history(start=start_date, end=end_date, interval="1d")
+
+ prices = {}
+ for idx, row in hist.iterrows():
+ date_str = idx.strftime("%Y-%m-%d")
+ prices[date_str] = round(float(row['Close']), 2)
+
+ return {"dates": sorted(prices.keys()), "prices": prices}
+ except ImportError:
+ return {"dates": [], "prices": {}, "error": "yfinance not installed"}
+ except Exception as e:
+ return {"dates": [], "prices": {}, "error": str(e)}
+
+
+@app.on_event("startup")
+async def startup_event():
+ """Rebuild daily_recommendations and trigger backtest calculations at startup."""
+ db.rebuild_all_daily_recommendations()
+
+ # Trigger backtest calculation for all dates in background
+ def startup_backtest():
+ import backtest_service as bt
+ dates = db.get_all_dates()
+ for date in dates:
+ existing = db.get_backtest_results_by_date(date)
+ rec = db.get_recommendation_by_date(date)
+ expected_count = len(rec.get('analysis', {})) if rec else 0
+ if len(existing) < expected_count:
+ print(f"[Backtest] Calculating for {date} ({len(existing)}/{expected_count} done)...")
+ try:
+ bt.backtest_all_recommendations_for_date(date)
+ except Exception as e:
+ print(f"[Backtest] Error for {date}: {e}")
+
+ thread = threading.Thread(target=startup_backtest, daemon=True)
+ thread.start()
+
if __name__ == "__main__":
import uvicorn
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index bef80a5b..e5771e51 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -1,9 +1,11 @@
import { Routes, Route } from 'react-router-dom';
import { ThemeProvider } from './contexts/ThemeContext';
import { SettingsProvider } from './contexts/SettingsContext';
+import { NotificationProvider } from './contexts/NotificationContext';
import Header from './components/Header';
import Footer from './components/Footer';
import SettingsModal from './components/SettingsModal';
+import ToastContainer from './components/Toast';
import Dashboard from './pages/Dashboard';
import History from './pages/History';
import StockDetail from './pages/StockDetail';
@@ -13,19 +15,22 @@ function App() {
return (
-
-
-
-
- } />
- } />
- } />
- } />
-
-
-
-
-
+
+
+
+
+
+ } />
+ } />
+ } />
+ } />
+
+
+
+
+
+
+
);
diff --git a/frontend/src/components/AccuracyTrendChart.tsx b/frontend/src/components/AccuracyTrendChart.tsx
index ab56a20b..ccdc22cb 100644
--- a/frontend/src/components/AccuracyTrendChart.tsx
+++ b/frontend/src/components/AccuracyTrendChart.tsx
@@ -1,13 +1,23 @@
import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, Legend } from 'recharts';
import { getAccuracyTrend } from '../data/recommendations';
+export interface AccuracyTrendPoint {
+ date: string;
+ overall: number;
+ buy: number;
+ sell: number;
+ hold: number;
+}
+
interface AccuracyTrendChartProps {
height?: number;
className?: string;
+ data?: AccuracyTrendPoint[]; // Optional prop for real data
}
-export default function AccuracyTrendChart({ height = 200, className = '' }: AccuracyTrendChartProps) {
- const data = getAccuracyTrend();
+export default function AccuracyTrendChart({ height = 200, className = '', data: propData }: AccuracyTrendChartProps) {
+ // Use provided data or fall back to mock data
+ const data = propData || getAccuracyTrend();
if (data.length === 0) {
return (
diff --git a/frontend/src/components/CumulativeReturnChart.tsx b/frontend/src/components/CumulativeReturnChart.tsx
index d5b22c9c..5f90e388 100644
--- a/frontend/src/components/CumulativeReturnChart.tsx
+++ b/frontend/src/components/CumulativeReturnChart.tsx
@@ -1,13 +1,16 @@
import { AreaChart, Area, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, ReferenceLine } from 'recharts';
import { getCumulativeReturns } from '../data/recommendations';
+import type { CumulativeReturnPoint } from '../types';
interface CumulativeReturnChartProps {
height?: number;
className?: string;
+ data?: CumulativeReturnPoint[]; // Optional prop for real data
}
-export default function CumulativeReturnChart({ height = 160, className = '' }: CumulativeReturnChartProps) {
- const data = getCumulativeReturns();
+export default function CumulativeReturnChart({ height = 160, className = '', data: propData }: CumulativeReturnChartProps) {
+ // Use provided data or fall back to mock data
+ const data = propData || getCumulativeReturns();
if (data.length === 0) {
return (
diff --git a/frontend/src/components/IndexComparisonChart.tsx b/frontend/src/components/IndexComparisonChart.tsx
index 59de0ab6..cbd2d5d5 100644
--- a/frontend/src/components/IndexComparisonChart.tsx
+++ b/frontend/src/components/IndexComparisonChart.tsx
@@ -1,14 +1,17 @@
import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, Legend, ReferenceLine } from 'recharts';
import { TrendingUp, TrendingDown } from 'lucide-react';
import { getCumulativeReturns } from '../data/recommendations';
+import type { CumulativeReturnPoint } from '../types';
-interface IndexComparisonChartProps {
+export interface IndexComparisonChartProps {
height?: number;
className?: string;
+ data?: CumulativeReturnPoint[]; // Optional prop for real data
}
-export default function IndexComparisonChart({ height = 220, className = '' }: IndexComparisonChartProps) {
- const data = getCumulativeReturns();
+export default function IndexComparisonChart({ height = 220, className = '', data: propData }: IndexComparisonChartProps) {
+ // Use provided data or fall back to mock data
+ const data = propData || getCumulativeReturns();
if (data.length === 0) {
return (
diff --git a/frontend/src/components/InfoModal.tsx b/frontend/src/components/InfoModal.tsx
new file mode 100644
index 00000000..9fe2b746
--- /dev/null
+++ b/frontend/src/components/InfoModal.tsx
@@ -0,0 +1,82 @@
+import { X, Info } from 'lucide-react';
+import type { ReactNode } from 'react';
+
+interface InfoModalProps {
+ isOpen: boolean;
+ onClose: () => void;
+ title: string;
+ children: ReactNode;
+ icon?: ReactNode;
+}
+
+export default function InfoModal({ isOpen, onClose, title, children, icon }: InfoModalProps) {
+ if (!isOpen) return null;
+
+ return (
+
+ {/* Backdrop */}
+
+
+ {/* Modal */}
+
+
+ {/* Header */}
+
+
+ {icon || }
+
{title}
+
+
+
+
+ {/* Content */}
+
+ {children}
+
+
+ {/* Footer */}
+
+
+
+
+
+
+ );
+}
+
+// Reusable info button component
+interface InfoButtonProps {
+ onClick: () => void;
+ className?: string;
+ size?: 'sm' | 'md';
+}
+
+export function InfoButton({ onClick, className = '', size = 'sm' }: InfoButtonProps) {
+ const sizeClasses = size === 'sm' ? 'w-3.5 h-3.5' : 'w-4 h-4';
+
+ return (
+
+ );
+}
diff --git a/frontend/src/components/OverallReturnModal.tsx b/frontend/src/components/OverallReturnModal.tsx
index 4eddfe67..95b99724 100644
--- a/frontend/src/components/OverallReturnModal.tsx
+++ b/frontend/src/components/OverallReturnModal.tsx
@@ -1,16 +1,27 @@
import { X, Activity } from 'lucide-react';
import { getOverallReturnBreakdown } from '../data/recommendations';
import CumulativeReturnChart from './CumulativeReturnChart';
+import type { CumulativeReturnPoint } from '../types';
+
+export interface OverallReturnBreakdown {
+ dailyReturns: { date: string; return: number; multiplier: number; cumulative: number }[];
+ finalMultiplier: number;
+ finalReturn: number;
+ formula: string;
+}
interface OverallReturnModalProps {
isOpen: boolean;
onClose: () => void;
+ breakdown?: OverallReturnBreakdown; // Optional prop for real data
+ cumulativeData?: CumulativeReturnPoint[]; // Optional prop for chart data
}
-export default function OverallReturnModal({ isOpen, onClose }: OverallReturnModalProps) {
+export default function OverallReturnModal({ isOpen, onClose, breakdown: propBreakdown, cumulativeData }: OverallReturnModalProps) {
if (!isOpen) return null;
- const breakdown = getOverallReturnBreakdown();
+ // Use provided breakdown or fall back to mock data
+ const breakdown = propBreakdown || getOverallReturnBreakdown();
return (
@@ -55,7 +66,7 @@ export default function OverallReturnModal({ isOpen, onClose }: OverallReturnMod
diff --git a/frontend/src/components/PortfolioSimulator.tsx b/frontend/src/components/PortfolioSimulator.tsx
index 285ca059..d928209d 100644
--- a/frontend/src/components/PortfolioSimulator.tsx
+++ b/frontend/src/components/PortfolioSimulator.tsx
@@ -1,39 +1,271 @@
import { useState, useMemo } from 'react';
-import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, ReferenceLine } from 'recharts';
-import { Calculator, ChevronDown, ChevronUp, IndianRupee } from 'lucide-react';
-import { getOverallReturnBreakdown } from '../data/recommendations';
+import { LineChart, Line, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, ReferenceLine, Legend, BarChart, Bar, Cell, LabelList } from 'recharts';
+import { Calculator, ChevronDown, ChevronUp, IndianRupee, Settings2, BarChart3, Info, TrendingUp, TrendingDown, ArrowRightLeft, Wallet, PiggyBank, Receipt, HelpCircle, AlertCircle } from 'lucide-react';
+import { sampleRecommendations, getNifty50IndexHistory, getBacktestResult } from '../data/recommendations';
+import { calculateBrokerage, formatINR, type BrokerageBreakdown } from '../utils/brokerageCalculator';
+import InfoModal, { InfoButton } from './InfoModal';
+import type { Decision, DailyRecommendation } from '../types';
interface PortfolioSimulatorProps {
className?: string;
+ recommendations?: DailyRecommendation[];
+ isUsingMockData?: boolean;
+ nifty50Prices?: Record<string, number>;
+ allBacktestData?: Record<string, Record<string, number>>;
}
-export default function PortfolioSimulator({ className = '' }: PortfolioSimulatorProps) {
+export type InvestmentMode = 'all50' | 'topPicks';
+
+interface TradeRecord {
+ symbol: string;
+ entryDate: string;
+ entryPrice: number;
+ exitDate: string;
+ exitPrice: number;
+ quantity: number;
+ brokerage: BrokerageBreakdown;
+ profitLoss: number;
+}
+
+interface TradeStats {
+ totalTrades: number;
+ buyTrades: number;
+ sellTrades: number;
+ brokerageBreakdown: BrokerageBreakdown;
+ trades: TradeRecord[];
+}
+
+// Smart trade counting logic using Zerodha brokerage for Equity Delivery
+function calculateSmartTrades(
+ recommendations: typeof sampleRecommendations,
+ mode: InvestmentMode,
+ startingAmount: number,
+ nifty50Prices?: Record<string, number>,
+ allBacktestData?: Record<string, Record<string, number>>
+): {
+ portfolioData: Array<{ date: string; rawDate: string; value: number; niftyValue: number; return: number; cumulative: number }>;
+ stats: TradeStats;
+ openPositions: Record;
+} {
+ const hasRealNifty = nifty50Prices && Object.keys(nifty50Prices).length > 0;
+ const niftyHistory = hasRealNifty ? null : getNifty50IndexHistory();
+ const sortedRecs = [...recommendations].sort((a, b) => new Date(a.date).getTime() - new Date(b.date).getTime());
+
+ // Precompute real Nifty start price for comparison
+ const sortedNiftyDates = hasRealNifty ? Object.keys(nifty50Prices).sort() : [];
+ const niftyStartPrice = hasRealNifty && sortedNiftyDates.length > 0
+ ? nifty50Prices[sortedNiftyDates[0]]
+ : null;
+
+ // Track open positions per stock
+ const openPositions: Record<string, { entryDate: string; entryPrice: number; decision: Decision }> = {};
+ const completedTrades: TradeRecord[] = [];
+ let buyTrades = 0;
+ let sellTrades = 0;
+
+ const getStocksToTrack = (rec: typeof recommendations[0]) => {
+ if (mode === 'topPicks') {
+ return rec.top_picks.map(p => p.symbol);
+ }
+ return Object.keys(rec.analysis);
+ };
+
+ const stockCount = mode === 'topPicks' ? 3 : 50;
+ const investmentPerStock = startingAmount / stockCount;
+
+ let portfolioValue = startingAmount;
+ let niftyValue = startingAmount;
+ const niftyStartValue = niftyHistory?.[0]?.value || 21500;
+
+ const portfolioData = sortedRecs.map((rec) => {
+ const stocks = getStocksToTrack(rec);
+ let dayReturn = 0;
+ let stocksTracked = 0;
+
+ stocks.forEach(symbol => {
+ const analysis = rec.analysis[symbol];
+ if (!analysis || !analysis.decision) return;
+
+ const decision = analysis.decision;
+ const prevPosition = openPositions[symbol];
+
+ const backtest = getBacktestResult(symbol);
+ const currentPrice = backtest?.current_price || 1000;
+ const quantity = Math.floor(investmentPerStock / currentPrice);
+
+ if (decision === 'BUY') {
+ if (!prevPosition) {
+ openPositions[symbol] = { entryDate: rec.date, entryPrice: currentPrice, decision };
+ buyTrades++;
+ } else if (prevPosition.decision === 'SELL') {
+ buyTrades++;
+ openPositions[symbol] = { entryDate: rec.date, entryPrice: currentPrice, decision };
+ } else {
+ openPositions[symbol].decision = decision;
+ }
+ // Use real backtest return if available, otherwise 0 (neutral)
+ const realBuyReturn = allBacktestData?.[rec.date]?.[symbol];
+ dayReturn += realBuyReturn !== undefined ? realBuyReturn : 0;
+ stocksTracked++;
+ } else if (decision === 'HOLD') {
+ if (prevPosition) {
+ openPositions[symbol].decision = decision;
+ }
+ // Use real backtest return if available, otherwise 0 (neutral)
+ const realHoldReturn = allBacktestData?.[rec.date]?.[symbol];
+ dayReturn += realHoldReturn !== undefined ? realHoldReturn : 0;
+ stocksTracked++;
+ } else if (decision === 'SELL') {
+ if (prevPosition && (prevPosition.decision === 'BUY' || prevPosition.decision === 'HOLD')) {
+ sellTrades++;
+
+ // Use real backtest return for exit price if available, otherwise break-even
+ const realSellReturn = allBacktestData?.[rec.date]?.[symbol];
+ const exitPrice = realSellReturn !== undefined
+ ? currentPrice * (1 + realSellReturn / 100)
+ : currentPrice;
+ const brokerage = calculateBrokerage({
+ buyPrice: prevPosition.entryPrice,
+ sellPrice: exitPrice,
+ quantity,
+ tradeType: 'delivery',
+ });
+
+ const grossProfit = (exitPrice - prevPosition.entryPrice) * quantity;
+ const profitLoss = grossProfit - brokerage.totalCharges;
+
+ completedTrades.push({
+ symbol,
+ entryDate: prevPosition.entryDate,
+ entryPrice: prevPosition.entryPrice,
+ exitDate: rec.date,
+ exitPrice,
+ quantity,
+ brokerage,
+ profitLoss,
+ });
+
+ delete openPositions[symbol];
+ }
+ stocksTracked++;
+ }
+ });
+
+ const avgDayReturn = stocksTracked > 0 ? dayReturn / stocksTracked : 0;
+ portfolioValue = portfolioValue * (1 + avgDayReturn / 100);
+
+ // Use real Nifty50 prices if available, otherwise use mock history
+ if (hasRealNifty && niftyStartPrice) {
+ const closestDate = sortedNiftyDates.find(d => d >= rec.date) || sortedNiftyDates[sortedNiftyDates.length - 1];
+ if (closestDate && nifty50Prices[closestDate]) {
+ niftyValue = startingAmount * (nifty50Prices[closestDate] / niftyStartPrice);
+ }
+ } else if (niftyHistory) {
+ const niftyPoint = niftyHistory.find(n => n.date === rec.date);
+ if (niftyPoint) {
+ niftyValue = startingAmount * (niftyPoint.value / niftyStartValue);
+ }
+ }
+
+ return {
+ date: new Date(rec.date).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' }),
+ rawDate: rec.date,
+ value: Math.round(portfolioValue),
+ niftyValue: Math.round(niftyValue),
+ return: avgDayReturn,
+ cumulative: ((portfolioValue - startingAmount) / startingAmount) * 100,
+ };
+ });
+
+ const totalBrokerage = completedTrades.reduce(
+ (acc, trade) => ({
+ brokerage: acc.brokerage + trade.brokerage.brokerage,
+ stt: acc.stt + trade.brokerage.stt,
+ exchangeCharges: acc.exchangeCharges + trade.brokerage.exchangeCharges,
+ sebiCharges: acc.sebiCharges + trade.brokerage.sebiCharges,
+ gst: acc.gst + trade.brokerage.gst,
+ stampDuty: acc.stampDuty + trade.brokerage.stampDuty,
+ totalCharges: acc.totalCharges + trade.brokerage.totalCharges,
+ netProfit: acc.netProfit + trade.brokerage.netProfit,
+ turnover: acc.turnover + trade.brokerage.turnover,
+ }),
+ { brokerage: 0, stt: 0, exchangeCharges: 0, sebiCharges: 0, gst: 0, stampDuty: 0, totalCharges: 0, netProfit: 0, turnover: 0 }
+ );
+
+ return {
+ portfolioData,
+ stats: {
+ totalTrades: buyTrades + sellTrades,
+ buyTrades,
+ sellTrades,
+ brokerageBreakdown: totalBrokerage,
+ trades: completedTrades,
+ },
+ openPositions,
+ };
+}
+
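calculateSmartTrades above compounds the portfolio day by day: each tracked stock contributes its backtest return, the average of those becomes the day's portfolio return, and the running value is multiplied by (1 + return / 100). The same arithmetic, isolated in a short Python sketch with made-up returns:

# Worked example of the daily averaging + compounding used by the simulator (invented returns).
def simulate(starting_amount: float, daily_stock_returns: list[list[float]]) -> float:
    value = starting_amount
    for day in daily_stock_returns:
        avg = sum(day) / len(day) if day else 0.0   # average percent return across tracked stocks
        value *= 1 + avg / 100                      # compound the day's average return
    return value

# Three days, three tracked stocks per day (percent returns).
final = simulate(100_000, [[1.2, -0.4, 0.8], [0.0, 0.5, -0.3], [2.1, -1.0, 0.4]])
print(round(final, 2))   # about 101,103 with these illustrative numbers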
+// Helper for consistent positive/negative color classes
+function getValueColorClass(value: number): string {
+ return value >= 0
+ ? 'text-green-600 dark:text-green-400'
+ : 'text-red-600 dark:text-red-400';
+}
+
+export default function PortfolioSimulator({
+ className = '',
+ recommendations = sampleRecommendations,
+ isUsingMockData = true, // Default to true since this uses simulated returns
+ nifty50Prices,
+ allBacktestData,
+}: PortfolioSimulatorProps) {
const [startingAmount, setStartingAmount] = useState(100000);
const [showBreakdown, setShowBreakdown] = useState(false);
+ const [showSettings, setShowSettings] = useState(false);
+ const [showBrokerageDetails, setShowBrokerageDetails] = useState(false);
+ const [showTradeWaterfall, setShowTradeWaterfall] = useState(false);
+ const [investmentMode, setInvestmentMode] = useState<InvestmentMode>('all50');
+ const [includeBrokerage, setIncludeBrokerage] = useState(true);
- const breakdown = useMemo(() => getOverallReturnBreakdown(), []);
+ // Modal state - single state for all modals instead of 7 separate booleans
+ type ModalType = 'totalTrades' | 'buyTrades' | 'sellTrades' | 'portfolioValue' | 'profitLoss' | 'comparison' | null;
+ const [activeModal, setActiveModal] = useState<ModalType>(null);
- // Calculate portfolio values over time
- const portfolioData = useMemo(() => {
- let value = startingAmount;
- return breakdown.dailyReturns.map(day => {
- value = value * day.multiplier;
- return {
- date: new Date(day.date).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' }),
- value: Math.round(value),
- return: day.return,
- cumulative: day.cumulative,
- };
- });
- }, [breakdown.dailyReturns, startingAmount]);
+ const { portfolioData, stats, openPositions } = useMemo(() => {
+ return calculateSmartTrades(
+ recommendations,
+ investmentMode,
+ startingAmount,
+ nifty50Prices,
+ allBacktestData
+ );
+ }, [recommendations, investmentMode, startingAmount, nifty50Prices, allBacktestData]);
- const currentValue = portfolioData.length > 0
- ? portfolioData[portfolioData.length - 1].value
- : startingAmount;
- const totalReturn = ((currentValue - startingAmount) / startingAmount) * 100;
- const profitLoss = currentValue - startingAmount;
+ const lastDataPoint = portfolioData[portfolioData.length - 1];
+ const currentValue = lastDataPoint?.value ?? startingAmount;
+ const niftyValue = lastDataPoint?.niftyValue ?? startingAmount;
+
+ const totalCharges = includeBrokerage ? stats.brokerageBreakdown.totalCharges : 0;
+ const finalValue = currentValue - totalCharges;
+ const totalReturn = ((finalValue - startingAmount) / startingAmount) * 100;
+ const profitLoss = finalValue - startingAmount;
const isPositive = profitLoss >= 0;
+ const niftyReturn = ((niftyValue - startingAmount) / startingAmount) * 100;
+ const outperformance = totalReturn - niftyReturn;
+
+ // Calculate Y-axis domain with padding
+ const yAxisDomain = useMemo(() => {
+ if (portfolioData.length === 0) return [0, startingAmount * 1.2];
+
+ const allValues = portfolioData.flatMap(d => [d.value, d.niftyValue]);
+ const minValue = Math.min(...allValues);
+ const maxValue = Math.max(...allValues);
+ const padding = (maxValue - minValue) * 0.1;
+
+ return [Math.floor((minValue - padding) / 1000) * 1000, Math.ceil((maxValue + padding) / 1000) * 1000];
+ }, [portfolioData, startingAmount]);
+
+ const handleAmountChange = (e: React.ChangeEvent<HTMLInputElement>) => {
const value = parseInt(e.target.value.replace(/,/g, ''), 10);
if (!isNaN(value) && value >= 0) {
@@ -41,21 +273,73 @@ export default function PortfolioSimulator({ className = '' }: PortfolioSimulato
}
};
- const formatCurrency = (value: number) => {
- return new Intl.NumberFormat('en-IN', {
- style: 'currency',
- currency: 'INR',
- maximumFractionDigits: 0,
- }).format(value);
- };
+ const openPositionsCount = Object.keys(openPositions).length;
return (
-
-
-
Portfolio Simulator
+
+
+
+
Portfolio Simulator
+
+
+ {/* Settings Panel */}
+ {showSettings && (
+
+
+
+
+
+
+
+
+
+
+
+
+
+ )}
+
{/* Input Section */}
@@ -89,24 +373,158 @@ export default function PortfolioSimulator({ className = '' }: PortfolioSimulato
{/* Results Section */}
-
-
Current Value
-
- {formatCurrency(currentValue)}
+
+
+ Final Portfolio Value
+ setActiveModal('portfolioValue')} />
+
+
+ {formatINR(finalValue, 0)}
-
Profit/Loss
-
- {isPositive ? '+' : ''}{formatCurrency(profitLoss)}
+
+ Net Profit/Loss
+ setActiveModal('profitLoss')} />
+
+
+ {isPositive ? '+' : ''}{formatINR(profitLoss, 0)}
({isPositive ? '+' : ''}{totalReturn.toFixed(1)}%)
- {/* Chart */}
+ {/* Trade Stats with Info Buttons */}
+
+
setActiveModal('totalTrades')}
+ >
+
{stats.totalTrades}
+
+ Total Trades
+
+
+
setActiveModal('buyTrades')}
+ >
+
{stats.buyTrades}
+
+ Buy Trades
+
+
+
setActiveModal('sellTrades')}
+ >
+
{stats.sellTrades}
+
+ Sell Trades
+
+
+
setShowBrokerageDetails(!showBrokerageDetails)}
+ title="Click for detailed breakdown"
+ >
+
{formatINR(totalCharges, 0)}
+
+ Total Charges
+
+
+
+
+ {/* Open Positions Badge */}
+ {openPositionsCount > 0 && (
+
+
+
+
+ Open Positions (not yet sold)
+
+ {openPositionsCount} stocks
+
+
+ )}
+
+ {/* Brokerage Breakdown */}
+ {showBrokerageDetails && includeBrokerage && (
+
+
+
+ Zerodha Equity Delivery Charges
+
+
+
+ Brokerage:
+ {formatINR(stats.brokerageBreakdown.brokerage)}
+
+
+ STT:
+ {formatINR(stats.brokerageBreakdown.stt)}
+
+
+ Exchange Charges:
+ {formatINR(stats.brokerageBreakdown.exchangeCharges)}
+
+
+ SEBI Charges:
+ {formatINR(stats.brokerageBreakdown.sebiCharges)}
+
+
+ GST (18%):
+ {formatINR(stats.brokerageBreakdown.gst)}
+
+
+ Stamp Duty:
+ {formatINR(stats.brokerageBreakdown.stampDuty)}
+
+
+
+ Total Turnover:
+ {formatINR(stats.brokerageBreakdown.turnover, 0)}
+
+
+ )}
+
+ {/* Comparison with Nifty */}
+
setActiveModal('comparison')}
+ >
+
+
+
+ vs Nifty 50 Index
+
+
+
+
+
+
+ {totalReturn >= 0 ? '+' : ''}{totalReturn.toFixed(1)}%
+
+
AI Strategy
+
+
+
+ {niftyReturn >= 0 ? '+' : ''}{niftyReturn.toFixed(1)}%
+
+
Nifty 50
+
+
+
= 0 ? 'text-nifty-600 dark:text-nifty-400' : 'text-red-600 dark:text-red-400'}`}>
+ {outperformance >= 0 ? '+' : ''}{outperformance.toFixed(1)}%
+
+
Outperformance
+
+
+
+
+ {/* Chart with Nifty Comparison - Fixed Y-axis */}
{portfolioData.length > 0 && (
-
+
@@ -117,9 +535,10 @@ export default function PortfolioSimulator({ className = '' }: PortfolioSimulato
/>
formatCurrency(v).replace('₹', '')}
+ tickFormatter={(v) => formatINR(v, 0).replace('₹', '')}
className="text-gray-500 dark:text-gray-400"
width={60}
+ domain={yAxisDomain}
/>
[formatCurrency(value as number), 'Value']}
+ formatter={(value, name) => [
+ formatINR(Number(value) || 0, 0),
+ name === 'value' ? 'AI Strategy' : 'Nifty 50'
+ ]}
+ />
+
)}
+ {/* Trade Waterfall Toggle */}
+
+
+ {/* Trade Waterfall Chart */}
+ {showTradeWaterfall && stats.trades.length > 0 && (
+
+
+ Each bar represents a trade from buy to sell. Green = Profit, Red = Loss.
+
+
+
+
+ ({
+ ...t,
+ idx: i,
+ displayName: `${t.symbol}`,
+ duration: `${new Date(t.entryDate).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' })} → ${new Date(t.exitDate).toLocaleDateString('en-IN', { month: 'short', day: 'numeric' })}`,
+ }))}
+ layout="vertical"
+ margin={{ top: 5, right: 60, bottom: 5, left: 70 }}
+ >
+
+ formatINR(v, 0)}
+ domain={['dataMin', 'dataMax']}
+ />
+
+ [formatINR(Number(value) || 0, 2), 'P/L']}
+ labelFormatter={(_, payload) => {
+ if (payload && payload[0]) {
+ const d = payload[0].payload;
+ return `${d.symbol}: ${d.duration}`;
+ }
+ return '';
+ }}
+ />
+
+ {stats.trades.map((trade, index) => (
+ | = 0 ? '#22c55e' : '#ef4444'}
+ />
+ ))}
+ formatINR(Number(v) || 0, 0)}
+ style={{ fontSize: 9, fill: '#6b7280' }}
+ />
+ |
+
+
+
+
+
+ )}
+
{/* Daily Breakdown (Collapsible) */}