Merge pull request #34 from aguzererler/copilot/refactor-agent-workflows-and-risk-metrics

feat: portfolio risk metrics computation + LangChain agent tools
This commit is contained in:
ahmet guzererler 2026-03-21 02:31:57 +01:00 committed by GitHub
commit c3954c966d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 1580 additions and 1 deletions

View File

@ -21,7 +21,13 @@ from tradingagents.portfolio.exceptions import (
) )
from tradingagents.portfolio.models import Holding, Portfolio, Trade from tradingagents.portfolio.models import Holding, Portfolio, Trade
from tradingagents.portfolio.repository import PortfolioRepository from tradingagents.portfolio.repository import PortfolioRepository
from tests.portfolio.conftest import requires_supabase
# Define skip marker inline — avoids problematic absolute import from conftest.
# Tests decorated with this marker need a live Supabase database; they are
# skipped when SUPABASE_CONNECTION_STRING is unset or empty.
import os

requires_supabase = pytest.mark.skipif(
    not os.getenv("SUPABASE_CONNECTION_STRING"),
    reason="SUPABASE_CONNECTION_STRING not set -- skipping integration tests",
)
# --------------------------------------------------------------------------- # ---------------------------------------------------------------------------

View File

@ -0,0 +1,507 @@
"""Tests for tradingagents/portfolio/risk_metrics.py.
Coverage:
- Happy-path: all metrics computed from sufficient data
- Sharpe / Sortino: correct annualisation, sign, edge cases
- VaR: 5th-percentile logic
- Max drawdown: correct peak-to-trough
- Beta: covariance / variance calculation
- Sector concentration: weighted from holdings_snapshot
- Insufficient data: returns None gracefully
- Single snapshot: n_days = 0, all None
- Type validation: raises TypeError for non-PortfolioSnapshot input
Run::
pytest tests/portfolio/test_risk_metrics.py -v
"""
from __future__ import annotations
import math
import pytest
from tradingagents.portfolio.models import PortfolioSnapshot
from tradingagents.portfolio.risk_metrics import (
_daily_returns,
_mean,
_percentile,
_std,
compute_risk_metrics,
)
# ---------------------------------------------------------------------------
# Helper factories
# ---------------------------------------------------------------------------
def make_snapshot(
    total_value: float,
    date: str = "2026-01-01",
    holdings: list[dict] | None = None,
    portfolio_id: str = "pid",
) -> PortfolioSnapshot:
    """Build a minimal, fully-populated PortfolioSnapshot for tests.

    Cash is always zero, so ``equity_value`` mirrors ``total_value``.
    """
    positions = holdings or []
    return PortfolioSnapshot(
        snapshot_id="snap-1",
        portfolio_id=portfolio_id,
        snapshot_date=date,
        total_value=total_value,
        cash=0.0,
        equity_value=total_value,
        num_positions=len(positions),
        holdings_snapshot=positions,
    )
def nav_snapshots(nav_values: list[float]) -> list[PortfolioSnapshot]:
    """Build one daily snapshot per NAV value, starting at 2026-01-01.

    Uses real calendar arithmetic for the date so series longer than 31
    entries still get valid ISO dates — the previous f-string produced
    impossible strings such as "2026-01-40" for long series (e.g. the
    40-day VaR fixture).  Dates are identical to the old output for the
    first 31 entries.
    """
    from datetime import date, timedelta

    start = date(2026, 1, 1)
    return [
        make_snapshot(v, date=(start + timedelta(days=i)).isoformat())
        for i, v in enumerate(nav_values)
    ]
# ---------------------------------------------------------------------------
# Internal helper tests
# ---------------------------------------------------------------------------
class TestDailyReturns:
    """Unit tests for the _daily_returns helper."""

    def test_single_step(self):
        # A 100 -> 110 move is a +10 % daily return.
        assert _daily_returns([100.0, 110.0]) == pytest.approx([0.1])

    def test_multi_step(self):
        rets = _daily_returns([100.0, 110.0, 99.0])
        assert rets[0] == pytest.approx(0.1)
        assert rets[1] == pytest.approx((99.0 - 110.0) / 110.0)

    def test_zero_previous_returns_zero(self):
        # A zero prior NAV cannot yield a ratio; the helper reports 0.0.
        assert _daily_returns([0.0, 100.0]) == [0.0]

    def test_empty_list(self):
        assert _daily_returns([]) == []

    def test_one_element(self):
        # A single observation produces no returns at all.
        assert _daily_returns([100.0]) == []
class TestMean:
    """Unit tests for the arithmetic-mean helper _mean."""

    def test_basic(self):
        assert _mean([1.0, 2.0, 3.0]) == pytest.approx(2.0)

    def test_single(self):
        # The mean of one value is that value.
        assert _mean([5.0]) == pytest.approx(5.0)

    def test_empty_raises(self):
        # An empty sample has no mean; the error message mentions "empty".
        with pytest.raises(ValueError, match="empty"):
            _mean([])
class TestStd:
    """Unit tests for the sample-standard-deviation helper _std."""

    def test_sample_std(self):
        # Sample std of [1, 2, 3] is sqrt(1) == 1.
        assert _std([1.0, 2.0, 3.0]) == pytest.approx(1.0)

    def test_zero_variance(self):
        # A constant sample has zero spread.
        assert _std([5.0] * 3) == pytest.approx(0.0)

    def test_insufficient_data_returns_zero(self):
        # With ddof=1 a lone observation has no variance estimate.
        assert _std([1.0], ddof=1) == 0.0
class TestPercentile:
    """Unit tests for the _percentile helper."""

    def test_median(self):
        data = [1.0, 2.0, 3.0, 4.0, 5.0]
        assert _percentile(data, 50) == pytest.approx(3.0)

    def test_5th_percentile_all_equal(self):
        # A constant sample has the same value at every percentile.
        assert _percentile([0.01] * 10, 5) == pytest.approx(0.01)

    def test_0th_percentile_is_min(self):
        assert _percentile([3.0, 1.0, 2.0], 0) == pytest.approx(1.0)

    def test_100th_percentile_is_max(self):
        assert _percentile([3.0, 1.0, 2.0], 100) == pytest.approx(3.0)

    def test_empty_raises(self):
        with pytest.raises(ValueError):
            _percentile([], 50)
# ---------------------------------------------------------------------------
# compute_risk_metrics — type validation
# ---------------------------------------------------------------------------
class TestTypeValidation:
    """compute_risk_metrics must reject items that are not PortfolioSnapshot."""

    def test_non_snapshot_raises_type_error(self):
        bad_input = [{"total_value": 100.0}]
        with pytest.raises(TypeError, match="PortfolioSnapshot"):
            compute_risk_metrics(bad_input)  # type: ignore[list-item]

    def test_mixed_list_raises(self):
        # Even one junk entry alongside a valid snapshot must fail.
        mixed = [make_snapshot(100.0), "not-a-snapshot"]
        with pytest.raises(TypeError):
            compute_risk_metrics(mixed)  # type: ignore[list-item]
# ---------------------------------------------------------------------------
# compute_risk_metrics — insufficient data
# ---------------------------------------------------------------------------
class TestInsufficientData:
    """compute_risk_metrics degrades to None/empty when data is too sparse."""

    def test_empty_list(self):
        result = compute_risk_metrics([])
        for metric in ("sharpe", "sortino", "var_95", "max_drawdown", "beta"):
            assert result[metric] is None
        assert result["sector_concentration"] == {}
        assert result["return_stats"]["n_days"] == 0

    def test_single_snapshot(self):
        # One snapshot gives zero daily returns, so nothing is computable.
        result = compute_risk_metrics([make_snapshot(100_000.0)])
        for metric in ("sharpe", "sortino", "var_95", "max_drawdown"):
            assert result[metric] is None
        assert result["return_stats"]["n_days"] == 0
# ---------------------------------------------------------------------------
# compute_risk_metrics — Sharpe ratio
# ---------------------------------------------------------------------------
class TestSharpe:
    """Annualised Sharpe ratio: mean / std of daily returns × sqrt(252)."""

    def test_zero_std_returns_none(self):
        # A perfectly flat NAV has zero volatility — Sharpe is undefined.
        flat = nav_snapshots([100.0] * 4)
        assert compute_risk_metrics(flat)["sharpe"] is None

    def test_positive_trend_positive_sharpe(self):
        rising = nav_snapshots([100.0 * 1.001 ** i for i in range(30)])
        sharpe = compute_risk_metrics(rising)["sharpe"]
        assert sharpe is not None and sharpe > 0.0

    def test_negative_trend_negative_sharpe(self):
        falling = nav_snapshots([100.0 * 0.999 ** i for i in range(30)])
        sharpe = compute_risk_metrics(falling)["sharpe"]
        assert sharpe is not None and sharpe < 0.0

    def test_annualisation_factor(self):
        # Hand-compute the expected Sharpe for a known daily-return series:
        # sharpe = mean(r) / sample_std(r) * sqrt(252).
        returns = [0.01, 0.01, -0.005]
        mu = sum(returns) / len(returns)
        sigma = math.sqrt(
            sum((r - mu) ** 2 for r in returns) / (len(returns) - 1)
        )
        expected = mu / sigma * math.sqrt(252)

        # Build a NAV path that realises exactly these returns.
        navs = [100.0]
        for r in returns:
            navs.append(navs[-1] * (1 + r))

        result = compute_risk_metrics(nav_snapshots(navs))
        assert result["sharpe"] == pytest.approx(expected, rel=1e-4)
# ---------------------------------------------------------------------------
# compute_risk_metrics — Sortino ratio
# ---------------------------------------------------------------------------
class TestSortino:
    """Sortino ratio — like Sharpe but penalising only downside returns."""

    def test_no_downside_returns_none(self):
        # A strictly rising NAV has no negative daily returns, so the
        # downside deviation (and therefore the ratio) is undefined.
        rising = nav_snapshots([100.0, 101.0, 102.5, 104.0, 106.0])
        assert compute_risk_metrics(rising)["sortino"] is None

    def test_mixed_returns_yields_sortino(self):
        choppy = nav_snapshots(
            [100.0, 105.0, 98.0, 103.0, 101.0, 107.0, 99.0, 104.0]
        )
        # Down days exist, so the ratio must be computable.
        assert compute_risk_metrics(choppy)["sortino"] is not None

    def test_sortino_greater_than_sharpe_for_skewed_up_distribution(self):
        # Right-skewed series: many small gains followed by a few losses.
        # With concentrated downside, Sortino should exceed Sharpe.
        nav = [100.0]
        for _ in range(25):
            nav.append(nav[-1] * 1.003)  # steady small daily gains
        for _ in range(5):
            nav.append(nav[-1] * 0.988)  # a handful of moderate losses

        result = compute_risk_metrics(nav_snapshots(nav))
        sharpe, sortino = result["sharpe"], result["sortino"]
        if sharpe is not None and sortino is not None:
            assert sortino > sharpe
# ---------------------------------------------------------------------------
# compute_risk_metrics — VaR
# ---------------------------------------------------------------------------
class TestVaR:
    """95 % historical value-at-risk (5th percentile of daily returns)."""

    def test_insufficient_data_returns_none(self):
        # Four snapshots give only three returns — below the VaR minimum.
        short = nav_snapshots([100.0, 101.0, 102.0, 103.0])
        assert compute_risk_metrics(short)["var_95"] is None

    def test_var_is_non_negative(self):
        # Alternating zig-zag series with a mild upward drift.
        zigzag = [100.0 + i * 0.5 + ((-1) ** i) * 3 for i in range(40)]
        var = compute_risk_metrics(nav_snapshots(zigzag))["var_95"]
        assert var is not None
        assert var >= 0.0

    def test_var_near_zero_for_stable_portfolio(self):
        # Nearly constant positive daily returns — essentially no tail risk.
        steady = [100_000.0 + i * 10 for i in range(30)]
        var = compute_risk_metrics(nav_snapshots(steady))["var_95"]
        assert var is not None
        assert var < 0.001
# ---------------------------------------------------------------------------
# compute_risk_metrics — Max drawdown
# ---------------------------------------------------------------------------
class TestMaxDrawdown:
    """Peak-to-trough maximum drawdown, reported as a negative fraction."""

    def test_no_drawdown(self):
        # Monotonically rising NAV never dips below a prior peak.
        monotone = nav_snapshots([100.0 + i for i in range(10)])
        result = compute_risk_metrics(monotone)
        assert result["max_drawdown"] == pytest.approx(0.0)

    def test_simple_drawdown(self):
        # Peak 200 falling to trough 100: (100 - 200) / 200 = -0.5.
        path = [100.0, 150.0, 200.0, 150.0, 100.0]
        result = compute_risk_metrics(nav_snapshots(path))
        assert result["max_drawdown"] == pytest.approx(-0.5, rel=1e-4)

    def test_recovery_still_records_worst(self):
        # Dip to 80 then a full recovery — the -20 % trough must be kept.
        path = [100.0, 80.0, 90.0, 110.0]
        result = compute_risk_metrics(nav_snapshots(path))
        assert result["max_drawdown"] == pytest.approx(-0.2, rel=1e-4)

    def test_two_snapshots(self):
        result = compute_risk_metrics(nav_snapshots([100.0, 90.0]))
        assert result["max_drawdown"] == pytest.approx(-0.1, rel=1e-4)
# ---------------------------------------------------------------------------
# compute_risk_metrics — Beta
# ---------------------------------------------------------------------------
class TestBeta:
    """Beta = cov(portfolio, benchmark) / var(benchmark)."""

    def test_no_benchmark_returns_none(self):
        snaps = nav_snapshots([100.0, 102.0, 101.0, 103.0, 104.0])
        result = compute_risk_metrics(snaps, benchmark_returns=None)
        assert result["beta"] is None

    def test_empty_benchmark_returns_none(self):
        snaps = nav_snapshots([100.0, 102.0, 101.0, 103.0, 104.0])
        result = compute_risk_metrics(snaps, benchmark_returns=[])
        assert result["beta"] is None

    def test_perfect_correlation_beta_one(self):
        # Feed the portfolio's own returns back as the benchmark → beta = 1.
        nav = [100.0, 102.0, 101.0, 103.5, 105.0]
        own_returns = [
            (curr - prev) / prev for prev, curr in zip(nav, nav[1:])
        ]
        result = compute_risk_metrics(nav_snapshots(nav), benchmark_returns=own_returns)
        assert result["beta"] == pytest.approx(1.0, rel=1e-4)

    def test_double_beta(self):
        # Portfolio moves exactly 2× the benchmark every day → beta ≈ 2.
        bench = [0.01, -0.005, 0.008, 0.002, -0.003]
        nav = [100.0]
        for r in bench:
            nav.append(nav[-1] * (1 + r * 2))
        result = compute_risk_metrics(nav_snapshots(nav), benchmark_returns=bench)
        assert result["beta"] == pytest.approx(2.0, rel=1e-3)

    def test_zero_variance_benchmark_returns_none(self):
        # A perfectly flat benchmark has zero variance — beta is undefined.
        snaps = nav_snapshots([100.0, 101.0, 102.0, 103.0, 104.0])
        result = compute_risk_metrics(snaps, benchmark_returns=[0.0] * 4)
        assert result["beta"] is None
# ---------------------------------------------------------------------------
# compute_risk_metrics — Sector concentration
# ---------------------------------------------------------------------------
class TestSectorConcentration:
    """Sector weights are derived from the LAST snapshot's holdings."""

    def test_empty_holdings(self):
        first = make_snapshot(100_000.0, holdings=[])
        result = compute_risk_metrics([first, make_snapshot(105_000.0)])
        assert result["sector_concentration"] == {}

    def test_single_sector(self):
        tech_only = [
            {"ticker": "AAPL", "shares": 100, "avg_cost": 150.0, "sector": "Technology"},
        ]
        # Concentration is read from the final snapshot in the series.
        last = make_snapshot(100_000.0, holdings=tech_only)
        conc = compute_risk_metrics([make_snapshot(98_000.0), last])[
            "sector_concentration"
        ]
        # Technology weight: (100 × 150) / 100 000 × 100 = 15 %.
        assert "Technology" in conc
        assert conc["Technology"] == pytest.approx(15.0, rel=0.01)

    def test_multiple_sectors_sum_to_equity_fraction(self):
        two_sectors = [
            {"ticker": "AAPL", "shares": 100, "avg_cost": 100.0, "sector": "Technology"},
            {"ticker": "JPM", "shares": 50, "avg_cost": 200.0, "sector": "Financials"},
        ]
        last = make_snapshot(100_000.0, holdings=two_sectors)
        conc = compute_risk_metrics([make_snapshot(99_000.0), last])[
            "sector_concentration"
        ]
        # Each sector holds 10 000 out of a 100 000 NAV → 10 % apiece.
        for sector in ("Technology", "Financials"):
            assert sector in conc
            assert conc[sector] == pytest.approx(10.0, rel=0.01)

    def test_uses_current_value_when_available(self):
        holding = {
            "ticker": "AAPL",
            "shares": 100,
            "avg_cost": 100.0,
            "current_value": 20_000.0,  # should win over shares × avg_cost
            "sector": "Technology",
        }
        last = make_snapshot(100_000.0, holdings=[holding])
        conc = compute_risk_metrics([make_snapshot(98_000.0), last])[
            "sector_concentration"
        ]
        # current_value preferred: 20 000 / 100 000 × 100 = 20 %.
        assert conc["Technology"] == pytest.approx(20.0, rel=0.01)

    def test_missing_sector_defaults_to_unknown(self):
        no_sector = [{"ticker": "AAPL", "shares": 100, "avg_cost": 100.0}]
        last = make_snapshot(100_000.0, holdings=no_sector)
        result = compute_risk_metrics([make_snapshot(100_000.0), last])
        assert "Unknown" in result["sector_concentration"]
# ---------------------------------------------------------------------------
# compute_risk_metrics — return_stats
# ---------------------------------------------------------------------------
class TestReturnStats:
    """Sanity checks on the return_stats sub-dictionary."""

    def test_n_days_matches_returns_length(self):
        # Three snapshots yield exactly two daily returns.
        result = compute_risk_metrics(nav_snapshots([100.0, 102.0, 101.0]))
        assert result["return_stats"]["n_days"] == 2

    def test_mean_and_std_present(self):
        result = compute_risk_metrics(
            nav_snapshots([100.0, 102.0, 101.0, 103.5])
        )
        stats = result["return_stats"]
        assert stats["mean_daily"] is not None
        assert stats["std_daily"] is not None

    def test_empty_stats(self):
        stats = compute_risk_metrics([])["return_stats"]
        assert stats["mean_daily"] is None
        assert stats["std_daily"] is None
        assert stats["n_days"] == 0
# ---------------------------------------------------------------------------
# compute_risk_metrics — full integration scenario
# ---------------------------------------------------------------------------
class TestFullScenario:
    """End-to-end check on a realistic 90-day NAV series plus a benchmark."""

    def test_90_day_realistic_portfolio(self):
        """All metrics should be computable from a 90-day random-walk NAV."""
        import random
        from datetime import date, timedelta

        random.seed(42)  # deterministic series → reproducible assertions

        # Simulated NAV: ~12.5 % annual drift, ~10 % annualised volatility.
        nav = [100_000.0]
        for _ in range(89):
            daily_r = random.gauss(0.0005, 0.01)
            nav.append(nav[-1] * (1 + daily_r))
        bench_returns = [random.gauss(0.0004, 0.009) for _ in range(89)]

        holdings = [
            {"ticker": "AAPL", "shares": 100, "avg_cost": 175.0, "sector": "Technology"},
            {"ticker": "JPM", "shares": 50, "avg_cost": 200.0, "sector": "Financials"},
        ]

        # Real calendar arithmetic replaces the previous hand-rolled nested
        # ternaries over month boundaries (identical dates for 2026, but no
        # risk of off-by-one or leap-year mistakes).
        start = date(2026, 1, 1)
        snaps = [
            PortfolioSnapshot(
                snapshot_id=f"snap-{i}",
                portfolio_id="pid",
                snapshot_date=(start + timedelta(days=i)).isoformat(),
                total_value=v,
                cash=0.0,
                equity_value=v,
                num_positions=2,
                # Only the final snapshot carries holdings — sector
                # concentration is derived from the last snapshot only.
                holdings_snapshot=holdings if i == len(nav) - 1 else [],
            )
            for i, v in enumerate(nav)
        ]

        result = compute_risk_metrics(snaps, benchmark_returns=bench_returns)

        # All headline metrics should be present.
        assert result["sharpe"] is not None
        assert result["sortino"] is not None
        assert result["var_95"] is not None
        assert result["max_drawdown"] is not None
        assert result["beta"] is not None
        assert result["return_stats"]["n_days"] == 89
        # Sector concentration from the last snapshot.
        assert "Technology" in result["sector_concentration"]
        assert "Financials" in result["sector_concentration"]
        # Sanity bounds.
        assert -10.0 < result["sharpe"] < 10.0
        assert result["max_drawdown"] <= 0.0
        assert result["var_95"] >= 0.0
        assert result["beta"] > 0.0  # should be positive for realistic market data

View File

@ -0,0 +1,424 @@
"""Tests for tradingagents/agents/utils/portfolio_tools.py.
All tests use in-memory / temporary-filesystem data — no Supabase DB required.
Coverage:
- get_enriched_holdings: happy path, missing price, invalid JSON, empty list
- compute_portfolio_risk_metrics: happy path, insufficient data, invalid JSON
- load_portfolio_risk_metrics: file present, file missing, invalid JSON input
- load_portfolio_decision: file present, file missing
Run::
pytest tests/unit/test_portfolio_tools.py -v
"""
from __future__ import annotations
import json
import os
from pathlib import Path
import pytest
from tradingagents.agents.utils.portfolio_tools import (
compute_portfolio_risk_metrics,
get_enriched_holdings,
load_portfolio_decision,
load_portfolio_risk_metrics,
)
from tradingagents.portfolio.models import Holding, Portfolio, PortfolioSnapshot
from tradingagents.portfolio.report_store import ReportStore
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
PORTFOLIO_ID = "aaaa1111-0000-0000-0000-000000000001"
DATE = "2026-03-20"
@pytest.fixture
def sample_holdings_list() -> list[dict]:
    """Two Technology-sector holdings (AAPL, MSFT) as raw dicts."""
    specs = [
        ("h1", "AAPL", 100.0, 150.0, "Consumer Electronics"),
        ("h2", "MSFT", 50.0, 300.0, "Software"),
    ]
    return [
        {
            "holding_id": holding_id,
            "portfolio_id": PORTFOLIO_ID,
            "ticker": ticker,
            "shares": shares,
            "avg_cost": avg_cost,
            "sector": "Technology",
            "industry": industry,
            "created_at": "",
            "updated_at": "",
        }
        for holding_id, ticker, shares, avg_cost, industry in specs
    ]
@pytest.fixture
def sample_prices() -> dict[str, float]:
    """Current market prices keyed by ticker symbol."""
    return {
        "AAPL": 182.50,
        "MSFT": 420.00,
    }
@pytest.fixture
def sample_snapshots() -> list[dict]:
    """30 daily snapshot dicts with a steadily compounding NAV.

    ``snapshot_date`` uses real calendar arithmetic starting at 2026-02-01 —
    the previous f-string produced impossible dates such as "2026-02-30"
    for day indexes past the end of February.
    """
    from datetime import date, timedelta

    start = date(2026, 2, 1)
    navs = [100_000.0 * (1.001 ** i) for i in range(30)]
    return [
        {
            "snapshot_id": f"snap-{i}",
            "portfolio_id": PORTFOLIO_ID,
            "snapshot_date": (start + timedelta(days=i)).isoformat(),
            "total_value": v,
            "cash": 0.0,
            "equity_value": v,
            "num_positions": 2,
            "holdings_snapshot": [],
            "metadata": {},
        }
        for i, v in enumerate(navs)
    ]
@pytest.fixture
def tmp_reports(tmp_path: Path) -> Path:
    """Create and return a throwaway reports directory under tmp_path."""
    reports_dir = tmp_path / "reports"
    reports_dir.mkdir()
    return reports_dir
# ---------------------------------------------------------------------------
# Tests: get_enriched_holdings
# ---------------------------------------------------------------------------
class TestGetEnrichedHoldings:
    """Tests for the get_enriched_holdings LangChain tool."""

    @staticmethod
    def _invoke(holdings_json: str, prices_json: str, cash: float) -> dict:
        """Invoke the tool and decode its JSON reply."""
        raw = get_enriched_holdings.invoke(
            {
                "holdings_json": holdings_json,
                "prices_json": prices_json,
                "portfolio_cash": cash,
            }
        )
        return json.loads(raw)

    def test_happy_path_returns_enriched_data(
        self, sample_holdings_list, sample_prices
    ):
        result = self._invoke(
            json.dumps(sample_holdings_list), json.dumps(sample_prices), 10_000.0
        )
        assert "holdings" in result
        assert "portfolio_summary" in result
        assert len(result["holdings"]) == 2

        aapl = next(h for h in result["holdings"] if h["ticker"] == "AAPL")
        assert aapl["current_price"] == pytest.approx(182.50)
        assert aapl["current_value"] == pytest.approx(182.50 * 100.0)
        assert aapl["cost_basis"] == pytest.approx(150.0 * 100.0)
        assert aapl["unrealized_pnl"] == pytest.approx((182.50 - 150.0) * 100.0)

        equity = 182.50 * 100 + 420.0 * 50
        expected_total = 10_000.0 + equity
        summary = result["portfolio_summary"]
        assert summary["total_value"] == pytest.approx(expected_total)
        assert summary["cash"] == pytest.approx(10_000.0)
        assert summary["cash_pct"] == pytest.approx(10_000.0 / expected_total)

    def test_holding_with_missing_price_has_none_enrichment(
        self, sample_holdings_list
    ):
        # The price map covers AAPL only — MSFT must stay un-enriched.
        result = self._invoke(
            json.dumps(sample_holdings_list), json.dumps({"AAPL": 182.50}), 0.0
        )
        msft = next(h for h in result["holdings"] if h["ticker"] == "MSFT")
        assert msft["current_price"] is None

    def test_empty_holdings_returns_zero_equity(self, sample_prices):
        result = self._invoke("[]", json.dumps(sample_prices), 50_000.0)
        assert result["holdings"] == []
        summary = result["portfolio_summary"]
        assert summary["equity_value"] == pytest.approx(0.0)
        assert summary["total_value"] == pytest.approx(50_000.0)

    def test_invalid_holdings_json_returns_error(self, sample_prices):
        result = self._invoke("not-json", json.dumps(sample_prices), 0.0)
        assert "error" in result

    def test_invalid_prices_json_returns_error(self, sample_holdings_list):
        result = self._invoke(json.dumps(sample_holdings_list), "{bad json}", 0.0)
        assert "error" in result

    def test_weight_sums_to_equity_fraction(
        self, sample_holdings_list, sample_prices
    ):
        result = self._invoke(
            json.dumps(sample_holdings_list), json.dumps(sample_prices), 0.0
        )
        # With zero cash the holding weights should sum to 1.
        weights = [
            h["weight"] for h in result["holdings"] if h["weight"] is not None
        ]
        assert sum(weights) == pytest.approx(1.0, rel=1e-4)

    def test_zero_cash_with_holdings(self, sample_holdings_list, sample_prices):
        result = self._invoke(
            json.dumps(sample_holdings_list), json.dumps(sample_prices), 0.0
        )
        summary = result["portfolio_summary"]
        assert summary["cash_pct"] == pytest.approx(0.0, abs=1e-9)
# ---------------------------------------------------------------------------
# Tests: compute_portfolio_risk_metrics
# ---------------------------------------------------------------------------
class TestComputePortfolioRiskMetrics:
    """Tests for the compute_portfolio_risk_metrics LangChain tool."""

    @staticmethod
    def _invoke(nav_json: str, bench_json: str = "[]") -> dict:
        """Invoke the tool and decode its JSON reply."""
        raw = compute_portfolio_risk_metrics.invoke(
            {
                "nav_history_json": nav_json,
                "benchmark_returns_json": bench_json,
            }
        )
        return json.loads(raw)

    def test_happy_path_30_snapshots(self, sample_snapshots):
        result = self._invoke(json.dumps(sample_snapshots))
        for key in ("sharpe", "sortino", "var_95", "max_drawdown", "return_stats"):
            assert key in result
        assert result["return_stats"]["n_days"] == 29

    def test_single_snapshot_returns_none_metrics(self):
        lone = {
            "snapshot_id": "s1",
            "portfolio_id": PORTFOLIO_ID,
            "snapshot_date": "2026-01-01",
            "total_value": 100_000.0,
            "cash": 0.0,
            "equity_value": 100_000.0,
            "num_positions": 0,
        }
        result = self._invoke(json.dumps([lone]))
        assert result["sharpe"] is None
        assert result["var_95"] is None

    def test_invalid_nav_json_returns_error(self):
        assert "error" in self._invoke("not-json")

    def test_invalid_snapshot_record_returns_error(self):
        incomplete = {"total_value": 100.0}  # lacks required id/date fields
        assert "error" in self._invoke(json.dumps([incomplete]))

    def test_with_benchmark_returns_beta(self, sample_snapshots):
        # A non-constant benchmark has positive variance, so beta is computable.
        bench = [0.001 * (1 + 0.1 * (i % 5 - 2)) for i in range(29)]
        result = self._invoke(json.dumps(sample_snapshots), json.dumps(bench))
        assert result["beta"] is not None

    def test_empty_list_returns_null_metrics(self):
        result = self._invoke("[]")
        assert result["sharpe"] is None
        assert result["return_stats"]["n_days"] == 0
# ---------------------------------------------------------------------------
# Tests: load_portfolio_risk_metrics
# ---------------------------------------------------------------------------
class TestLoadPortfolioRiskMetrics:
    """Tests for the load_portfolio_risk_metrics LangChain tool."""

    @staticmethod
    def _load(portfolio_id: str, reports_dir) -> dict:
        """Invoke the tool for the shared DATE and decode its JSON reply."""
        raw = load_portfolio_risk_metrics.invoke(
            {
                "portfolio_id": portfolio_id,
                "date": DATE,
                "reports_dir": str(reports_dir),
            }
        )
        return json.loads(raw)

    def test_returns_metrics_when_file_exists(self, tmp_reports):
        ReportStore(base_dir=tmp_reports).save_risk_metrics(
            DATE, PORTFOLIO_ID, {"sharpe": 1.23, "sortino": 1.87, "var_95": 0.018}
        )
        result = self._load(PORTFOLIO_ID, tmp_reports)
        assert result["sharpe"] == pytest.approx(1.23)
        assert result["sortino"] == pytest.approx(1.87)

    def test_returns_error_when_file_missing(self, tmp_reports):
        result = self._load("nonexistent-id", tmp_reports)
        assert "error" in result
        # The error message should identify the missing portfolio.
        assert "nonexistent-id" in result["error"]

    def test_loaded_metrics_match_saved(self, tmp_reports):
        saved = {
            "sharpe": 0.85,
            "sortino": 1.10,
            "var_95": 0.025,
            "max_drawdown": -0.12,
            "beta": 0.93,
            "sector_concentration": {"Technology": 40.0, "Healthcare": 20.0},
            "return_stats": {"mean_daily": 0.0003, "std_daily": 0.009, "n_days": 60},
        }
        ReportStore(base_dir=tmp_reports).save_risk_metrics(DATE, PORTFOLIO_ID, saved)
        result = self._load(PORTFOLIO_ID, tmp_reports)
        assert result["beta"] == pytest.approx(0.93)
        assert result["sector_concentration"]["Technology"] == pytest.approx(40.0)
# ---------------------------------------------------------------------------
# Tests: load_portfolio_decision
# ---------------------------------------------------------------------------
class TestLoadPortfolioDecision:
    """Tests for the load_portfolio_decision LangChain tool."""

    @staticmethod
    def _load(portfolio_id: str, reports_dir) -> dict:
        """Invoke the tool for the shared DATE and decode its JSON reply."""
        raw = load_portfolio_decision.invoke(
            {
                "portfolio_id": portfolio_id,
                "date": DATE,
                "reports_dir": str(reports_dir),
            }
        )
        return json.loads(raw)

    def test_returns_decision_when_file_exists(self, tmp_reports):
        decision = {
            "sells": [{"ticker": "XYZ", "shares": 50, "rationale": "Stop loss triggered"}],
            "buys": [{"ticker": "AAPL", "shares": 10, "rationale": "Strong momentum"}],
            "holds": ["MSFT", "GOOGL"],
            "target_cash_pct": 0.05,
        }
        ReportStore(base_dir=tmp_reports).save_pm_decision(DATE, PORTFOLIO_ID, decision)
        result = self._load(PORTFOLIO_ID, tmp_reports)
        assert result["sells"][0]["ticker"] == "XYZ"
        assert result["buys"][0]["ticker"] == "AAPL"
        assert "MSFT" in result["holds"]

    def test_returns_error_when_file_missing(self, tmp_reports):
        result = self._load("no-such-portfolio", tmp_reports)
        assert "error" in result
        # The error message should identify the missing portfolio.
        assert "no-such-portfolio" in result["error"]

    def test_decision_fields_preserved(self, tmp_reports):
        decision = {
            "sells": [],
            "buys": [],
            "holds": ["AAPL"],
            "target_cash_pct": 0.10,
            "rationale": "Market uncertainty — staying defensive.",
        }
        ReportStore(base_dir=tmp_reports).save_pm_decision(DATE, PORTFOLIO_ID, decision)
        result = self._load(PORTFOLIO_ID, tmp_reports)
        assert result["rationale"] == "Market uncertainty — staying defensive."
        assert result["target_cash_pct"] == pytest.approx(0.10)

View File

@ -0,0 +1,339 @@
"""LangChain tools that expose Portfolio Manager data to agents.
These tools wrap the existing Portfolio / Holding / PortfolioSnapshot data
models and the ReportStore filesystem APIs so that any LangChain-compatible
agent can:
1. **Enrich holdings** with current prices to obtain P&L, weights, and
unrealised gain/loss using :meth:`Holding.enrich` and
:meth:`Portfolio.enrich`.
2. **Compute portfolio risk metrics** (Sharpe, Sortino, VaR, max drawdown,
beta, sector concentration) from a NAV history using the pure-Python
:func:`~tradingagents.portfolio.risk_metrics.compute_risk_metrics`.
3. **Load saved risk metrics** from the filesystem using
:meth:`~tradingagents.portfolio.report_store.ReportStore.load_risk_metrics`.
4. **Load PM decisions** from the filesystem using
:meth:`~tradingagents.portfolio.report_store.ReportStore.load_pm_decision`.
All tools accept and return plain strings / JSON strings so they are
compatible with any LangChain tool-calling LLM without custom serialisers.
Usage::
from tradingagents.agents.utils.portfolio_tools import (
get_enriched_holdings,
compute_portfolio_risk_metrics,
load_portfolio_risk_metrics,
load_portfolio_decision,
)
# In an agent's tool list:
tools = [
get_enriched_holdings,
compute_portfolio_risk_metrics,
load_portfolio_risk_metrics,
load_portfolio_decision,
]
"""
from __future__ import annotations
import json
from typing import Annotated
from langchain_core.tools import tool
from tradingagents.portfolio.models import Holding, Portfolio, PortfolioSnapshot
from tradingagents.portfolio.report_store import ReportStore
from tradingagents.portfolio.risk_metrics import compute_risk_metrics
# ---------------------------------------------------------------------------
# Tool 1 — Enrich holdings with current prices
# ---------------------------------------------------------------------------
@tool
def get_enriched_holdings(
    holdings_json: Annotated[
        str,
        "JSON array of holding objects. Each object must have: holding_id, "
        "portfolio_id, ticker, shares, avg_cost. Optional: sector, industry, "
        "created_at, updated_at.",
    ],
    prices_json: Annotated[
        str,
        "JSON object mapping ticker symbol to current market price. "
        'Example: {"AAPL": 182.50, "MSFT": 415.20}',
    ],
    portfolio_cash: Annotated[
        float,
        "Cash balance of the portfolio (USD). Used to compute cash_pct.",
    ] = 0.0,
) -> str:
    """Enrich portfolio holdings with current prices to compute P&L and weights.

    Uses the existing ``Holding.enrich()`` and ``Portfolio.enrich()`` methods
    from the portfolio data model. For each holding the following runtime
    fields are populated:

    - ``current_price`` — latest market price
    - ``current_value`` — current_price × shares
    - ``cost_basis`` — avg_cost × shares
    - ``unrealized_pnl`` — current_value − cost_basis
    - ``unrealized_pnl_pct`` — unrealized_pnl / cost_basis (as fraction)
    - ``weight`` — current_value / total_portfolio_value (as fraction)

    Portfolio-level summary fields returned:

    - ``total_value`` — cash + sum(current_value)
    - ``equity_value`` — sum(current_value)
    - ``cash_pct`` — cash / total_value

    Args:
        holdings_json: JSON array of holding dicts (see parameter description).
        prices_json: JSON object of ticker price mappings.
        portfolio_cash: Cash balance of the portfolio.

    Returns:
        JSON string with keys ``holdings`` (list of enriched dicts) and
        ``portfolio_summary`` (total_value, equity_value, cash, cash_pct),
        or an ``{"error": ...}`` dict on input validation failure.
    """
    try:
        raw_holdings: list[dict] = json.loads(holdings_json)
    except json.JSONDecodeError as exc:
        return json.dumps({"error": f"Invalid holdings_json: {exc}"})
    # Valid JSON of the wrong shape (object / scalar) must also yield the
    # promised {"error": ...} dict rather than an uncaught exception.
    if not isinstance(raw_holdings, list):
        return json.dumps(
            {"error": "Invalid holdings_json: expected a JSON array of objects"}
        )
    try:
        prices: dict[str, float] = json.loads(prices_json)
    except json.JSONDecodeError as exc:
        return json.dumps({"error": f"Invalid prices_json: {exc}"})
    if not isinstance(prices, dict):
        return json.dumps(
            {"error": "Invalid prices_json: expected a JSON object of ticker to price"}
        )
    # Deserialise holdings into model objects. AttributeError is included
    # because non-dict list entries may surface it from from_dict().
    holdings: list[Holding] = []
    for raw in raw_holdings:
        try:
            holdings.append(Holding.from_dict(raw))
        except (AttributeError, KeyError, ValueError, TypeError) as exc:
            return json.dumps({"error": f"Invalid holding record: {exc}"})
    # First pass — the equity total is needed before per-holding weights can
    # be computed. Tickers with no quoted price contribute 0 to equity.
    equity = sum(
        prices.get(h.ticker, 0.0) * h.shares for h in holdings
    )
    total_value = portfolio_cash + equity
    # Second pass — enrich each holding. Holdings with no quoted price are
    # left un-enriched, so their runtime fields serialise as null.
    enriched: list[dict] = []
    for holding in holdings:
        price = prices.get(holding.ticker)
        if price is not None:
            holding.enrich(price, total_value)
        enriched.append(
            {
                **holding.to_dict(),
                "current_price": holding.current_price,
                "current_value": holding.current_value,
                "cost_basis": holding.cost_basis,
                "unrealized_pnl": holding.unrealized_pnl,
                "unrealized_pnl_pct": holding.unrealized_pnl_pct,
                "weight": holding.weight,
            }
        )
    # Portfolio-level summary via a throwaway Portfolio instance (its
    # identity fields are irrelevant here — only cash matters).
    portfolio = Portfolio(
        portfolio_id="",
        name="",
        cash=portfolio_cash,
        initial_cash=portfolio_cash,
    )
    portfolio.enrich(holdings)
    return json.dumps(
        {
            "holdings": enriched,
            "portfolio_summary": {
                "total_value": portfolio.total_value,
                "equity_value": portfolio.equity_value,
                "cash": portfolio_cash,
                "cash_pct": portfolio.cash_pct,
            },
        },
        indent=2,
    )
# ---------------------------------------------------------------------------
# Tool 2 — Compute risk metrics from NAV history
# ---------------------------------------------------------------------------
@tool
def compute_portfolio_risk_metrics(
    nav_history_json: Annotated[
        str,
        "JSON array of snapshot objects ordered oldest-first. Each object "
        "must have: snapshot_id, portfolio_id, snapshot_date, total_value, "
        "cash, equity_value, num_positions. Optional: holdings_snapshot "
        "(list of dicts with ticker/sector/shares/avg_cost for sector "
        "concentration), metadata.",
    ],
    benchmark_returns_json: Annotated[
        str,
        "Optional JSON array of daily benchmark returns (e.g. SPY), aligned "
        "1-to-1 with the portfolio returns derived from nav_history_json. "
        'Pass an empty JSON array "[]" to skip beta computation.',
    ] = "[]",
) -> str:
    """Compute portfolio risk metrics from a NAV (Net Asset Value) time series.

    This tool uses the pure-Python ``compute_risk_metrics()`` function from
    the Portfolio Manager's risk metrics module. No LLM is involved.

    Metrics returned:

    - ``sharpe`` — annualised Sharpe ratio (rf = 0)
    - ``sortino`` — annualised Sortino ratio (downside deviation)
    - ``var_95`` — 95 % historical Value at Risk (positive fraction = max loss)
    - ``max_drawdown`` — worst peak-to-trough as a fraction (negative)
    - ``beta`` — portfolio beta vs. benchmark (null when no benchmark given)
    - ``sector_concentration`` — sector weights in % from the last snapshot
    - ``return_stats`` — summary: mean_daily, std_daily, n_days

    Requires at least 2 snapshots for any metrics. Returns null for metrics
    that cannot be computed from the available data.

    Args:
        nav_history_json: JSON array of snapshot dicts (see above).
        benchmark_returns_json: JSON array of floats or ``"[]"``.

    Returns:
        JSON string containing the metrics dict, or an ``{"error": ...}``
        dict on input validation failure.
    """
    try:
        raw_snapshots: list[dict] = json.loads(nav_history_json)
    except json.JSONDecodeError as exc:
        return json.dumps({"error": f"Invalid nav_history_json: {exc}"})
    # Valid JSON of the wrong shape (object / scalar) must also yield the
    # promised {"error": ...} dict rather than an uncaught exception.
    if not isinstance(raw_snapshots, list):
        return json.dumps(
            {"error": "Invalid nav_history_json: expected a JSON array of objects"}
        )
    try:
        bench_returns: list[float] = json.loads(benchmark_returns_json)
    except json.JSONDecodeError as exc:
        return json.dumps({"error": f"Invalid benchmark_returns_json: {exc}"})
    if not isinstance(bench_returns, list):
        return json.dumps(
            {"error": "Invalid benchmark_returns_json: expected a JSON array"}
        )
    # Deserialise snapshots into model objects. AttributeError is included
    # because non-dict list entries may surface it from from_dict().
    snapshots: list[PortfolioSnapshot] = []
    for raw in raw_snapshots:
        try:
            snapshots.append(PortfolioSnapshot.from_dict(raw))
        except (AttributeError, KeyError, ValueError, TypeError) as exc:
            return json.dumps({"error": f"Invalid snapshot record: {exc}"})
    try:
        metrics = compute_risk_metrics(
            snapshots,
            # An empty list means "no benchmark" — skip beta entirely.
            benchmark_returns=bench_returns if bench_returns else None,
        )
    except (TypeError, ValueError) as exc:
        return json.dumps({"error": f"Risk metrics computation failed: {exc}"})
    return json.dumps(metrics, indent=2)
# ---------------------------------------------------------------------------
# Tool 3 — Load saved risk metrics from filesystem
# ---------------------------------------------------------------------------
@tool
def load_portfolio_risk_metrics(
    portfolio_id: Annotated[str, "UUID of the portfolio."],
    date: Annotated[str, "ISO date string, e.g. '2026-03-20'."],
    reports_dir: Annotated[
        str,
        "Root reports directory. Defaults to 'reports' (relative to CWD) "
        "which matches the standard report_paths convention.",
    ] = "reports",
) -> str:
    """Load previously saved risk metrics for a portfolio on a given date.

    Reads the file written under
    ``reports/daily/{date}/portfolio/{portfolio_id}_risk_metrics.json`` via
    :meth:`~tradingagents.portfolio.report_store.ReportStore.load_risk_metrics`.

    Args:
        portfolio_id: Portfolio UUID.
        date: ISO date string.
        reports_dir: Root reports directory (defaults to ``"reports"``).

    Returns:
        JSON string of the risk metrics dict, or an ``{"error": ...}`` dict
        when the file is not found or cannot be read.
    """
    store = ReportStore(base_dir=reports_dir)
    try:
        metrics = store.load_risk_metrics(date, portfolio_id)
    except Exception as exc:  # tool boundary: surface read failures as JSON
        return json.dumps({"error": f"Failed to load risk metrics: {exc}"})
    if metrics is not None:
        return json.dumps(metrics, indent=2)
    not_found = (
        f"No risk metrics found for portfolio '{portfolio_id}' "
        f"on date '{date}'. "
        "Run compute_portfolio_risk_metrics first and save the result."
    )
    return json.dumps({"error": not_found})
# ---------------------------------------------------------------------------
# Tool 4 — Load PM decision from filesystem
# ---------------------------------------------------------------------------
@tool
def load_portfolio_decision(
    portfolio_id: Annotated[str, "UUID of the portfolio."],
    date: Annotated[str, "ISO date string, e.g. '2026-03-20'."],
    reports_dir: Annotated[
        str,
        "Root reports directory. Defaults to 'reports'.",
    ] = "reports",
) -> str:
    """Load the Portfolio Manager agent's decision for a given date.

    Reads the file written under
    ``reports/daily/{date}/portfolio/{portfolio_id}_pm_decision.json`` via
    :meth:`~tradingagents.portfolio.report_store.ReportStore.load_pm_decision`.
    The PM decision JSON contains the agent's allocation choices: sells,
    buys, holds, target cash %, and detailed rationale per action.

    Args:
        portfolio_id: Portfolio UUID.
        date: ISO date string.
        reports_dir: Root reports directory (defaults to ``"reports"``).

    Returns:
        JSON string of the PM decision dict, or an ``{"error": ...}`` dict
        when the file is not found.
    """
    store = ReportStore(base_dir=reports_dir)
    try:
        decision = store.load_pm_decision(date, portfolio_id)
    except Exception as exc:  # tool boundary: surface read failures as JSON
        return json.dumps({"error": f"Failed to load PM decision: {exc}"})
    if decision is not None:
        return json.dumps(decision, indent=2)
    not_found = (
        f"No PM decision found for portfolio '{portfolio_id}' "
        f"on date '{date}'."
    )
    return json.dumps({"error": not_found})

View File

@ -8,6 +8,7 @@ Import the primary interface classes from this package:
Holding, Holding,
Trade, Trade,
PortfolioSnapshot, PortfolioSnapshot,
compute_risk_metrics,
PortfolioError, PortfolioError,
PortfolioNotFoundError, PortfolioNotFoundError,
InsufficientCashError, InsufficientCashError,

View File

@ -0,0 +1,302 @@
"""Pure-Python risk metrics computation for the Portfolio Manager.
This module computes portfolio-level risk metrics from a time series of
NAV (Net Asset Value) snapshots. It is intentionally **LLM-free** — all
calculations are deterministic pure Python.
Metrics computed
----------------
- **Sharpe ratio** — annualised, risk-free rate = 0
- **Sortino ratio** — like Sharpe but denominator uses downside deviation only
- **95 % VaR** — historical simulation (5th percentile of daily returns),
  expressed as a *positive* fraction (e.g. 0.02 = 2 % expected max loss)
- **Max drawdown** — worst peak-to-trough decline as a fraction (negative)
- **Beta** — portfolio vs. an optional benchmark return series
- **Sector concentration** — weight per GICS sector (%) from the most-recent
  snapshot's ``holdings_snapshot`` field
Usage::
from tradingagents.portfolio import compute_risk_metrics, PortfolioSnapshot
metrics = compute_risk_metrics(snapshots, benchmark_returns=spy_returns)
# {
# "sharpe": 1.23,
# "sortino": 1.87,
# "var_95": 0.018,
# "max_drawdown": -0.142,
# "beta": 0.91,
# "sector_concentration": {"Technology": 35.4, "Healthcare": 18.2, ...},
# "return_stats": {"mean_daily": 0.0008, "std_daily": 0.011, "n_days": 90},
# }
See ``docs/portfolio/00_overview.md`` Phase 3 for the full specification.
"""
from __future__ import annotations
import math
from typing import Any
from tradingagents.portfolio.models import PortfolioSnapshot
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# Annualisation factor for Sharpe / Sortino (standard US equity calendar).
TRADING_DAYS_PER_YEAR: int = 252
MIN_PERIODS_SHARPE: int = 2  # minimum data points for Sharpe / Sortino
MIN_PERIODS_VAR: int = 5  # minimum data points for VaR
MIN_PERIODS_DRAWDOWN: int = 2  # minimum data points for max drawdown
MIN_PERIODS_BETA: int = 2  # minimum data points for beta
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _daily_returns(nav_series: list[float]) -> list[float]:
    """Daily percentage returns from an ordered NAV series.

    The result is one element shorter than the input; element ``t`` is
    ``(nav[t] - nav[t-1]) / nav[t-1]``. When the previous NAV is zero the
    return for that period is recorded as 0.0 to avoid division by zero.
    """
    return [
        (curr - prev) / prev if prev != 0.0 else 0.0
        for prev, curr in zip(nav_series, nav_series[1:])
    ]
def _mean(values: list[float]) -> float:
    """Arithmetic mean of *values*.

    Raises:
        ValueError: If *values* is empty.
    """
    count = len(values)
    if count == 0:
        raise ValueError("Cannot compute mean of empty list")
    return sum(values) / count
def _std(values: list[float], ddof: int = 1) -> float:
    """Standard deviation of *values*.

    Args:
        values: List of floats.
        ddof: Degrees of freedom adjustment (1 = sample std, 0 = population).

    Returns:
        The standard deviation, or 0.0 when there are too few data points
        (``len(values) <= ddof``).
    """
    count = len(values)
    if count <= ddof:
        return 0.0
    centre = sum(values) / count
    squared_error = sum((v - centre) ** 2 for v in values)
    return math.sqrt(squared_error / (count - ddof))
def _percentile(values: list[float], pct: float) -> float:
    """Linearly interpolated *pct*-th percentile of *values*.

    Args:
        values: Non-empty list of floats.
        pct: Percentile in [0, 100].

    Returns:
        The interpolated percentile value.

    Raises:
        ValueError: If *values* is empty.
    """
    if not values:
        raise ValueError("Cannot compute percentile of empty list")
    ordered = sorted(values)
    count = len(ordered)
    # Fractional rank within the sorted sample.
    position = (pct / 100.0) * (count - 1)
    lo = int(position)
    hi = lo + 1
    if hi >= count:
        # At (or beyond) the top of the range — clamp to the largest value.
        return ordered[-1]
    weight = position - lo
    return ordered[lo] * (1.0 - weight) + ordered[hi] * weight
# ---------------------------------------------------------------------------
# Public API
# ---------------------------------------------------------------------------
def _sharpe_ratio(
    returns: list[float],
    mu: float | None,
    trading_days_per_year: int,
) -> float | None:
    """Annualised Sharpe ratio (risk-free rate = 0).

    Returns ``None`` when the mean is unavailable (too few data points)
    or the return volatility is zero.
    """
    if mu is None:
        return None
    sigma = _std(returns)
    if sigma <= 0.0:
        return None
    return mu / sigma * math.sqrt(trading_days_per_year)


def _sortino_ratio(
    returns: list[float],
    mu: float | None,
    trading_days_per_year: int,
) -> float | None:
    """Annualised Sortino ratio (downside-deviation denominator).

    Returns ``None`` when the mean is unavailable or when fewer than two
    negative returns exist (no meaningful downside deviation).
    """
    if mu is None:
        return None
    downside = [r for r in returns if r < 0.0]
    sigma_down = _std(downside) if len(downside) >= 2 else 0.0
    if sigma_down <= 0.0:
        return None
    return mu / sigma_down * math.sqrt(trading_days_per_year)


def _historical_var_95(returns: list[float]) -> float | None:
    """95 % historical VaR (5th-percentile loss) as a positive fraction.

    Returns ``None`` with fewer than ``MIN_PERIODS_VAR`` returns, and 0.0
    when the 5th-percentile return is non-negative (no historical loss).
    """
    if len(returns) < MIN_PERIODS_VAR:
        return None
    fifth_pct = _percentile(returns, 5.0)
    return -fifth_pct if fifth_pct < 0.0 else 0.0


def _max_drawdown(nav_series: list[float]) -> float | None:
    """Worst peak-to-trough decline over the window, as a negative fraction.

    Returns ``None`` with fewer than ``MIN_PERIODS_DRAWDOWN`` NAV points,
    and 0.0 when no drawdown occurred.
    """
    if len(nav_series) < MIN_PERIODS_DRAWDOWN:
        return None
    peak = nav_series[0]
    worst = 0.0
    for nav in nav_series[1:]:
        if nav > peak:
            peak = nav
        if peak > 0.0:  # guard against an all-zero NAV prefix
            drawdown = (nav - peak) / peak
            if drawdown < worst:
                worst = drawdown
    return worst


def _beta_vs_benchmark(
    returns: list[float],
    benchmark_returns: list[float] | None,
) -> float | None:
    """Portfolio beta versus an aligned benchmark return series.

    The two series are aligned on their most recent overlapping window.
    Returns ``None`` when no benchmark is given, the overlap is shorter
    than ``MIN_PERIODS_BETA``, or the benchmark variance is zero.
    """
    if benchmark_returns is None or len(benchmark_returns) < MIN_PERIODS_BETA:
        return None
    min_len = min(len(returns), len(benchmark_returns))
    if min_len < MIN_PERIODS_BETA:
        return None
    r_p = returns[-min_len:]
    r_b = benchmark_returns[-min_len:]
    mu_p = _mean(r_p)
    mu_b = _mean(r_b)
    # Sample covariance and sample variance (both ddof = 1), so beta is
    # internally consistent.
    covariance = sum(
        (r_p[i] - mu_p) * (r_b[i] - mu_b) for i in range(min_len)
    ) / (min_len - 1)
    var_b = _std(r_b) ** 2
    if var_b <= 0.0:
        return None
    return covariance / var_b


def _sector_concentration(snapshots: list[PortfolioSnapshot]) -> dict[str, float]:
    """Sector weights in % from the most recent snapshot's holdings.

    Uses each holding's ``current_value`` when present; otherwise falls
    back to cost basis (``shares * avg_cost``). Returns ``{}`` when
    holdings or a positive total value are unavailable.
    """
    if not snapshots:
        return {}
    last_snap = snapshots[-1]
    holdings = last_snap.holdings_snapshot or []
    total_value = last_snap.total_value
    if not holdings or not total_value or total_value <= 0.0:
        return {}
    sector_totals: dict[str, float] = {}
    for h in holdings:
        sector = h.get("sector") or "Unknown"
        current_value = h.get("current_value")
        if current_value is not None:
            value = float(current_value)
        else:
            value = float(h.get("shares", 0.0)) * float(h.get("avg_cost", 0.0))
        sector_totals[sector] = sector_totals.get(sector, 0.0) + value
    return {
        sector: round(total / total_value * 100.0, 2)
        for sector, total in sector_totals.items()
    }


def compute_risk_metrics(
    snapshots: list[PortfolioSnapshot],
    benchmark_returns: list[float] | None = None,
    trading_days_per_year: int = TRADING_DAYS_PER_YEAR,
) -> dict[str, Any]:
    """Compute portfolio risk metrics from a NAV time series.

    Args:
        snapshots: Ordered list of :class:`~tradingagents.portfolio.models.PortfolioSnapshot`
            objects (oldest first). Each snapshot contributes one NAV data
            point (``snapshot.total_value``). At least 2 snapshots are
            required; fewer than that returns ``None`` for all rate metrics.
        benchmark_returns: Optional list of daily returns for a benchmark
            (e.g. SPY) aligned 1-to-1 with the *portfolio* daily returns
            derived from ``snapshots``. Must be the same length as
            ``len(snapshots) - 1``. When provided, beta is computed.
        trading_days_per_year: Number of trading days used to annualise
            Sharpe and Sortino ratios. Defaults to 252.

    Returns:
        A dict with keys:

        - ``sharpe`` (:class:`float` | ``None``) — annualised Sharpe ratio
        - ``sortino`` (:class:`float` | ``None``) — annualised Sortino ratio
        - ``var_95`` (:class:`float` | ``None``) — 95 % historical VaR
          (positive = expected max loss as a fraction of portfolio value)
        - ``max_drawdown`` (:class:`float` | ``None``) — worst peak-to-trough
          as a fraction (negative value)
        - ``beta`` (:class:`float` | ``None``) — portfolio beta vs. benchmark
        - ``sector_concentration`` (:class:`dict[str, float]`) — sector weights
          in % from the most-recent snapshot, or ``{}`` when not available
        - ``return_stats`` (:class:`dict`) — summary stats:
          ``mean_daily``, ``std_daily``, ``n_days``

    Raises:
        TypeError: If any element of *snapshots* is not a ``PortfolioSnapshot``.
    """
    # Validate input types up front so a bad element fails loudly.
    for i, snap in enumerate(snapshots):
        if not isinstance(snap, PortfolioSnapshot):
            raise TypeError(
                f"snapshots[{i}] must be a PortfolioSnapshot, got {type(snap).__name__}"
            )
    # Extract the NAV series and derive daily returns from it.
    nav_series = [s.total_value for s in snapshots]
    returns = _daily_returns(nav_series)
    n_days = len(returns)
    mean_daily = _mean(returns) if returns else None
    std_daily = _std(returns) if n_days >= 2 else None
    # Mean daily return is shared by Sharpe and Sortino; None marks
    # "not enough data" for both ratios.
    mu: float | None = _mean(returns) if n_days >= MIN_PERIODS_SHARPE else None
    # Each metric is delegated to a single-purpose helper; every helper
    # returns None when its own minimum-data requirement is not met.
    sharpe = _sharpe_ratio(returns, mu, trading_days_per_year)
    sortino = _sortino_ratio(returns, mu, trading_days_per_year)
    var_95 = _historical_var_95(returns)
    max_drawdown = _max_drawdown(nav_series)
    beta = _beta_vs_benchmark(returns, benchmark_returns)
    sector_concentration = _sector_concentration(snapshots)
    return {
        "sharpe": round(sharpe, 4) if sharpe is not None else None,
        "sortino": round(sortino, 4) if sortino is not None else None,
        "var_95": round(var_95, 6) if var_95 is not None else None,
        "max_drawdown": round(max_drawdown, 6) if max_drawdown is not None else None,
        "beta": round(beta, 4) if beta is not None else None,
        "sector_concentration": sector_concentration,
        "return_stats": {
            "mean_daily": round(mean_daily, 6) if mean_daily is not None else None,
            "std_daily": round(std_daily, 6) if std_daily is not None else None,
            "n_days": n_days,
        },
    }