Merge pull request #99 from aguzererler/feature/portfolio-resumability-and-cleanup

feat: Add portfolio resumability, extend report saving, and gitignore uv.lock
This commit is contained in:
ahmet guzererler 2026-03-24 03:32:21 +01:00 committed by GitHub
commit 321cc80434
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 312 additions and 4343 deletions

View File

@ -78,8 +78,11 @@ FINNHUB_API_KEY=
# PostgreSQL connection string for Supabase (required for portfolio commands) # PostgreSQL connection string for Supabase (required for portfolio commands)
# SUPABASE_CONNECTION_STRING=postgresql://postgres.<project>:<password>@aws-1-<region>.pooler.supabase.com:6543/postgres # SUPABASE_CONNECTION_STRING=postgresql://postgres.<project>:<password>@aws-1-<region>.pooler.supabase.com:6543/postgres
# Portfolio data directory (where JSON reports are stored) # Root directory for all reports (scans, analysis, portfolio artifacts).
# TRADINGAGENTS_PORTFOLIO_DATA_DIR=reports # All output lands under {REPORTS_DIR}/daily/{date}/...
# PORTFOLIO_DATA_DIR overrides this for portfolio-only reports if you need them split.
# TRADINGAGENTS_REPORTS_DIR=/absolute/path/to/reports
# PORTFOLIO_DATA_DIR=/absolute/path/to/reports
# Portfolio constraint overrides # Portfolio constraint overrides
# TRADINGAGENTS_PM_MAX_POSITIONS=15 # maximum number of open positions # TRADINGAGENTS_PM_MAX_POSITIONS=15 # maximum number of open positions

2
.gitignore vendored
View File

@ -98,7 +98,7 @@ ipython_config.py
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control. # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more # This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries. # commonly ignored for libraries.
# uv.lock uv.lock
# poetry # poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.

View File

@ -104,6 +104,27 @@ async def trigger_auto(
background_tasks.add_task(_run_and_store, run_id, engine.run_auto(run_id, params or {})) background_tasks.add_task(_run_and_store, run_id, engine.run_auto(run_id, params or {}))
return {"run_id": run_id, "status": "queued"} return {"run_id": run_id, "status": "queued"}
@router.delete("/portfolio-stage")
async def reset_portfolio_stage(
    params: Dict[str, Any],
    user: dict = Depends(get_current_user),
):
    """Remove the cached PM decision and execution result for a date/portfolio.

    The next auto run will then redo Phase 3 from scratch, while Phases 1 & 2
    remain skippable as long as their cached reports still exist.
    """
    from tradingagents.portfolio.report_store import ReportStore

    date = params.get("date")
    portfolio_id = params.get("portfolio_id")
    if not (date and portfolio_id):
        raise HTTPException(status_code=422, detail="date and portfolio_id are required")

    removed = ReportStore().clear_portfolio_stage(date, portfolio_id)
    logger.info(
        "reset_portfolio_stage date=%s portfolio=%s deleted=%s user=%s",
        date,
        portfolio_id,
        removed,
        user["user_id"],
    )
    return {"deleted": removed, "date": date, "portfolio_id": portfolio_id}
@router.get("/") @router.get("/")
async def list_runs(user: dict = Depends(get_current_user)): async def list_runs(user: dict = Depends(get_current_user)):
# Filter by user in production # Filter by user in production

View File

@ -19,6 +19,45 @@ logger = logging.getLogger("agent_os.engine")
# Maximum characters of prompt/response content to include in the short message # Maximum characters of prompt/response content to include in the short message
_MAX_CONTENT_LEN = 300 _MAX_CONTENT_LEN = 300
def _fetch_prices(tickers: list[str]) -> dict[str, float]:
"""Fetch the latest closing price for each ticker via yfinance.
Returns a dict of {ticker: price}. Tickers that fail are silently skipped.
"""
if not tickers:
return {}
try:
import yfinance as yf
data = yf.download(tickers, period="2d", auto_adjust=True, progress=False, threads=True)
if data.empty:
return {}
close = data["Close"] if "Close" in data.columns else data
# Take the last available row
last_row = close.iloc[-1]
return {
t: float(last_row[t])
for t in tickers
if t in last_row.index and not __import__("math").isnan(last_row[t])
}
except Exception as exc:
logger.warning("_fetch_prices failed: %s", exc)
return {}
def _tickers_from_decision(decision: dict) -> list[str]:
"""Extract all ticker symbols referenced in a PM decision dict."""
tickers = set()
for key in ("sells", "buys", "holds"):
for item in decision.get(key) or []:
if isinstance(item, dict):
t = item.get("ticker") or item.get("symbol")
else:
t = str(item)
if t:
tickers.add(t.upper())
return list(tickers)
# Maximum characters of prompt/response for the full fields (generous limit) # Maximum characters of prompt/response for the full fields (generous limit)
_MAX_FULL_LEN = 50_000 _MAX_FULL_LEN = 50_000
@ -280,8 +319,21 @@ class LangGraphEngine:
if ticker_analyses: if ticker_analyses:
scan_summary["ticker_analyses"] = ticker_analyses scan_summary["ticker_analyses"] = ticker_analyses
# Fetch prices from scan_summary if available, else default to empty dict # Collect tickers: current holdings + scan candidates, then fetch live prices
prices = scan_summary.get("prices") or {} holding_tickers: list[str] = []
try:
from tradingagents.portfolio.repository import PortfolioRepository
_repo = PortfolioRepository()
_, holdings = _repo.get_portfolio_with_holdings(portfolio_id)
holding_tickers = [h.ticker for h in holdings]
except Exception as exc:
logger.warning("run_portfolio: could not load holdings for price fetch: %s", exc)
candidate_tickers = [
c if isinstance(c, str) else (c.get("ticker") or c.get("symbol") or "")
for c in (scan_summary.get("stocks_to_investigate") or [])
]
all_tickers = list({t.upper() for t in holding_tickers + candidate_tickers if t})
prices = _fetch_prices(all_tickers) if all_tickers else {}
initial_state = { initial_state = {
"portfolio_id": portfolio_id, "portfolio_id": portfolio_id,
@ -326,44 +378,106 @@ class LangGraphEngine:
except Exception as exc: except Exception as exc:
logger.warning("PORTFOLIO fallback ainvoke failed run=%s: %s", run_id, exc) logger.warning("PORTFOLIO fallback ainvoke failed run=%s: %s", run_id, exc)
# Save PM decision report # Save portfolio reports (Holding Reviews, Risk Metrics, PM Decision, Execution Result)
if final_state: if final_state:
try: try:
pm_decision_str = final_state.get("pm_decision", "") # 1. Holding Reviews — save the raw string via ReportStore
holding_reviews_str = final_state.get("holding_reviews")
if holding_reviews_str:
try:
reviews = json.loads(holding_reviews_str) if isinstance(holding_reviews_str, str) else holding_reviews_str
store.save_holding_reviews(date, portfolio_id, reviews)
except Exception as exc:
logger.warning("Failed to save holding_reviews run=%s: %s", run_id, exc)
# 2. Risk Metrics
risk_metrics_str = final_state.get("risk_metrics")
if risk_metrics_str:
try:
metrics = json.loads(risk_metrics_str) if isinstance(risk_metrics_str, str) else risk_metrics_str
store.save_risk_metrics(date, portfolio_id, metrics)
except Exception as exc:
logger.warning("Failed to save risk_metrics run=%s: %s", run_id, exc)
# 3. PM Decision
pm_decision_str = final_state.get("pm_decision")
if pm_decision_str: if pm_decision_str:
try: try:
pm_decision_dict = ( decision = json.loads(pm_decision_str) if isinstance(pm_decision_str, str) else pm_decision_str
json.loads(pm_decision_str) store.save_pm_decision(date, portfolio_id, decision)
if isinstance(pm_decision_str, str) except Exception as exc:
else pm_decision_str logger.warning("Failed to save pm_decision run=%s: %s", run_id, exc)
)
except (json.JSONDecodeError, TypeError): # 4. Execution Result
pm_decision_dict = {"raw": pm_decision_str} execution_result_str = final_state.get("execution_result")
ReportStore().save_pm_decision(date, portfolio_id, pm_decision_dict) if execution_result_str:
yield self._system_log( try:
f"Portfolio reports saved for {portfolio_id} on {date}" execution = json.loads(execution_result_str) if isinstance(execution_result_str, str) else execution_result_str
) store.save_execution_result(date, portfolio_id, execution)
except Exception as exc:
logger.warning("Failed to save execution_result run=%s: %s", run_id, exc)
yield self._system_log(f"Portfolio stage reports (decision & execution) saved for {portfolio_id} on {date}")
except Exception as exc: except Exception as exc:
logger.exception("Failed to save portfolio reports run=%s", run_id) logger.exception("Failed to save portfolio reports run=%s", run_id)
yield self._system_log( yield self._system_log(f"Warning: could not save portfolio reports: {exc}")
f"Warning: could not save portfolio reports: {exc}"
)
logger.info("Completed PORTFOLIO run=%s", run_id) logger.info("Completed PORTFOLIO run=%s", run_id)
async def run_trade_execution(
    self, run_id: str, date: str, portfolio_id: str, decision: dict, prices: dict,
    store: ReportStore | None = None,
) -> AsyncGenerator[Dict[str, Any], None]:
    """Execute a previously saved PM decision without re-running the decision phase.

    Supports resumability: when a cached decision exists, only the trade
    execution is performed. Yields system-log events describing progress and
    re-raises any execution failure after logging it.
    """
    logger.info("Starting TRADE_EXECUTION run=%s portfolio=%s date=%s", run_id, portfolio_id, date)
    yield self._system_log(f"Resuming trade execution for {portfolio_id} using saved decision…")

    from tradingagents.portfolio.trade_executor import TradeExecutor
    from tradingagents.portfolio.repository import PortfolioRepository

    # No prices supplied by the caller: pull live quotes for every ticker
    # the saved decision references.
    if not prices:
        symbols = _tickers_from_decision(decision)
        if symbols:
            yield self._system_log(f"Fetching live prices for {symbols} from yfinance…")
            prices = _fetch_prices(symbols)
            logger.info("TRADE_EXECUTION run=%s: fetched prices for %s", run_id, list(prices.keys()))

    # Still nothing — warn but proceed; the executor decides what it can do.
    if not prices:
        logger.warning("TRADE_EXECUTION run=%s: no prices available — execution may produce incomplete results", run_id)
        yield self._system_log(f"Warning: no prices found for {portfolio_id} on {date} — trade execution may be incomplete.")

    report_store = store or ReportStore()
    try:
        repository = PortfolioRepository()
        executor = TradeExecutor(repo=repository, config=self.config)
        result = executor.execute_decisions(portfolio_id, decision, prices, date=date)
        # Persist through the shared store so auto runs see the completed stage.
        report_store.save_execution_result(date, portfolio_id, result)
        yield self._system_log(f"Trade execution completed for {portfolio_id}. {result.get('summary', {})}")
        logger.info("Completed TRADE_EXECUTION run=%s", run_id)
    except Exception as exc:
        logger.exception("Trade execution failed run=%s", run_id)
        yield self._system_log(f"Error during trade execution: {exc}")
        raise
async def run_auto( async def run_auto(
self, run_id: str, params: Dict[str, Any] self, run_id: str, params: Dict[str, Any]
) -> AsyncGenerator[Dict[str, Any], None]: ) -> AsyncGenerator[Dict[str, Any], None]:
"""Run the full auto pipeline: scan → pipeline → portfolio.""" """Run the full auto pipeline: scan → pipeline → portfolio."""
date = params.get("date", time.strftime("%Y-%m-%d")) date = params.get("date", time.strftime("%Y-%m-%d"))
force = params.get("force", False)
logger.info("Starting AUTO run=%s date=%s", run_id, date) logger.info("Starting AUTO run=%s date=%s force=%s", run_id, date, force)
yield self._system_log(f"Starting full auto workflow for {date}") yield self._system_log(f"Starting full auto workflow for {date} (force={force})")
# Phase 1: Market scan # Phase 1: Market scan
yield self._system_log("Phase 1/3: Running market scan…") yield self._system_log("Phase 1/3: Running market scan…")
store = ReportStore() store = ReportStore()
if store.load_scan(date): if not force and store.load_scan(date):
yield self._system_log(f"Phase 1: Macro scan for {date} already exists, skipping.") yield self._system_log(f"Phase 1: Macro scan for {date} already exists, skipping.")
else: else:
async for evt in self.run_scan(f"{run_id}_scan", {"date": date}): async for evt in self.run_scan(f"{run_id}_scan", {"date": date}):
@ -382,7 +496,7 @@ class LangGraphEngine:
) )
else: else:
for ticker in tickers: for ticker in tickers:
if store.load_analysis(date, ticker): if not force and store.load_analysis(date, ticker):
yield self._system_log(f"Phase 2: Analysis for {ticker} on {date} already exists, skipping.") yield self._system_log(f"Phase 2: Analysis for {ticker} on {date} already exists, skipping.")
continue continue
@ -395,14 +509,29 @@ class LangGraphEngine:
# Phase 3: Portfolio management # Phase 3: Portfolio management
yield self._system_log("Phase 3/3: Running portfolio manager…") yield self._system_log("Phase 3/3: Running portfolio manager…")
portfolio_params = {k: v for k, v in params.items() if k != "ticker"} portfolio_params = {k: v for k, v in params.items() if k != "ticker"}
# Check if portfolio decision already exists portfolio_id = params.get("portfolio_id", "main_portfolio")
if store.load_pm_decision(date, portfolio_id):
yield self._system_log(f"Phase 3: Portfolio decision for {portfolio_id} on {date} already exists, skipping.") # Check if portfolio stage is fully complete (execution result exists)
if not force and store.load_execution_result(date, portfolio_id):
yield self._system_log(f"Phase 3: Portfolio execution for {portfolio_id} on {date} already exists, skipping.")
else: else:
async for evt in self.run_portfolio( # Check if we can resume from a saved decision
f"{run_id}_portfolio", {"date": date, **portfolio_params} saved_decision = store.load_pm_decision(date, portfolio_id)
): if not force and saved_decision:
yield evt yield self._system_log(f"Phase 3: Found saved PM decision for {portfolio_id}, resuming trade execution…")
# Fetch live prices for all tickers referenced in the decision
prices = _fetch_prices(_tickers_from_decision(saved_decision))
async for evt in self.run_trade_execution(
f"{run_id}_resume_trades", date, portfolio_id, saved_decision, prices,
store=store,
):
yield evt
else:
# Run full portfolio graph (Decision + Execution)
async for evt in self.run_portfolio(
f"{run_id}_portfolio", {"date": date, **portfolio_params}
):
yield evt
logger.info("Completed AUTO run=%s", run_id) logger.info("Completed AUTO run=%s", run_id)
@ -475,8 +604,8 @@ class LangGraphEngine:
"""Extract ticker symbols from a ReportStore scan summary dict. """Extract ticker symbols from a ReportStore scan summary dict.
Handles two shapes from the macro synthesis LLM output: Handles two shapes from the macro synthesis LLM output:
* List of dicts: ``[{"ticker": "AAPL", ...}, ...]`` * List of dicts: ``[{'ticker': 'AAPL', ...}, ...]``
* List of strings: ``["AAPL", "TSLA", ...]`` * List of strings: ``['AAPL', 'TSLA', ...]``
Also checks both ``stocks_to_investigate`` and ``watchlist`` keys. Also checks both ``stocks_to_investigate`` and ``watchlist`` keys.
Returns an uppercase, deduplicated list in original order. Returns an uppercase, deduplicated list in original order.
@ -599,7 +728,7 @@ class LangGraphEngine:
Handles several structures observed across LangChain / LangGraph versions: Handles several structures observed across LangChain / LangGraph versions:
- flat list of message objects ``[SystemMessage, HumanMessage, ...]`` - flat list of message objects ``[SystemMessage, HumanMessage, ...]``
- list-of-lists (batched) ``[[SystemMessage, HumanMessage, ...]]`` - list-of-lists (batched) ``[[SystemMessage, HumanMessage, ...]]``
- list of plain dicts ``[{"role": "system", "content": "..."}]`` - list of plain dicts ``[{'role': 'system', 'content': '...'}]``
- tuple wrapper ``([SystemMessage, ...],)`` - tuple wrapper ``([SystemMessage, ...],)``
""" """
if not messages: if not messages:
@ -628,7 +757,7 @@ class LangGraphEngine:
def _extract_model(self, event: Dict[str, Any]) -> str: def _extract_model(self, event: Dict[str, Any]) -> str:
"""Best-effort extraction of the model name from a LangGraph event.""" """Best-effort extraction of the model name from a LangGraph event."""
data = event.get("data") or {} data = event.get("data") or {};
# 1. invocation_params (standard LangChain) # 1. invocation_params (standard LangChain)
inv = data.get("invocation_params") or {} inv = data.get("invocation_params") or {}
@ -722,7 +851,7 @@ class LangGraphEngine:
) )
return { return {
"id": event.get("run_id", f"thought_{time.time_ns()}"), "id": event.get("run_id", f"thought_{time.time_ns()}").strip(),
"node_id": node_name, "node_id": node_name,
"parent_node_id": "start", "parent_node_id": "start",
"type": "thought", "type": "thought",
@ -757,7 +886,7 @@ class LangGraphEngine:
logger.info("Tool start tool=%s node=%s run=%s", name, node_name, run_id) logger.info("Tool start tool=%s node=%s run=%s", name, node_name, run_id)
return { return {
"id": event.get("run_id", f"tool_{time.time_ns()}"), "id": event.get("run_id", f"tool_{time.time_ns()}").strip(),
"node_id": f"tool_{name}", "node_id": f"tool_{name}",
"parent_node_id": node_name, "parent_node_id": node_name,
"type": "tool", "type": "tool",

View File

@ -8,6 +8,7 @@ import {
IconButton, IconButton,
Button, Button,
Input, Input,
Checkbox,
useDisclosure, useDisclosure,
Drawer, Drawer,
DrawerOverlay, DrawerOverlay,
@ -34,7 +35,7 @@ import {
Collapse, Collapse,
useToast, useToast,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { LayoutDashboard, Wallet, Settings, Terminal as TerminalIcon, ChevronRight, Eye, Search, BarChart3, Bot, ChevronDown, ChevronUp } from 'lucide-react'; import { LayoutDashboard, Wallet, Settings, Terminal as TerminalIcon, ChevronRight, Eye, Search, BarChart3, Bot, ChevronDown, ChevronUp, Trash2 } from 'lucide-react';
import { MetricHeader } from './components/MetricHeader'; import { MetricHeader } from './components/MetricHeader';
import { AgentGraph } from './components/AgentGraph'; import { AgentGraph } from './components/AgentGraph';
import { PortfolioViewer } from './components/PortfolioViewer'; import { PortfolioViewer } from './components/PortfolioViewer';
@ -50,6 +51,7 @@ interface RunParams {
date: string; date: string;
ticker: string; ticker: string;
portfolio_id: string; portfolio_id: string;
force: boolean;
} }
const RUN_TYPE_LABELS: Record<RunType, string> = { const RUN_TYPE_LABELS: Record<RunType, string> = {
@ -64,7 +66,7 @@ const REQUIRED_PARAMS: Record<RunType, (keyof RunParams)[]> = {
scan: ['date'], scan: ['date'],
pipeline: ['ticker', 'date'], pipeline: ['ticker', 'date'],
portfolio: ['date', 'portfolio_id'], portfolio: ['date', 'portfolio_id'],
auto: ['date', 'ticker'], auto: ['date', 'portfolio_id'],
}; };
/** Return the colour token for a given event type. */ /** Return the colour token for a given event type. */
@ -312,6 +314,7 @@ export const Dashboard: React.FC = () => {
date: new Date().toISOString().split('T')[0], date: new Date().toISOString().split('T')[0],
ticker: 'AAPL', ticker: 'AAPL',
portfolio_id: 'main_portfolio', portfolio_id: 'main_portfolio',
force: false,
}); });
// Auto-scroll the terminal to the bottom as new events arrive // Auto-scroll the terminal to the bottom as new events arrive
@ -335,7 +338,7 @@ export const Dashboard: React.FC = () => {
// Validate required params // Validate required params
const required = REQUIRED_PARAMS[type]; const required = REQUIRED_PARAMS[type];
const missing = required.filter((k) => !params[k]?.trim()); const missing = required.filter((k) => { const v = params[k]; return typeof v === 'string' ? !v.trim() : !v; });
if (missing.length > 0) { if (missing.length > 0) {
toast({ toast({
title: `Missing required fields for ${RUN_TYPE_LABELS[type]}`, title: `Missing required fields for ${RUN_TYPE_LABELS[type]}`,
@ -357,6 +360,7 @@ export const Dashboard: React.FC = () => {
portfolio_id: params.portfolio_id, portfolio_id: params.portfolio_id,
date: params.date, date: params.date,
ticker: params.ticker, ticker: params.ticker,
force: params.force,
}); });
setActiveRunId(res.data.run_id); setActiveRunId(res.data.run_id);
} catch (err) { } catch (err) {
@ -367,6 +371,27 @@ export const Dashboard: React.FC = () => {
} }
}; };
/** Clear the saved PM decision & execution result for the current date/portfolio. */
const resetPortfolioStage = async () => {
  const { date, portfolio_id } = params;
  if (!date || !portfolio_id) {
    toast({ title: 'Date and Portfolio ID are required', status: 'warning', duration: 3000, isClosable: true, position: 'top' });
    setShowParams(true);
    return;
  }
  try {
    const response = await axios.delete(`${API_BASE}/run/portfolio-stage`, { data: { date, portfolio_id } });
    const deleted: string[] = response.data.deleted;
    const cleared = deleted.length > 0;
    toast({
      title: cleared ? `Cleared: ${deleted.join(', ')}` : 'Nothing to clear — no decision files found',
      status: cleared ? 'success' : 'info',
      duration: 4000,
      isClosable: true,
      position: 'top',
    });
  } catch (err) {
    toast({ title: 'Failed to reset portfolio stage', status: 'error', duration: 3000, isClosable: true, position: 'top' });
  }
};
/** Open the full-screen event detail modal */ /** Open the full-screen event detail modal */
const openModal = useCallback((evt: AgentEvent) => { const openModal = useCallback((evt: AgentEvent) => {
setModalEvent(evt); setModalEvent(evt);
@ -479,6 +504,19 @@ export const Dashboard: React.FC = () => {
); );
})} })}
<Divider orientation="vertical" h="20px" /> <Divider orientation="vertical" h="20px" />
<Tooltip label="Clear PM decision & execution result for this date/portfolio, then re-run Auto to start Phase 3 fresh">
<Button
size="sm"
leftIcon={<Trash2 size={14} />}
colorScheme="red"
variant="outline"
onClick={resetPortfolioStage}
isDisabled={isRunning}
>
Reset Decision
</Button>
</Tooltip>
<Divider orientation="vertical" h="20px" />
<Tag size="sm" colorScheme={status === 'streaming' ? 'green' : status === 'completed' ? 'blue' : status === 'error' ? 'red' : 'gray'}> <Tag size="sm" colorScheme={status === 'streaming' ? 'green' : status === 'completed' ? 'blue' : status === 'error' ? 'red' : 'gray'}>
{status.toUpperCase()} {status.toUpperCase()}
</Tag> </Tag>
@ -529,8 +567,18 @@ export const Dashboard: React.FC = () => {
onChange={(e) => setParams((p) => ({ ...p, portfolio_id: e.target.value }))} onChange={(e) => setParams((p) => ({ ...p, portfolio_id: e.target.value }))}
/> />
</HStack> </HStack>
<HStack>
<Checkbox
size="sm"
colorScheme="orange"
isChecked={params.force}
onChange={(e) => setParams((p) => ({ ...p, force: e.target.checked }))}
>
<Text fontSize="xs" color="orange.300">Force re-run (ignore cached results)</Text>
</Checkbox>
</HStack>
<Text fontSize="2xs" color="whiteAlpha.400"> <Text fontSize="2xs" color="whiteAlpha.400">
Required: Scan date · Pipeline ticker, date · Portfolio date, portfolio · Auto date, ticker Required: Scan date · Pipeline ticker, date · Portfolio date, portfolio · Auto date, portfolio
</Text> </Text>
</VStack> </VStack>
</Box> </Box>

View File

@ -29,7 +29,9 @@ from tradingagents.default_config import _env, _env_float, _env_int
PORTFOLIO_CONFIG: dict = { PORTFOLIO_CONFIG: dict = {
"supabase_connection_string": os.getenv("SUPABASE_CONNECTION_STRING", ""), "supabase_connection_string": os.getenv("SUPABASE_CONNECTION_STRING", ""),
"data_dir": _env("PORTFOLIO_DATA_DIR", "reports"), # PORTFOLIO_DATA_DIR takes precedence; falls back to TRADINGAGENTS_REPORTS_DIR,
# then to "reports" (relative to CWD) — same default as report_paths.REPORTS_ROOT.
"data_dir": os.getenv("PORTFOLIO_DATA_DIR") or _env("REPORTS_DIR", "reports"),
"max_positions": 15, "max_positions": 15,
"max_position_pct": 0.15, "max_position_pct": 0.15,
"max_sector_pct": 0.35, "max_sector_pct": 0.35,
@ -46,7 +48,7 @@ def get_portfolio_config() -> dict:
""" """
cfg = dict(PORTFOLIO_CONFIG) cfg = dict(PORTFOLIO_CONFIG)
cfg["supabase_connection_string"] = os.getenv("SUPABASE_CONNECTION_STRING", cfg["supabase_connection_string"]) cfg["supabase_connection_string"] = os.getenv("SUPABASE_CONNECTION_STRING", cfg["supabase_connection_string"])
cfg["data_dir"] = _env("PORTFOLIO_DATA_DIR", cfg["data_dir"]) cfg["data_dir"] = os.getenv("PORTFOLIO_DATA_DIR") or _env("REPORTS_DIR", cfg["data_dir"])
cfg["max_positions"] = _env_int("PM_MAX_POSITIONS", cfg["max_positions"]) cfg["max_positions"] = _env_int("PM_MAX_POSITIONS", cfg["max_positions"])
cfg["max_position_pct"] = _env_float("PM_MAX_POSITION_PCT", cfg["max_position_pct"]) cfg["max_position_pct"] = _env_float("PM_MAX_POSITION_PCT", cfg["max_position_pct"])
cfg["max_sector_pct"] = _env_float("PM_MAX_SECTOR_PCT", cfg["max_sector_pct"]) cfg["max_sector_pct"] = _env_float("PM_MAX_SECTOR_PCT", cfg["max_sector_pct"])

View File

@ -276,6 +276,50 @@ class ReportStore:
path = self._portfolio_dir(date) / f"{portfolio_id}_pm_decision.json" path = self._portfolio_dir(date) / f"{portfolio_id}_pm_decision.json"
return self._read_json(path) return self._read_json(path)
def save_execution_result(
    self,
    date: str,
    portfolio_id: str,
    data: dict[str, Any],
) -> Path:
    """Persist a trade-execution report.

    Writes to ``{base_dir}/daily/{date}/portfolio/{portfolio_id}_execution_result.json``.

    Args:
        date: ISO date string.
        portfolio_id: UUID of the target portfolio.
        data: TradeExecutor output dict.

    Returns:
        The path the report was written to.
    """
    target = self._portfolio_dir(date) / f"{portfolio_id}_execution_result.json"
    return self._write_json(target, data)
def load_execution_result(
    self,
    date: str,
    portfolio_id: str,
) -> dict[str, Any] | None:
    """Load a saved trade-execution report, or ``None`` when the file is absent."""
    target = self._portfolio_dir(date) / f"{portfolio_id}_execution_result.json"
    return self._read_json(target)
def clear_portfolio_stage(self, date: str, portfolio_id: str) -> list[str]:
    """Delete the PM decision (json + md) and execution-result files for a date/portfolio.

    Returns:
        The names of the files actually removed, so the caller can log them.
    """
    base = self._portfolio_dir(date)
    removed: list[str] = []
    for suffix in ("_pm_decision.json", "_pm_decision.md", "_execution_result.json"):
        candidate = base / f"{portfolio_id}{suffix}"
        if candidate.exists():
            candidate.unlink()
            removed.append(candidate.name)
    return removed
def list_pm_decisions(self, portfolio_id: str) -> list[Path]: def list_pm_decisions(self, portfolio_id: str) -> list[Path]:
"""Return all saved PM decision JSON paths for portfolio_id, newest first. """Return all saved PM decision JSON paths for portfolio_id, newest first.

View File

@ -415,3 +415,20 @@ class PortfolioRepository:
) -> dict[str, Any] | None: ) -> dict[str, Any] | None:
"""Load risk metrics. Returns None if not found.""" """Load risk metrics. Returns None if not found."""
return self._store.load_risk_metrics(date, portfolio_id) return self._store.load_risk_metrics(date, portfolio_id)
def save_execution_result(
    self,
    portfolio_id: str,
    date: str,
    result: dict[str, Any],
) -> Path:
    """Save trade execution results.

    Thin delegation to the underlying ReportStore. Note the argument order:
    the repository API takes (portfolio_id, date) but the store expects
    (date, portfolio_id) — the swap below is intentional.
    """
    return self._store.save_execution_result(date, portfolio_id, result)

def load_execution_result(
    self,
    portfolio_id: str,
    date: str,
) -> dict[str, Any] | None:
    """Load trade execution results. Returns None if not found.

    Same intentional (portfolio_id, date) → (date, portfolio_id) swap as
    save_execution_result above.
    """
    return self._store.load_execution_result(date, portfolio_id)

View File

@ -19,9 +19,12 @@ all generated artifacts land under a single ``reports/`` tree::
from __future__ import annotations from __future__ import annotations
import os
from pathlib import Path from pathlib import Path
REPORTS_ROOT = Path("reports") # Configurable via TRADINGAGENTS_REPORTS_DIR env var.
# Falls back to "reports" (relative to CWD) when unset.
REPORTS_ROOT = Path(os.getenv("TRADINGAGENTS_REPORTS_DIR") or "reports")
def get_daily_dir(date: str) -> Path: def get_daily_dir(date: str) -> Path:

4298
uv.lock

File diff suppressed because it is too large Load Diff