chore: sync local modifications

- active.json: updated days_elapsed from hypothesis runner
- hypotheses.py: black formatting applied by pre-commit hook
- .gitignore: local additions

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Youssef Aitousarrah 2026-04-12 17:55:28 -07:00
parent 5b13e25adc
commit 7585da3ac6
3 changed files with 48 additions and 30 deletions

8
.gitignore vendored
View File

@@ -12,3 +12,11 @@ eval_data/
.env
memory_db/
.worktrees/
# Playwright
node_modules/
/test-results/
/playwright-report/
/blob-report/
/playwright/.cache/
/playwright/.auth/

View File

@@ -5,7 +5,7 @@
"id": "insider_buying-min-txn-100k",
"scanner": "insider_buying",
"title": "Raise min_transaction_value to $100K",
"description": "Hypothesis: filtering to insider purchases $100K (vs. current $25K) produces higher-quality picks by excluding routine small-lot grants and focusing on high-conviction, out-of-pocket capital deployment. Research (Lakonishok & Lee 2001; Cohen et al. 2012) shows large-value insider buys predict forward returns; small ones do not.",
"description": "Hypothesis: filtering to insider purchases \u2265$100K (vs. current $25K) produces higher-quality picks by excluding routine small-lot grants and focusing on high-conviction, out-of-pocket capital deployment. Research (Lakonishok & Lee 2001; Cohen et al. 2012) shows large-value insider buys predict forward returns; small ones do not.",
"branch": "hypothesis/insider_buying-min-txn-100k",
"pr_number": 529,
"status": "running",
@@ -14,10 +14,12 @@
"hypothesis_type": "implementation",
"created_at": "2026-04-10",
"min_days": 21,
"days_elapsed": 0,
"picks_log": [],
"days_elapsed": 1,
"picks_log": [
"2026-04-10"
],
"baseline_scanner": "insider_buying",
"conclusion": null
}
]
}
}

View File

@@ -12,7 +12,7 @@ from typing import Any, Dict, List
import streamlit as st
from tradingagents.ui.theme import COLORS, page_header
from tradingagents.ui.theme import page_header
_REPO_ROOT = Path(__file__).parent.parent.parent.parent
_ACTIVE_JSON = _REPO_ROOT / "docs/iterations/hypotheses/active.json"
@@ -50,13 +50,15 @@ def load_concluded_hypotheses(concluded_dir: str = str(_CONCLUDED_DIR)) -> List[
scanner = _extract_md_field(text, r"^\*\*Scanner:\*\* (.+)$")
period = _extract_md_field(text, r"^\*\*Period:\*\* (.+)$")
outcome = _extract_md_field(text, r"^\*\*Outcome:\*\* (.+)$")
results.append({
"filename": md_file.name,
"title": title or md_file.stem,
"scanner": scanner or "",
"period": period or "",
"outcome": outcome or "",
})
results.append(
{
"filename": md_file.name,
"title": title or md_file.stem,
"scanner": scanner or "",
"period": period or "",
"outcome": outcome or "",
}
)
except Exception:
continue
return results
@@ -85,7 +87,7 @@ def render() -> None:
if not hypotheses and not concluded:
st.info(
"No hypotheses yet. Run `/backtest-hypothesis \"<description>\"` to start an experiment."
'No hypotheses yet. Run `/backtest-hypothesis "<description>"` to start an experiment.'
)
return
@@ -100,20 +102,23 @@ def render() -> None:
if running or pending:
import pandas as pd
active_rows = []
for h in sorted(running + pending, key=lambda x: -x.get("priority", 0)):
days_left = days_until_ready(h)
ready_str = "concluding soon" if days_left == 0 else f"{days_left}d left"
active_rows.append({
"ID": h["id"],
"Title": h.get("title", ""),
"Scanner": h.get("scanner", ""),
"Status": h["status"],
"Progress": f"{h.get('days_elapsed', 0)}/{h.get('min_days', 14)}d",
"Picks": len(h.get("picks_log", [])),
"Ready": ready_str,
"Priority": h.get("priority", ""),
})
active_rows.append(
{
"ID": h["id"],
"Title": h.get("title", ""),
"Scanner": h.get("scanner", ""),
"Status": h["status"],
"Progress": f"{h.get('days_elapsed', 0)}/{h.get('min_days', 14)}d",
"Picks": len(h.get("picks_log", [])),
"Ready": ready_str,
"Priority": h.get("priority", ""),
}
)
df = pd.DataFrame(active_rows)
st.dataframe(
df,
@@ -143,17 +148,20 @@ def render() -> None:
if concluded:
import pandas as pd
concluded_rows = []
for c in concluded:
outcome = c["outcome"]
emoji = "" if "accepted" in outcome else ""
concluded_rows.append({
"Date": c["filename"][:10],
"Title": c["title"],
"Scanner": c["scanner"],
"Period": c["period"],
"Outcome": emoji,
})
concluded_rows.append(
{
"Date": c["filename"][:10],
"Title": c["title"],
"Scanner": c["scanner"],
"Period": c["period"],
"Outcome": emoji,
}
)
cdf = pd.DataFrame(concluded_rows)
st.dataframe(
cdf,