Fix: websocket emits "Run completed." on failed runs, masking errors in frontend (#102)

* Initial plan

* Fix websocket sending "Run completed." on failed runs, masking errors from frontend

Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
Agent-Logs-Url: https://github.com/aguzererler/TradingAgents/sessions/608ea02c-ca85-4f05-9ce1-611802493d7b

* Fix 5 failing unit tests (truthy MagicMock skips) + expose ticker_analyses in state + add Finviz live integration tests

Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
Agent-Logs-Url: https://github.com/aguzererler/TradingAgents/sessions/7872ceae-cade-4563-aedb-34660d419a36

* docs: update testing.md with MagicMock explainer, current test counts, full catalogue, and detailed run commands

Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
Agent-Logs-Url: https://github.com/aguzererler/TradingAgents/sessions/13639e60-cb9b-4cb0-891c-df5d448c158d

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
This commit is contained in:
Copilot 2026-03-24 18:06:48 +01:00 committed by GitHub
parent 4c14080d73
commit 9c148b208a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 686 additions and 34 deletions

View File

@ -63,7 +63,7 @@ async def websocket_endpoint(
if run_info.get("status") == "failed":
await websocket.send_json(
{"type": "system", "message": f"Run failed: {run_info.get('error', 'unknown error')}"}
{"type": "system", "message": f"Error: Run failed: {run_info.get('error', 'unknown error')}"}
)
else:
# status == "queued" — WebSocket is the executor (background task didn't start yet)
@ -102,10 +102,15 @@ async def websocket_endpoint(
else:
msg = f"Run type '{run_type}' streaming not yet implemented."
logger.warning(msg)
run_info["status"] = "failed"
run_info["error"] = msg
await websocket.send_json({"type": "system", "message": f"Error: {msg}"})
await websocket.send_json({"type": "system", "message": "Run completed."})
logger.info("Run completed run=%s type=%s", run_id, run_type)
if run_info.get("status") == "completed":
await websocket.send_json({"type": "system", "message": "Run completed."})
logger.info("Run completed run=%s type=%s", run_id, run_type)
else:
logger.info("Run ended with status=%s run=%s type=%s", run_info.get("status"), run_id, run_type)
except WebSocketDisconnect:
logger.info("WebSocket client disconnected run=%s", run_id)

View File

@ -346,6 +346,7 @@ class LangGraphEngine:
"analysis_date": date, # PortfolioManagerState uses analysis_date
"prices": prices,
"scan_summary": scan_summary,
"ticker_analyses": ticker_analyses,
"messages": [],
"portfolio_data": "",
"risk_metrics": "",

View File

@ -1,7 +1,7 @@
# TradingAgents — Test Suite Reference
> **Last verified:** 2026-03-19
> **Test counts (current):** 405 unit · 68 integration · 1 e2e
> **Last verified:** 2026-03-24
> **Test counts (current):** 793 passed · 14 skipped (unit+portfolio+cli default run) · 101 integration · 1 e2e
---
@ -16,7 +16,8 @@
7. [Execution Flow Diagrams](#execution-flow-diagrams)
8. [How to Run Tests](#how-to-run-tests)
9. [Mock Patterns](#mock-patterns)
10. [Adding New Tests — Checklist](#adding-new-tests--checklist)
10. [What is MagicMock?](#what-is-magicmock)
11. [Adding New Tests — Checklist](#adding-new-tests--checklist)
---
@ -32,13 +33,19 @@ tests/
├── conftest.py ← root fixtures (shared across all tiers)
├── unit/ ← offline, <5 s total, default run
│ ├── conftest.py ← mock factories (yfinance, AV, LLM)
│ ├── agents/ ← agent-specific unit tests
│ └── test_*.py
├── integration/ ← live APIs, excluded from default run
│ ├── conftest.py ← VCR config + live key fixtures
│ └── test_*.py
│ └── test_*_live.py
├── e2e/ ← real LLM pipeline, manual only
│ ├── conftest.py
│ └── test_*.py
├── portfolio/ ← portfolio module unit tests (no DB)
│ ├── conftest.py
│ └── test_*.py
├── cli/ ← CLI module unit tests
│ └── test_*.py
└── cassettes/ ← recorded HTTP responses (VCR)
```
@ -48,7 +55,7 @@ tests/
| Tier | Directory | Default run? | Network? | Speed | Purpose |
|------|-----------|:---:|:---:|-------|---------|
| **Unit** | `tests/unit/` | ✅ yes | ❌ blocked by `pytest-socket` | < 5 s | Validate logic, parsing, routing with mocks |
| **Unit** | `tests/unit/`, `tests/portfolio/`, `tests/cli/` | ✅ yes | ❌ blocked by `pytest-socket` | < 5 s | Validate logic, parsing, routing with mocks |
| **Integration** | `tests/integration/` | ❌ ignored | ✅ real APIs | seconds–minutes | Validate vendor API contracts, live data shapes |
| **E2E** | `tests/e2e/` | ❌ ignored | ✅ real LLM + APIs | minutes | Validate the full multi-agent pipeline |
@ -370,18 +377,48 @@ def pytest_collection_modifyitems(config, items):
| `test_e2e_api_integration.py` | 19 | `route_to_vendor` + full yfinance+AV pipeline | `yf.Ticker`, `requests.get` |
| `test_env_override.py` | 15 | `TRADINGAGENTS_*` env vars override `DEFAULT_CONFIG` | `importlib.reload`, `patch.dict` |
| `test_finnhub_integration.py` | ~100 | Finnhub data layer — all endpoints, exception types | `requests.get` (mock response) |
| `test_finnhub_scanner_utils.py` | 10 | `_safe_fmt` and other Finnhub scanner utility functions | None (pure logic) |
| `test_incident_fixes.py` | 15 | `_load_or_fetch_ohlcv` cache, `YFinanceError` propagation | `yf.Ticker`, `tmp_path` |
| `test_industry_deep_dive.py` | 12 | `_extract_top_sectors()` + `run_tool_loop` nudge | `MagicMock` LLM, `ToolMessage` |
| `test_json_utils.py` | 15 | `extract_json` — fences, think-tags, malformed input | None (pure logic) |
| `test_langgraph_engine_extraction.py` | 14 | `_map_langgraph_event` — event type mapping, metadata extraction | `MagicMock` events |
| `test_langgraph_engine_run_modes.py` | 28 | `run_scan/pipeline/portfolio/auto` — phase coordination, skip logic | `MagicMock` store, `AsyncMock` graph |
| `test_macro_bridge.py` | ~12 | Macro JSON parsing, filtering, report rendering | `tmp_path` |
| `test_macro_regime.py` | ~32 | VIX signals, credit spread, breadth, regime classifier | `pd.Series`, `patch` (yfinance) |
| `test_notebook_sync.py` | 5 | `sync_to_notebooklm` subprocess flow | `subprocess.run` |
| `test_peer_comparison.py` | ~18 | Sector peers, relative performance, comparison report | `yf.Ticker`, `yf.Sector` |
| `test_portfolio_tools.py` | 23 | `portfolio_tools.py` — in-memory data, no DB | `MagicMock` repo |
| `test_scanner_fallback.py` | 2 | AV scanner raises on total failure | `_fetch_global_quote` side_effect |
| `test_scanner_graph.py` | 4 | `ScannerGraph` + `ScannerGraphSetup` compile correctly | `ScannerGraph._create_llm` |
| `test_scanner_mocked.py` | ~57 | yfinance + AV scanner functions, route_to_vendor routing | `yf.Screener`, `requests.get` |
| `test_security_notebook_sync.py` | 3 | Shell injection guard in `sync_to_notebooklm` | None |
| `test_stockstats_utils.py` | ~20 | `get_stockstats_indicator` + `stockstats` formatting | `yf.download` mock |
| `test_ttm_analysis.py` | ~21 | TTM metric computation, report formatting | `yf.Ticker` (quarterly data) |
| `test_vendor_failfast.py` | 11 | Fail-fast routing (ADR 011), error chaining | `requests.get`, `MagicMock` |
| `test_yfinance_integration.py` | ~48 | yfinance data layer — OHLCV, fundamentals, news | `yf.Ticker`, `yf.Search` |
| `agents/test_analyst_agents.py` | ~15 | Analyst agent node wiring + prompt formatting | `MagicMock` LLM |
### Portfolio tests (`tests/portfolio/`)
These tests cover the portfolio management module with in-memory data only — no database connection is required.
| File | # Tests | What it covers | Key mocks used |
|------|--------:|----------------|---------------|
| `test_candidate_prioritizer.py` | 10 | Candidate scoring + ranking algorithm | None (pure logic) |
| `test_config.py` | 8 | Portfolio config defaults + env var overrides | `patch.dict` |
| `test_models.py` | 23 | `Portfolio`, `Holding`, `Trade`, `PortfolioSnapshot` dataclasses | None (pure logic) |
| `test_report_store.py` | 23 | `ReportStore` save/load cycle, JSON serialization | `tmp_path` |
| `test_repository.py` | 16 | `PortfolioRepository` — CRUD operations | `MagicMock` Supabase client |
| `test_risk_evaluator.py` | 28 | Risk constraint checks — position limits, sector limits, cash floor | None (pure logic) |
| `test_risk_metrics.py` | 48 | Volatility, Sharpe, max drawdown, beta metrics | `pd.Series` |
| `test_trade_executor.py` | 10 | `TradeExecutor` — sell/buy ordering, constraint pre-flight, snapshot | `MagicMock` repo |
### CLI tests (`tests/cli/`)
| File | # Tests | What it covers | Key mocks used |
|------|--------:|----------------|---------------|
| `test_main.py` | ~8 | `extract_content_string`, CLI argument parsing | `patch` (LLM), `tmp_path` |
| `test_stats_handler.py` | ~6 | `StatsCallbackHandler` — token counting, threading safety | `MagicMock` LLM callbacks |
### Integration tests (`tests/integration/`)
@ -389,8 +426,10 @@ def pytest_collection_modifyitems(config, items):
|------|--------:|----------------|---------|
| `test_alpha_vantage_live.py` | 3 | Live AV `_make_api_request` — key errors, timeout, success | Network |
| `test_finnhub_live.py` | ~41 | All Finnhub free-tier + paid-tier endpoints (live HTTP) | `FINNHUB_API_KEY` |
| `test_finviz_live.py` | 27 | All three Finviz smart-money screener tools — live HTTP | Network; auto-skips if `finvizfinance` not installed |
| `test_nlm_live.py` | 1 | NotebookLM source CRUD via `nlm` CLI | `NOTEBOOKLM_ID` + `nlm` binary |
| `test_scanner_live.py` | ~23 | yfinance scanner tools + AV routing (live yfinance + AV) | Network; `ALPHA_VANTAGE_API_KEY` for AV tests |
| `test_stockstats_live.py` | ~6 | `get_stockstats_indicator` against live yfinance data | Network |
### E2E tests (`tests/e2e/`)
@ -444,7 +483,10 @@ flowchart TD
J -- no --> K["pytest.mark.skipif\n→ test_nlm_live.py SKIPPED"]
J -- yes --> L[NotebookLM live test runs]
B --> M[Scanner live tests run\nagainst real yfinance API]
E & I & L & M --> N([Results reported])
B --> N{finvizfinance installed?}
N -- no --> O["pytestmark skipif\n→ test_finviz_live.py SKIPPED"]
N -- yes --> P[Finviz screener tests run\nno API key needed]
E & I & K & L & M & O & P --> Q([Results reported])
```
---
@ -517,67 +559,176 @@ flowchart TD
## How to Run Tests
### Default (unit only, CI-safe)
### Quick reference
```bash
pytest
# or equivalently:
pytest tests/unit/
```
Expected: **405 collected → ~395 passed, ~10 skipped**, < 5 s
| What to run | Command | Expected result |
|---|---|---|
| **All unit tests (CI-safe)** | `pytest` | 793 passed, 14 skipped, < 5 s |
| **Unit tests only** | `pytest tests/unit/` | ~600 passed |
| **Portfolio tests only** | `pytest tests/portfolio/` | ~166 passed |
| **CLI tests only** | `pytest tests/cli/` | ~14 passed |
| **All integration tests** | `pytest tests/integration/ -v` | varies (network required) |
| **Finviz integration** | `pytest tests/integration/test_finviz_live.py -v` | 27 tests, network required |
| **Finnhub integration** | `FINNHUB_API_KEY=<key> pytest tests/integration/test_finnhub_live.py -v` | requires key |
| **E2E pipeline** | `pytest tests/e2e/ -v` | requires LLM key + network |
---
### Integration tests (requires network + optional API keys)
### Install dependencies first
```bash
# All integration tests
pip install -e .
pip install pytest pytest-socket
```
---
### Default (unit + portfolio + cli, CI-safe)
The default `pytest` invocation is controlled by `addopts` in `pyproject.toml`:
```toml
addopts = "--ignore=tests/integration --ignore=tests/e2e --disable-socket --allow-unix-socket -x -q"
```
Run it with:
```bash
# Simplest — uses addopts automatically
pytest
# Equivalent explicit form
pytest tests/ --ignore=tests/integration --ignore=tests/e2e \
--disable-socket --allow-unix-socket -x -q
```
Expected output:
```
793 passed, 14 skipped in 4.9s
```
**What `-x` does:** stops at the first failing test.
**What `-q` does:** minimal output (dots + summary).
**What `--disable-socket` does:** blocks all real network calls — any accidental HTTP call fails with `SocketBlockedError`.
To see all failures instead of stopping at the first:
```bash
pytest tests/ --ignore=tests/integration --ignore=tests/e2e \
--disable-socket --allow-unix-socket -q
# (omit -x)
```
---
### Run a single test file
```bash
# Unit file
pytest tests/unit/test_langgraph_engine_run_modes.py -v \
--disable-socket --allow-unix-socket
# Portfolio file
pytest tests/portfolio/test_trade_executor.py -v \
--disable-socket --allow-unix-socket
# CLI file
pytest tests/cli/test_stats_handler.py -v \
--disable-socket --allow-unix-socket
```
---
### Run a single test class or test function
```bash
# Single class
pytest tests/unit/test_langgraph_engine_run_modes.py::TestRunAutoTickerSource -v \
--disable-socket --allow-unix-socket
# Single test function
pytest tests/unit/test_langgraph_engine_run_modes.py::TestRunAutoTickerSource::test_run_auto_gets_tickers_from_scan_report -v \
--disable-socket --allow-unix-socket
```
---
### Integration tests (requires network)
Integration tests make **real HTTP calls** to external APIs. They are
excluded from the default run but can be run explicitly at any time.
```bash
# All integration tests (everything in tests/integration/)
pytest tests/integration/ -v
# Only Alpha Vantage live tests
# Finviz smart-money screener tests (no API key needed — free public screener)
pytest tests/integration/test_finviz_live.py -v
# Alpha Vantage live tests ('demo' key works for basic calls)
pytest tests/integration/test_alpha_vantage_live.py -v
# Only Finnhub live tests (requires key)
# Finnhub live tests (free-tier endpoints only)
FINNHUB_API_KEY=your_key pytest tests/integration/test_finnhub_live.py -v \
-m "integration and not paid_tier"
# Finnhub — all tests including paid-tier (requires premium subscription)
FINNHUB_API_KEY=your_key pytest tests/integration/test_finnhub_live.py -v
# Only free-tier Finnhub tests
FINNHUB_API_KEY=your_key pytest tests/integration/test_finnhub_live.py -v -m "integration and not paid_tier"
# Scanner live tests
# Live scanner tests (hits real yfinance + Alpha Vantage APIs)
pytest tests/integration/test_scanner_live.py -v
# Live stockstats tests
pytest tests/integration/test_stockstats_live.py -v
# NotebookLM tests (requires NOTEBOOKLM_ID env var and nlm CLI binary)
NOTEBOOKLM_ID=your_id pytest tests/integration/test_nlm_live.py -v
```
> **Tip:** When running integration tests from a network-blocked environment
> (e.g., CI without outbound access), tests that require a network connection
> will auto-skip rather than fail — they detect the blocked socket and call
> `pytest.skip()` gracefully.
---
### E2E tests (requires LLM API key + network, manual only)
```bash
# Requires a real LLM API key (e.g., OPENAI_API_KEY) and network access.
# Takes several minutes to complete.
pytest tests/e2e/ -v
```
---
### Targeting by marker
### Run only tests with a specific marker
```bash
# Run only integration-marked tests (wherever they are)
pytest tests/ --override-ini="addopts=" -m integration
# All integration-marked tests across the entire test suite
pytest tests/ --override-ini="addopts=" -m integration -v
# Run excluding slow tests
pytest tests/ --override-ini="addopts=" -m "not slow"
# All tests except slow ones
pytest tests/ --override-ini="addopts=" -m "not slow" \
--disable-socket --allow-unix-socket
# Run unit tests without -x (see all failures, not just first)
pytest tests/unit/ --override-ini="addopts=--disable-socket --allow-unix-socket -q"
# Only paid_tier Finnhub tests (requires premium key)
FINNHUB_API_KEY=your_key pytest tests/integration/ -m paid_tier -v
```
> **`--override-ini="addopts="`** clears the default `addopts` from
> `pyproject.toml` so you can pass your own flags instead.
---
### Re-record VCR cassettes
```bash
# Record only new requests (existing cassettes kept)
pytest tests/integration/ --record-mode=new_episodes
# or to record from scratch:
# Re-record everything from scratch
pytest tests/integration/ --record-mode=all
```
@ -722,6 +873,187 @@ automatically to every test in the file without explicit declaration.
---
## What is MagicMock?
`MagicMock` is the workhorse of Python's `unittest.mock` library — the
standard tool for replacing real objects with controllable stand-ins during
tests. Understanding it is essential for reading and writing tests in this
project.
---
### The core idea: a pretend object
When you write:
```python
from unittest.mock import MagicMock
repo = MagicMock()
```
`repo` is now an object that:
- **accepts any attribute access** (`repo.some_attr` → another `MagicMock`)
- **accepts any method call** (`repo.get_portfolio()` → another `MagicMock`)
- **records everything** so you can ask later: "was this called? with what?"
No database, no HTTP, no file system — just a pretend object you fully control.
---
### Setting return values
By default every method returns a new `MagicMock`. You can override this:
```python
repo = MagicMock()
repo.get_portfolio.return_value = {"portfolio_id": "p1", "cash": 50_000.0}
result = repo.get_portfolio("p1") # returns the dict you set, not a MagicMock
print(result["cash"]) # 50000.0
```
This is critical. If you forget `return_value=...`, the method returns a
`MagicMock`, which is **truthy**. Code that does `if repo.load_data(...):`
will take the "data exists" branch even when it shouldn't.
---
### The truthy problem (and why it caused the mock trade bug)
This project has guard clauses like:
```python
if not force and store.load_execution_result(date, portfolio_id):
# skip — already done
return
```
If `store = MagicMock()` and `load_execution_result` is never given a
`return_value`, the call returns a new `MagicMock`, which is truthy.
The entire Phase 3 (portfolio execution / trades) gets **skipped** even
though no trades ever ran.
**The fix:** always set `return_value=None` for methods that are supposed to
return "nothing found":
```python
mock_store = MagicMock()
mock_store.load_scan.return_value = scan_data # some data
mock_store.load_execution_result.return_value = None # ← explicitly "not found"
mock_store.load_pm_decision.return_value = None # ← explicitly "not found"
mock_store.load_analysis.return_value = None # ← explicitly "not found"
```
This is the pattern used in `TestRunAutoTickerSource._make_mock_store()`.
---
### Checking calls (assertions)
```python
repo = MagicMock()
repo.add_holding("p1", "AAPL", 10, 150.0)
# Was it called?
repo.add_holding.assert_called_once()
# Was it called with these exact arguments?
repo.add_holding.assert_called_once_with("p1", "AAPL", 10, 150.0)
# How many times?
assert repo.add_holding.call_count == 1
# What args did the last call use?
args, kwargs = repo.add_holding.call_args
assert args[1] == "AAPL"
```
---
### Raising exceptions from mocks
```python
repo.add_holding.side_effect = InsufficientCashError("Not enough cash")
# Now calling add_holding() will raise the error instead of returning a value
```
`side_effect` can also be a callable or a list of values/exceptions to cycle
through on successive calls.
---
### AsyncMock — for `async def` functions
LangGraph uses `async for` and `await` extensively. For those, use
`AsyncMock`:
```python
from unittest.mock import AsyncMock, MagicMock
mock_graph = MagicMock()
mock_graph.astream_events = AsyncMock(return_value=iter([]))
# or as an async generator:
async def fake_stream(*args, **kwargs):
yield {"event": "on_chain_end", "data": {"output": {}}}
mock_graph.astream_events = fake_stream
```
---
### patch() — replacing real objects temporarily
`MagicMock` creates the stand-in; `patch()` *swaps* it in for the duration
of a `with` block (or test function):
```python
from unittest.mock import patch, MagicMock
def test_run_portfolio_skips_when_already_done():
mock_store = MagicMock()
mock_store.load_execution_result.return_value = {"summary": "done"}
with patch("agent_os.backend.services.langgraph_engine.ReportStore",
return_value=mock_store):
# All code inside here that imports ReportStore gets mock_store instead
...
```
The `target` string must be the **import path where the name is used**, not
where it is defined. If `langgraph_engine.py` does
`from tradingagents.portfolio.report_store import ReportStore`, you patch
`agent_os.backend.services.langgraph_engine.ReportStore`.
---
### PropertyMock — for `@property` attributes
```python
from unittest.mock import MagicMock, PropertyMock
mock_ticker = MagicMock()
type(mock_ticker).info = PropertyMock(return_value={"longName": "Apple Inc."})
# mock_ticker.info now returns the dict (not a MagicMock)
```
---
### Quick reference table
| Tool | Use case |
|------|----------|
| `MagicMock()` | Stand-in for any object (repo, client, graph) |
| `mock.method.return_value = x` | Make a method return `x` |
| `mock.method.side_effect = exc` | Make a method raise `exc` |
| `mock.method.assert_called_once_with(...)` | Assert exact call args |
| `AsyncMock()` | Stand-in for `async def` functions / methods |
| `patch("module.Name")` | Swap a real class/function with a mock temporarily |
| `patch.dict(os.environ, {...})` | Inject env vars for the duration of a test |
| `PropertyMock` | Mock `@property` descriptors |
---
## Adding New Tests — Checklist
When adding a test to this project, choose the right tier and follow the
@ -734,9 +1066,18 @@ corresponding checklist.
- [ ] yfinance: use `patch("...yf.Ticker", ...)` — never call yfinance directly.
- [ ] AV / Finnhub: use `patch("...requests.get", return_value=_mock_response(...))`.
- [ ] Use `monkeypatch.setenv` or `patch.dict(os.environ, ...)` for env var tests.
- [ ] When mocking a store/repo, always pin `load_*` methods to `None` to avoid
truthy MagicMock accidentally triggering "already done" skip branches.
- [ ] Do NOT use `@pytest.mark.integration` — that signals the test is being tracked
for future migration, not that it's already mocked.
- [ ] Run `pytest tests/unit/ -x` to confirm the test passes offline.
- [ ] Run `pytest tests/unit/ -x --disable-socket --allow-unix-socket` to confirm the test passes offline.
### Portfolio module test (no DB needed)
- [ ] File goes in `tests/portfolio/test_<component>.py`.
- [ ] Use `MagicMock()` for the Supabase client / `PortfolioRepository` if testing
higher-level logic. Do NOT use a real DB connection.
- [ ] Run `pytest tests/portfolio/ -x --disable-socket --allow-unix-socket`.
### Integration test (live API needed)
@ -744,6 +1085,8 @@ corresponding checklist.
- [ ] Class or function decorated with `@pytest.mark.integration`.
- [ ] Use the `av_api_key` fixture (or a similar guard) to auto-skip when the API
is unavailable.
- [ ] For APIs with no key (e.g., Finviz): use `pytest.mark.skipif(not lib_available, ...)`
so the file is skipped if the library is not installed.
- [ ] For Finnhub paid-tier endpoints: add both `@pytest.mark.paid_tier` and
`@pytest.mark.skip` so they are documented but never run accidentally.
- [ ] Do NOT add the file path to `addopts`'s `--ignore` list — it is already

View File

@ -0,0 +1,297 @@
"""Live integration tests for the Finviz smart-money screener tools.
These tests make REAL HTTP requests to finviz.com via the ``finvizfinance``
library and therefore require network access. No API key is needed — Finviz
is a free public screener.
The entire module is skipped automatically when ``finvizfinance`` is not
installed in the current environment.
Run only the Finviz live tests:
pytest tests/integration/test_finviz_live.py -v -m integration
Run all integration tests:
pytest tests/integration/ -v -m integration
Skip in unit-only CI (default):
pytest tests/ --ignore=tests/integration -v # live tests never run
"""
import pytest
# ---------------------------------------------------------------------------
# Guard — skip every test in this file if finvizfinance is not installed.
# ---------------------------------------------------------------------------
# Probe for the optional finvizfinance dependency; the whole module is
# skipped when it is absent.
try:
    import finvizfinance  # noqa: F401
except ImportError:
    _finvizfinance_available = False
else:
    _finvizfinance_available = True

# Every test in this file is an integration test.
pytestmark = pytest.mark.integration

# Class-level guard applied to each test class below.
_skip_if_no_finviz = pytest.mark.skipif(
    not _finvizfinance_available,
    reason="finvizfinance not installed — skipping live Finviz tests",
)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
_VALID_RESULT_PREFIXES = (
"Top 5 stocks for ",
"No stocks matched",
"Smart money scan unavailable",
)
def _assert_valid_result(result: str, label: str) -> None:
"""Assert that *result* is a well-formed string from _run_finviz_screen."""
assert isinstance(result, str), f"{label}: expected str, got {type(result)}"
assert len(result) > 0, f"{label}: result is empty"
assert any(result.startswith(prefix) for prefix in _VALID_RESULT_PREFIXES), (
f"{label}: unexpected result format:\n{result}"
)
def _assert_ticker_rows(result: str, label: str) -> None:
"""When results were found, every data row must have the expected shape."""
if not result.startswith("Top 5 stocks for "):
pytest.skip(f"{label}: no market data returned today — skipping row assertions")
lines = result.strip().split("\n")
# First line is the header "Top 5 stocks for …:"
data_lines = [l for l in lines[1:] if l.strip()]
assert len(data_lines) >= 1, f"{label}: header present but no data rows"
for line in data_lines:
# Expected shape: "- TICKER (Sector) @ $Price"
assert line.startswith("- "), f"{label}: row missing '- ' prefix: {line!r}"
assert "@" in line, f"{label}: row missing '@' separator: {line!r}"
assert "$" in line, f"{label}: row missing '$' price marker: {line!r}"
# ---------------------------------------------------------------------------
# _run_finviz_screen helper (tested indirectly via the public tools)
# ---------------------------------------------------------------------------
@_skip_if_no_finviz
class TestRunFinvizScreen:
    """
    Tests for the shared ``_run_finviz_screen`` helper.

    Exercised indirectly through the public LangChain tool wrappers.
    """

    @staticmethod
    def _screen():
        """Invoke the unusual-volume tool once and return its raw output."""
        from tradingagents.agents.utils.scanner_tools import get_unusual_volume_stocks

        return get_unusual_volume_stocks.invoke({})

    def test_returns_string(self):
        assert isinstance(self._screen(), str)

    def test_result_is_non_empty(self):
        assert len(self._screen()) > 0

    def test_result_has_valid_prefix(self):
        _assert_valid_result(self._screen(), "unusual_volume")
# ---------------------------------------------------------------------------
# get_insider_buying_stocks
# ---------------------------------------------------------------------------
@_skip_if_no_finviz
class TestGetInsiderBuyingStocks:
    """Live tests for the insider-buying screener tool."""

    @staticmethod
    def _screen():
        """Invoke the insider-buying tool once and return its raw output."""
        from tradingagents.agents.utils.scanner_tools import get_insider_buying_stocks

        return get_insider_buying_stocks.invoke({})

    def test_returns_string(self):
        assert isinstance(self._screen(), str)

    def test_result_is_non_empty(self):
        assert len(self._screen()) > 0

    def test_result_has_valid_prefix(self):
        _assert_valid_result(self._screen(), "insider_buying")

    def test_data_rows_have_expected_shape(self):
        _assert_ticker_rows(self._screen(), "insider_buying")

    def test_no_error_message_on_success(self):
        result = self._screen()
        # If finviz returned data or an empty result, there should be no error
        if result.startswith(("Top 5 stocks for ", "No stocks matched")):
            assert "Finviz error" not in result
# ---------------------------------------------------------------------------
# get_unusual_volume_stocks
# ---------------------------------------------------------------------------
@_skip_if_no_finviz
class TestGetUnusualVolumeStocks:
    """Live tests for the unusual-volume screener tool."""

    @staticmethod
    def _screen():
        """Invoke the unusual-volume tool once and return its raw output."""
        from tradingagents.agents.utils.scanner_tools import get_unusual_volume_stocks

        return get_unusual_volume_stocks.invoke({})

    def test_returns_string(self):
        assert isinstance(self._screen(), str)

    def test_result_is_non_empty(self):
        assert len(self._screen()) > 0

    def test_result_has_valid_prefix(self):
        _assert_valid_result(self._screen(), "unusual_volume")

    def test_data_rows_have_expected_shape(self):
        _assert_ticker_rows(self._screen(), "unusual_volume")

    def test_no_error_message_on_success(self):
        result = self._screen()
        # If finviz returned data or an empty result, there should be no error
        if result.startswith(("Top 5 stocks for ", "No stocks matched")):
            assert "Finviz error" not in result

    def test_tickers_are_uppercase(self):
        """When data is returned, all ticker symbols must be uppercase."""
        result = self._screen()
        if not result.startswith("Top 5 stocks for "):
            pytest.skip("No data returned today")
        # Rows look like "- TICKER (…) @ $…"; the first token is the symbol.
        rows = [r for r in result.strip().split("\n")[1:] if r.strip()]
        for row in rows:
            ticker = row.lstrip("- ").split(" ")[0]
            assert ticker == ticker.upper(), f"Ticker not uppercase: {ticker!r}"
# ---------------------------------------------------------------------------
# get_breakout_accumulation_stocks
# ---------------------------------------------------------------------------
@_skip_if_no_finviz
class TestGetBreakoutAccumulationStocks:
    """Live tests for the breakout-accumulation screener tool."""

    @staticmethod
    def _screen():
        """Invoke the breakout-accumulation tool once and return its raw output."""
        from tradingagents.agents.utils.scanner_tools import (
            get_breakout_accumulation_stocks,
        )

        return get_breakout_accumulation_stocks.invoke({})

    def test_returns_string(self):
        assert isinstance(self._screen(), str)

    def test_result_is_non_empty(self):
        assert len(self._screen()) > 0

    def test_result_has_valid_prefix(self):
        _assert_valid_result(self._screen(), "breakout_accumulation")

    def test_data_rows_have_expected_shape(self):
        _assert_ticker_rows(self._screen(), "breakout_accumulation")

    def test_no_error_message_on_success(self):
        result = self._screen()
        # If finviz returned data or an empty result, there should be no error
        if result.startswith(("Top 5 stocks for ", "No stocks matched")):
            assert "Finviz error" not in result

    def test_at_most_five_rows_returned(self):
        """The screener caps output at 5 rows (hardcoded head(5))."""
        result = self._screen()
        if not result.startswith("Top 5 stocks for "):
            pytest.skip("No data returned today")
        rows = [r for r in result.strip().split("\n")[1:] if r.strip()]
        assert len(rows) <= 5, f"Expected ≤5 rows, got {len(rows)}"

    def test_price_column_is_numeric(self):
        """Price values after '$' must be parseable as floats."""
        result = self._screen()
        if not result.startswith("Top 5 stocks for "):
            pytest.skip("No data returned today")
        rows = [r for r in result.strip().split("\n")[1:] if r.strip()]
        for row in rows:
            price_text = row.split("@ $")[-1].strip()
            float(price_text)  # raises ValueError if not numeric
# ---------------------------------------------------------------------------
# All three tools together — smoke test
# ---------------------------------------------------------------------------
@_skip_if_no_finviz
class TestAllThreeToolsSmoke:
    """Quick smoke test running all three tools sequentially."""

    def test_all_three_return_strings(self):
        from tradingagents.agents.utils.scanner_tools import (
            get_breakout_accumulation_stocks,
            get_insider_buying_stocks,
            get_unusual_volume_stocks,
        )

        # (tool, label) pairs — invoked live, one after another.
        labelled_tools = (
            (get_insider_buying_stocks, "insider_buying"),
            (get_unusual_volume_stocks, "unusual_volume"),
            (get_breakout_accumulation_stocks, "breakout_accumulation"),
        )
        for tool_fn, label in labelled_tools:
            result = tool_fn.invoke({})
            assert isinstance(result, str), f"{label}: expected str"
            assert len(result) > 0, f"{label}: empty result"
            _assert_valid_result(result, label)

View File

@ -636,6 +636,7 @@ class TestRunAutoTickerSource(unittest.TestCase):
mock_store = MagicMock()
mock_store.load_scan.return_value = scan_data
mock_store.load_analysis.return_value = None # prevent truthy skip of pipeline phase
mock_rs_cls.return_value = mock_store
asyncio.run(_collect(engine.run_auto("auto1", {"date": "2026-01-01"})))
@ -716,6 +717,8 @@ class TestRunAutoTickerSource(unittest.TestCase):
mock_store = MagicMock()
mock_store.load_scan.return_value = {}
mock_store.load_execution_result.return_value = None # prevent truthy skip of Phase 3
mock_store.load_pm_decision.return_value = None
mock_rs_cls.return_value = mock_store
asyncio.run(_collect(engine.run_auto("auto1", {"date": "2026-01-01"})))
@ -754,6 +757,8 @@ class TestRunAutoTickerSource(unittest.TestCase):
mock_store = MagicMock()
mock_store.load_scan.return_value = {}
mock_store.load_execution_result.return_value = None # prevent truthy skip of Phase 3
mock_store.load_pm_decision.return_value = None
mock_rs_cls.return_value = mock_store
params = {"ticker": "GOOG", "portfolio_id": "my_portfolio", "date": "2026-01-01"}

View File

@ -28,6 +28,7 @@ class PortfolioManagerState(MessagesState):
analysis_date: str
prices: dict # ticker → price
scan_summary: dict # macro scan output from ScannerGraph
ticker_analyses: dict # per-ticker analysis results keyed by ticker symbol
# Processing fields (string-serialised JSON — written by individual nodes)
portfolio_data: Annotated[str, _last_value]