feat(scanners): add minervini scanner to registry

minervini.py existed but was never committed. Without the file on the
remote, the __init__.py import added in the previous fix causes an
ImportError in CI.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Youssef Aitousarrah 2026-04-06 13:51:33 -07:00
parent 8706484c16
commit b68a43ec0d
12 changed files with 478 additions and 89 deletions

View File

@ -157,6 +157,7 @@ def select_shallow_thinking_agent(provider) -> str:
("Gemini 2.5 Pro - Most capable Gemini model", "gemini-2.5-pro"),
("Gemini 3.0 Pro Preview - Next generation preview", "gemini-3-pro-preview"),
("Gemini 3.0 Flash Preview - Latest generation preview", "gemini-3-flash-preview"),
("Gemini 3.1 Preview - Newest model preview", "gemini-3.1-preview"),
],
"openrouter": [
("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
@ -237,6 +238,7 @@ def select_deep_thinking_agent(provider) -> str:
("Gemini 2.5 Pro - Most capable Gemini model", "gemini-2.5-pro"),
("Gemini 3.0 Pro Preview - Next generation preview", "gemini-3-pro-preview"),
("Gemini 3.0 Flash Preview - Latest generation preview", "gemini-3-flash-preview"),
("Gemini 3.1 Preview - Newest model preview", "gemini-3.1-preview"),
],
"openrouter": [
(

View File

@ -27,13 +27,11 @@ logger = get_logger(__name__)
def main():
logger.info(
"""
logger.info("""
TradingAgents - Historical Memory Builder
"""
)
""")
# Configuration
tickers = [

View File

@ -89,8 +89,7 @@ def build_strategy_memories(strategy_name: str, config: dict):
strategy = STRATEGIES[strategy_name]
logger.info(
f"""
logger.info(f"""
Building Memories: {strategy_name.upper().replace('_', ' ')}
@ -99,8 +98,7 @@ Strategy: {strategy['description']}
Lookforward: {strategy['lookforward_days']} days
Sampling: Every {strategy['interval_days']} days
Tickers: {', '.join(strategy['tickers'])}
"""
)
""")
# Date range - last 2 years
end_date = datetime.now()
@ -159,8 +157,7 @@ Tickers: {', '.join(strategy['tickers'])}
def main():
logger.info(
"""
logger.info("""
TradingAgents - Strategy-Specific Memory Builder
@ -171,8 +168,7 @@ This script builds optimized memories for different trading styles:
2. Swing Trading - 7-day returns, weekly samples
3. Position Trading - 30-day returns, monthly samples
4. Long-term - 90-day returns, quarterly samples
"""
)
""")
logger.info("Available strategies:")
for i, (name, config) in enumerate(STRATEGIES.items(), 1):
@ -220,13 +216,11 @@ This script builds optimized memories for different trading styles:
logger.info("\n" + "=" * 70)
logger.info("\n💡 TIP: To use a specific strategy's memories, update your config:")
logger.info(
"""
logger.info("""
config = DEFAULT_CONFIG.copy()
config["memory_dir"] = "data/memories/swing_trading" # or your strategy
config["load_historical_memories"] = True
"""
)
""")
if __name__ == "__main__":

View File

@ -129,12 +129,10 @@ def main():
6. Save updated positions
7. Print progress messages
"""
logger.info(
"""
logger.info("""
TradingAgents - Position Updater
""".strip()
)
""".strip())
# Initialize position tracker
tracker = PositionTracker(data_dir="data")

View File

@ -24,6 +24,10 @@ class FilterConfig:
compression_atr_pct_max: float = 2.0
compression_bb_width_max: float = 6.0
compression_min_volume_ratio: float = 1.3
# Fundamental Risk Filters
min_z_score: float = 1.81 # Default below 1.81 indicates distress
min_f_score: int = 4 # Default below 4 is poor
filter_fundamental_risk: bool = True
@dataclass
@ -139,6 +143,9 @@ class DiscoveryConfig:
compression_min_volume_ratio=f.get(
"compression_min_volume_ratio", _fd.compression_min_volume_ratio
),
min_z_score=f.get("min_z_score", _fd.min_z_score),
min_f_score=f.get("min_f_score", _fd.min_f_score),
filter_fundamental_risk=f.get("filter_fundamental_risk", _fd.filter_fundamental_risk),
)
# Enrichment — nested under "enrichment" key, fallback to root

View File

@ -132,6 +132,11 @@ class CandidateFilter:
self.compression_bb_width_max = dc.filters.compression_bb_width_max
self.compression_min_volume_ratio = dc.filters.compression_min_volume_ratio
# Fundamental Risk
self.filter_fundamental_risk = dc.filters.filter_fundamental_risk
self.min_z_score = dc.filters.min_z_score
self.min_f_score = dc.filters.min_f_score
# Enrichment settings
self.batch_news_vendor = dc.enrichment.batch_news_vendor
self.batch_news_batch_size = dc.enrichment.batch_news_batch_size
@ -166,6 +171,7 @@ class CandidateFilter:
volume_by_ticker = self._fetch_batch_volume(state, candidates)
news_by_ticker = self._fetch_batch_news(start_date, end_date, candidates)
price_by_ticker = self._fetch_batch_prices(candidates)
(
filtered_candidates,
@ -177,6 +183,7 @@ class CandidateFilter:
candidates=candidates,
volume_by_ticker=volume_by_ticker,
news_by_ticker=news_by_ticker,
price_by_ticker=price_by_ticker,
end_date=end_date,
)
@ -347,12 +354,65 @@ class CandidateFilter:
logger.warning(f"Batch news fetch failed, will skip news enrichment: {e}")
return {}
def _fetch_batch_prices(self, candidates: List[Dict[str, Any]]) -> Dict[str, float]:
    """Batch-fetch current prices for all candidates in one request.

    This avoids per-ticker yfinance calls that get rate-limited after
    bulk downloads (e.g., ml_signal scanning 500+ tickers).

    Args:
        candidates: candidate dicts; only the "ticker" key is read.

    Returns:
        Mapping of upper-cased ticker -> last close price. Empty dict on
        any failure (caller falls back to per-ticker fetching).
    """
    symbols = [c.get("ticker", "").upper() for c in candidates if c.get("ticker")]
    if not symbols:
        return {}
    try:
        import yfinance as yf

        logger.info(f"💰 Batch fetching prices for {len(symbols)} tickers...")
        # Call yf.download directly — the download_history wrapper only accepts
        # a single string (calls symbol.upper()), but yf.download handles lists.
        frame = yf.download(
            symbols,
            period="5d",
            interval="1d",
            auto_adjust=True,
            progress=False,
        )
        if frame is None or frame.empty:
            logger.warning("Batch price download returned empty data")
            return {}
        price_map: Dict[str, float] = {}
        if isinstance(frame.columns, pd.MultiIndex):
            # Assumes level 1 of the column MultiIndex holds ticker symbols
            # (yfinance multi-ticker layout) — matches the xs() call below.
            known = frame.columns.get_level_values(1).unique()
            for symbol in symbols:
                try:
                    if symbol not in known:
                        continue
                    closes = frame.xs(symbol, axis=1, level=1)["Close"].dropna()
                    if not closes.empty:
                        price_map[symbol] = float(closes.iloc[-1])
                except Exception:
                    # Best-effort per symbol: a bad slice must not kill the batch.
                    continue
        else:
            # Single ticker case
            closes = frame["Close"].dropna()
            if not closes.empty and len(symbols) == 1:
                price_map[symbols[0]] = float(closes.iloc[-1])
        logger.info(f"✓ Batch prices fetched for {len(price_map)}/{len(symbols)} tickers")
        return price_map
    except Exception as e:
        logger.warning(f"Batch price fetch failed, will fall back to per-ticker: {e}")
        return {}
def _filter_and_enrich_candidates(
self,
state: Dict[str, Any],
candidates: List[Dict[str, Any]],
volume_by_ticker: Dict[str, Any],
news_by_ticker: Dict[str, Any],
price_by_ticker: Dict[str, float],
end_date: str,
):
filtered_candidates = []
@ -361,6 +421,8 @@ class CandidateFilter:
"intraday_moved": 0,
"recent_moved": 0,
"market_cap": 0,
"z_score": 0,
"f_score": 0,
"no_data": 0,
}
@ -458,8 +520,10 @@ class CandidateFilter:
try:
from tradingagents.dataflows.y_finance import get_fundamentals, get_stock_price
# Get current price
current_price = get_stock_price(ticker)
# Get current price — prefer batch result, fall back to per-ticker
current_price = price_by_ticker.get(ticker.upper())
if current_price is None:
current_price = get_stock_price(ticker)
cand["current_price"] = current_price
# Track failures for delisted cache
@ -545,6 +609,27 @@ class CandidateFilter:
# Assign strategy based on source (prioritize leading indicators)
self._assign_strategy(cand)
# Fundamental Risk Check (Altman Z-Score & Piotroski F-Score)
if self.filter_fundamental_risk and cand.get("strategy") != "short_squeeze":
from tradingagents.dataflows.discovery.risk_metrics import (
calculate_altman_z_score,
calculate_piotroski_f_score,
)
z_score = calculate_altman_z_score(ticker)
f_score = calculate_piotroski_f_score(ticker)
cand["z_score"] = z_score
cand["f_score"] = f_score
if z_score is not None and z_score < self.min_z_score:
filtered_reasons["z_score"] += 1
continue
if f_score is not None and f_score < self.min_f_score:
filtered_reasons["f_score"] += 1
continue
# Technical Analysis Check (New)
today_str = end_date
rsi_data = self._run_tool(
@ -747,6 +832,10 @@ class CandidateFilter:
logger.info(f" ❌ Low volume: {filtered_reasons['volume']}")
if filtered_reasons.get("market_cap", 0) > 0:
logger.info(f" ❌ Below market cap: {filtered_reasons['market_cap']}")
if filtered_reasons.get("z_score", 0) > 0:
logger.info(f" ❌ Low Altman Z-Score: {filtered_reasons['z_score']}")
if filtered_reasons.get("f_score", 0) > 0:
logger.info(f" ❌ Low Piotroski F-Score: {filtered_reasons['f_score']}")
if filtered_reasons.get("no_data", 0) > 0:
logger.info(f" ❌ No data available: {filtered_reasons['no_data']}")
logger.info(f" ✅ Passed filters: {len(filtered_candidates)}")

View File

@ -226,6 +226,8 @@ class CandidateRanker:
# New enrichment fields
confluence_score = cand.get("confluence_score", 1)
quant_score = cand.get("quant_score", "N/A")
z_score = cand.get("z_score", "N/A")
f_score = cand.get("f_score", "N/A")
# ML prediction
ml_win_prob = cand.get("ml_win_probability")
@ -255,7 +257,7 @@ class CandidateRanker:
summary = f"""### {ticker} (Priority: {priority.upper()})
- **Strategy Match**: {strategy}
- **Sources**: {source_str} | **Confluence**: {confluence_score} source(s)
- **Quant Pre-Score**: {quant_score}/100 | **ML Win Probability**: {ml_str}
- **Quant Pre-Score**: {quant_score}/100 | **ML Win Probability**: {ml_str} | **Altman Z-Score**: {z_score} | **Piotroski F-Score**: {f_score}
- **Price**: {price_str} | **Current Price (numeric)**: {current_price if isinstance(current_price, (int, float)) else "N/A"} | **Intraday**: {intraday_str} | **Avg Volume**: {volume_str}
- **Short Interest**: {short_str}
- **Discovery Context**: {context}
@ -305,6 +307,7 @@ Each candidate was discovered by a specific scanner. Evaluate them using the cri
- **contrarian_value**: Focus on oversold technicals (RSI <30), fundamental support (earnings stability), and a clear reason why the selloff is overdone.
- **news_catalyst**: Focus on the materiality of the news, whether it's already priced in (check intraday move), and the timeline of impact.
- **sector_rotation**: Focus on relative strength vs sector ETF, whether the stock is a laggard in an accelerating sector.
- **minervini**: Focus on the RS Rating (top 30% = RS>=70, top 10% = RS>=90) as the primary signal. Verify all 6 trend template conditions are met (price structure above rising SMAs). Strongest setups combine RS>=85 with price consolidating near highs (within 10-15% of 52w high) these have minimal overhead supply. Penalize if RS Rating is borderline (70-75) without other confirming signals.
- **ml_signal**: Use the ML Win Probability as a strong quantitative signal. Scores above 65% deserve significant weight.
HISTORICAL INSIGHTS:
@ -348,9 +351,24 @@ IMPORTANT: Return ONLY valid JSON. No markdown wrapping, no commentary outside t
logger.info(f"Full ranking prompt:\n{prompt}")
try:
# Use structured output with include_raw for debugging
structured_llm = self.llm.with_structured_output(RankingResponse, include_raw=True)
response = structured_llm.invoke([HumanMessage(content=prompt)])
# Invoke LLM directly — avoids with_structured_output which fails
# when the LLM wraps JSON in ```json...``` markdown blocks
response = self.llm.invoke([HumanMessage(content=prompt)])
# Extract text content from response
raw_text = ""
if hasattr(response, "content"):
content = response.content
if isinstance(content, str):
raw_text = content
elif isinstance(content, list):
for block in content:
if isinstance(block, dict) and block.get("type") == "text":
raw_text = block.get("text", "")
break
elif isinstance(block, str):
raw_text = block
break
tool_logs = state.get("tool_logs", [])
append_llm_log(
@ -359,72 +377,29 @@ IMPORTANT: Return ONLY valid JSON. No markdown wrapping, no commentary outside t
step="Rank candidates",
model=resolve_llm_name(self.llm),
prompt=prompt,
output=response,
output=raw_text[:2000],
)
state["tool_logs"] = tool_logs
# Handle the response (dict with raw, parsed, parsing_error)
if isinstance(response, dict):
result = response.get("parsed")
raw = response.get("raw")
parsing_error = response.get("parsing_error")
# Log debug info
logger.info(f"Structured output - parsed type: {type(result)}")
if parsing_error:
logger.error(f"Parsing error: {parsing_error}")
if raw and hasattr(raw, "content"):
logger.debug(f"Raw content preview: {str(raw.content)[:500]}...")
else:
# Direct RankingResponse (shouldn't happen with include_raw=True)
result = response
# Extract rankings - with fallback for markdown-wrapped JSON
if result is None:
logger.warning(
"Structured output parsing returned None - attempting fallback extraction"
if not raw_text.strip():
raise ValueError(
"LLM returned empty response. This may be due to content filtering or prompt length."
)
# Try to extract JSON from raw response (handles ```json...``` wrapping)
raw_text = None
if raw and hasattr(raw, "content"):
content = raw.content
if isinstance(content, str):
raw_text = content
elif isinstance(content, list):
# Handle list of content blocks (e.g., [{'type': 'text', 'text': '...'}])
for block in content:
if isinstance(block, dict) and block.get("type") == "text":
raw_text = block.get("text", "")
break
elif isinstance(block, str):
raw_text = block
break
# Strip markdown wrapper (```json...```) and parse JSON
json_str = extract_json_from_markdown(raw_text)
if not json_str:
raise ValueError(
f"LLM response did not contain valid JSON. Preview: {raw_text[:500]}"
)
if raw_text:
json_str = extract_json_from_markdown(raw_text)
if json_str:
try:
parsed_data = json.loads(json_str)
result = RankingResponse.model_validate(parsed_data)
logger.info(
"Successfully extracted JSON from markdown-wrapped response"
)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse extracted JSON: {e}")
except Exception as e:
logger.error(f"Failed to validate extracted JSON: {e}")
try:
parsed_data = json.loads(json_str)
except json.JSONDecodeError as e:
raise ValueError(f"Failed to parse JSON from LLM response: {e}")
if result is None:
logger.error("Parsed result is None - check raw response for clues")
raise ValueError(
"LLM returned None. This may be due to content filtering or prompt length. "
"Check LOG_LEVEL=DEBUG for details."
)
if not hasattr(result, "rankings"):
logger.error(f"Result missing 'rankings'. Type: {type(result)}, Value: {result}")
raise ValueError(f"Unexpected result format: {type(result)}")
result = RankingResponse.model_validate(parsed_data)
logger.info(f"Parsed {len(result.rankings)} rankings from LLM response")
final_ranking_list = [ranking.model_dump() for ranking in result.rankings]

View File

@ -0,0 +1,286 @@
"""Minervini Trend Template scanner — Stage 2 uptrend identification.
Identifies stocks in a confirmed Stage 2 uptrend using Mark Minervini's
6-condition trend template, then ranks survivors by an IBD-style Relative
Strength (RS) Rating computed within the scanned universe.
All computation is pure OHLCV math — zero per-ticker API calls during scan.
"""
from typing import Any, Dict, List, Optional, Tuple
import pandas as pd
from tradingagents.dataflows.discovery.scanner_registry import SCANNER_REGISTRY, BaseScanner
from tradingagents.dataflows.discovery.utils import Priority
from tradingagents.utils.logger import get_logger
logger = get_logger(__name__)
DEFAULT_TICKER_FILE = "data/tickers.txt"
def _load_tickers_from_file(path: str) -> List[str]:
    """Load ticker symbols from a text file.

    Blank lines and lines starting with '#' are skipped; symbols are
    upper-cased. Returns [] when the file is missing, unreadable, or
    contains no symbols.
    """
    try:
        with open(path) as handle:
            symbols = []
            for raw_line in handle:
                stripped = raw_line.strip()
                if stripped and not stripped.startswith("#"):
                    symbols.append(stripped.upper())
        if symbols:
            logger.info(f"Minervini scanner: loaded {len(symbols)} tickers from {path}")
            return symbols
    except FileNotFoundError:
        logger.warning(f"Ticker file not found: {path}")
    except Exception as e:
        logger.warning(f"Failed to load ticker file {path}: {e}")
    return []
class MinerviniScanner(BaseScanner):
    """Scan for stocks in a confirmed Minervini Stage 2 uptrend.

    Applies Mark Minervini's 6-condition trend template to identify stocks
    with healthy price structure (above rising SMAs, well off lows, near highs),
    then ranks by an IBD-style RS Rating computed within the scanned universe.

    Data requirement: ~200+ trading days of OHLCV (uses 1y lookback by default).
    Cost: single batch yfinance download, zero per-ticker API calls.
    """

    name = "minervini"
    pipeline = "momentum"
    strategy = "minervini"

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        # Scanner-specific ticker_file wins; fall back to the root config's
        # tickers_file, then the repo default path.
        self.ticker_file = self.scanner_config.get(
            "ticker_file",
            config.get("tickers_file", DEFAULT_TICKER_FILE),
        )
        # RS Rating cutoff (0-100 percentile within the scanned universe).
        self.min_rs_rating = self.scanner_config.get("min_rs_rating", 70)
        # yfinance period string; must cover >= 200 trading days for SMA200.
        self.lookback_period = self.scanner_config.get("lookback_period", "1y")
        # How far back (trading days) to compare SMA200 when checking its slope.
        self.sma_200_slope_days = self.scanner_config.get("sma_200_slope_days", 20)
        # Template thresholds: % above 52w low / % below 52w high.
        self.min_pct_off_low = self.scanner_config.get("min_pct_off_low", 30)
        self.max_pct_from_high = self.scanner_config.get("max_pct_from_high", 25)

    def scan(self, state: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Return candidate dicts for tickers passing the trend template and RS cutoff.

        Pipeline: load universe -> one batch OHLCV download -> apply the
        6-condition template per ticker -> percentile-rank 12-month returns
        across the whole universe (RS Rating) -> filter, sort, limit.
        """
        if not self.is_enabled():
            return []
        logger.info("📊 Scanning for Minervini Stage 2 uptrends...")
        tickers = _load_tickers_from_file(self.ticker_file)
        if not tickers:
            logger.warning("No tickers loaded for Minervini scan")
            return []
        # Batch download OHLCV — 1y needed for SMA200
        import yfinance as yf

        try:
            logger.info(f"Batch-downloading {len(tickers)} tickers ({self.lookback_period})...")
            raw = yf.download(
                tickers,
                period=self.lookback_period,
                interval="1d",
                auto_adjust=True,
                progress=False,
            )
        except Exception as e:
            logger.error(f"Batch download failed: {e}")
            return []
        if raw is None or raw.empty:
            logger.warning("Minervini scanner: batch download returned empty data")
            return []
        # Compute 12-month returns for RS Rating (need all tickers' data)
        universe_returns: Dict[str, float] = {}
        passing_tickers: List[Tuple[str, Dict[str, Any]]] = []
        for ticker in tickers:
            result = self._check_minervini(ticker, raw)
            if result is not None:
                ticker_df, metrics = result
                # Compute 12-month cumulative return for RS rating
                ret = self._compute_return(ticker_df)
                if ret is not None:
                    universe_returns[ticker] = ret
                passing_tickers.append((ticker, metrics))
        # Also compute returns for tickers that DIDN'T pass (for RS percentile ranking)
        # — the RS Rating is only meaningful relative to the full universe.
        for ticker in tickers:
            if ticker not in universe_returns:
                try:
                    if isinstance(raw.columns, pd.MultiIndex):
                        # Assumes ticker symbols live at level 1 of the
                        # yfinance column MultiIndex (multi-ticker layout).
                        if ticker not in raw.columns.get_level_values(1):
                            continue
                        df = raw.xs(ticker, axis=1, level=1).dropna()
                    else:
                        df = raw.dropna()
                    ret = self._compute_return(df)
                    if ret is not None:
                        universe_returns[ticker] = ret
                except Exception:
                    continue
        # Compute RS ratings as percentile ranks within the universe
        if universe_returns:
            all_returns = list(universe_returns.values())
            all_returns_sorted = sorted(all_returns)
            n = len(all_returns_sorted)

            def percentile_rank(val: float) -> float:
                # Fraction of universe returns <= val, scaled to 0-100.
                # NOTE(review): O(n) per lookup -> O(n^2) overall; fine for
                # ~1000 tickers but a bisect on the sorted list would be O(log n).
                pos = sum(1 for r in all_returns_sorted if r <= val)
                return round((pos / n) * 100, 1)

            rs_ratings = {t: percentile_rank(r) for t, r in universe_returns.items()}
        else:
            rs_ratings = {}
        # Build final candidates: pass RS filter, sort, limit
        candidates = []
        for ticker, metrics in passing_tickers:
            rs_rating = rs_ratings.get(ticker, 0)
            if rs_rating < self.min_rs_rating:
                continue
            pct_off_low = metrics["pct_off_low"]
            pct_from_high = metrics["pct_from_high"]
            # Priority based on RS Rating
            if rs_rating >= 90:
                priority = Priority.CRITICAL.value
            elif rs_rating >= 80:
                priority = Priority.HIGH.value
            else:
                priority = Priority.MEDIUM.value
            context = (
                f"Minervini Stage 2: P>SMA50>SMA150>SMA200, "
                f"+{pct_off_low:.0f}% off 52w low, "
                f"within {pct_from_high:.0f}% of 52w high, "
                f"RS Rating {rs_rating:.0f}/100"
            )
            candidates.append(
                {
                    "ticker": ticker,
                    "source": self.name,
                    "context": context,
                    "priority": priority,
                    "strategy": self.strategy,
                    "rs_rating": rs_rating,
                    "pct_off_low": round(pct_off_low, 1),
                    "pct_from_high": round(pct_from_high, 1),
                    "sma_50": round(metrics["sma_50"], 2),
                    "sma_150": round(metrics["sma_150"], 2),
                    "sma_200": round(metrics["sma_200"], 2),
                }
            )
        # Sort by RS Rating descending, then limit
        candidates.sort(key=lambda c: c.get("rs_rating", 0), reverse=True)
        candidates = candidates[: self.limit]
        logger.info(
            f"Minervini scanner: {len(candidates)} Stage 2 candidates "
            f"(RS >= {self.min_rs_rating}) from {len(tickers)} tickers"
        )
        return candidates

    def _check_minervini(
        self, ticker: str, raw: pd.DataFrame
    ) -> Optional[Tuple[pd.DataFrame, Dict[str, Any]]]:
        """Apply the 6-condition Minervini trend template to one ticker.

        Returns (df, metrics) if all conditions pass, None otherwise.
        ``df`` is the single-ticker OHLCV slice; ``metrics`` holds the SMAs
        and 52-week distance percentages used downstream.
        """
        try:
            # Extract single-ticker slice
            if isinstance(raw.columns, pd.MultiIndex):
                if ticker not in raw.columns.get_level_values(1):
                    return None
                df = raw.xs(ticker, axis=1, level=1).dropna()
            else:
                df = raw.dropna()
            # Need at least 200 rows for SMA200
            if len(df) < 200:
                return None
            close = df["Close"]
            sma_50 = float(close.rolling(50).mean().iloc[-1])
            sma_150 = float(close.rolling(150).mean().iloc[-1])
            sma_200 = float(close.rolling(200).mean().iloc[-1])
            # SMA200 value sma_200_slope_days trading days ago, for the slope check.
            sma_200_prev = float(close.rolling(200).mean().iloc[-self.sma_200_slope_days - 1])
            price = float(close.iloc[-1])
            # 52-week extremes; with shorter history, use the full window available.
            low_52w = float(close.iloc[-252:].min()) if len(close) >= 252 else float(close.min())
            high_52w = float(close.iloc[-252:].max()) if len(close) >= 252 else float(close.max())
            # Guard against zero/negative values that would break the ratios below.
            if low_52w <= 0 or sma_50 <= 0 or sma_150 <= 0 or sma_200 <= 0:
                return None
            pct_off_low = ((price - low_52w) / low_52w) * 100
            pct_from_high = ((high_52w - price) / high_52w) * 100
            # Minervini's 6 conditions (all must pass)
            conditions = [
                price > sma_150 > sma_200,  # 1. Price > SMA150 > SMA200
                sma_150 > sma_200,  # 2. SMA150 above SMA200 (implied by 1; kept explicit)
                sma_200 > sma_200_prev,  # 3. SMA200 slope is rising
                price > sma_50,  # 4. Price above SMA50
                pct_off_low >= self.min_pct_off_low,  # 5. At least 30% off 52w low
                pct_from_high <= self.max_pct_from_high,  # 6. Within 25% of 52w high
            ]
            if not all(conditions):
                return None
            return df, {
                "sma_50": sma_50,
                "sma_150": sma_150,
                "sma_200": sma_200,
                "pct_off_low": pct_off_low,
                "pct_from_high": pct_from_high,
            }
        except Exception as e:
            # Per-ticker failures are expected (sparse data, delistings) — debug only.
            logger.debug(f"Minervini check failed for {ticker}: {e}")
            return None

    def _compute_return(self, df: pd.DataFrame) -> Optional[float]:
        """Compute IBD-style 12-month return with recent-quarter double-weighting.

        Formula: (full_year_return * 2 + last_quarter_return) / 3
        This weights recent momentum more heavily, matching IBD's RS methodology.

        NOTE(review): "year_ago" is simply the first row of the downloaded
        window — with a shorter lookback_period this is the window start,
        not a true 12-month return.
        """
        try:
            close = df["Close"] if "Close" in df.columns else df.iloc[:, 0]
            close = close.dropna()
            if len(close) < 2:
                return None
            latest = float(close.iloc[-1])
            year_ago = float(close.iloc[0])
            # ~63 trading days back = one quarter; clamp for short histories.
            quarter_ago = float(close.iloc[max(0, len(close) - 63)])
            if year_ago <= 0 or quarter_ago <= 0:
                return None
            full_year_ret = (latest - year_ago) / year_ago
            quarter_ret = (latest - quarter_ago) / quarter_ago
            # IBD weighting: recent quarter counts double
            return (full_year_ret * 2 + quarter_ret) / 3
        except Exception:
            return None


# Make the scanner discoverable by the discovery pipeline.
SCANNER_REGISTRY.register(MinerviniScanner)

View File

@ -28,7 +28,7 @@ class RedditDDScanner(BaseScanner):
try:
# Use Reddit DD scanner tool
result = execute_tool("scan_reddit_dd", limit=self.limit)
result = execute_tool("scan_reddit_dd", top_n=self.limit, as_list=True)
if not result:
logger.info("Found 0 DD posts")

View File

@ -51,6 +51,7 @@ class Strategy(str, Enum):
SOCIAL_DD = "social_dd"
SECTOR_ROTATION = "sector_rotation"
TECHNICAL_BREAKOUT = "technical_breakout"
MINERVINI = "minervini"
PRIORITY_ORDER = {

View File

@ -139,7 +139,7 @@ DEFAULT_CONFIG = {
"min_volume": 1000, # Minimum option volume to consider
# ticker_file: path to ticker list (defaults to tickers_file from root config)
# ticker_universe: explicit list overrides ticker_file if set
"max_tickers": 150, # Max tickers to scan (from start of file)
"max_tickers": 1000, # Max tickers to scan (from start of file)
"max_workers": 8, # Parallel option chain fetch threads
},
"congress_trades": {
@ -214,12 +214,22 @@ DEFAULT_CONFIG = {
"pipeline": "momentum",
"limit": 15,
"min_win_prob": 0.35, # Minimum P(WIN) to surface as candidate
"lookback_period": "1y", # OHLCV history to fetch (needs ~210 trading days)
"lookback_period": "6mo", # OHLCV history to fetch (needs ~130 trading days)
# ticker_file: path to ticker list (defaults to tickers_file from root config)
# ticker_universe: explicit list overrides ticker_file if set
"fetch_market_cap": False, # Skip for speed (1 NaN out of 30 features)
"max_workers": 8, # Parallel feature computation threads
},
"minervini": {
"enabled": True,
"pipeline": "momentum",
"limit": 10,
"min_rs_rating": 70, # Min IBD-style RS Rating (0-100)
"lookback_period": "1y", # Needs 200 trading days for SMA200
"sma_200_slope_days": 20, # Days back to check SMA200 slope
"min_pct_off_low": 30, # Must be 30%+ above 52w low
"max_pct_from_high": 25, # Must be within 25% of 52w high
},
},
},
# Memory settings

View File

@ -63,6 +63,7 @@ from tradingagents.dataflows.reddit_api import (
get_reddit_discussions,
get_reddit_news,
get_reddit_trending_tickers,
get_reddit_undiscovered_dd,
)
from tradingagents.dataflows.reddit_api import (
get_reddit_global_news as get_reddit_api_global_news,
@ -337,7 +338,7 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
"reddit": get_reddit_api_global_news,
# "alpha_vantage": get_alpha_vantage_global_news,
},
"vendor_priority": ["openai", "google", "reddit"],
"vendor_priority": ["openai", "reddit"],
"execution_mode": "aggregate",
"parameters": {
"date": {"type": "str", "description": "Date for news, yyyy-mm-dd"},
@ -587,6 +588,34 @@ TOOL_REGISTRY: Dict[str, Dict[str, Any]] = {
},
"returns": "str: Reddit discussions and sentiment",
},
"scan_reddit_dd": {
"description": "Scan Reddit for high-quality due diligence posts",
"category": "discovery",
"agents": ["social"],
"vendors": {
"reddit": get_reddit_undiscovered_dd,
},
"vendor_priority": ["reddit"],
"parameters": {
"lookback_hours": {"type": "int", "description": "Hours to look back", "default": 72},
"scan_limit": {
"type": "int",
"description": "Number of new posts to scan",
"default": 100,
},
"top_n": {
"type": "int",
"description": "Number of top DD posts to return",
"default": 10,
},
"num_comments": {
"type": "int",
"description": "Number of top comments to include",
"default": 10,
},
},
"returns": "str: Report of high-quality undiscovered DD",
},
"get_options_activity": {
"description": "Get options activity for a specific ticker (volume, open interest, put/call ratios, unusual activity)",
"category": "discovery",