feat: Add Perplexity Finance API and conversational AI research interface
PERPLEXITY FINANCE INTEGRATION: - Complete connector for real-time financial analysis - Stock analysis with fundamental, technical, and valuation modes - Natural language stock screening - Market sentiment analysis - Earnings analysis and insider trading monitoring - Congressional trades integration - Peer comparison and similarity analysis AI RESEARCH AGENT: - LangChain-powered conversational interface - Natural language investment queries - Multi-tool orchestration for comprehensive research - Portfolio gap analysis - Risk-reward assessment - Automated opportunity discovery - Caching and performance optimization RESEARCH CLI: - Interactive command-line interface - Rich formatting with tables and panels - Stock screening with filters - Investment opportunity finder - Query history tracking - Real-time market analysis SCHEDULER INTEGRATION: - AI research analysis every 2 hours - Weekly opportunity scanning - Automatic alerts for high-confidence signals - Portfolio position analysis with Perplexity - Market insights generation EXAMPLE USAGE: - research_demo.py showcases all capabilities - Natural language queries like "Find undervalued tech stocks" - Screening with specific criteria - Portfolio optimization suggestions This enables asking questions like: - "What undervalued companies should I invest in?" - "Is NVDA overvalued at current prices?" - "What are Congress members buying?" - "Find dividend stocks yielding over 4%" 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
parent
380f29768e
commit
950edd4acf
|
|
@ -0,0 +1,729 @@
|
|||
"""
|
||||
Perplexity Finance API Connector for real-time financial analysis and research.
|
||||
Provides sophisticated market insights, company analysis, and investment research.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import json
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime, timedelta
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
import aiohttp
|
||||
from pydantic import BaseModel, Field
|
||||
import logging
|
||||
|
||||
from autonomous.core.cache import RedisCache, CacheKey
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AnalysisType(str, Enum):
    """Types of financial analysis available.

    The ``str`` mixin makes members compare equal to their raw string
    values, so they can be used directly in cache keys and prompts.
    """
    FUNDAMENTAL = "fundamental"      # valuation / profitability / balance-sheet review
    TECHNICAL = "technical"          # price action, indicators, chart patterns
    SENTIMENT = "sentiment"          # market mood and positioning
    EARNINGS = "earnings"            # earnings results and guidance
    VALUATION = "valuation"          # DCF / comparables / scenario valuation
    COMPETITIVE = "competitive"      # peer and moat analysis
    MACRO = "macro"                  # macroeconomic context
    INSIDER = "insider"              # insider-trading activity
    INSTITUTIONAL = "institutional"  # institutional ownership / flows
    OPTIONS_FLOW = "options_flow"    # options order-flow signals
|
||||
|
||||
|
||||
class ResearchDepth(str, Enum):
    """Depth of research analysis.

    Passed to prompt builders; DEEP and EXPERT append extra instructions
    to the prompt, QUICK and STANDARD add nothing beyond the base prompt.
    """
    QUICK = "quick"        # Fast, surface-level analysis
    STANDARD = "standard"  # Regular depth analysis
    DEEP = "deep"          # Comprehensive deep dive
    EXPERT = "expert"      # Expert-level with all data sources
|
||||
|
||||
|
||||
@dataclass
class StockAnalysis:
    """Complete stock analysis result.

    Built by PerplexityFinanceConnector._parse_analysis; Optional fields
    are None (and current_price is 0) when the JSON-extraction step could
    not recover a value from the narrative analysis.
    """
    ticker: str                  # stock symbol the analysis refers to
    timestamp: datetime          # when the analysis was generated
    analysis_type: AnalysisType  # which analysis mode produced this result

    # Core metrics
    current_price: float         # 0 when extraction failed
    fair_value: Optional[float]
    upside_potential: Optional[float]

    # Fundamental data
    pe_ratio: Optional[float]
    peg_ratio: Optional[float]
    price_to_book: Optional[float]
    debt_to_equity: Optional[float]
    roe: Optional[float]
    revenue_growth: Optional[float]
    earnings_growth: Optional[float]

    # Analysis results
    bull_case: str
    bear_case: str
    key_risks: List[str]
    catalysts: List[str]

    # Recommendations
    rating: str              # Buy, Hold, Sell
    confidence_score: float  # 0-100
    time_horizon: str        # short, medium, long

    # Raw analysis text
    detailed_analysis: str   # full narrative returned by the model
    data_sources: List[str]  # provenance labels, e.g. "Perplexity AI"
|
||||
|
||||
|
||||
@dataclass
class MarketScreenerResult:
    """Result from market screening queries.

    As populated by _parse_screening_results: ``best_value`` holds the
    first few extracted tickers while ``highest_growth`` / ``lowest_risk``
    are currently always left empty.
    """
    query: str           # the natural-language screening query
    timestamp: datetime  # when the screen was run
    total_results: int   # number of stocks extracted

    stocks: List[Dict[str, Any]]  # List of matching stocks with details
    screening_criteria: Dict[str, Any]
    market_context: str

    # Top picks
    best_value: List[str]
    highest_growth: List[str]
    lowest_risk: List[str]

    detailed_explanation: str  # full raw screening response text
|
||||
|
||||
|
||||
class PerplexityFinanceConnector:
|
||||
"""
|
||||
Connector for Perplexity Finance API providing advanced financial analysis.
|
||||
Combines multiple data sources for comprehensive investment research.
|
||||
"""
|
||||
|
||||
def __init__(self,
             api_key: Optional[str] = None,
             cache: Optional[RedisCache] = None,
             rate_limit: int = 50):  # requests per minute
    """
    Initialize Perplexity Finance connector.

    Args:
        api_key: Perplexity API key; falls back to the PERPLEXITY_API_KEY
            environment variable when omitted
        cache: Redis cache instance used to memoize analyses
        rate_limit: Maximum requests per minute

    Raises:
        ValueError: If no API key is given and none is in the environment.
    """
    self.api_key = api_key or os.getenv('PERPLEXITY_API_KEY')
    if not self.api_key:
        raise ValueError("Perplexity API key required")

    self.base_url = "https://api.perplexity.ai"
    self.cache = cache
    self.rate_limit = rate_limit
    # Backdate the throttle marker: initializing it to now() would make
    # _rate_limit sleep a full interval before the very first request
    # ever goes out.
    self.last_request_time = datetime.now() - timedelta(minutes=1)

    # Headers for API requests
    self.headers = {
        "Authorization": f"Bearer {self.api_key}",
        "Content-Type": "application/json"
    }

    # Finance-specific model that has access to real-time data
    self.finance_model = "pplx-70b-online"  # or "pplx-7b-online" for faster
|
||||
|
||||
async def analyze_stock(self,
                        ticker: str,
                        analysis_type: AnalysisType = AnalysisType.FUNDAMENTAL,
                        depth: ResearchDepth = ResearchDepth.STANDARD) -> StockAnalysis:
    """
    Perform comprehensive analysis on a single stock.

    Results are cached for one hour keyed by ticker and analysis type.
    Note the key does NOT include ``depth``, so a QUICK result can be
    served for a later DEEP request within the TTL.

    Args:
        ticker: Stock symbol
        analysis_type: Type of analysis to perform
        depth: Depth of research

    Returns:
        StockAnalysis object with complete findings
    """
    # Check cache first
    cache_key = f"{CacheKey.AI_DECISION}:perplexity:{ticker}:{analysis_type}"
    if self.cache:
        cached = await self.cache.get(cache_key)
        if cached:
            logger.info(f"Using cached Perplexity analysis for {ticker}")
            # NOTE(review): assumes the cached dict round-trips cleanly back
            # into StockAnalysis (datetime/enum fields included) — confirm
            # against RedisCache's serialization behavior.
            return StockAnalysis(**cached)

    # Construct analysis prompt based on type
    prompt = self._build_analysis_prompt(ticker, analysis_type, depth)

    # Make API request with financial context
    analysis_text = await self._query_perplexity(
        prompt,
        context="financial_analysis",
        include_sources=True
    )

    # Parse the analysis into structured format (second extraction call)
    result = await self._parse_analysis(ticker, analysis_text, analysis_type)

    # Cache the result
    if self.cache:
        await self.cache.set(cache_key, result.__dict__, ttl=3600)  # 1 hour cache

    return result
|
||||
|
||||
async def screen_stocks(self,
                        query: str,
                        max_results: int = 20,
                        filters: Optional[Dict[str, Any]] = None) -> MarketScreenerResult:
    """
    Screen stocks based on natural language query.

    Unlike analyze_stock, screening results are not cached — every call
    hits the API (twice: once to screen, once to extract structure).

    Args:
        query: Natural language screening query
        max_results: Maximum number of stocks to return
        filters: Additional filters (market cap, sector, etc.)

    Returns:
        MarketScreenerResult with matching stocks
    """
    # Build comprehensive screening prompt
    prompt = f"""
    Financial Stock Screening Request:
    {query}

    Requirements:
    1. Search across US listed stocks
    2. Return up to {max_results} stocks that match
    3. Include current price, market cap, P/E ratio, and key metrics
    4. Rank by best match to the query criteria
    5. Explain why each stock matches
    6. Consider recent market conditions

    Additional filters: {json.dumps(filters) if filters else 'None'}

    Provide:
    - List of matching stocks with tickers
    - Key metrics for each
    - Brief explanation of fit
    - Overall market context
    """

    # Query Perplexity with financial context
    response = await self._query_perplexity(
        prompt,
        context="stock_screening",
        include_sources=True
    )

    # Parse screening results into structured form
    result = await self._parse_screening_results(query, response)

    return result
|
||||
|
||||
async def research_investment_thesis(self,
                                     question: str,
                                     tickers: Optional[List[str]] = None,
                                     include_portfolio_context: bool = True) -> Dict[str, Any]:
    """
    Answer complex investment research questions using Perplexity's knowledge.

    Args:
        question: Investment research question
        tickers: Optional list of tickers to focus on
        include_portfolio_context: Include current portfolio in analysis.
            Only takes effect when ``tickers`` is also provided — no
            holdings are fetched from anywhere else.

    Returns:
        Structured research dict (see _structure_research_response)
    """
    # Build context-aware prompt
    prompt = f"""
    Investment Research Query:
    {question}

    {"Focus on these stocks: " + ", ".join(tickers) if tickers else ""}

    Please provide:
    1. Direct answer to the question
    2. Supporting data and metrics
    3. Current market context
    4. Specific actionable recommendations
    5. Risk factors to consider
    6. Time horizon for thesis
    7. Alternative perspectives

    Use the most recent financial data available and cite sources.
    """

    # Add portfolio context if requested.
    # NOTE(review): the "portfolio" is just the focus tickers repeated —
    # confirm that is the intended behavior.
    if include_portfolio_context and tickers:
        prompt += f"\nCurrent portfolio includes: {', '.join(tickers)}"

    # Query with a larger token budget for complex research
    response = await self._query_perplexity(
        prompt,
        context="investment_research",
        include_sources=True,
        max_tokens=2000
    )

    # Structure the research response
    return self._structure_research_response(question, response)
|
||||
|
||||
async def get_market_sentiment(self,
                               sector: Optional[str] = None) -> Dict[str, Any]:
    """
    Get current market sentiment and trends.

    Args:
        sector: Optional sector to focus on; None analyzes the overall market

    Returns:
        Dict with ISO timestamp, sector label ("market" when none given),
        the raw analysis text, and a data-freshness tag
    """
    prompt = f"""
    Analyze current market sentiment {f'for {sector} sector' if sector else 'overall'}:

    1. Bull vs Bear sentiment
    2. Key concerns and opportunities
    3. Institutional positioning
    4. Retail investor sentiment
    5. Options flow indicators
    6. Technical levels to watch
    7. Upcoming catalysts

    Based on the last 24-48 hours of market activity.
    """

    response = await self._query_perplexity(prompt, context="market_sentiment")

    return {
        "timestamp": datetime.now().isoformat(),
        "sector": sector or "market",
        "analysis": response,
        "data_freshness": "real-time"
    }
|
||||
|
||||
async def analyze_earnings(self,
                           ticker: str,
                           include_guidance: bool = True) -> Dict[str, Any]:
    """
    Analyze recent earnings and forward guidance.

    Args:
        ticker: Stock symbol
        include_guidance: Include forward guidance analysis

    Returns:
        Dict with ticker, ISO timestamp, raw analysis text, and the
        guidance flag that was used
    """
    # NOTE(review): when include_guidance is False the numbered list
    # skips item 5 (reads 4 -> blank -> 6); harmless to the LLM but
    # worth tidying.
    prompt = f"""
    Analyze {ticker} earnings:

    1. Most recent earnings results vs expectations
    2. Revenue and EPS growth trends
    3. Key metrics and KPIs
    4. Management commentary highlights
    {"5. Forward guidance analysis" if include_guidance else ""}
    6. Analyst revisions post-earnings
    7. Price target changes
    8. Key takeaways for investors

    Include specific numbers and percentages.
    """

    response = await self._query_perplexity(
        prompt,
        context="earnings_analysis",
        include_sources=True
    )

    return {
        "ticker": ticker,
        "timestamp": datetime.now().isoformat(),
        "analysis": response,
        "includes_guidance": include_guidance
    }
|
||||
|
||||
async def find_similar_stocks(self,
                              ticker: str,
                              criteria: str = "business_model") -> List[Dict[str, Any]]:
    """
    Find stocks similar to a given ticker.

    Args:
        ticker: Reference stock symbol
        criteria: Similarity criteria (business_model, valuation, growth, etc.)

    Returns:
        List of peer dicts extracted heuristically from the response
        (see _parse_peer_comparison)
    """
    prompt = f"""
    Find stocks similar to {ticker} based on {criteria}:

    1. List 5-10 similar companies
    2. Compare key metrics
    3. Highlight advantages/disadvantages vs {ticker}
    4. Current valuation comparison
    5. Growth rate comparison
    6. Risk profile comparison

    Focus on investable alternatives.
    """

    response = await self._query_perplexity(prompt, context="peer_analysis")

    # Parse into structured format
    return self._parse_peer_comparison(ticker, response)
|
||||
|
||||
async def analyze_insider_activity(self,
                                   ticker: str,
                                   days_back: int = 90) -> Dict[str, Any]:
    """
    Analyze insider trading activity.

    Args:
        ticker: Stock symbol
        days_back: Days of history to analyze

    Returns:
        Dict with ticker, period length, raw analysis text and ISO timestamp
    """
    prompt = f"""
    Analyze insider trading for {ticker} over the last {days_back} days:

    1. Major insider purchases/sales
    2. C-suite and board activity
    3. Transaction sizes and prices
    4. Historical pattern comparison
    5. Sentiment signal (bullish/bearish/neutral)
    6. Notable 10b5-1 plan changes

    Interpret what the insider activity suggests.
    """

    response = await self._query_perplexity(prompt, context="insider_trading")

    return {
        "ticker": ticker,
        "period_days": days_back,
        "analysis": response,
        "timestamp": datetime.now().isoformat()
    }
|
||||
|
||||
async def _query_perplexity(self,
                            prompt: str,
                            context: str = "general",
                            include_sources: bool = True,
                            max_tokens: int = 1500) -> str:
    """
    Make API request to Perplexity.

    Applies client-side rate limiting first, then POSTs a chat-completion
    request with finance-oriented search filters and low temperature.

    Args:
        prompt: Query prompt
        context: Context label interpolated into the system prompt
        include_sources: Include source citations
        max_tokens: Maximum response tokens

    Returns:
        The first choice's message content from the API response.

    Raises:
        asyncio.TimeoutError: If the request exceeds the 30s total timeout.
        Exception: On any non-200 API response.
    """
    # Rate limiting
    await self._rate_limit()

    # Prepare request payload
    payload = {
        "model": self.finance_model,
        "messages": [
            {
                "role": "system",
                "content": f"You are a senior financial analyst providing {context} analysis. "
                           "Use real-time market data and cite credible sources. "
                           "Be specific with numbers, percentages, and dates."
            },
            {
                "role": "user",
                "content": prompt
            }
        ],
        "max_tokens": max_tokens,
        "temperature": 0.2,  # Lower temperature for factual finance data
        "return_citations": include_sources,
        "search_domain_filter": ["finance", "investing", "markets"],
        "search_recency_filter": "day"  # Prioritize recent information
    }

    # A fresh session per request keeps the method self-contained.
    # NOTE(review): a shared ClientSession would allow connection reuse —
    # confirm call volume before changing.
    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=payload,
                timeout=aiohttp.ClientTimeout(total=30)
            ) as response:
                if response.status == 200:
                    data = await response.json()
                    return data['choices'][0]['message']['content']
                else:
                    error = await response.text()
                    logger.error(f"Perplexity API error: {error}")
                    raise Exception(f"API request failed: {error}")

        except asyncio.TimeoutError:
            logger.error("Perplexity API request timed out")
            raise
        # The non-200 Exception raised above lands here too, so API
        # failures are logged twice before propagating.
        except Exception as e:
            logger.error(f"Perplexity API error: {e}")
            raise
|
||||
|
||||
async def _rate_limit(self):
|
||||
"""Implement rate limiting"""
|
||||
now = datetime.now()
|
||||
time_since_last = (now - self.last_request_time).total_seconds()
|
||||
min_interval = 60 / self.rate_limit # seconds between requests
|
||||
|
||||
if time_since_last < min_interval:
|
||||
await asyncio.sleep(min_interval - time_since_last)
|
||||
|
||||
self.last_request_time = datetime.now()
|
||||
|
||||
def _build_analysis_prompt(self,
                           ticker: str,
                           analysis_type: AnalysisType,
                           depth: ResearchDepth) -> str:
    """Build analysis prompt based on type and depth.

    Only FUNDAMENTAL, TECHNICAL and VALUATION get a detailed checklist;
    every other AnalysisType falls through with just the one-line base
    prompt. QUICK and STANDARD depths add no modifier.
    """

    base_prompt = f"Analyze {ticker} stock with focus on {analysis_type.value} analysis.\n\n"

    if analysis_type == AnalysisType.FUNDAMENTAL:
        base_prompt += """
        Include:
        1. Current valuation metrics (P/E, PEG, P/B, EV/EBITDA)
        2. Profitability metrics (ROE, ROA, profit margins)
        3. Growth metrics (revenue, earnings, FCF growth)
        4. Balance sheet strength (debt/equity, current ratio)
        5. Competitive position and moat
        6. Management quality and capital allocation
        7. Fair value estimate using DCF or comparable analysis
        8. Investment recommendation with price target
        """

    elif analysis_type == AnalysisType.TECHNICAL:
        base_prompt += """
        Include:
        1. Current price action and trend
        2. Key support and resistance levels
        3. Moving averages (20, 50, 200 day)
        4. RSI, MACD, and momentum indicators
        5. Volume analysis
        6. Chart patterns forming
        7. Fibonacci levels if relevant
        8. Short-term and medium-term outlook
        """

    elif analysis_type == AnalysisType.VALUATION:
        base_prompt += """
        Perform comprehensive valuation:
        1. DCF model with assumptions
        2. Comparable company analysis
        3. Precedent transaction analysis
        4. Sum-of-parts if applicable
        5. Sensitivity analysis on key variables
        6. Bear, base, and bull case scenarios
        7. Margin of safety calculation
        8. Investment recommendation
        """

    # Add depth modifiers
    if depth == ResearchDepth.DEEP:
        base_prompt += "\n\nProvide extensive detail with specific numbers, calculations, and comprehensive analysis."
    elif depth == ResearchDepth.EXPERT:
        base_prompt += "\n\nProvide institutional-quality analysis with detailed models, risk scenarios, and actionable insights."

    return base_prompt
|
||||
|
||||
async def _parse_analysis(self,
                          ticker: str,
                          raw_analysis: str,
                          analysis_type: AnalysisType) -> StockAnalysis:
    """Parse raw analysis text into a structured StockAnalysis.

    Issues a second, small Perplexity call asking for the key fields as
    JSON, then best-effort parses the first JSON object in the reply.
    Fields the extraction misses fall back to neutral defaults so
    callers always get a complete StockAnalysis.

    Args:
        ticker: Stock symbol the analysis refers to
        raw_analysis: Narrative analysis text to mine for fields
        analysis_type: Analysis mode recorded on the result

    Returns:
        StockAnalysis populated from the extraction (or defaults).
    """
    # Use another Perplexity call to extract structured data; only the
    # first 2000 chars are sent to keep the extraction call cheap.
    extract_prompt = f"""
    From this analysis of {ticker}, extract:

    1. Current price
    2. Fair value estimate
    3. P/E ratio
    4. PEG ratio
    5. Rating (Buy/Hold/Sell)
    6. Key risks (list)
    7. Catalysts (list)
    8. Confidence score (0-100)

    Analysis text:
    {raw_analysis[:2000]}

    Return in JSON format.
    """

    structured = await self._query_perplexity(
        extract_prompt,
        context="data_extraction",
        max_tokens=500
    )

    # Pull the first JSON object out of the reply. Catch only decode
    # errors: the previous bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt, which must propagate.
    import re
    data: Dict[str, Any] = {}
    json_match = re.search(r'\{.*\}', structured, re.DOTALL)
    if json_match:
        try:
            data = json.loads(json_match.group())
        except json.JSONDecodeError:
            logger.warning(f"Could not decode extraction JSON for {ticker}")

    # Build StockAnalysis object with defensive defaults
    return StockAnalysis(
        ticker=ticker,
        timestamp=datetime.now(),
        analysis_type=analysis_type,
        current_price=data.get('current_price', 0),
        fair_value=data.get('fair_value'),
        upside_potential=data.get('upside_potential'),
        pe_ratio=data.get('pe_ratio'),
        peg_ratio=data.get('peg_ratio'),
        price_to_book=None,
        debt_to_equity=None,
        roe=None,
        revenue_growth=None,
        earnings_growth=None,
        bull_case=data.get('bull_case', ''),
        bear_case=data.get('bear_case', ''),
        key_risks=data.get('key_risks', []),
        catalysts=data.get('catalysts', []),
        rating=data.get('rating', 'Hold'),
        confidence_score=data.get('confidence_score', 50),
        time_horizon='medium',
        detailed_analysis=raw_analysis,
        data_sources=['Perplexity AI', 'Real-time market data']
    )
|
||||
|
||||
async def _parse_screening_results(self,
                                   query: str,
                                   raw_response: str) -> MarketScreenerResult:
    """Parse a screening response into a structured MarketScreenerResult.

    Runs a follow-up extraction call asking for a JSON array of stocks,
    then best-effort parses it. Malformed JSON degrades to an empty
    stock list rather than raising.

    Args:
        query: The original natural-language screening query
        raw_response: Raw screening response text from Perplexity

    Returns:
        MarketScreenerResult; ``best_value`` holds the first extracted
        tickers, growth/risk pick lists are left empty.
    """
    # Extract stock list using Perplexity (first 1500 chars keeps the
    # extraction call cheap)
    extract_prompt = f"""
    From this screening result, extract a list of stock tickers with their key metrics.
    Format as JSON array with ticker, company_name, price, market_cap, pe_ratio for each.

    Response:
    {raw_response[:1500]}
    """

    structured = await self._query_perplexity(
        extract_prompt,
        context="data_extraction",
        max_tokens=500
    )

    # Parse the first JSON array in the reply; catch only decode errors
    # (the previous bare `except:` swallowed KeyboardInterrupt/SystemExit).
    import re
    stocks: List[Dict[str, Any]] = []
    json_match = re.search(r'\[.*\]', structured, re.DOTALL)
    if json_match:
        try:
            stocks = json.loads(json_match.group())
        except json.JSONDecodeError:
            logger.warning("Could not decode screening extraction JSON")

    # Guard against entries the model emitted without a ticker so the
    # top-picks list cannot raise KeyError.
    top_tickers = [s['ticker'] for s in stocks[:3]
                   if isinstance(s, dict) and 'ticker' in s]

    return MarketScreenerResult(
        query=query,
        timestamp=datetime.now(),
        total_results=len(stocks),
        stocks=stocks,
        screening_criteria={},
        market_context="",
        best_value=top_tickers,
        highest_growth=[],
        lowest_risk=[],
        detailed_explanation=raw_response
    )
|
||||
|
||||
def _structure_research_response(self,
|
||||
question: str,
|
||||
response: str) -> Dict[str, Any]:
|
||||
"""Structure research response into organized format"""
|
||||
|
||||
return {
|
||||
"question": question,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
"answer": response,
|
||||
"sections": {
|
||||
"summary": response[:500] if len(response) > 500 else response,
|
||||
"detailed_analysis": response,
|
||||
"actionable_insights": self._extract_actionables(response),
|
||||
"risks": self._extract_risks(response),
|
||||
"data_sources": ["Perplexity AI", "Real-time market data"]
|
||||
},
|
||||
"confidence": "high" if "strong" in response.lower() else "medium",
|
||||
"requires_update": False
|
||||
}
|
||||
|
||||
def _extract_actionables(self, text: str) -> List[str]:
|
||||
"""Extract actionable insights from text"""
|
||||
actionables = []
|
||||
|
||||
# Look for action words
|
||||
action_keywords = ['buy', 'sell', 'hold', 'consider', 'avoid', 'monitor', 'wait']
|
||||
lines = text.split('\n')
|
||||
|
||||
for line in lines:
|
||||
if any(keyword in line.lower() for keyword in action_keywords):
|
||||
actionables.append(line.strip())
|
||||
|
||||
return actionables[:5] # Top 5 actionables
|
||||
|
||||
def _extract_risks(self, text: str) -> List[str]:
|
||||
"""Extract risk factors from text"""
|
||||
risks = []
|
||||
|
||||
risk_keywords = ['risk', 'concern', 'threat', 'challenge', 'weakness', 'vulnerable']
|
||||
lines = text.split('\n')
|
||||
|
||||
for line in lines:
|
||||
if any(keyword in line.lower() for keyword in risk_keywords):
|
||||
risks.append(line.strip())
|
||||
|
||||
return risks[:5] # Top 5 risks
|
||||
|
||||
def _parse_peer_comparison(self, ticker: str, response: str) -> List[Dict[str, Any]]:
|
||||
"""Parse peer comparison response"""
|
||||
|
||||
# Simple extraction of mentioned tickers
|
||||
import re
|
||||
|
||||
# Find all uppercase ticker symbols
|
||||
potential_tickers = re.findall(r'\b[A-Z]{2,5}\b', response)
|
||||
|
||||
# Filter out the original ticker and common words
|
||||
peers = [t for t in potential_tickers if t != ticker and len(t) <= 5][:10]
|
||||
|
||||
return [
|
||||
{
|
||||
"ticker": peer,
|
||||
"similarity_score": 0.8, # Would calculate based on metrics
|
||||
"comparison": f"Similar to {ticker}",
|
||||
"advantage": "Extracted from analysis",
|
||||
"disadvantage": "Extracted from analysis"
|
||||
}
|
||||
for peer in peers[:5]
|
||||
]
|
||||
|
|
@ -0,0 +1,926 @@
|
|||
"""
|
||||
AI Research Agent - Conversational interface for complex investment research.
|
||||
Combines multiple data sources to answer sophisticated investment questions.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Dict, List, Optional, Any, Tuple
|
||||
from datetime import datetime, timedelta
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
import logging
|
||||
from decimal import Decimal
|
||||
|
||||
from langchain.agents import AgentExecutor
|
||||
from langchain.agents.format_scratchpad import format_to_openai_function_messages
|
||||
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
|
||||
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
|
||||
from langchain.tools import Tool
|
||||
from langchain_openai import ChatOpenAI
|
||||
from langchain.memory import ConversationBufferMemory
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
# Import our connectors and modules
|
||||
from autonomous.connectors.perplexity_finance import (
|
||||
PerplexityFinanceConnector,
|
||||
AnalysisType,
|
||||
ResearchDepth
|
||||
)
|
||||
from autonomous.core.database import DatabaseManager
|
||||
from autonomous.core.cache import RedisCache
|
||||
from autonomous.data_aggregator import DataAggregator
|
||||
from autonomous.signal_processor import SignalProcessor
|
||||
from autonomous.core.risk_manager import RiskManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ResearchQuery(BaseModel):
    """Structure for research queries.

    Input to AIResearchAgent.research; the question text also drives the
    response cache key, so byte-identical questions share cache entries.
    """
    question: str = Field(..., description="The investment research question")
    context: Optional[Dict[str, Any]] = Field(default=None, description="Additional context")
    depth: str = Field(default="standard", description="Depth of analysis: quick, standard, deep")
    include_portfolio: bool = Field(default=True, description="Consider current portfolio")
    time_horizon: Optional[str] = Field(default=None, description="Investment time horizon")
|
||||
|
||||
|
||||
class ResearchResponse(BaseModel):
    """Structure for research responses.

    Returned by AIResearchAgent.research; on agent failure a fallback
    instance with confidence 0.0 is produced instead of raising.
    """
    query: str                             # the original question text
    answer: str                            # narrative answer
    confidence: float                      # confidence in the answer (0.0 on failure)
    data_points: List[Dict[str, Any]]      # supporting metrics / facts
    recommendations: List[Dict[str, Any]]  # actionable suggestions
    risks: List[str]                       # surfaced risk factors
    sources: List[str]                     # citation labels
    timestamp: datetime                    # when the response was generated
    follow_up_questions: List[str]         # suggested next queries
|
||||
|
||||
|
||||
@dataclass
class ScreeningCriteria:
    """Criteria for stock screening.

    Every field is optional; None means "no constraint on this metric".
    NOTE(review): units are not pinned down here — market caps are
    presumably absolute USD and the growth/yield/margin fields plain
    ratios or percentages; confirm against the callers.
    """
    min_market_cap: Optional[float] = None
    max_market_cap: Optional[float] = None
    min_pe: Optional[float] = None
    max_pe: Optional[float] = None
    min_revenue_growth: Optional[float] = None
    min_roe: Optional[float] = None
    sectors: Optional[List[str]] = None          # restrict to these sectors
    exclude_sectors: Optional[List[str]] = None  # drop these sectors
    min_dividend_yield: Optional[float] = None
    max_debt_to_equity: Optional[float] = None
    min_profit_margin: Optional[float] = None
|
||||
|
||||
|
||||
class ResearchMode(str, Enum):
    """Different research modes.

    Controls caching behavior in AIResearchAgent.research: QUICK_ANSWER
    reads from cache, REAL_TIME skips writing to it.
    """
    QUICK_ANSWER = "quick"            # Fast response with cached data
    COMPREHENSIVE = "comprehensive"   # Full analysis with all sources
    REAL_TIME = "real_time"           # Priority on latest data
    HISTORICAL = "historical"         # Focus on historical patterns
    COMPARATIVE = "comparative"       # Compare multiple options
|
||||
|
||||
|
||||
class AIResearchAgent:
|
||||
"""
|
||||
Advanced AI Research Agent that can answer complex investment questions
|
||||
by orchestrating multiple data sources and analysis tools.
|
||||
"""
|
||||
|
||||
def __init__(self,
             openai_api_key: str,
             perplexity_connector: Optional[PerplexityFinanceConnector] = None,
             db_manager: Optional[DatabaseManager] = None,
             cache: Optional[RedisCache] = None):
    """
    Initialize the AI Research Agent.

    Args:
        openai_api_key: OpenAI API key for LLM
        perplexity_connector: Perplexity Finance connector; a default one
            is constructed when omitted (which requires the
            PERPLEXITY_API_KEY environment variable to be set)
        db_manager: Database manager
        cache: Redis cache
    """
    # Low-ish temperature: research answers should stay mostly factual.
    self.llm = ChatOpenAI(
        temperature=0.3,
        model="gpt-4o-mini",
        openai_api_key=openai_api_key
    )

    self.perplexity = perplexity_connector or PerplexityFinanceConnector()
    self.db = db_manager
    self.cache = cache
    self.data_aggregator = DataAggregator()
    self.signal_processor = SignalProcessor()
    self.risk_manager = RiskManager()

    # Conversation memory so follow-up questions keep prior context
    self.memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True
    )

    # Create research tools the agent can invoke
    self.tools = self._create_research_tools()

    # Setup the agent executor itself
    self.agent = self._setup_agent()
|
||||
|
||||
def _create_research_tools(self) -> List[Tool]:
    """Create tools for the research agent.

    Each Tool wraps one ``self._tool_*`` method with a natural-language
    description the LLM uses for tool selection; the "Input:" hint tells
    the model what to pass as the single string argument.

    NOTE(review): ``Tool(func=...)`` expects a synchronous callable — if
    the _tool_* methods (defined elsewhere in this file) are coroutines,
    they should be supplied via the ``coroutine=`` parameter instead;
    confirm their definitions.
    """

    tools = [
        Tool(
            name="analyze_stock_fundamental",
            func=self._tool_analyze_fundamental,
            description="Analyze fundamental data for a specific stock. Input: ticker symbol"
        ),
        Tool(
            name="screen_undervalued_stocks",
            func=self._tool_screen_undervalued,
            description="Find undervalued stocks based on criteria. Input: JSON criteria"
        ),
        Tool(
            name="compare_stocks",
            func=self._tool_compare_stocks,
            description="Compare multiple stocks. Input: comma-separated tickers"
        ),
        Tool(
            name="analyze_sector",
            func=self._tool_analyze_sector,
            description="Analyze a specific sector. Input: sector name"
        ),
        Tool(
            name="get_market_sentiment",
            func=self._tool_get_sentiment,
            description="Get current market sentiment. Input: 'overall' or sector name"
        ),
        Tool(
            name="analyze_portfolio_gaps",
            func=self._tool_analyze_portfolio_gaps,
            description="Identify gaps in current portfolio. Input: 'analyze'"
        ),
        Tool(
            name="find_growth_stocks",
            func=self._tool_find_growth,
            description="Find high-growth stocks. Input: JSON with criteria"
        ),
        Tool(
            name="analyze_risk_reward",
            func=self._tool_analyze_risk_reward,
            description="Analyze risk-reward for a stock. Input: ticker symbol"
        ),
        Tool(
            name="get_earnings_calendar",
            func=self._tool_get_earnings,
            description="Get upcoming earnings. Input: number of days ahead"
        ),
        Tool(
            name="analyze_insider_trading",
            func=self._tool_analyze_insider,
            description="Analyze insider trading activity. Input: ticker symbol"
        ),
        Tool(
            name="technical_analysis",
            func=self._tool_technical_analysis,
            description="Perform technical analysis. Input: ticker symbol"
        ),
        Tool(
            name="find_dividend_stocks",
            func=self._tool_find_dividends,
            description="Find high-quality dividend stocks. Input: minimum yield"
        ),
        Tool(
            name="analyze_congressional_trades",
            func=self._tool_congressional_trades,
            description="Analyze congressional trading activity. Input: days back"
        ),
        Tool(
            name="portfolio_optimization",
            func=self._tool_optimize_portfolio,
            description="Suggest portfolio optimizations. Input: risk tolerance (low/medium/high)"
        ),
        Tool(
            name="macroeconomic_analysis",
            func=self._tool_macro_analysis,
            description="Analyze macroeconomic factors. Input: 'current'"
        )
    ]

    return tools
|
||||
|
||||
async def research(self,
                   query: ResearchQuery,
                   mode: ResearchMode = ResearchMode.COMPREHENSIVE) -> ResearchResponse:
    """
    Execute a research query and return a comprehensive response.

    Args:
        query: Research query object
        mode: Research mode determining depth and speed

    Returns:
        ResearchResponse with answer and supporting data; on failure a
        zero-confidence ResearchResponse describing the error is returned
        instead of raising.
    """
    import hashlib  # function-scope: only needed for cache key derivation

    logger.info(f"Processing research query: {query.question[:100]}...")

    # Fix: the builtin hash() of a str is salted per process (PYTHONHASHSEED),
    # so "research:{hash(q)}" keys never match across restarts or between
    # workers sharing the cache. A SHA-256 digest is deterministic everywhere.
    cache_key = f"research:{hashlib.sha256(query.question.encode('utf-8')).hexdigest()}"

    # Check cache for recent identical queries (quick-answer mode only)
    if self.cache and mode == ResearchMode.QUICK_ANSWER:
        cached = await self.cache.get(cache_key)
        if cached:
            logger.info("Returning cached research response")
            return ResearchResponse(**cached)

    # Prepare portfolio/market context for the agent prompt
    context = await self._prepare_context(query)

    # Execute agent with query
    try:
        result = await self.agent.ainvoke({
            "input": query.question,
            "context": json.dumps(context),
            "mode": mode.value
        })

        # Parse and structure the raw agent output
        response = await self._structure_response(query.question, result, context)

        # Cache everything except real-time research (same key as the read path)
        if self.cache and mode != ResearchMode.REAL_TIME:
            await self.cache.set(
                cache_key,
                response.dict(),
                ttl=1800  # 30 minutes
            )

        return response

    except Exception as e:
        # Top-level boundary: degrade to an explicit error response so
        # callers always receive a ResearchResponse rather than an exception.
        logger.error(f"Research error: {e}")
        return ResearchResponse(
            query=query.question,
            answer=f"Unable to complete research: {str(e)}",
            confidence=0.0,
            data_points=[],
            recommendations=[],
            risks=["Research process encountered an error"],
            sources=[],
            timestamp=datetime.now(),
            follow_up_questions=[]
        )
|
||||
|
||||
async def screen_stocks(self,
                        natural_language_query: str,
                        criteria: Optional[ScreeningCriteria] = None) -> List[Dict[str, Any]]:
    """
    Screen stocks based on natural language query and criteria.

    Args:
        natural_language_query: Natural language screening request
        criteria: Optional structured screening criteria

    Returns:
        List of stocks matching criteria with analysis
    """
    # Use Perplexity for natural language screening.
    # NOTE(review): criteria.__dict__ assumes ScreeningCriteria is a plain
    # object/dataclass whose attribute names map 1:1 to the connector's
    # filter names — confirm against PerplexityFinanceConnector.screen_stocks.
    screening_result = await self.perplexity.screen_stocks(
        natural_language_query,
        max_results=20,
        filters=criteria.__dict__ if criteria else None
    )

    # Enhance the raw screening hits with our own analysis
    enhanced_results = []
    for stock in screening_result.stocks[:10]:  # Top 10
        ticker = stock.get('ticker')
        if ticker:
            # Get additional metrics. Awaits run sequentially, one ticker at
            # a time — could be parallelized with asyncio.gather if slow.
            analysis = await self.perplexity.analyze_stock(
                ticker,
                AnalysisType.VALUATION,
                ResearchDepth.QUICK
            )

            # Get AI signal; a missing signal falls back to HOLD/0 below
            signal = await self.signal_processor.process_signal(ticker)

            enhanced_results.append({
                "ticker": ticker,
                "company": stock.get('company_name', ''),
                "current_price": stock.get('price', 0),
                "market_cap": stock.get('market_cap', 0),
                "pe_ratio": analysis.pe_ratio,
                "fair_value": analysis.fair_value,
                "upside_potential": analysis.upside_potential,
                "ai_signal": signal.signal if signal else "HOLD",
                "ai_confidence": signal.confidence if signal else 0,
                "match_reason": stock.get('match_reason', ''),
                "risk_level": self._calculate_risk_level(analysis)
            })

    return enhanced_results
|
||||
|
||||
async def answer_question(self, question: str) -> str:
    """
    Simple interface to answer investment questions.

    Args:
        question: Natural language question

    Returns:
        Text answer
    """
    # Wrap the plain question in a standard-depth query with portfolio
    # context, run comprehensive research, and hand back only the text.
    wrapped = ResearchQuery(
        question=question,
        depth="standard",
        include_portfolio=True,
    )
    result = await self.research(wrapped, mode=ResearchMode.COMPREHENSIVE)
    return result.answer
|
||||
|
||||
async def find_opportunities(self,
                             investment_amount: float,
                             risk_tolerance: str = "medium",
                             time_horizon: str = "medium") -> Dict[str, Any]:
    """
    Find investment opportunities based on parameters.

    Args:
        investment_amount: Amount to invest
        risk_tolerance: low, medium, high
        time_horizon: short, medium, long

    Returns:
        Investment opportunities with allocation suggestions
    """
    # Build a natural-language research prompt from the parameters
    query = f"""
Find the best investment opportunities for:
- Investment amount: ${investment_amount:,.0f}
- Risk tolerance: {risk_tolerance}
- Time horizon: {time_horizon}

Consider:
1. Undervalued stocks with strong fundamentals
2. Growth stocks with momentum
3. Dividend stocks for income
4. Sector diversification
5. Current market conditions

Provide specific stock recommendations with allocation percentages.
"""

    # Structured context travels alongside the prompt so tools can use it
    research_query = ResearchQuery(
        question=query,
        depth="deep",
        context={
            "investment_amount": investment_amount,
            "risk_tolerance": risk_tolerance,
            "time_horizon": time_horizon
        }
    )

    response = await self.research(research_query, mode=ResearchMode.COMPREHENSIVE)

    # Structure opportunities: combine the research result with local
    # allocation / return-estimate / execution-plan helpers.
    opportunities = {
        "investment_amount": investment_amount,
        "risk_profile": risk_tolerance,
        "time_horizon": time_horizon,
        "market_conditions": await self._get_market_conditions(),
        "recommendations": response.recommendations,
        "allocation_strategy": self._create_allocation_strategy(
            response.recommendations,
            investment_amount,
            risk_tolerance
        ),
        "expected_returns": self._estimate_returns(
            response.recommendations,
            time_horizon
        ),
        "key_risks": response.risks,
        "execution_plan": self._create_execution_plan(response.recommendations)
    }

    return opportunities
|
||||
|
||||
# Tool implementation methods
|
||||
async def _tool_analyze_fundamental(self, ticker: str) -> str:
    """Tool: fundamental analysis summary for one ticker.

    Returns a plain-text block the LLM agent can quote directly.
    """
    analysis = await self.perplexity.analyze_stock(
        ticker,
        AnalysisType.FUNDAMENTAL,
        ResearchDepth.STANDARD
    )

    # NOTE(review): assumes key_risks is always a list (an empty list joins
    # to ""); None would raise here — confirm the connector's contract.
    return f"""
Fundamental Analysis for {ticker}:
- Current Price: ${analysis.current_price}
- Fair Value: ${analysis.fair_value}
- Upside Potential: {analysis.upside_potential}%
- P/E Ratio: {analysis.pe_ratio}
- Rating: {analysis.rating}
- Confidence: {analysis.confidence_score}%

Bull Case: {analysis.bull_case}
Bear Case: {analysis.bear_case}
Key Risks: {', '.join(analysis.key_risks[:3])}
"""
|
||||
|
||||
async def _tool_screen_undervalued(self, criteria_json: str) -> str:
    """Tool: Screen for undervalued stocks.

    Args:
        criteria_json: Optional JSON string of extra screening criteria
            supplied by the agent; ignored when empty or unparseable.

    Returns:
        Human-readable summary of the top matches.
    """
    # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    # Only decode failures of agent-supplied input should fall back.
    try:
        criteria = json.loads(criteria_json) if criteria_json else {}
    except (json.JSONDecodeError, TypeError):
        criteria = {}

    query = "Find undervalued stocks with strong fundamentals"
    if criteria:
        query += f" with criteria: {criteria}"

    result = await self.perplexity.screen_stocks(query, max_results=10)

    # Summarize only the top five hits to keep the tool observation small
    stocks_summary = []
    for stock in result.stocks[:5]:
        stocks_summary.append(
            f"- {stock.get('ticker')}: "
            f"${stock.get('price', 'N/A')}, "
            f"P/E: {stock.get('pe_ratio', 'N/A')}"
        )

    return f"""
Undervalued Stocks Found:
{chr(10).join(stocks_summary)}

Screening Criteria: {query}
Total Results: {result.total_results}
"""
|
||||
|
||||
async def _tool_compare_stocks(self, tickers: str) -> str:
    """Tool: Compare multiple stocks"""
    symbols = [part.strip() for part in tickers.split(',')]

    # Single pass: fetch a quick valuation snapshot per symbol (capped at
    # three for brevity) and format its comparison line immediately.
    lines = []
    for symbol in symbols[:3]:
        snapshot = await self.perplexity.analyze_stock(
            symbol,
            AnalysisType.VALUATION,
            ResearchDepth.QUICK
        )
        lines.append(
            f"{symbol}: "
            f"Price ${snapshot.current_price}, "
            f"Fair Value ${snapshot.fair_value}, "
            f"P/E {snapshot.pe_ratio}, "
            f"Rating: {snapshot.rating}"
        )

    return f"""
Stock Comparison:
{chr(10).join(lines)}
"""
|
||||
|
||||
async def _tool_analyze_sector(self, sector: str) -> str:
    """Tool: Analyze sector performance via Perplexity market sentiment."""
    sentiment = await self.perplexity.get_market_sentiment(sector)

    # Truncated to keep the tool observation small in the agent context
    return f"""
Sector Analysis for {sector}:
{sentiment['analysis'][:500]}
"""
|
||||
|
||||
async def _tool_get_sentiment(self, target: str) -> str:
    """Tool: Get market sentiment"""
    # 'overall' means no sector filter; anything else is a sector name.
    if target == 'overall':
        sector = None
    else:
        sector = target

    sentiment = await self.perplexity.get_market_sentiment(sector)

    return f"""
Market Sentiment ({target}):
{sentiment['analysis'][:500]}
"""
|
||||
|
||||
async def _tool_analyze_portfolio_gaps(self, command: str) -> str:
    """Tool: Identify sector gaps in the current portfolio.

    Args:
        command: Trigger string from the agent (expected: 'analyze');
            its value is not inspected.

    Returns:
        Human-readable summary of held sectors and missing ones, or an
        unavailability message when no database is configured.
    """
    if not self.db:
        return "Portfolio analysis unavailable - no database connection"

    # Get current positions
    positions = await self.db.get_active_positions()

    # Aggregate market value held per sector
    sectors = {}
    for pos in positions:
        sector = await self._get_stock_sector(pos.ticker)
        sectors[sector] = sectors.get(sector, 0) + pos.market_value

    # One data-driven loop instead of three copy-pasted if-blocks; the
    # messages are identical to the originals ("No technology exposure", ...).
    gaps = [
        f"No {required.lower()} exposure"
        for required in ('Technology', 'Healthcare', 'Consumer')
        if required not in sectors
    ]

    return f"""
Portfolio Analysis:
Current Sectors: {', '.join(sectors.keys())}
Identified Gaps: {', '.join(gaps) if gaps else 'Well-diversified'}
Recommendation: Consider adding exposure to missing sectors
"""
|
||||
|
||||
async def _tool_find_growth(self, criteria_json: str) -> str:
    """Tool: Find growth stocks.

    Args:
        criteria_json: Optional JSON string of extra criteria. The tool
            description advertises this input, but it was previously
            accepted and silently ignored; it is now appended to the
            screening query, mirroring the undervalued-stock tool.
    """
    # Parse agent-supplied input defensively; narrow except only.
    try:
        criteria = json.loads(criteria_json) if criteria_json else {}
    except (json.JSONDecodeError, TypeError):
        criteria = {}

    query = "Find high-growth stocks with strong revenue and earnings growth"
    if criteria:
        query += f" with criteria: {criteria}"

    result = await self.perplexity.screen_stocks(query, max_results=10)

    stocks = []
    for stock in result.stocks[:5]:
        stocks.append(f"- {stock.get('ticker')}: {stock.get('company_name', 'N/A')}")

    return f"""
High-Growth Stocks:
{chr(10).join(stocks)}
"""
|
||||
|
||||
async def _tool_analyze_risk_reward(self, ticker: str) -> str:
    """Tool: Analyze the risk-reward profile of one ticker."""
    analysis = await self.perplexity.analyze_stock(
        ticker,
        AnalysisType.FUNDAMENTAL,
        ResearchDepth.STANDARD
    )

    risk_level = self._calculate_risk_level(analysis)
    # Fall back to 0 when the connector returns no upside estimate
    reward_potential = analysis.upside_potential or 0

    # NOTE(review): the "/10" below is an arbitrary scaling constant, and the
    # "Risk/Reward Ratio" label has no actual risk denominator — consider
    # deriving it from risk_level instead.
    return f"""
Risk-Reward Analysis for {ticker}:
- Risk Level: {risk_level}
- Reward Potential: {reward_potential}%
- Risk/Reward Ratio: {abs(reward_potential/10):.2f}
- Key Risks: {', '.join(analysis.key_risks[:3])}
- Catalysts: {', '.join(analysis.catalysts[:3])}
"""
|
||||
|
||||
async def _tool_get_earnings(self, days_ahead: str) -> str:
|
||||
"""Tool: Get earnings calendar"""
|
||||
try:
|
||||
days = int(days_ahead)
|
||||
except:
|
||||
days = 7
|
||||
|
||||
# This would normally query earnings calendar API
|
||||
return f"""
|
||||
Upcoming Earnings (Next {days} days):
|
||||
- Check market calendars for detailed earnings dates
|
||||
- Major companies reporting soon
|
||||
"""
|
||||
|
||||
async def _tool_analyze_insider(self, ticker: str) -> str:
    """Tool: Summarize the last 90 days of insider trading for a ticker."""
    insider_data = await self.perplexity.analyze_insider_activity(ticker, days_back=90)

    # Truncated to keep the tool observation small in the agent context
    return f"""
Insider Trading Analysis for {ticker}:
{insider_data['analysis'][:500]}
"""
|
||||
|
||||
async def _tool_technical_analysis(self, ticker: str) -> str:
    """Tool: Technical analysis summary for one ticker."""
    analysis = await self.perplexity.analyze_stock(
        ticker,
        AnalysisType.TECHNICAL,
        ResearchDepth.STANDARD
    )

    # Detailed text is truncated so the tool observation stays compact
    return f"""
Technical Analysis for {ticker}:
- Current Price: ${analysis.current_price}
- Rating: {analysis.rating}
- Time Horizon: {analysis.time_horizon}

{analysis.detailed_analysis[:300]}
"""
|
||||
|
||||
async def _tool_find_dividends(self, min_yield: str) -> str:
    """Tool: Find dividend stocks.

    Args:
        min_yield: Minimum dividend yield percentage (string from the
            agent); falls back to 3.0 when not a valid number.
    """
    # Fix: narrow the bare `except:` to conversion failures only.
    try:
        yield_threshold = float(min_yield)
    except (ValueError, TypeError):
        yield_threshold = 3.0

    query = f"Find dividend stocks with yield above {yield_threshold}% and stable payouts"
    result = await self.perplexity.screen_stocks(query, max_results=10)

    stocks = []
    for stock in result.stocks[:5]:
        stocks.append(f"- {stock.get('ticker')}: {stock.get('company_name', 'N/A')}")

    return f"""
Dividend Stocks (>{yield_threshold}% yield):
{chr(10).join(stocks)}
"""
|
||||
|
||||
async def _tool_congressional_trades(self, days_back: str) -> str:
    """Tool: Analyze congressional trading activity.

    Args:
        days_back: Look-back window in days (string from the agent);
            falls back to 30 when not a valid integer.
    """
    # Fix: narrow the bare `except:` to conversion failures only.
    try:
        days = int(days_back)
    except (ValueError, TypeError):
        days = 30

    trades = await self.data_aggregator.fetch_congressional_trades(
        tickers=[],  # empty list = all tickers
        days_back=days
    )

    # Summarize only the five most recent trades for the agent
    summary = []
    for trade in trades[:5]:
        summary.append(
            f"- {trade['politician']}: "
            f"{trade['action']} {trade['ticker']} "
            f"(${trade.get('amount', 'N/A')})"
        )

    return f"""
Recent Congressional Trades ({days} days):
{chr(10).join(summary) if summary else 'No significant trades'}
"""
|
||||
|
||||
async def _tool_optimize_portfolio(self, risk_tolerance: str) -> str:
    """Tool: Portfolio optimization suggestions.

    Args:
        risk_tolerance: 'low', 'medium', or 'high' (the tool description
            documents all three).

    Returns:
        Human-readable suggestion list for the requested risk profile,
        or an unavailability message when no database is configured.
    """
    if not self.db:
        return "Portfolio optimization unavailable"

    positions = await self.db.get_active_positions()

    suggestions = []
    if risk_tolerance == "low":
        suggestions.append("Increase allocation to dividend stocks and bonds")
        suggestions.append("Reduce exposure to high-volatility growth stocks")
    elif risk_tolerance == "high":
        suggestions.append("Consider adding more growth stocks")
        suggestions.append("Look at emerging markets exposure")
    else:
        # Fix: the documented default input 'medium' previously produced an
        # empty suggestion list and a blank "Suggestions:" section.
        suggestions.append("Maintain a balanced mix of growth and income positions")
        suggestions.append("Rebalance periodically to keep sector weights on target")

    return f"""
Portfolio Optimization ({risk_tolerance} risk):
Current Positions: {len(positions)}
Suggestions:
{chr(10).join(f'- {s}' for s in suggestions)}
"""
|
||||
|
||||
async def _tool_macro_analysis(self, timeframe: str) -> str:
    """Tool: Macroeconomic analysis.

    Args:
        timeframe: Currently unused; the agent passes 'current'.
    """
    # NOTE(review): `timeframe` is accepted but never used; overall market
    # sentiment (no sector filter) stands in for a macro view here.
    analysis = await self.perplexity.get_market_sentiment()

    return f"""
Macroeconomic Analysis:
{analysis['analysis'][:500]}
"""
|
||||
|
||||
# Helper methods
|
||||
async def _prepare_context(self, query: ResearchQuery) -> Dict[str, Any]:
    """Assemble the context dict passed to the agent prompt.

    Combines timestamp/depth, optional portfolio positions, a truncated
    market-sentiment snippet, and any caller-supplied context.
    """
    context = {
        "timestamp": datetime.now().isoformat(),
        "depth": query.depth
    }

    # Add portfolio context if requested (and a database is available)
    if query.include_portfolio and self.db:
        positions = await self.db.get_active_positions()
        context["portfolio"] = [
            {"ticker": p.ticker, "shares": p.quantity, "value": p.market_value}
            for p in positions
        ]

    # Add market context; truncated so the prompt does not bloat
    market_sentiment = await self.perplexity.get_market_sentiment()
    context["market_conditions"] = market_sentiment['analysis'][:200]

    # Caller-supplied context wins on key collisions (dict.update semantics)
    if query.context:
        context.update(query.context)

    return context
|
||||
|
||||
async def _structure_response(self,
                              question: str,
                              agent_result: Dict,
                              context: Dict) -> ResearchResponse:
    """Structure raw agent output into a ResearchResponse object.

    Args:
        question: Original user question.
        agent_result: Raw AgentExecutor result; only its 'output' key is read.
        context: Prepared context (currently unused in this method).
    """

    # Extract the final text answer produced by the agent
    answer = agent_result.get('output', '')

    # Generate follow-up questions from keyword heuristics
    follow_ups = self._generate_follow_up_questions(question, answer)

    # Extract recommendations and risks via keyword heuristics
    recommendations = self._extract_recommendations(answer)
    risks = self._extract_risks(answer)

    return ResearchResponse(
        query=question,
        answer=answer,
        # Hard-coded placeholder; would calculate based on data quality
        confidence=0.85,
        data_points=[],
        recommendations=recommendations,
        risks=risks,
        sources=["Perplexity AI", "Market Data", "Portfolio Analysis"],
        timestamp=datetime.now(),
        follow_up_questions=follow_ups
    )
|
||||
|
||||
def _calculate_risk_level(self, analysis) -> str:
|
||||
"""Calculate risk level from analysis"""
|
||||
if not analysis.pe_ratio:
|
||||
return "Unknown"
|
||||
|
||||
if analysis.pe_ratio > 30:
|
||||
return "High"
|
||||
elif analysis.pe_ratio > 20:
|
||||
return "Medium"
|
||||
else:
|
||||
return "Low"
|
||||
|
||||
async def _get_stock_sector(self, ticker: str) -> str:
|
||||
"""Get sector for a stock"""
|
||||
# This would query a sector database/API
|
||||
# Simplified for example
|
||||
tech_stocks = ['AAPL', 'MSFT', 'GOOGL', 'NVDA', 'META']
|
||||
if ticker in tech_stocks:
|
||||
return "Technology"
|
||||
return "Other"
|
||||
|
||||
async def _get_market_conditions(self) -> Dict[str, Any]:
    """Summarize current market conditions from Perplexity sentiment."""
    sentiment = await self.perplexity.get_market_sentiment()

    # NOTE(review): the substring test classifies as "Bearish" whenever the
    # word "bull" is absent — neutral text is reported bearish. Crude.
    return {
        "sentiment": "Bullish" if "bull" in sentiment['analysis'].lower() else "Bearish",
        "volatility": "Moderate",  # Would calculate from VIX
        "trend": "Upward",  # Would determine from market data
        "key_factors": sentiment['analysis'][:200]
    }
|
||||
|
||||
def _create_allocation_strategy(self,
|
||||
recommendations: List[Dict],
|
||||
amount: float,
|
||||
risk_tolerance: str) -> Dict[str, float]:
|
||||
"""Create allocation strategy"""
|
||||
strategy = {}
|
||||
|
||||
# Simple allocation based on risk tolerance
|
||||
if risk_tolerance == "low":
|
||||
# Conservative allocation
|
||||
allocation_pcts = [0.3, 0.25, 0.20, 0.15, 0.10]
|
||||
elif risk_tolerance == "high":
|
||||
# Aggressive allocation
|
||||
allocation_pcts = [0.35, 0.30, 0.20, 0.15]
|
||||
else:
|
||||
# Balanced allocation
|
||||
allocation_pcts = [0.25, 0.25, 0.20, 0.15, 0.15]
|
||||
|
||||
for i, rec in enumerate(recommendations[:len(allocation_pcts)]):
|
||||
if 'ticker' in rec:
|
||||
strategy[rec['ticker']] = amount * allocation_pcts[i]
|
||||
|
||||
return strategy
|
||||
|
||||
def _estimate_returns(self,
|
||||
recommendations: List[Dict],
|
||||
time_horizon: str) -> Dict[str, float]:
|
||||
"""Estimate potential returns"""
|
||||
# Simple estimation based on time horizon
|
||||
if time_horizon == "short":
|
||||
return {"expected": 5.0, "best_case": 15.0, "worst_case": -10.0}
|
||||
elif time_horizon == "long":
|
||||
return {"expected": 12.0, "best_case": 25.0, "worst_case": -5.0}
|
||||
else:
|
||||
return {"expected": 8.0, "best_case": 20.0, "worst_case": -8.0}
|
||||
|
||||
def _create_execution_plan(self, recommendations: List[Dict]) -> List[str]:
|
||||
"""Create execution plan for recommendations"""
|
||||
plan = []
|
||||
|
||||
for i, rec in enumerate(recommendations[:5], 1):
|
||||
if 'ticker' in rec:
|
||||
plan.append(
|
||||
f"{i}. Research {rec['ticker']} further"
|
||||
)
|
||||
plan.append(
|
||||
f" - Set limit order at recommended price"
|
||||
)
|
||||
plan.append(
|
||||
f" - Monitor for entry point"
|
||||
)
|
||||
|
||||
return plan
|
||||
|
||||
def _extract_recommendations(self, text: str) -> List[Dict[str, Any]]:
|
||||
"""Extract recommendations from text"""
|
||||
# This would use NLP to extract structured recommendations
|
||||
# Simplified for example
|
||||
recommendations = []
|
||||
|
||||
if "buy" in text.lower():
|
||||
recommendations.append({
|
||||
"action": "BUY",
|
||||
"confidence": 0.8,
|
||||
"reasoning": "Extracted from analysis"
|
||||
})
|
||||
|
||||
return recommendations
|
||||
|
||||
def _extract_risks(self, text: str) -> List[str]:
|
||||
"""Extract risks from text"""
|
||||
risks = []
|
||||
|
||||
risk_keywords = ['risk', 'concern', 'threat', 'weakness']
|
||||
for keyword in risk_keywords:
|
||||
if keyword in text.lower():
|
||||
risks.append(f"Potential {keyword} identified in analysis")
|
||||
|
||||
return risks[:5]
|
||||
|
||||
def _generate_follow_up_questions(self, question: str, answer: str) -> List[str]:
|
||||
"""Generate relevant follow-up questions"""
|
||||
follow_ups = []
|
||||
|
||||
if "undervalued" in question.lower():
|
||||
follow_ups.append("What are the key risks for these undervalued stocks?")
|
||||
follow_ups.append("How do these compare to the S&P 500 valuation?")
|
||||
|
||||
if "invest" in question.lower():
|
||||
follow_ups.append("What is the optimal position size for my portfolio?")
|
||||
follow_ups.append("When would be the best entry point?")
|
||||
|
||||
if "sector" in answer.lower():
|
||||
follow_ups.append("Which sectors are currently outperforming?")
|
||||
|
||||
return follow_ups[:3]
|
||||
|
||||
def _setup_agent(self) -> AgentExecutor:
    """Build the LangChain OpenAI-functions agent and its executor.

    Wires the system prompt (with context/mode slots), conversation
    memory, and the tool list into an LCEL pipeline:
    input mapping -> prompt -> function-bound LLM -> output parser.
    """

    # Create prompt; {context} and {mode} are filled per-invocation,
    # chat history and scratchpad are injected as message placeholders.
    prompt = ChatPromptTemplate.from_messages([
        ("system", """You are an expert investment research analyst with access to real-time market data and analysis tools.

Your goal is to provide comprehensive, actionable investment research based on:
1. Current market conditions
2. Fundamental and technical analysis
3. Risk assessment
4. Portfolio considerations

Be specific with numbers, percentages, and tickers. Always cite your data sources.
Consider the user's risk tolerance and investment timeline.

Context: {context}
Mode: {mode}"""),
        MessagesPlaceholder(variable_name="chat_history"),
        ("user", "{input}"),
        MessagesPlaceholder(variable_name="agent_scratchpad"),
    ])

    # Create agent (LCEL pipeline). The leading dict maps the invocation
    # payload onto the prompt variables; chat_history is pulled live from
    # the shared memory object on every call.
    # NOTE(review): LangChain's `Tool` does not ordinarily define
    # `as_tool()`; OpenAI function binding usually goes through
    # `convert_to_openai_function(t)` — confirm against the installed
    # langchain version before relying on this line.
    agent = (
        {
            "input": lambda x: x["input"],
            "context": lambda x: x.get("context", ""),
            "mode": lambda x: x.get("mode", "comprehensive"),
            "chat_history": lambda x: self.memory.chat_memory.messages,
            "agent_scratchpad": lambda x: format_to_openai_function_messages(
                x["intermediate_steps"]
            ),
        }
        | prompt
        | self.llm.bind(functions=[t.as_tool() for t in self.tools])
        | OpenAIFunctionsAgentOutputParser()
    )

    # Create executor; bounded iterations and parsing-error tolerance keep
    # a confused model from looping or crashing the run.
    agent_executor = AgentExecutor(
        agent=agent,
        tools=self.tools,
        verbose=True,
        return_intermediate_steps=True,
        max_iterations=10,
        handle_parsing_errors=True
    )

    return agent_executor
|
||||
|
|
@ -0,0 +1,458 @@
|
|||
#!/usr/bin/env python
|
||||
"""
|
||||
Interactive Research CLI - Conversational interface for investment research.
|
||||
Allows natural language queries about stocks, markets, and investment opportunities.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
import json
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.markdown import Markdown
|
||||
from rich.prompt import Prompt, Confirm
|
||||
from rich.progress import Progress, SpinnerColumn, TextColumn
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Add parent directory to path
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from research.ai_research_agent import (
|
||||
AIResearchAgent,
|
||||
ResearchQuery,
|
||||
ResearchMode,
|
||||
ScreeningCriteria
|
||||
)
|
||||
from connectors.perplexity_finance import PerplexityFinanceConnector
|
||||
from core.cache import RedisCache
|
||||
from core.database import DatabaseManager
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Initialize Rich console
|
||||
console = Console()
|
||||
|
||||
|
||||
class ResearchCLI:
|
||||
"""Interactive CLI for AI-powered investment research"""
|
||||
|
||||
def __init__(self):
    """Initialize the Research CLI"""
    self.console = console  # shared module-level Rich console
    self.agent = None       # AIResearchAgent; set in initialize()
    self.cache = None       # RedisCache; stays None if Redis is unavailable
    self.db = None          # DatabaseManager; stays None if DB is unavailable
    self.history = []       # list of {"timestamp", "question"} dicts
|
||||
|
||||
async def initialize(self):
    """Initialize connections and agents.

    Redis and the database are optional — failures there print a warning
    and the CLI continues degraded. The Perplexity connector and the AI
    agent are constructed unconditionally.
    """
    self.console.print("\n[bold cyan]🤖 Initializing AI Research System...[/bold cyan]")

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=self.console
    ) as progress:
        # Initialize components; total=4 matches the four advance=1 updates
        task = progress.add_task("Setting up connections...", total=4)

        # Redis cache (optional)
        try:
            self.cache = RedisCache(
                host=os.getenv('REDIS_HOST', 'localhost'),
                port=int(os.getenv('REDIS_PORT', 6379))
            )
            progress.update(task, advance=1, description="Redis connected")
        except Exception as e:
            self.console.print(f"[yellow]Warning: Redis not available ({e})[/yellow]")
            progress.update(task, advance=1)

        # Database (optional)
        # NOTE(review): DATABASE_URL may be unset (None) — confirm
        # DatabaseManager tolerates that; otherwise it surfaces as the
        # warning below.
        try:
            self.db = DatabaseManager(os.getenv('DATABASE_URL'))
            await self.db.init_database()
            progress.update(task, advance=1, description="Database connected")
        except Exception as e:
            self.console.print(f"[yellow]Warning: Database not available ({e})[/yellow]")
            progress.update(task, advance=1)

        # Perplexity connector (reuses the cache when available)
        perplexity = PerplexityFinanceConnector(
            api_key=os.getenv('PERPLEXITY_API_KEY'),
            cache=self.cache
        )
        progress.update(task, advance=1, description="Perplexity connected")

        # AI Research Agent wired to all of the above
        self.agent = AIResearchAgent(
            openai_api_key=os.getenv('OPENAI_API_KEY'),
            perplexity_connector=perplexity,
            db_manager=self.db,
            cache=self.cache
        )
        progress.update(task, advance=1, description="AI Agent ready")

    self.console.print("[bold green]✅ System initialized successfully![/bold green]\n")
|
||||
|
||||
def display_welcome(self):
    """Display welcome message"""
    # Markdown body rendered inside a Rich panel
    welcome_text = """
# 🎯 AI Investment Research Assistant

I can help you with:
- Finding undervalued stocks
- Analyzing specific companies
- Screening for investment opportunities
- Portfolio optimization suggestions
- Market sentiment analysis
- Sector and industry research
- Risk assessment
- And much more!

Just ask me any investment question in natural language.
"""

    panel = Panel(
        Markdown(welcome_text),
        title="Welcome",
        border_style="cyan"
    )
    self.console.print(panel)
|
||||
|
||||
def display_examples(self):
    """Display example queries"""
    # Rendered as Markdown directly on the console (no panel)
    examples = """
### 📝 Example Questions:

**Stock Analysis:**
- "What's your analysis of NVDA stock?"
- "Is Apple undervalued at current prices?"
- "Compare Microsoft vs Google as investments"

**Screening & Discovery:**
- "Find me undervalued tech stocks with P/E under 20"
- "What are the best dividend stocks yielding over 4%?"
- "Show me high-growth stocks in healthcare"

**Portfolio & Strategy:**
- "I have $10,000 to invest, what should I buy?"
- "How can I diversify my tech-heavy portfolio?"
- "What sectors look attractive for 2024?"

**Market Analysis:**
- "What's the current market sentiment?"
- "Are we in a bull or bear market?"
- "What are the major risks facing the market?"

**Advanced Research:**
- "What stocks are Congress members buying?"
- "Find companies with strong insider buying"
- "What are Warren Buffett's recent purchases?"
"""

    self.console.print(Markdown(examples))
|
||||
|
||||
async def process_query(self, question: str) -> None:
    """Run one research query end-to-end and render the result."""

    # Record the question for the session's history view
    self.history.append({"timestamp": datetime.now(), "question": question})

    # Show processing indicator (indeterminate spinner: total=None)
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=self.console
    ) as progress:
        task = progress.add_task("Researching your question...", total=None)

        try:
            # Heuristic: certain keywords imply the user wants a deep dive
            depth = "deep" if any(word in question.lower() for word in
                                  ['analyze', 'research', 'comprehensive', 'detailed']) else "standard"

            # Create research query
            query = ResearchQuery(
                question=question,
                depth=depth,
                include_portfolio=True
            )

            # Execute research
            # NOTE(review): ResearchMode.STANDARD is referenced here, but
            # only QUICK_ANSWER / COMPREHENSIVE / REAL_TIME appear in the
            # agent module — confirm the enum actually defines STANDARD.
            mode = ResearchMode.COMPREHENSIVE if depth == "deep" else ResearchMode.STANDARD
            response = await self.agent.research(query, mode=mode)

            progress.update(task, description="Analysis complete")

        except Exception as e:
            # Any failure is reported and the prompt loop continues
            self.console.print(f"\n[red]Error: {e}[/red]")
            return

    # Display response outside the progress context so output isn't garbled
    self.display_response(response)
|
||||
|
||||
def display_response(self, response):
    """Render a ResearchResponse with Rich panels, colors and bullet lists.

    Args:
        response: ResearchResponse-like object with answer, confidence,
            recommendations, risks, sources, and follow_up_questions.
    """

    # Main answer in a bordered panel
    answer_panel = Panel(
        response.answer,
        title="📊 Analysis",
        border_style="green"
    )
    self.console.print("\n", answer_panel)

    # Confidence score colored by band: >0.7 green, >0.4 yellow, else red
    confidence_color = "green" if response.confidence > 0.7 else "yellow" if response.confidence > 0.4 else "red"
    self.console.print(
        f"\n[{confidence_color}]Confidence: {response.confidence:.0%}[/{confidence_color}]"
    )

    # Recommendations if any
    if response.recommendations:
        self.console.print("\n[bold cyan]💡 Recommendations:[/bold cyan]")
        for rec in response.recommendations:
            self.console.print(f"  • {rec}")

    # Risks
    if response.risks:
        self.console.print("\n[bold yellow]⚠️ Key Risks:[/bold yellow]")
        for risk in response.risks:
            self.console.print(f"  • {risk}")

    # Data sources
    if response.sources:
        self.console.print(f"\n[dim]Sources: {', '.join(response.sources)}[/dim]")

    # Follow-up questions
    if response.follow_up_questions:
        self.console.print("\n[bold]🤔 Follow-up questions you might ask:[/bold]")
        for q in response.follow_up_questions:
            self.console.print(f"  • {q}")
|
||||
|
||||
async def screen_stocks(self):
    """Interactive stock screening.

    Prompts for a natural-language screening query, optionally collects
    numeric/sector filters into a ScreeningCriteria, runs the agent's
    screener under a spinner, and renders the matches as a table.
    """
    self.console.print("\n[bold cyan]📈 Stock Screener[/bold cyan]")

    # Get the natural-language screening query
    query = Prompt.ask("Describe what stocks you're looking for")

    # Optional: collect hard numeric/sector filters on top of the query
    use_filters = Confirm.ask("Add specific filters?", default=False)

    criteria = None
    if use_filters:
        criteria = ScreeningCriteria()

        # NOTE(review): float() raises ValueError on non-numeric input; the
        # error is caught by the generic handler in run(), but the whole
        # screening flow is then abandoned — consider re-prompting instead.
        if Confirm.ask("Set market cap range?"):
            criteria.min_market_cap = float(Prompt.ask("Minimum market cap (in billions)", default="0"))
            criteria.max_market_cap = float(Prompt.ask("Maximum market cap (in billions)", default="10000"))

        if Confirm.ask("Set P/E ratio range?"):
            criteria.min_pe = float(Prompt.ask("Minimum P/E", default="0"))
            criteria.max_pe = float(Prompt.ask("Maximum P/E", default="50"))

        if Confirm.ask("Filter by sector?"):
            sectors = Prompt.ask("Sectors (comma-separated)")
            criteria.sectors = [s.strip() for s in sectors.split(',')]

    # Run screening with a spinner while the agent works
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=self.console
    ) as progress:
        task = progress.add_task("Screening stocks...", total=None)

        try:
            results = await self.agent.screen_stocks(query, criteria)
            progress.update(task, description="Screening complete")
        except Exception as e:
            self.console.print(f"\n[red]Error: {e}[/red]")
            return

    # Display results
    if results:
        table = Table(title=f"Stock Screening Results: {query}")
        table.add_column("Ticker", style="cyan", no_wrap=True)
        table.add_column("Company", style="white")
        table.add_column("Price", justify="right", style="green")
        table.add_column("P/E", justify="right")
        table.add_column("Fair Value", justify="right")
        table.add_column("Upside %", justify="right", style="yellow")
        table.add_column("AI Signal", justify="center")
        table.add_column("Confidence", justify="right")

        for stock in results:
            # Colour the signal cell: BUY=green, SELL=red, otherwise yellow.
            # NOTE(review): falsy-value checks below render 0 / 0.0 as "N/A"
            # — presumably intended, but a genuine 0.0 upside would be hidden.
            signal_color = "green" if stock['ai_signal'] == "BUY" else "red" if stock['ai_signal'] == "SELL" else "yellow"
            table.add_row(
                stock['ticker'],
                stock['company'][:30],
                f"${stock['current_price']:.2f}",
                f"{stock['pe_ratio']:.1f}" if stock['pe_ratio'] else "N/A",
                f"${stock['fair_value']:.2f}" if stock['fair_value'] else "N/A",
                f"{stock['upside_potential']:.1f}%" if stock['upside_potential'] else "N/A",
                f"[{signal_color}]{stock['ai_signal']}[/{signal_color}]",
                f"{stock['ai_confidence']:.0%}"
            )

        self.console.print("\n", table)
    else:
        self.console.print("[yellow]No stocks found matching criteria[/yellow]")
||||
async def find_opportunities(self):
    """Interactively build an AI-generated investment plan.

    Prompts for amount, risk tolerance and time horizon, asks the research
    agent for opportunities, then renders the plan (market conditions,
    allocation, projected returns, execution steps) to the console.
    """

    def _ask_float(prompt_text, default):
        # Re-prompt until the user enters a valid number instead of letting
        # ValueError abort the whole flow (it previously propagated up to
        # run()'s generic handler).
        while True:
            raw = Prompt.ask(prompt_text, default=default)
            try:
                return float(raw)
            except ValueError:
                self.console.print("[red]Please enter a number[/red]")

    self.console.print("\n[bold cyan]💰 Investment Opportunity Finder[/bold cyan]")

    # Get parameters
    amount = _ask_float("Investment amount ($)", "10000")
    risk = Prompt.ask(
        "Risk tolerance",
        choices=["low", "medium", "high"],
        default="medium"
    )
    horizon = Prompt.ask(
        "Time horizon",
        choices=["short", "medium", "long"],
        default="medium"
    )

    # Find opportunities under a spinner while the agent works
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=self.console
    ) as progress:
        task = progress.add_task("Finding opportunities...", total=None)

        try:
            opportunities = await self.agent.find_opportunities(amount, risk, horizon)
            progress.update(task, description="Analysis complete")
        except Exception as e:
            self.console.print(f"\n[red]Error: {e}[/red]")
            return

    # The agent's response dict is produced by an LLM pipeline and may omit
    # sections — use .get() with defaults instead of direct indexing so a
    # missing key degrades gracefully rather than raising KeyError.
    market_conditions = opportunities.get('market_conditions', {})

    panel = Panel(
        f"""
[bold]Investment Profile:[/bold]
• Amount: ${amount:,.0f}
• Risk: {risk}
• Horizon: {horizon}

[bold]Market Conditions:[/bold]
{market_conditions.get('key_factors', 'N/A')}
""",
        title="📊 Investment Plan",
        border_style="cyan"
    )
    self.console.print("\n", panel)

    # Allocation strategy
    allocation = opportunities.get('allocation_strategy')
    if allocation:
        self.console.print("\n[bold]📈 Recommended Allocation:[/bold]")
        for ticker, dollars in allocation.items():
            # Guard against a zero amount to avoid ZeroDivisionError.
            pct = (dollars / amount) * 100 if amount else 0.0
            self.console.print(f"  • {ticker}: ${dollars:,.0f} ({pct:.0f}%)")

    # Expected returns
    returns = opportunities.get('expected_returns')
    if returns:
        self.console.print("\n[bold]💵 Projected Returns:[/bold]")
        self.console.print(f"  • Expected: {returns['expected']:.1f}%")
        self.console.print(f"  • Best case: {returns['best_case']:.1f}%")
        self.console.print(f"  • Worst case: {returns['worst_case']:.1f}%")

    # Execution plan
    plan = opportunities.get('execution_plan')
    if plan:
        self.console.print("\n[bold]📋 Execution Plan:[/bold]")
        for step in plan:
            self.console.print(f"  {step}")
||||
def show_history(self):
    """Print the most recent research queries as a two-column table."""
    if not self.history:
        self.console.print("[yellow]No queries in history[/yellow]")
        return

    history_table = Table(title="Query History")
    history_table.add_column("Time", style="cyan", no_wrap=True)
    history_table.add_column("Question", style="white")

    # Only the ten most recent entries are shown; long questions are
    # truncated with an ellipsis so the table stays readable.
    for record in self.history[-10:]:
        question = record['question']
        if len(question) > 80:
            question = question[:80] + "..."
        history_table.add_row(record['timestamp'].strftime("%H:%M:%S"), question)

    self.console.print("\n", history_table)
||||
async def run(self):
    """Main CLI loop.

    Initializes the agent, shows the welcome banner, then repeatedly
    prompts for an action until the user chooses "exit". Ctrl-C asks for
    confirmation before quitting; any other exception is reported and the
    loop continues.
    """
    await self.initialize()
    self.display_welcome()

    while True:
        try:
            # Visual separator before each prompt cycle
            self.console.print("\n" + "="*50)
            choice = Prompt.ask(
                "\n[bold cyan]What would you like to do?[/bold cyan]",
                choices=["ask", "screen", "opportunities", "examples", "history", "exit"],
                default="ask"
            )

            if choice == "exit":
                self.console.print("\n[bold cyan]Thank you for using AI Research Assistant! 📈[/bold cyan]")
                break

            elif choice == "ask":
                # Free-form research question; blank input is ignored.
                question = Prompt.ask("\n[bold]Your question[/bold]")
                if question.strip():
                    await self.process_query(question)

            elif choice == "screen":
                await self.screen_stocks()

            elif choice == "opportunities":
                await self.find_opportunities()

            elif choice == "examples":
                self.display_examples()

            elif choice == "history":
                self.show_history()

        except KeyboardInterrupt:
            # Ctrl-C: confirm before leaving the loop.
            if Confirm.ask("\nExit?", default=True):
                break
        except Exception as e:
            # Keep the REPL alive on unexpected errors.
            self.console.print(f"\n[red]Error: {e}[/red]")
            self.console.print("[yellow]Please try again[/yellow]")
||||
|
||||
async def main():
    """Main entry point: validate required API keys, then start the CLI."""
    # Check for required environment variables before constructing anything.
    required_vars = ['OPENAI_API_KEY', 'PERPLEXITY_API_KEY']
    missing = [var for var in required_vars if not os.getenv(var)]

    if missing:
        # NOTE(review): `console` and `sys` are used as module-level names
        # here — confirm both are imported/created at the top of this file
        # (not visible in this chunk).
        console.print(f"[red]Missing required environment variables: {', '.join(missing)}[/red]")
        console.print("\n[yellow]Please set them in your .env file:[/yellow]")
        for var in missing:
            console.print(f"  {var}=your_api_key_here")
        sys.exit(1)

    # Run the interactive CLI until the user exits.
    cli = ResearchCLI()
    await cli.run()
||||
|
||||
|
||||
if __name__ == "__main__":
    # Entry point: drive the async CLI under a fresh asyncio event loop.
    asyncio.run(main())
|
|
@ -24,6 +24,8 @@ from .ibkr_connector import IBKRConnector
|
|||
from .data_aggregator import DataAggregator
|
||||
from .signal_processor import SignalProcessor
|
||||
from .alert_engine import AlertEngine, AlertType, AlertPriority
|
||||
from .research.ai_research_agent import AIResearchAgent, ResearchQuery, ResearchMode
|
||||
from .connectors.perplexity_finance import PerplexityFinanceConnector, AnalysisType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -54,6 +56,22 @@ class AutonomousScheduler:
|
|||
self.signal_processor = SignalProcessor(self.ibkr, self.data_agg, config)
|
||||
self.alert_engine = AlertEngine(config)
|
||||
|
||||
# Initialize AI Research components if API keys are available
|
||||
self.research_agent = None
|
||||
self.perplexity = None
|
||||
if config.get('perplexity_api_key') and config.get('openai_api_key'):
|
||||
try:
|
||||
self.perplexity = PerplexityFinanceConnector(
|
||||
api_key=config.get('perplexity_api_key')
|
||||
)
|
||||
self.research_agent = AIResearchAgent(
|
||||
openai_api_key=config.get('openai_api_key'),
|
||||
perplexity_connector=self.perplexity
|
||||
)
|
||||
logger.info("AI Research Agent initialized")
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not initialize AI Research: {e}")
|
||||
|
||||
# Track system state
|
||||
self.is_running = False
|
||||
self.last_portfolio_check = None
|
||||
|
|
@ -171,6 +189,25 @@ class AutonomousScheduler:
|
|||
misfire_grace_time=300
|
||||
)
|
||||
|
||||
# AI Research Analysis - every 2 hours during market hours
|
||||
if self.research_agent:
|
||||
self.scheduler.add_job(
|
||||
self.ai_research_analysis,
|
||||
IntervalTrigger(hours=2),
|
||||
id='ai_research',
|
||||
name='AI Research Analysis',
|
||||
misfire_grace_time=300
|
||||
)
|
||||
|
||||
# Weekly opportunity scan - Sunday evening
|
||||
self.scheduler.add_job(
|
||||
self.weekly_opportunity_scan,
|
||||
CronTrigger(day_of_week=6, hour=18), # Sunday 6 PM
|
||||
id='weekly_scan',
|
||||
name='Weekly Opportunity Scan',
|
||||
misfire_grace_time=600
|
||||
)
|
||||
|
||||
logger.info(f"Scheduled {len(self.scheduler.get_jobs())} jobs")
|
||||
|
||||
async def monitor_portfolio(self):
|
||||
|
|
@ -457,6 +494,118 @@ System Status: ✅ All systems operational
|
|||
logger.info("Shutting down...")
|
||||
await self.stop()
|
||||
|
||||
async def ai_research_analysis(self):
    """Run AI-powered research analysis on portfolio and opportunities.

    Scheduled every 2 hours. For the first few portfolio tickers it
    requests a fundamental analysis from Perplexity and raises a
    high-priority alert on a high-confidence BUY or SELL signal, then asks
    the research agent for general market insights.
    """
    # Skip silently when the system is stopped or research was never
    # configured (research_agent and perplexity are set together in
    # __init__ only when both API keys are present).
    if not self.is_running or not self.research_agent:
        return

    try:
        logger.info("Running AI research analysis...")

        # Analyze each portfolio position.
        # NOTE(review): assumes self.portfolio_tickers is initialized
        # elsewhere on this class before this job fires — confirm.
        for ticker in self.portfolio_tickers[:3]:  # Limit to avoid rate limits
            # Get deep analysis using Perplexity
            analysis = await self.perplexity.analyze_stock(
                ticker,
                AnalysisType.FUNDAMENTAL,
                depth="standard"
            )

            # Alert only on strong, high-confidence signals (> 80).
            if analysis.rating == "BUY" and analysis.confidence_score > 80:
                await self.alert_engine.send_alert(
                    title=f"🤖 AI Research: Strong Buy - {ticker}",
                    message=f"Fair Value: ${analysis.fair_value:.2f}\n"
                            f"Upside: {analysis.upside_potential:.1f}%\n"
                            f"Confidence: {analysis.confidence_score}%\n\n"
                            f"{analysis.bull_case[:200]}",
                    alert_type=AlertType.AI_SIGNAL,
                    priority=AlertPriority.HIGH,
                    data={'ticker': ticker, 'analysis': analysis.__dict__}
                )
            elif analysis.rating == "SELL" and analysis.confidence_score > 80:
                # NOTE(review): the "Downside" line reuses upside_potential —
                # presumably negative for SELL-rated names; confirm the sign
                # convention in PerplexityFinanceConnector.
                await self.alert_engine.send_alert(
                    title=f"⚠️ AI Research: Consider Selling - {ticker}",
                    message=f"Fair Value: ${analysis.fair_value:.2f}\n"
                            f"Downside: {analysis.upside_potential:.1f}%\n\n"
                            f"Risks: {', '.join(analysis.key_risks[:3])}",
                    alert_type=AlertType.AI_SIGNAL,
                    priority=AlertPriority.HIGH,
                    data={'ticker': ticker, 'analysis': analysis.__dict__}
                )

            await asyncio.sleep(5)  # Rate limiting between Perplexity calls

        # Ask the agent for a general market outlook.
        market_question = "What are the key market risks and opportunities this week?"
        market_answer = await self.research_agent.answer_question(market_question)

        if market_answer:
            # Alert body truncated to 500 chars.
            await self.alert_engine.send_alert(
                title="🧠 AI Market Insights",
                message=market_answer[:500],
                alert_type=AlertType.MARKET_ANALYSIS,
                priority=AlertPriority.MEDIUM
            )

    except Exception as e:
        # Best-effort job: log and let the scheduler retry next interval.
        logger.error(f"AI research analysis error: {e}")
||||
async def weekly_opportunity_scan(self):
    """Weekly scan for new investment opportunities using AI.

    Scheduled for Sunday evening. Runs the research agent's opportunity
    finder with a hypothetical $10k budget, screens three canned queries,
    and sends a single summary alert with the combined top picks.
    """
    if not self.is_running or not self.research_agent:
        return

    try:
        logger.info("Running weekly opportunity scan...")

        # Find undervalued opportunities (market-outlook section is reused
        # in the summary below).
        opportunities = await self.research_agent.find_opportunities(
            investment_amount=10000,  # Hypothetical amount for analysis
            risk_tolerance="medium",
            time_horizon="medium"
        )

        # Screen for specific opportunity categories.
        screening_queries = [
            "Find undervalued tech stocks with P/E under 25 and strong growth",
            "What dividend stocks are attractive with yields above 3%?",
            "Find small-cap growth stocks with revenue growth above 20%"
        ]

        all_recommendations = []

        for query in screening_queries:
            try:
                results = await self.research_agent.screen_stocks(query)
                all_recommendations.extend(results[:3])  # Top 3 from each search
                await asyncio.sleep(3)  # Rate limiting
            except Exception as e:
                # One failed screen is logged and skipped so it doesn't
                # abort the whole weekly report.
                logger.error(f"Screening error for '{query}': {e}")

        # Send weekly summary (only when something was found).
        if all_recommendations:
            summary = "📊 **Weekly AI Research Report**\n\n"
            summary += "**Top Opportunities Found:**\n"

            # Stock dicts are LLM-derived, hence .get() with defaults.
            for rec in all_recommendations[:10]:  # Top 10 overall
                summary += f"• **{rec.get('ticker', 'N/A')}**: "
                summary += f"${rec.get('current_price', 0):.2f}, "
                summary += f"Upside: {rec.get('upside_potential', 0):.1f}%\n"

            summary += f"\n**Market Outlook:**\n{opportunities.get('market_conditions', {}).get('key_factors', 'N/A')[:300]}"

            await self.alert_engine.send_alert(
                title="🎯 Weekly Investment Opportunities",
                message=summary,
                alert_type=AlertType.AI_SIGNAL,
                priority=AlertPriority.HIGH,
                data={'recommendations': all_recommendations}
            )

    except Exception as e:
        logger.error(f"Weekly opportunity scan error: {e}")
||||
async def stop(self):
|
||||
"""Stop the autonomous trading system"""
|
||||
logger.info("Stopping Autonomous Trading System...")
|
||||
|
|
@ -496,6 +645,7 @@ async def main():
|
|||
'quiver_api_key': os.getenv('QUIVER_API_KEY'),
|
||||
'alpha_vantage_api_key': os.getenv('ALPHA_VANTAGE_API_KEY'),
|
||||
'openai_api_key': os.getenv('OPENAI_API_KEY'),
|
||||
'perplexity_api_key': os.getenv('PERPLEXITY_API_KEY'),
|
||||
|
||||
# Notification settings
|
||||
'discord_webhook_url': os.getenv('DISCORD_WEBHOOK_URL'),
|
||||
|
|
|
|||
|
|
@ -60,4 +60,23 @@ pytest-mock==3.12.0
|
|||
# === Development Tools ===
|
||||
python-dotenv==1.0.0 # Environment variables
|
||||
black==23.12.1 # Code formatting
|
||||
pylint==3.0.3  # Code linting
|
||||
|
||||
# === AI Research & Perplexity Integration ===
|
||||
langchain==0.1.0 # LangChain for agent orchestration
|
||||
langchain-openai==0.0.5 # OpenAI integration
|
||||
langsmith==0.0.69 # LangChain monitoring
|
||||
pydantic==2.5.0 # Data validation
|
||||
tenacity==8.2.3 # Retry logic
|
||||
rich==13.7.0 # Rich CLI interface
|
||||
|
||||
# === Additional Data Processing ===
|
||||
beautifulsoup4==4.12.2 # Web scraping
|
||||
lxml==4.9.4 # XML/HTML processing
|
||||
httpx==0.25.2 # Async HTTP client
|
||||
aiofiles==23.2.1 # Async file operations
|
||||
|
||||
# === Security Enhancements ===
|
||||
cryptography==41.0.7 # Encryption
|
||||
PyJWT==2.8.0 # JWT tokens
|
||||
# hashlib is part of the Python standard library (used for HMAC signing) — not a pip dependency
|
||||
|
|
@ -0,0 +1,322 @@
|
|||
#!/usr/bin/env python
|
||||
"""
|
||||
Research Demo - Example usage of the AI Research Agent
|
||||
Demonstrates how to use natural language queries to analyze stocks and markets.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from datetime import datetime
|
||||
from dotenv import load_dotenv
|
||||
import json
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Import our research components
|
||||
from autonomous.research.ai_research_agent import (
|
||||
AIResearchAgent,
|
||||
ResearchQuery,
|
||||
ResearchMode,
|
||||
ScreeningCriteria
|
||||
)
|
||||
from autonomous.connectors.perplexity_finance import (
|
||||
PerplexityFinanceConnector,
|
||||
AnalysisType,
|
||||
ResearchDepth
|
||||
)
|
||||
from autonomous.core.cache import RedisCache
|
||||
|
||||
|
||||
async def demo_stock_analysis():
    """Demo 1: run a fundamental analysis of a single ticker (NVDA).

    Shows how to call PerplexityFinanceConnector.analyze_stock directly and
    which fields the returned analysis object exposes.
    """
    print("\n" + "="*60)
    print("DEMO 1: Individual Stock Analysis")
    print("="*60)

    # Initialize Perplexity connector
    perplexity = PerplexityFinanceConnector(
        api_key=os.getenv('PERPLEXITY_API_KEY')
    )

    # Analyze NVDA at standard research depth
    print("\n📊 Analyzing NVIDIA (NVDA)...")
    analysis = await perplexity.analyze_stock(
        "NVDA",
        AnalysisType.FUNDAMENTAL,
        ResearchDepth.STANDARD
    )

    # Bull/bear cases are truncated to 200 chars and only the first three
    # risks are listed to keep the demo output short.
    print(f"""
Current Price: ${analysis.current_price}
Fair Value: ${analysis.fair_value}
Upside Potential: {analysis.upside_potential}%
P/E Ratio: {analysis.pe_ratio}
Rating: {analysis.rating}
Confidence: {analysis.confidence_score}%

Bull Case: {analysis.bull_case[:200]}...
Bear Case: {analysis.bear_case[:200]}...

Key Risks:
""" + '\n    '.join(f"- {risk}" for risk in analysis.key_risks[:3]))
async def demo_natural_language_research():
    """Demo 2: ask the AI research agent a free-form investment question.

    Builds an AIResearchAgent backed by Perplexity, runs a comprehensive
    research pass on one example question, and prints a truncated answer
    with recommendations, risks and a confidence score.
    """
    print("\n" + "="*60)
    print("DEMO 2: Natural Language Investment Research")
    print("="*60)

    # Initialize AI Research Agent (LangChain orchestration + Perplexity data)
    agent = AIResearchAgent(
        openai_api_key=os.getenv('OPENAI_API_KEY'),
        perplexity_connector=PerplexityFinanceConnector(
            api_key=os.getenv('PERPLEXITY_API_KEY')
        )
    )

    # Example questions — only the first is executed to keep the demo quick.
    questions = [
        "What are the most undervalued tech stocks right now?",
        "Should I invest in AI stocks or wait for a pullback?",
        "Compare Microsoft vs Apple as long-term investments",
        "What sectors look most promising for 2024?"
    ]

    # Process first question as example
    question = questions[0]
    print(f"\n❓ Question: {question}")
    print("\n🤖 Researching...")

    query = ResearchQuery(
        question=question,
        depth="standard",
        include_portfolio=False  # demo runs without portfolio context
    )

    response = await agent.research(query, mode=ResearchMode.COMPREHENSIVE)

    # Answer truncated to 500 chars; lists capped at three items each.
    print(f"\n📝 Answer:\n{response.answer[:500]}...")

    if response.recommendations:
        print("\n💡 Top Recommendations:")
        for rec in response.recommendations[:3]:
            print(f"   • {rec}")

    if response.risks:
        print("\n⚠️ Key Risks:")
        for risk in response.risks[:3]:
            print(f"   • {risk}")

    print(f"\n📊 Confidence: {response.confidence:.0%}")
||||
async def demo_stock_screening():
    """Demo 3: screen for stocks with a natural-language query plus filters.

    Calls the Perplexity connector's screener directly (no LangChain agent)
    and prints the top five matches.
    """
    print("\n" + "="*60)
    print("DEMO 3: Stock Screening")
    print("="*60)

    # Initialize Perplexity connector
    perplexity = PerplexityFinanceConnector(
        api_key=os.getenv('PERPLEXITY_API_KEY')
    )

    # Screen for undervalued dividend stocks
    query = "Find undervalued dividend stocks with yields above 3% and stable earnings"
    print(f"\n🔍 Screening: {query}")

    # Hard numeric filters applied on top of the natural-language query.
    filters = {
        "min_dividend_yield": 3.0,
        "max_pe": 20,
        "min_market_cap": 10  # $10B minimum
    }

    result = await perplexity.screen_stocks(query, max_results=10, filters=filters)

    print(f"\n📊 Found {result.total_results} stocks matching criteria:")
    print("\nTop 5 Results:")
    print("-" * 40)

    # Each stock dict is LLM-derived, so .get() defaults guard missing keys.
    for stock in result.stocks[:5]:
        print(f"""
Ticker: {stock.get('ticker', 'N/A')}
Company: {stock.get('company_name', 'N/A')}
Price: ${stock.get('price', 'N/A')}
P/E: {stock.get('pe_ratio', 'N/A')}
Market Cap: ${stock.get('market_cap', 'N/A')}B
""" + "-" * 40)
||||
async def demo_portfolio_opportunities():
    """Demo 4: build an AI investment plan for a hypothetical $50k budget.

    Asks the research agent for opportunities at medium risk / long
    horizon, then prints the suggested allocation and projected returns.
    """
    print("\n" + "="*60)
    print("DEMO 4: Investment Opportunities")
    print("="*60)

    # Initialize AI Research Agent
    agent = AIResearchAgent(
        openai_api_key=os.getenv('OPENAI_API_KEY'),
        perplexity_connector=PerplexityFinanceConnector(
            api_key=os.getenv('PERPLEXITY_API_KEY')
        )
    )

    # Find opportunities for a $50k investment
    print("\n💰 Finding opportunities for $50,000 investment...")
    print("   Risk: Medium | Horizon: Long-term")

    opportunities = await agent.find_opportunities(
        investment_amount=50000,
        risk_tolerance="medium",
        time_horizon="long"
    )

    # The agent's response is LLM-generated and sections may be missing —
    # use .get() with defaults instead of direct indexing so a missing key
    # degrades to "N/A" / skipped section rather than raising KeyError.
    print("\n📊 Investment Plan:")
    market_conditions = opportunities.get('market_conditions', {})
    print(f"   Market Sentiment: {market_conditions.get('sentiment', 'N/A')}")

    allocation = opportunities.get('allocation_strategy')
    if allocation:
        print("\n💼 Recommended Allocation:")
        total = 50000  # same hypothetical budget as requested above
        for ticker, amount in allocation.items():
            pct = (amount / total) * 100
            print(f"   • {ticker}: ${amount:,.0f} ({pct:.0f}%)")

    returns = opportunities.get('expected_returns')
    if returns:
        print("\n📈 Expected Returns:")
        print(f"   • Expected: {returns['expected']:.1f}%")
        print(f"   • Best Case: {returns['best_case']:.1f}%")
        print(f"   • Worst Case: {returns['worst_case']:.1f}%")
||||
async def demo_market_sentiment():
    """Demo 5: fetch and print broad-market and tech-sector sentiment."""
    print("\n" + "=" * 60)
    print("DEMO 5: Market Sentiment Analysis")
    print("=" * 60)

    connector = PerplexityFinanceConnector(
        api_key=os.getenv('PERPLEXITY_API_KEY')
    )

    # Broad market first, then the technology sector; each report shows
    # only a leading excerpt of the returned analysis text.
    print("\n🌍 Analyzing overall market sentiment...")
    broad = await connector.get_market_sentiment()
    print("\nMarket Analysis:")
    print(broad['analysis'][:800])

    print("\n💻 Analyzing technology sector...")
    tech = await connector.get_market_sentiment("technology")
    print("\nTechnology Sector Analysis:")
    print(tech['analysis'][:500])
||||
async def demo_earnings_analysis():
    """Demo 6: pull and print an AI summary of Apple's latest earnings."""
    banner = "=" * 60
    print("\n" + banner)
    print("DEMO 6: Earnings Analysis")
    print(banner)

    connector = PerplexityFinanceConnector(
        api_key=os.getenv('PERPLEXITY_API_KEY')
    )

    print("\n📊 Analyzing Apple (AAPL) earnings...")
    report = await connector.analyze_earnings("AAPL", include_guidance=True)

    # Only the first 800 characters are shown to keep the demo compact.
    print("\nEarnings Analysis:")
    print(report['analysis'][:800])
||||
async def demo_congressional_trades():
    """Demo 7: ask the agent about recent congressional trading activity."""
    print("\n" + "="*60)
    print("DEMO 7: Congressional Trading Activity")
    print("="*60)

    # Initialize AI Research Agent
    agent = AIResearchAgent(
        openai_api_key=os.getenv('OPENAI_API_KEY'),
        perplexity_connector=PerplexityFinanceConnector(
            api_key=os.getenv('PERPLEXITY_API_KEY')
        )
    )

    question = "What stocks have Congress members been buying recently and why might they be interested?"
    print(f"\n❓ Question: {question}")
    print("\n🤖 Researching congressional trades...")

    # answer_question returns a plain string; truncate for display.
    answer = await agent.answer_question(question)
    print(f"\n📝 Answer:\n{answer[:800]}...")
||||
async def main():
    """Run all demos sequentially after validating the required API keys."""
    print("""
╔════════════════════════════════════════════════╗
║     AI-Powered Investment Research Demo        ║
║     Powered by Perplexity Finance & OpenAI     ║
╚════════════════════════════════════════════════╝
""")

    # Check for required API keys before touching the network.
    if not os.getenv('PERPLEXITY_API_KEY'):
        print("❌ Error: PERPLEXITY_API_KEY not set in .env file")
        print("   Get your API key from: https://www.perplexity.ai/settings/api")
        return

    if not os.getenv('OPENAI_API_KEY'):
        print("❌ Error: OPENAI_API_KEY not set in .env file")
        return

    # Run demos in order; a failure in any demo aborts the remainder and is
    # reported with a traceback below.
    try:
        # Individual analysis
        await demo_stock_analysis()

        # Natural language Q&A
        await demo_natural_language_research()

        # Stock screening
        await demo_stock_screening()

        # Portfolio opportunities
        await demo_portfolio_opportunities()

        # Market sentiment
        await demo_market_sentiment()

        # Earnings analysis
        await demo_earnings_analysis()

        # Congressional trades
        await demo_congressional_trades()

        print("\n" + "="*60)
        print("✅ All demos completed successfully!")
        print("="*60)

        print("""
🚀 Next Steps:
1. Run the interactive CLI: python autonomous/research/research_cli.py
2. Integrate with your trading system
3. Customize screening criteria for your strategy
4. Add more data sources as needed
""")

    except Exception as e:
        print(f"\n❌ Error running demos: {e}")
        import traceback
        traceback.print_exc()
||||
if __name__ == "__main__":
    # Run the demo suite under a fresh asyncio event loop.
    asyncio.run(main())
Loading…
Reference in New Issue