Add LoRA-scored news sentiment to agent state, researcher prompts, and a news-parsing module

This commit is contained in:
Shashwat17-vit 2025-12-06 15:14:38 -06:00
parent 40f5b529d2
commit 3f59a80800
7 changed files with 182 additions and 2 deletions

View File

@@ -9,6 +9,10 @@ def create_research_manager(llm, memory):
sentiment_report = state["sentiment_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
# Get LoRA-scored news sentiment
news_net_sentiment_score = state.get("news_net_sentiment_score", 0.0)
news_net_sentiment_label = state.get("news_net_sentiment_label", "Neutral")
investment_debate_state = state["investment_debate_state"]
@@ -33,6 +37,9 @@ Take into account your past mistakes on similar situations. Use these insights t
Here are your past reflections on mistakes:
\"{past_memory_str}\"
Additional Context:
News net sentiment (LoRA-scored): {news_net_sentiment_label} (score: {news_net_sentiment_score:.3f})
Here is the debate:
Debate History:
{history}"""

View File

@@ -14,6 +14,10 @@ def create_bear_researcher(llm, memory):
sentiment_report = state["sentiment_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
# Get LoRA-scored news sentiment
news_net_sentiment_score = state.get("news_net_sentiment_score", 0.0)
news_net_sentiment_label = state.get("news_net_sentiment_label", "Neutral")
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
@@ -37,6 +41,7 @@ Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
News net sentiment (LoRA-scored): {news_net_sentiment_label} (score: {news_net_sentiment_score:.3f})
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bull argument: {current_response}

View File

@@ -14,6 +14,10 @@ def create_bull_researcher(llm, memory):
sentiment_report = state["sentiment_report"]
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
# Get LoRA-scored news sentiment
news_net_sentiment_score = state.get("news_net_sentiment_score", 0.0)
news_net_sentiment_label = state.get("news_net_sentiment_label", "Neutral")
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
past_memories = memory.get_memories(curr_situation, n_matches=2)
@@ -35,6 +39,7 @@ Resources available:
Market research report: {market_research_report}
Social media sentiment report: {sentiment_report}
Latest world affairs news: {news_report}
News net sentiment (LoRA-scored): {news_net_sentiment_label} (score: {news_net_sentiment_score:.3f})
Company fundamentals report: {fundamentals_report}
Conversation history of the debate: {history}
Last bear argument: {current_response}

View File

@@ -59,6 +59,16 @@ class AgentState(MessagesState):
news_report: Annotated[
str, "Report from the News Researcher of current world affairs"
]
# FinLLama News additions
news_items_scored: Annotated[
list, "Per-item news sentiment with confidence, relevance, weight"
]
news_net_sentiment_score: Annotated[
float, "Weighted net sentiment score over fetched news items"
]
news_net_sentiment_label: Annotated[
str, "Label for net sentiment: Positive/Neutral/Negative"
]
fundamentals_report: Annotated[str, "Report from the Fundamentals Researcher"]
# researcher team discussion step

View File

@@ -0,0 +1,148 @@
import re
from typing import List, Dict, Any
def _extract_urls(text: str) -> List[str]:
url_pattern = re.compile(r"https?://[^\s)]+")
return url_pattern.findall(text or "")
def _strip_md(s: str) -> str:
if not s:
return s
# Remove simple markdown bold/italics markers
return re.sub(r"[*_`]+", "", s).strip()
def parse_global_news(raw_text: str) -> List[Dict[str, Any]]:
    """
    Parse global news text produced by get_global_news_openai into a list of items.

    Expected patterns include enumerated bold headings like:
        1. **October 25, 2025: "Headline"**
        - Trading Relevance: ...
        (source links)

    Args:
        raw_text: Raw markdown-ish news text. Empty or non-str input
            yields an empty list.

    Returns:
        A list of dicts with keys: date, headline, relevance, sources, raw.
    """
    if not raw_text or not isinstance(raw_text, str):
        return []

    items: List[Dict[str, Any]] = []

    # Locate each enumerated bold heading; an item's body runs from the
    # end of its heading to the start of the next heading (or end of text).
    headers = list(re.finditer(r"(?m)^\s*\d+\.\s+\*\*(.+?)\*\*\s*$", raw_text))
    if not headers:
        # Fallback: bold lines that begin with a date-like pattern.
        headers = list(
            re.finditer(r"(?m)^\s*\*\*([A-Za-z]+\s+\d{1,2},\s+\d{4}.*)\*\*\s*$", raw_text)
        )

    boundaries = [(m.start(), m.end(), m.group(1)) for m in headers]
    text_len = len(raw_text)

    for i, (_, end, header_text) in enumerate(boundaries):
        next_start = boundaries[i + 1][0] if i + 1 < len(boundaries) else text_len
        block = raw_text[end:next_start].strip()
        header = header_text.strip()

        # Pull the date and the headline out of the heading text.
        date_match = re.search(r"([A-Za-z]+\s+\d{1,2},\s+\d{4})", header)
        quoted_headline = re.search(r"\"([^\"]+)\"", header)
        headline_after_colon = None
        if ":" in header:
            headline_after_colon = header.split(":", 1)[1].strip()
            # Remove surrounding plain or smart quotes if present.
            headline_after_colon = headline_after_colon.strip("\"“”")

        date_str = date_match.group(1) if date_match else None
        # Preference order: quoted headline, then text after the colon,
        # then the whole heading with markdown markers stripped.
        headline = (
            quoted_headline.group(1)
            if quoted_headline
            else (headline_after_colon or _strip_md(header))
        )

        # First "Trading Relevance:" line in the body, if any.
        rel_match = re.search(r"(?i)Trading\s+Relevance:\s*(.+)", block)
        relevance = rel_match.group(1).strip() if rel_match else ""

        # URLs from body and heading, de-duplicated with order preserved.
        sources = _extract_urls(block + " " + header)

        items.append(
            {
                "date": date_str,
                "headline": headline,
                "relevance": relevance,
                "sources": list(dict.fromkeys(sources)),
                "raw": header + "\n" + block,
            }
        )
    return items
def parse_stock_news(raw_text: str) -> List[Dict[str, Any]]:
    """
    Parse company-specific news text from get_stock_news_openai into a list of items.

    Expected patterns include bold enumerated sections like:
        **1. Topic**
        Description ... (url)

    Args:
        raw_text: Raw markdown-ish news text. Empty or non-str input
            yields an empty list.

    Returns:
        A list of dicts with keys: title, summary, sources, raw.
    """
    if not raw_text or not isinstance(raw_text, str):
        return []

    items: List[Dict[str, Any]] = []

    # Headings like **1. Something** on their own line.
    headers = list(re.finditer(r"(?m)^\s*\*\*\s*\d+\.\s*(.+?)\s*\*\*\s*$", raw_text))
    if not headers:
        # Fallback: plain numbered lines without bold markers.
        headers = list(re.finditer(r"(?m)^\s*\d+\.\s+(.+?)\s*$", raw_text))

    if headers:
        boundaries = [(m.start(), m.end(), m.group(1)) for m in headers]
        text_len = len(raw_text)
        for i, (_, end, header_text) in enumerate(boundaries):
            # Item body runs until the next heading (or end of text).
            next_start = boundaries[i + 1][0] if i + 1 < len(boundaries) else text_len
            block = raw_text[end:next_start].strip()
            title = _strip_md(header_text)
            sources = _extract_urls(block + " " + title)
            summary = block.strip()
            items.append(
                {
                    "title": title,
                    "summary": summary,
                    "sources": list(dict.fromkeys(sources)),
                    "raw": f"{title}\n{summary}",
                }
            )
    else:
        # Last resort: treat each paragraph that carries a URL — or is long
        # enough to look like a story (> 120 chars) — as one item.
        paragraphs = [p.strip() for p in re.split(r"\n\s*\n", raw_text) if p.strip()]
        for p in paragraphs:
            urls = _extract_urls(p)
            if urls or len(p) > 120:
                items.append(
                    {
                        "title": p.split("\n", 1)[0][:80],
                        "summary": p,
                        "sources": list(dict.fromkeys(urls)),
                        "raw": p,
                    }
                )
    return items

View File

@@ -76,7 +76,8 @@ class GraphSetup:
if "news" in selected_analysts:
analyst_nodes["news"] = create_news_analyst(
self.sentiment_llm
# self.sentiment_llm
self.quick_thinking_llm
)
delete_nodes["news"] = create_msg_delete()
tool_nodes["news"] = self.tool_nodes["news"]

View File

@@ -103,7 +103,7 @@ class TradingAgentsGraph:
try:
self.sentiment_llm = DAPTLlamaChatModel(
dapt_adapter_path=dapt_path,
max_new_tokens=512,
max_new_tokens=1024, # Increased for longer reports
temperature=0.7,
)
except Exception as e:
@@ -238,6 +238,10 @@ class TradingAgentsGraph:
"market_report": final_state["market_report"],
"sentiment_report": final_state["sentiment_report"],
"news_report": final_state["news_report"],
# Persist FinLLama News fields if present
"news_items_scored": final_state.get("news_items_scored", []),
"news_net_sentiment_score": final_state.get("news_net_sentiment_score"),
"news_net_sentiment_label": final_state.get("news_net_sentiment_label"),
"fundamentals_report": final_state["fundamentals_report"],
"investment_debate_state": {
"bull_history": final_state["investment_debate_state"]["bull_history"],