Fix errors that occurred when using the Google API (GoogleAPIを使用したときに発生したエラー修正)

This commit is contained in:
TakumaNakao 2025-10-28 15:14:14 +00:00
parent 00a8d9978c
commit b52d14f4be
4 changed files with 97 additions and 231 deletions

View File

@ -1,2 +1,3 @@
ALPHA_VANTAGE_API_KEY=alpha_vantage_api_key_placeholder
OPENAI_API_KEY=openai_api_key_placeholder
OPENAI_API_KEY=openai_api_key_placeholder
GOOGLE_API_KEY=google_api_key_placeholder

View File

@ -848,232 +848,80 @@ def run_analysis():
# Stream the analysis
trace = []
for chunk in graph.graph.stream(init_agent_state, **args):
if len(chunk["messages"]) > 0:
# Get the last message from the chunk
# Check for new messages and process them
if chunk.get("messages") and (not trace or len(chunk["messages"]) > len(trace[-1].get("messages", []))):
last_message = chunk["messages"][-1]
# Extract message content and type
if hasattr(last_message, "content"):
content = extract_content_string(last_message.content) # Use the helper function
content = extract_content_string(last_message.content)
msg_type = "Reasoning"
else:
content = str(last_message)
msg_type = "System"
message_buffer.add_message(msg_type, content)
# Add message to buffer
message_buffer.add_message(msg_type, content)
# If it's a tool call, add it to tool calls
if hasattr(last_message, "tool_calls"):
if hasattr(last_message, "tool_calls") and last_message.tool_calls:
for tool_call in last_message.tool_calls:
# Handle both dictionary and object tool calls
if isinstance(tool_call, dict):
message_buffer.add_tool_call(
tool_call["name"], tool_call["args"]
)
message_buffer.add_tool_call(tool_call["name"], tool_call["args"])
else:
message_buffer.add_tool_call(tool_call.name, tool_call.args)
# Update reports and agent status based on chunk content
# Analyst Team Reports
if "market_report" in chunk and chunk["market_report"]:
message_buffer.update_report_section(
"market_report", chunk["market_report"]
)
message_buffer.update_agent_status("Market Analyst", "completed")
# Set next analyst to in_progress
if "social" in selections["analysts"]:
message_buffer.update_agent_status(
"Social Analyst", "in_progress"
)
# Helper to check if a key was updated in the current chunk
def was_updated(key):
if key not in chunk or not chunk[key]:
return False
# Check if the value is new by comparing with the last trace
if not trace or chunk[key] != trace[-1].get(key):
return True
return False
if "sentiment_report" in chunk and chunk["sentiment_report"]:
message_buffer.update_report_section(
"sentiment_report", chunk["sentiment_report"]
)
message_buffer.update_agent_status("Social Analyst", "completed")
# Set next analyst to in_progress
if "news" in selections["analysts"]:
message_buffer.update_agent_status(
"News Analyst", "in_progress"
)
# Process report and status updates from any chunk
if was_updated("market_report"):
message_buffer.update_report_section("market_report", chunk["market_report"])
message_buffer.update_agent_status("Market Analyst", "completed")
if "social" in selections["analysts"]:
message_buffer.update_agent_status("Social Analyst", "in_progress")
if "news_report" in chunk and chunk["news_report"]:
message_buffer.update_report_section(
"news_report", chunk["news_report"]
)
message_buffer.update_agent_status("News Analyst", "completed")
# Set next analyst to in_progress
if "fundamentals" in selections["analysts"]:
message_buffer.update_agent_status(
"Fundamentals Analyst", "in_progress"
)
if was_updated("sentiment_report"):
message_buffer.update_report_section("sentiment_report", chunk["sentiment_report"])
message_buffer.update_agent_status("Social Analyst", "completed")
if "news" in selections["analysts"]:
message_buffer.update_agent_status("News Analyst", "in_progress")
if "fundamentals_report" in chunk and chunk["fundamentals_report"]:
message_buffer.update_report_section(
"fundamentals_report", chunk["fundamentals_report"]
)
message_buffer.update_agent_status(
"Fundamentals Analyst", "completed"
)
# Set all research team members to in_progress
update_research_team_status("in_progress")
if was_updated("news_report"):
message_buffer.update_report_section("news_report", chunk["news_report"])
message_buffer.update_agent_status("News Analyst", "completed")
if "fundamentals" in selections["analysts"]:
message_buffer.update_agent_status("Fundamentals Analyst", "in_progress")
# Research Team - Handle Investment Debate State
if (
"investment_debate_state" in chunk
and chunk["investment_debate_state"]
):
debate_state = chunk["investment_debate_state"]
if was_updated("fundamentals_report"):
message_buffer.update_report_section("fundamentals_report", chunk["fundamentals_report"])
message_buffer.update_agent_status("Fundamentals Analyst", "completed")
update_research_team_status("in_progress")
# Update Bull Researcher status and report
if "bull_history" in debate_state and debate_state["bull_history"]:
# Keep all research team members in progress
update_research_team_status("in_progress")
# Extract latest bull response
bull_responses = debate_state["bull_history"].split("\n")
latest_bull = bull_responses[-1] if bull_responses else ""
if latest_bull:
message_buffer.add_message("Reasoning", latest_bull)
# Update research report with bull's latest analysis
message_buffer.update_report_section(
"investment_plan",
f"### Bull Researcher Analysis\n{latest_bull}",
)
if was_updated("investment_plan"):
message_buffer.update_report_section("investment_plan", chunk["investment_plan"])
update_research_team_status("completed")
message_buffer.update_agent_status("Trader", "in_progress")
# Update Bear Researcher status and report
if "bear_history" in debate_state and debate_state["bear_history"]:
# Keep all research team members in progress
update_research_team_status("in_progress")
# Extract latest bear response
bear_responses = debate_state["bear_history"].split("\n")
latest_bear = bear_responses[-1] if bear_responses else ""
if latest_bear:
message_buffer.add_message("Reasoning", latest_bear)
# Update research report with bear's latest analysis
message_buffer.update_report_section(
"investment_plan",
f"{message_buffer.report_sections['investment_plan']}\n\n### Bear Researcher Analysis\n{latest_bear}",
)
if was_updated("trader_investment_plan"):
message_buffer.update_report_section("trader_investment_plan", chunk["trader_investment_plan"])
message_buffer.update_agent_status("Trader", "completed")
message_buffer.update_agent_status("Risky Analyst", "in_progress")
message_buffer.update_agent_status("Neutral Analyst", "in_progress")
message_buffer.update_agent_status("Safe Analyst", "in_progress")
# Update Research Manager status and final decision
if (
"judge_decision" in debate_state
and debate_state["judge_decision"]
):
# Keep all research team members in progress until final decision
update_research_team_status("in_progress")
message_buffer.add_message(
"Reasoning",
f"Research Manager: {debate_state['judge_decision']}",
)
# Update research report with final decision
message_buffer.update_report_section(
"investment_plan",
f"{message_buffer.report_sections['investment_plan']}\n\n### Research Manager Decision\n{debate_state['judge_decision']}",
)
# Mark all research team members as completed
update_research_team_status("completed")
# Set first risk analyst to in_progress
message_buffer.update_agent_status(
"Risky Analyst", "in_progress"
)
if was_updated("final_trade_decision"):
message_buffer.update_report_section("final_trade_decision", chunk["final_trade_decision"])
message_buffer.update_agent_status("Risky Analyst", "completed")
message_buffer.update_agent_status("Neutral Analyst", "completed")
message_buffer.update_agent_status("Safe Analyst", "completed")
message_buffer.update_agent_status("Portfolio Manager", "completed")
# Trading Team
if (
"trader_investment_plan" in chunk
and chunk["trader_investment_plan"]
):
message_buffer.update_report_section(
"trader_investment_plan", chunk["trader_investment_plan"]
)
# Set first risk analyst to in_progress
message_buffer.update_agent_status("Risky Analyst", "in_progress")
# Risk Management Team - Handle Risk Debate State
if "risk_debate_state" in chunk and chunk["risk_debate_state"]:
risk_state = chunk["risk_debate_state"]
# Update Risky Analyst status and report
if (
"current_risky_response" in risk_state
and risk_state["current_risky_response"]
):
message_buffer.update_agent_status(
"Risky Analyst", "in_progress"
)
message_buffer.add_message(
"Reasoning",
f"Risky Analyst: {risk_state['current_risky_response']}",
)
# Update risk report with risky analyst's latest analysis only
message_buffer.update_report_section(
"final_trade_decision",
f"### Risky Analyst Analysis\n{risk_state['current_risky_response']}",
)
# Update Safe Analyst status and report
if (
"current_safe_response" in risk_state
and risk_state["current_safe_response"]
):
message_buffer.update_agent_status(
"Safe Analyst", "in_progress"
)
message_buffer.add_message(
"Reasoning",
f"Safe Analyst: {risk_state['current_safe_response']}",
)
# Update risk report with safe analyst's latest analysis only
message_buffer.update_report_section(
"final_trade_decision",
f"### Safe Analyst Analysis\n{risk_state['current_safe_response']}",
)
# Update Neutral Analyst status and report
if (
"current_neutral_response" in risk_state
and risk_state["current_neutral_response"]
):
message_buffer.update_agent_status(
"Neutral Analyst", "in_progress"
)
message_buffer.add_message(
"Reasoning",
f"Neutral Analyst: {risk_state['current_neutral_response']}",
)
# Update risk report with neutral analyst's latest analysis only
message_buffer.update_report_section(
"final_trade_decision",
f"### Neutral Analyst Analysis\n{risk_state['current_neutral_response']}",
)
# Update Portfolio Manager status and final decision
if "judge_decision" in risk_state and risk_state["judge_decision"]:
message_buffer.update_agent_status(
"Portfolio Manager", "in_progress"
)
message_buffer.add_message(
"Reasoning",
f"Portfolio Manager: {risk_state['judge_decision']}",
)
# Update risk report with final decision only
message_buffer.update_report_section(
"final_trade_decision",
f"### Portfolio Manager Decision\n{risk_state['judge_decision']}",
)
# Mark risk analysts as completed
message_buffer.update_agent_status("Risky Analyst", "completed")
message_buffer.update_agent_status("Safe Analyst", "completed")
message_buffer.update_agent_status(
"Neutral Analyst", "completed"
)
message_buffer.update_agent_status(
"Portfolio Manager", "completed"
)
# Update the display
update_display(layout)
# Always update the display to reflect any change
update_display(layout)
trace.append(chunk)

View File

@ -1,25 +1,33 @@
import chromadb
from chromadb.config import Settings
from openai import OpenAI
from langchain_google_genai import GoogleGenerativeAIEmbeddings
class FinancialSituationMemory:
def __init__(self, name, config, llm=None):
    """Set up the provider-specific embedding backend and a Chroma collection.

    Args:
        name: Name of the Chroma collection holding past situations.
        config: Project config dict; reads "llm_provider" and "backend_url".
        llm: Chat model used elsewhere (e.g. to summarize over-long queries).
            Defaults to None for backward compatibility with 2-arg callers.
    """
    self.config = config
    self.llm = llm
    if self.config["llm_provider"].lower() == "google":
        # Google provider: embed via Gemini instead of an OpenAI-compatible endpoint.
        self.embedding_model = GoogleGenerativeAIEmbeddings(
            model="models/embedding-001"
        )
    else:
        # A local Ollama endpoint exposes its own embedding model name.
        if config["backend_url"] == "http://localhost:11434/v1":
            self.embedding = "nomic-embed-text"
        else:
            self.embedding = "text-embedding-3-small"
        self.client = OpenAI(base_url=config["backend_url"])
    self.chroma_client = chromadb.Client(Settings(allow_reset=True))
    self.situation_collection = self.chroma_client.create_collection(name=name)
def get_embedding(self, text):
    """Return the embedding vector for *text* using the configured provider.

    Dispatches on config["llm_provider"]: Google uses the Gemini embeddings
    model set up in __init__; any other provider goes through the
    OpenAI-compatible embeddings endpoint. (The stale pre-commit OpenAI-only
    body that shadowed this dispatch as unreachable code has been removed.)
    """
    if self.config["llm_provider"].lower() == "google":
        return self.embedding_model.embed_query(text)
    # OpenAI-compatible path (OpenAI proper or a local Ollama endpoint).
    response = self.client.embeddings.create(model=self.embedding, input=text)
    return response.data[0].embedding
def add_situations(self, situations_and_advice):
"""Add financial situations and their corresponding advice. Parameter is a list of tuples (situation, rec)"""
@ -45,8 +53,16 @@ class FinancialSituationMemory:
)
def get_memories(self, current_situation, n_matches=1):
"""Find matching recommendations using OpenAI embeddings"""
query_embedding = self.get_embedding(current_situation)
"""Find matching recommendations using embeddings. Summarizes if text is too long."""
# Check length and summarize if needed
if len(current_situation) > 30000:
prompt = f"Please summarize the following financial analysis text into a concise paragraph that captures the key insights and market sentiment. The summary should be suitable for use as a query to find similar past situations in a vector database:\n\n{current_situation}"
summary_response = self.llm.invoke(prompt)
query_text = summary_response.content
else:
query_text = current_situation
query_embedding = self.get_embedding(query_text)
results = self.situation_collection.query(
query_embeddings=[query_embedding],
@ -55,14 +71,15 @@ class FinancialSituationMemory:
)
matched_results = []
for i in range(len(results["documents"][0])):
matched_results.append(
{
"matched_situation": results["documents"][0][i],
"recommendation": results["metadatas"][0][i]["recommendation"],
"similarity_score": 1 - results["distances"][0][i],
}
)
if results.get("documents") and results["documents"][0]:
for i in range(len(results["documents"][0])):
matched_results.append(
{
"matched_situation": results["documents"][0][i],
"recommendation": results["metadatas"][0][i]["recommendation"],
"similarity_score": 1 - results["distances"][0][i],
}
)
return matched_results

View File

@ -85,11 +85,11 @@ class TradingAgentsGraph:
raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")
# Initialize memories
self.bull_memory = FinancialSituationMemory("bull_memory", self.config)
self.bear_memory = FinancialSituationMemory("bear_memory", self.config)
self.trader_memory = FinancialSituationMemory("trader_memory", self.config)
self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config)
self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config)
self.bull_memory = FinancialSituationMemory("bull_memory", self.config, self.quick_thinking_llm)
self.bear_memory = FinancialSituationMemory("bear_memory", self.config, self.quick_thinking_llm)
self.trader_memory = FinancialSituationMemory("trader_memory", self.config, self.quick_thinking_llm)
self.invest_judge_memory = FinancialSituationMemory("invest_judge_memory", self.config, self.deep_thinking_llm)
self.risk_manager_memory = FinancialSituationMemory("risk_manager_memory", self.config, self.deep_thinking_llm)
# Create tool nodes
self.tool_nodes = self._create_tool_nodes()