This commit is contained in:
MarkLo 2025-11-17 04:30:40 +08:00
parent 9b3aee8ae6
commit a28ed2336e
11 changed files with 394 additions and 49 deletions

View File

@ -182,26 +182,29 @@ def select_shallow_thinking_agent(provider) -> str:
# 定義不同供應商的淺層思維 LLM 引擎選項
SHALLOW_AGENT_OPTIONS = {
"openai": [
("GPT-4o-mini - 快速高效,適用於快速任務", "gpt-4o-mini"),
("GPT-4.1-nano - 超輕量級模型,適用於基本操作", "gpt-4.1-nano"),
("GPT-4.1-mini - 性能良好的緊湊型模型", "gpt-4.1-mini"),
("GPT-4o - 功能齊全的標準模型", "gpt-4o"),
("GPT-5.1", "gpt-5.1-2025-11-13"),
("GPT-5-mini","gpt-5-mini-2025-08-07"),
("GPT-5-nano","gpt-5-nano-2025-08-07"),
("GPT-4.1-mini", "gpt-4.1-mini"),
("GPT-4.1-nano", "gpt-4.1-nano"),
("GPT-4o", "gpt-4o"),
("GPT-4o-mini", "gpt-4o-mini")
],
"anthropic": [
("Claude Haiku 3.5 - 推理速度快,具備標準能力", "claude-3-5-haiku-latest"),
("Claude Sonnet 3.5 - 功能強大的標準模型", "claude-3-5-sonnet-latest"),
("Claude Sonnet 3.7 - 卓越的混合推理和代理能力", "claude-3-7-sonnet-latest"),
("Claude Sonnet 4 - 高性能和出色的推理能力", "claude-sonnet-4-0"),
("Claude Haiku 3.5", "claude-3-5-haiku-latest"),
("Claude Sonnet 3.5", "claude-3-5-sonnet-latest"),
("Claude Sonnet 3.7", "claude-3-7-sonnet-latest"),
("Claude Sonnet 4", "claude-sonnet-4-0"),
],
"google": [
("Gemini 2.0 Flash-Lite - 成本效益高,延遲低", "gemini-2.0-flash-lite"),
("Gemini 2.0 Flash - 新一代功能、速度和思維", "gemini-2.0-flash"),
("Gemini 2.5 Flash - 適應性思維,成本效益高", "gemini-2.5-flash-preview-05-20"),
("Gemini 2.0 Flash-Lite", "gemini-2.0-flash-lite"),
("Gemini 2.0 Flash", "gemini-2.0-flash"),
("Gemini 2.5 Flash", "gemini-2.5-flash-preview-05-20"),
],
"openrouter": [
("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
("Meta: Llama 3.3 8B Instruct - Llama 3.3 70B 的輕量級超快版本", "meta-llama/llama-3.3-8b-instruct:free"),
("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 提供更快的首個 token 生成時間", "google/gemini-2.0-flash-exp:free"),
("Meta: Llama 3.3 8B Instruct - Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0", "google/gemini-2.0-flash-exp:free"),
],
"ollama": [
("llama3.1 本機版", "llama3.1"),
@ -253,25 +256,25 @@ def select_deep_thinking_agent(provider) -> str:
# 定義不同供應商的深層思維 LLM 引擎選項
DEEP_AGENT_OPTIONS = {
"openai": [
("GPT-4.1-nano - 超輕量級模型,適用於基本操作", "gpt-4.1-nano"),
("GPT-4.1-mini - 性能良好的緊湊型模型", "gpt-4.1-mini"),
("GPT-4o - 功能齊全的標準模型", "gpt-4o"),
("o4-mini - 專業推理模型 (緊湊型)", "o4-mini"),
("o3-mini - 進階推理模型 (輕量級)", "o3-mini"),
("o3 - 完整進階推理模型", "o3"),
("o1 - 頂級推理和問題解決模型", "o1"),
("GPT-5.1", "gpt-5.1-2025-11-13"),
("GPT-5-mini","gpt-5-mini-2025-08-07"),
("GPT-5-nano","gpt-5-nano-2025-08-07"),
("GPT-4.1-mini", "gpt-4.1-mini"),
("GPT-4.1-nano", "gpt-4.1-nano"),
("GPT-4o", "gpt-4o"),
("GPT-4o-mini", "gpt-4o-mini")
],
"anthropic": [
("Claude Haiku 3.5 - 推理速度快,具備標準能力", "claude-3-5-haiku-latest"),
("Claude Sonnet 3.5 - 功能強大的標準模型", "claude-3-5-sonnet-latest"),
("Claude Sonnet 3.7 - 卓越的混合推理和代理能力", "claude-3-7-sonnet-latest"),
("Claude Sonnet 4 - 高性能和出色的推理能力", "claude-sonnet-4-0"),
("Claude Opus 4 - Anthropic 最強大的模型", " claude-opus-4-0"),
("Claude Haiku 3.5", "claude-3-5-haiku-latest"),
("Claude Sonnet 3.5", "claude-3-5-sonnet-latest"),
("Claude Sonnet 3.7", "claude-3-7-sonnet-latest"),
("Claude Sonnet 4", "claude-sonnet-4-0"),
("Claude Opus 4", "claude-opus-4-0"),
],
"google": [
("Gemini 2.0 Flash-Lite - 成本效益高,延遲低", "gemini-2.0-flash-lite"),
("Gemini 2.0 Flash - 新一代功能、速度和思維", "gemini-2.0-flash"),
("Gemini 2.5 Flash - 適應性思維,成本效益高", "gemini-2.5-flash-preview-05-20"),
("Gemini 2.0 Flash-Lite", "gemini-2.0-flash-lite"),
("Gemini 2.0 Flash", "gemini-2.0-flash"),
("Gemini 2.5 Flash", "gemini-2.5-flash-preview-05-20"),
("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
],
"openrouter": [

View File

@ -39,16 +39,40 @@ def create_research_manager(llm, memory):
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
# 定義文本截斷函數以避免超過 token 限制
def truncate_text(text, max_chars):
    """Clip *text* to at most *max_chars* characters, appending a truncation marker when clipped."""
    if len(text) > max_chars:
        return text[:max_chars] + "\n...(內容已截斷)"
    return text
# 為每個報告設置合理的字符限制
# 模型 gpt-4o-mini 的限制是 8192 tokens
# 混合中英文估算: 1 字符 ≈ 1.5-2 tokens (取保守值)
# 目標: 總字符數 < 3500 字符 (約 5250-7000 tokens,留足夠 tokens 給 completion)
market_research_report = truncate_text(market_research_report, 500)
sentiment_report = truncate_text(sentiment_report, 500)
news_report = truncate_text(news_report, 600)
fundamentals_report = truncate_text(fundamentals_report, 600)
# 整合當前情況
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
# 從記憶體中獲取過去相似情況的經驗
past_memories = memory.get_memories(curr_situation, n_matches=2)
# 將過去的經驗格式化為字串
# 將過去的經驗格式化為字串(限制長度)
past_memory_str = ""
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
recommendation = rec["recommendation"]
# 限制每條記憶的長度
if len(recommendation) > 200:
recommendation = recommendation[:200] + "...(已截斷)"
past_memory_str += recommendation + "\n\n"
# 截斷辯論歷史 - 這是最容易超過限制的部分
# 限制辯論歷史在 1200 字符以內
history = truncate_text(history, 1200)
# 建立提示 (prompt)
prompt = f"""作為投資組合經理和辯論主持人,您的角色是批判性地評估這一輪辯論,並做出明確的決定:與看跌分析師保持一致、與看漲分析師保持一致,或者僅在有充分理由支持的情況下選擇持有。

View File

@ -41,16 +41,41 @@ def create_risk_manager(llm, memory):
sentiment_report = state["sentiment_report"]
trader_plan = state["investment_plan"]
# 定義文本截斷函數以避免超過 token 限制
def truncate_text(text, max_chars):
    """Clip *text* to at most *max_chars* characters, appending a truncation marker when clipped."""
    if len(text) > max_chars:
        return text[:max_chars] + "\n...(內容已截斷)"
    return text
# 為每個報告設置合理的字符限制
# 模型 gpt-4o-mini 的限制是 8192 tokens
# 混合中英文估算: 1 字符 ≈ 1.5-2 tokens (取保守值)
# 目標: 總字符數 < 3500 字符 (約 5250-7000 tokens,留足夠 tokens 給 completion)
market_research_report = truncate_text(market_research_report, 500)
sentiment_report = truncate_text(sentiment_report, 500)
news_report = truncate_text(news_report, 600)
fundamentals_report = truncate_text(fundamentals_report, 600)
trader_plan = truncate_text(trader_plan, 800)
# 整合當前情況
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
# 從記憶體中獲取過去相似情況的經驗
past_memories = memory.get_memories(curr_situation, n_matches=2)
# 將過去的經驗格式化為字串
# 將過去的經驗格式化為字串(限制長度)
past_memory_str = ""
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
recommendation = rec["recommendation"]
# 限制每條記憶的長度
if len(recommendation) > 200:
recommendation = recommendation[:200] + "...(已截斷)"
past_memory_str += recommendation + "\n\n"
# 截斷辯論歷史 - 這是最容易超過限制的部分
# 限制辯論歷史在 1000 字符以內(風險辯論通常有 3 方,比投資辯論更長)
history = truncate_text(history, 1000)
# 建立提示 (prompt)
prompt = f"""作為風險管理裁判和辯論主持人,您的目標是評估三位風險分析師——激進、中立和安全/保守——之間的辯論,並為交易員確定最佳行動方案。您的決策必須產生一個明確的建議:買入、賣出或持有。僅在有特定論點強烈支持時才選擇持有,而不是在各方看起來都合理時作為後備選項。力求清晰和果斷。

View File

@ -46,6 +46,24 @@ def create_risky_debator(llm):
# 獲取交易員的決策
trader_decision = state["trader_investment_plan"]
# 定義文本截斷函數以避免超過 token 限制
def truncate_text(text, max_chars):
    """Clip *text* to at most *max_chars* characters, appending a truncation marker when clipped."""
    if len(text) > max_chars:
        return text[:max_chars] + "\n...(內容已截斷)"
    return text
# 截斷各類輸入以控制 token 使用量
# 模型限制: 8192 tokens,目標: < 3500 字符
market_research_report = truncate_text(market_research_report, 500)
sentiment_report = truncate_text(sentiment_report, 500)
news_report = truncate_text(news_report, 600)
fundamentals_report = truncate_text(fundamentals_report, 600)
trader_decision = truncate_text(trader_decision, 800)
history = truncate_text(history, 400)
current_safe_response = truncate_text(current_safe_response, 300)
current_neutral_response = truncate_text(current_neutral_response, 300)
# 建立提示 (prompt)
prompt = f"""作為激進風險分析師,您的角色是積極倡導高回報、高風險的機會,強調大膽的策略和競爭優勢。在評估交易員的決策或計畫時,請專注於潛在的上升空間、增長潛力和創新效益——即使這些都伴隨著較高的風險。利用所提供的市場數據和情緒分析來加強您的論點,並挑戰反對意見。具體來說,請直接回應保守和中立分析師提出的每點,用數據驅動的反駁和有說服力的推理進行反擊。強調他們的謹慎可能錯失關鍵機會,或者他們的假設可能過於保守。這是交易員的決策:

View File

@ -47,6 +47,24 @@ def create_safe_debator(llm):
# 獲取交易員的決策
trader_decision = state["trader_investment_plan"]
# 定義文本截斷函數以避免超過 token 限制
def truncate_text(text, max_chars):
    """Clip *text* to at most *max_chars* characters, appending a truncation marker when clipped."""
    if len(text) > max_chars:
        return text[:max_chars] + "\n...(內容已截斷)"
    return text
# 截斷各類輸入以控制 token 使用量
# 模型限制: 8192 tokens,目標: < 3500 字符
market_research_report = truncate_text(market_research_report, 500)
sentiment_report = truncate_text(sentiment_report, 500)
news_report = truncate_text(news_report, 600)
fundamentals_report = truncate_text(fundamentals_report, 600)
trader_decision = truncate_text(trader_decision, 800)
history = truncate_text(history, 400)
current_risky_response = truncate_text(current_risky_response, 300)
current_neutral_response = truncate_text(current_neutral_response, 300)
# 建立提示 (prompt)
prompt = f"""作為安全/保守風險分析師,您的主要目標是保護資產、最小化波動性並確保穩定可靠的增長。您優先考慮穩定性、安全性和風險緩解,仔細評估潛在損失、經濟衰退和市場波動。在評估交易員的決策或計畫時,請批判性地審查高風險元素,指出決策可能使公司面臨過度風險的地方,以及更謹慎的替代方案可以在何處確保長期收益。這是交易員的決策:

View File

@ -46,6 +46,24 @@ def create_neutral_debator(llm):
# 獲取交易員的決策
trader_decision = state["trader_investment_plan"]
# 定義文本截斷函數以避免超過 token 限制
def truncate_text(text, max_chars):
    """Clip *text* to at most *max_chars* characters, appending a truncation marker when clipped."""
    if len(text) > max_chars:
        return text[:max_chars] + "\n...(內容已截斷)"
    return text
# 截斷各類輸入以控制 token 使用量
# 模型限制: 8192 tokens,目標: < 3500 字符
market_research_report = truncate_text(market_research_report, 500)
sentiment_report = truncate_text(sentiment_report, 500)
news_report = truncate_text(news_report, 600)
fundamentals_report = truncate_text(fundamentals_report, 600)
trader_decision = truncate_text(trader_decision, 800)
history = truncate_text(history, 400)
current_risky_response = truncate_text(current_risky_response, 300)
current_safe_response = truncate_text(current_safe_response, 300)
# 建立提示 (prompt)
prompt = f"""作為中立風險分析師,您的角色是提供一個平衡的視角,權衡交易員決策或計畫的潛在利益和風險。您優先考慮一個全面的方法,評估其優缺點,同時考慮更廣泛的市場趨勢、潛在的經濟轉變和多元化策略。這是交易員的決策:

View File

@ -39,24 +39,43 @@ def create_trader(llm, memory):
news_report = state["news_report"]
fundamentals_report = state["fundamentals_report"]
# 整合當前情況
curr_situation = f"{market_research_report}\n\n{sentiment_report}\n\n{news_report}\n\n{fundamentals_report}"
# 定義文本截斷函數以避免超過 token 限制
def truncate_text(text, max_chars):
    """Clip *text* to at most *max_chars* characters, appending a truncation marker when clipped."""
    if len(text) > max_chars:
        return text[:max_chars] + "\n...(內容已截斷)"
    return text
# 截斷各類報告以控制 token 使用量
# 這些報告將用於記憶檢索(embedding)和 LLM prompt
market_research_report_truncated = truncate_text(market_research_report, 500)
sentiment_report_truncated = truncate_text(sentiment_report, 500)
news_report_truncated = truncate_text(news_report, 600)
fundamentals_report_truncated = truncate_text(fundamentals_report, 600)
investment_plan_truncated = truncate_text(investment_plan, 800)
# 整合當前情況(用於記憶檢索)
curr_situation = f"{market_research_report_truncated}\n\n{sentiment_report_truncated}\n\n{news_report_truncated}\n\n{fundamentals_report_truncated}"
# 從記憶體中獲取過去相似情況的經驗
past_memories = memory.get_memories(curr_situation, n_matches=2)
# 將過去的經驗格式化為字串
# 將過去的經驗格式化為字串(限制長度)
past_memory_str = ""
if past_memories:
for i, rec in enumerate(past_memories, 1):
past_memory_str += rec["recommendation"] + "\n\n"
recommendation = rec["recommendation"]
# 限制每條記憶的長度
if len(recommendation) > 200:
recommendation = recommendation[:200] + "...(已截斷)"
past_memory_str += recommendation + "\n\n"
else:
past_memory_str = "找不到過去的記憶。"
# 建立上下文,包含給交易員的指示和投資計畫
context = {
"role": "user",
"content": f"根據分析師團隊的綜合分析,這是一份為 {company_name} 量身定制的投資計畫。該計畫結合了當前技術市場趨勢、宏觀經濟指標和社群媒體情緒的見解。請以此計畫為基礎,評估您的下一個交易決策。\n\n建議的投資計畫:{investment_plan}\n\n利用這些見解,做出明智且具策略性的決策。",
"content": f"根據分析師團隊的綜合分析,這是一份為 {company_name} 量身定制的投資計畫。該計畫結合了當前技術市場趨勢、宏觀經濟指標和社群媒體情緒的見解。請以此計畫為基礎,評估您的下一個交易決策。\n\n建議的投資計畫:{investment_plan_truncated}\n\n利用這些見解,做出明智且具策略性的決策。",
}
# 建立傳送給 LLM 的訊息列表

View File

@ -18,6 +18,13 @@ class FinancialSituationMemory:
def get_embedding(self, text):
"""Get OpenAI embedding for a text"""
# Truncate text to avoid exceeding embedding model's token limit
# text-embedding-3-small has 8192 token limit
# For mixed Chinese/English text, estimate ~1.5-2 tokens per character
# Target: ~4000 characters to stay well under 8192 tokens
max_chars = 4000
if len(text) > max_chars:
text = text[:max_chars]
response = self.client.embeddings.create(
model=self.embedding, input=text

View File

@ -1,4 +1,5 @@
from .alpha_vantage_common import _make_api_request
import json
def get_fundamentals(ticker: str, curr_date: str = None) -> str:
@ -16,7 +17,59 @@ def get_fundamentals(ticker: str, curr_date: str = None) -> str:
"symbol": ticker,
}
return _make_api_request("OVERVIEW", params)
response = _make_api_request("OVERVIEW", params)
# 總結基本面數據以減少 token 使用量
try:
data = json.loads(response) if isinstance(response, str) else response
if isinstance(data, dict) and "Symbol" in data:
# 只保留關鍵的基本面指標
summarized_data = {
# 基本資訊
"Symbol": data.get("Symbol", ""),
"Name": data.get("Name", ""),
"Description": data.get("Description", "")[:300] if data.get("Description") else "", # 限制描述長度
"Sector": data.get("Sector", ""),
"Industry": data.get("Industry", ""),
"MarketCapitalization": data.get("MarketCapitalization", ""),
# 關鍵財務指標
"EBITDA": data.get("EBITDA", ""),
"PERatio": data.get("PERatio", ""),
"PEGRatio": data.get("PEGRatio", ""),
"BookValue": data.get("BookValue", ""),
"DividendPerShare": data.get("DividendPerShare", ""),
"DividendYield": data.get("DividendYield", ""),
"EPS": data.get("EPS", ""),
"RevenuePerShareTTM": data.get("RevenuePerShareTTM", ""),
"ProfitMargin": data.get("ProfitMargin", ""),
"OperatingMarginTTM": data.get("OperatingMarginTTM", ""),
"ReturnOnAssetsTTM": data.get("ReturnOnAssetsTTM", ""),
"ReturnOnEquityTTM": data.get("ReturnOnEquityTTM", ""),
"RevenueTTM": data.get("RevenueTTM", ""),
"GrossProfitTTM": data.get("GrossProfitTTM", ""),
# 交易指標
"52WeekHigh": data.get("52WeekHigh", ""),
"52WeekLow": data.get("52WeekLow", ""),
"50DayMovingAverage": data.get("50DayMovingAverage", ""),
"200DayMovingAverage": data.get("200DayMovingAverage", ""),
# 財務健康指標
"QuarterlyEarningsGrowthYOY": data.get("QuarterlyEarningsGrowthYOY", ""),
"QuarterlyRevenueGrowthYOY": data.get("QuarterlyRevenueGrowthYOY", ""),
"AnalystTargetPrice": data.get("AnalystTargetPrice", ""),
"Beta": data.get("Beta", ""),
}
return json.dumps(summarized_data, ensure_ascii=False, indent=2)
return response
except (json.JSONDecodeError, Exception) as e:
print(f"警告:無法總結基本面數據:{e}")
return response
def get_balance_sheet(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
@ -35,7 +88,26 @@ def get_balance_sheet(ticker: str, freq: str = "quarterly", curr_date: str = Non
"symbol": ticker,
}
return _make_api_request("BALANCE_SHEET", params)
response = _make_api_request("BALANCE_SHEET", params)
# 限制返回的報告數量以減少 token 使用量
try:
data = json.loads(response) if isinstance(response, str) else response
if isinstance(data, dict):
# 只保留最近的 2 份報告(而不是全部歷史)
if "quarterlyReports" in data and isinstance(data["quarterlyReports"], list):
data["quarterlyReports"] = data["quarterlyReports"][:2]
if "annualReports" in data and isinstance(data["annualReports"], list):
data["annualReports"] = data["annualReports"][:2]
return json.dumps(data, ensure_ascii=False, indent=2)
return response
except (json.JSONDecodeError, Exception) as e:
print(f"警告:無法處理資產負債表數據:{e}")
return response
def get_cashflow(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
@ -54,7 +126,26 @@ def get_cashflow(ticker: str, freq: str = "quarterly", curr_date: str = None) ->
"symbol": ticker,
}
return _make_api_request("CASH_FLOW", params)
response = _make_api_request("CASH_FLOW", params)
# 限制返回的報告數量以減少 token 使用量
try:
data = json.loads(response) if isinstance(response, str) else response
if isinstance(data, dict):
# 只保留最近的 2 份報告(而不是全部歷史)
if "quarterlyReports" in data and isinstance(data["quarterlyReports"], list):
data["quarterlyReports"] = data["quarterlyReports"][:2]
if "annualReports" in data and isinstance(data["annualReports"], list):
data["annualReports"] = data["annualReports"][:2]
return json.dumps(data, ensure_ascii=False, indent=2)
return response
except (json.JSONDecodeError, Exception) as e:
print(f"警告:無法處理現金流量表數據:{e}")
return response
def get_income_statement(ticker: str, freq: str = "quarterly", curr_date: str = None) -> str:
@ -73,4 +164,23 @@ def get_income_statement(ticker: str, freq: str = "quarterly", curr_date: str =
"symbol": ticker,
}
return _make_api_request("INCOME_STATEMENT", params)
response = _make_api_request("INCOME_STATEMENT", params)
# 限制返回的報告數量以減少 token 使用量
try:
data = json.loads(response) if isinstance(response, str) else response
if isinstance(data, dict):
# 只保留最近的 2 份報告(而不是全部歷史)
if "quarterlyReports" in data and isinstance(data["quarterlyReports"], list):
data["quarterlyReports"] = data["quarterlyReports"][:2]
if "annualReports" in data and isinstance(data["annualReports"], list):
data["annualReports"] = data["annualReports"][:2]
return json.dumps(data, ensure_ascii=False, indent=2)
return response
except (json.JSONDecodeError, Exception) as e:
print(f"警告:無法處理損益表數據:{e}")
return response

View File

@ -1,4 +1,5 @@
from .alpha_vantage_common import _make_api_request, format_datetime_for_api
import json
def get_news(ticker, start_date, end_date) -> dict[str, str] | str:
"""
@ -20,10 +21,64 @@ def get_news(ticker, start_date, end_date) -> dict[str, str] | str:
"time_from": format_datetime_for_api(start_date),
"time_to": format_datetime_for_api(end_date),
"sort": "LATEST",
"limit": "50",
"limit": "10", # 降低限制從 50 到 10 以避免超過 token 限制
}
return _make_api_request("NEWS_SENTIMENT", params)
response = _make_api_request("NEWS_SENTIMENT", params)
# 處理並總結回應以減少 token 使用量
try:
data = json.loads(response) if isinstance(response, str) else response
# 如果回應包含新聞項目,提取關鍵資訊
if isinstance(data, dict) and "feed" in data:
summarized_feed = []
for item in data.get("feed", []):
# 只保留必要的欄位以減少大小
summarized_item = {
"title": item.get("title", ""),
"url": item.get("url", ""),
"time_published": item.get("time_published", ""),
"summary": item.get("summary", "")[:200] if item.get("summary") else "", # 限制摘要長度
"source": item.get("source", ""),
"overall_sentiment_score": item.get("overall_sentiment_score", 0),
"overall_sentiment_label": item.get("overall_sentiment_label", ""),
}
# 為此新聞項目添加相關的股票代碼情緒
if "ticker_sentiment" in item:
ticker_sentiments = [
{
"ticker": ts.get("ticker", ""),
"relevance_score": ts.get("relevance_score", ""),
"ticker_sentiment_score": ts.get("ticker_sentiment_score", ""),
"ticker_sentiment_label": ts.get("ticker_sentiment_label", "")
}
for ts in item.get("ticker_sentiment", [])
if ts.get("ticker") == ticker # 只包含相關的股票代碼
]
if ticker_sentiments:
summarized_item["ticker_sentiment"] = ticker_sentiments
summarized_feed.append(summarized_item)
# 建立總結的回應
summarized_data = {
"items": data.get("items", "0"),
"sentiment_score_definition": data.get("sentiment_score_definition", ""),
"relevance_score_definition": data.get("relevance_score_definition", ""),
"feed": summarized_feed
}
return json.dumps(summarized_data, ensure_ascii=False, indent=2)
# 如果格式不如預期,返回原始回應
return response
except (json.JSONDecodeError, Exception) as e:
# 如果處理失敗,返回原始回應
print(f"警告:無法總結新聞數據:{e}")
return response
def get_insider_transactions(symbol: str) -> dict[str, str] | str:
"""
@ -42,4 +97,21 @@ def get_insider_transactions(symbol: str) -> dict[str, str] | str:
"symbol": symbol,
}
return _make_api_request("INSIDER_TRANSACTIONS", params)
response = _make_api_request("INSIDER_TRANSACTIONS", params)
# 限制返回的交易數量以減少 token 使用量
try:
data = json.loads(response) if isinstance(response, str) else response
if isinstance(data, dict) and "data" in data:
# 只保留最近的 15 筆交易(而不是全部)
if isinstance(data["data"], list):
data["data"] = data["data"][:15]
return json.dumps(data, ensure_ascii=False, indent=2)
return response
except (json.JSONDecodeError, Exception) as e:
print(f"警告:無法處理內部交易數據:{e}")
return response

View File

@ -127,14 +127,31 @@ def get_finnhub_news(
return ""
combined_result = ""
total_articles = 0
max_articles = 15 # 限制總文章數量
for day, data in result.items():
if len(data) == 0:
continue
for entry in data:
if total_articles >= max_articles:
break
headline = entry.get("headline", "")
summary = entry.get("summary", "")
# 限制摘要長度
if summary and len(summary) > 300:
summary = summary[:300] + "..."
current_news = (
"### " + entry["headline"] + f" ({day})" + "\n" + entry["summary"]
"### " + headline + f" ({day})" + "\n" + summary
)
combined_result += current_news + "\n\n"
total_articles += 1
if total_articles >= max_articles:
break
return f"## {query} 新聞,從 {start_date}{end_date}\n" + str(combined_result)
@ -468,12 +485,15 @@ def get_reddit_company_news(
total=total_iterations,
)
# 限制每天的文章數量以避免 token 過多
max_per_day = 5 # 從 10 降低到 5
while curr_date <= end_date_dt:
curr_date_str = curr_date.strftime("%Y-%m-%d")
fetch_result = fetch_top_from_category(
"company_news",
curr_date_str,
10, # 每天最大限制
max_per_day,
query,
data_path=os.path.join(DATA_DIR, "reddit_data"),
)
@ -487,11 +507,22 @@ def get_reddit_company_news(
if len(posts) == 0:
return ""
# 限制總文章數量和內容長度
max_total_posts = 20 # 最多 20 篇文章
posts = posts[:max_total_posts]
news_str = ""
for post in posts:
if post["content"] == "":
news_str += f"### {post['title']}\n\n"
title = post['title']
content = post['content']
# 限制每篇文章的內容長度
if content and len(content) > 300:
content = content[:300] + "..."
if content == "":
news_str += f"### {title}\n\n"
else:
news_str += f"### {post['title']}\n\n{post['content']}\n\n"
news_str += f"### {title}\n\n{content}\n\n"
return f"##{query} 新聞 Reddit{start_date}{end_date}\n\n{news_str}"