From 92c2a84b837c41dec8e00076cdd994fbeba612aa Mon Sep 17 00:00:00 2001
From: MarkLo127
Date: Tue, 10 Mar 2026 17:00:32 +0800
Subject: [PATCH] Add report chat feature (backend chat service + history chat
 page)

---
 backend/__main__.py                    |   1 +
 backend/app/api/routes.py              |  41 ++
 backend/app/models/schemas.py          |  29 ++
 backend/app/services/chat_service.py   | 194 ++++++++++
 frontend/app/analysis/results/page.tsx |  18 +-
 frontend/app/history/chat/page.tsx     | 510 +++++++++++++++++++++++++
 frontend/app/history/page.tsx          |  20 +-
 frontend/components/layout/Footer.tsx  |   8 +
 frontend/components/layout/Header.tsx  |   4 +
 frontend/lib/api.ts                    |  13 +
 frontend/lib/i18n/en.ts                |  14 +
 frontend/lib/i18n/zh-TW.ts             |  14 +
 frontend/lib/types.ts                  |  18 +
 13 files changed, 872 insertions(+), 12 deletions(-)
 create mode 100644 backend/app/services/chat_service.py
 create mode 100644 frontend/app/history/chat/page.tsx

diff --git a/backend/__main__.py b/backend/__main__.py
index 56b4977e..70cfc046 100644
--- a/backend/__main__.py
+++ b/backend/__main__.py
@@ -62,6 +62,7 @@ def main():
         host=args.host,
         port=args.port,
         reload=reload,
+        reload_excludes=["frontend/*", "node_modules/*", "*.pyc", ".git/*"],
         log_level="info",
     )
 
diff --git a/backend/app/api/routes.py b/backend/app/api/routes.py
index 6dc08a8d..d947964f 100644
--- a/backend/app/api/routes.py
+++ b/backend/app/api/routes.py
@@ -15,6 +15,8 @@ from backend.app.models.schemas import (
     TaskCreatedResponse,
     TaskStatusResponse,
     DownloadRequest,
+    ChatRequest,
+    ChatResponse,
 )
 from backend.app.services.trading_service import TradingService
 from backend.app.services.task_manager import task_manager
@@ -376,3 +378,42 @@ async def download_reports(request: DownloadRequest):
         }
     )
+
+
+@router.post("/chat", response_model=ChatResponse)
+async def chat_with_report(request: ChatRequest):
+    """
+    Chat with the analysis report using the user's LLM.
+
+    Sends the analysis reports as context to the LLM along with
+    the user's question, and returns the assistant's answer.
+
+    Args:
+        request: Chat request with message, reports context, and LLM config
+
+    Returns:
+        ChatResponse: Assistant's reply
+    """
+    from backend.app.services.chat_service import chat_with_reports
+
+    try:
+        reply = await chat_with_reports(
+            message=request.message,
+            reports=request.reports,
+            ticker=request.ticker,
+            analysis_date=request.analysis_date,
+            history=request.history,
+            model=request.model,
+            api_key=request.api_key,
+            base_url=request.base_url,
+            language=request.language or "zh-TW",
+        )
+
+        return ChatResponse(reply=reply)
+
+    except Exception as e:
+        logger.error(f"Chat failed: {str(e)}", exc_info=True)
+        raise HTTPException(
+            status_code=500,
+            detail=f"Chat failed: {str(e)}"
+        )
+
diff --git a/backend/app/models/schemas.py b/backend/app/models/schemas.py
index ce2e4a08..1cc99a6e 100644
--- a/backend/app/models/schemas.py
+++ b/backend/app/models/schemas.py
@@ -183,3 +183,32 @@ class DownloadRequest(BaseModel):
     def uppercase_ticker(cls, v: str) -> str:
         return v.strip().upper()
 
+
+# Chat Schemas
+
+class ChatRequest(BaseModel):
+    """Request model for chatting about analysis reports"""
+    message: str = Field(..., description="User's question about the report", min_length=1, max_length=2000)
+    reports: Dict[str, Any] = Field(..., description="Full analysis reports dict")
+    ticker: str = Field(..., description="Stock ticker symbol")
+    analysis_date: str = Field(..., description="Analysis date")
+    history: Optional[List[Dict[str, str]]] = Field(
+        default=None,
+        description="Previous conversation messages [{role, content}]"
+    )
+    model: str = Field(..., description="LLM model name")
+    api_key: str = Field(..., description="User's LLM API key", min_length=1)
+    base_url: str = Field(
+        default="https://api.openai.com/v1",
+        description="LLM API base URL"
+    )
+    language: Optional[Literal["en", "zh-TW"]] = Field(
+        default="zh-TW",
+        description="Response language"
+    )
+
+
+class ChatResponse(BaseModel):
+    """Response model for chat"""
+    reply: str = Field(..., description="Assistant's answer")
+
diff --git a/backend/app/services/chat_service.py b/backend/app/services/chat_service.py
new file mode 100644
index 00000000..e88be6f7
--- /dev/null
+++ b/backend/app/services/chat_service.py
@@ -0,0 +1,194 @@
+"""
+Chat service for answering questions about analysis reports
+Uses the user's LLM API key to call an OpenAI-compatible endpoint.
+"""
+import logging
+from typing import Dict, Any, List, Optional
+from openai import AsyncOpenAI
+
+logger = logging.getLogger(__name__)
+
+
+SYSTEM_PROMPT_ZH = """你是 TradingAgentsX 的首席金融分析助手,擁有華爾街頂級分析師的專業素養。你的任務是基於提供的多位專業分析師(如基本面、技術面、新聞情緒等)的報告,精準、專業地回答使用者的問題。
+
+回答指南與規則:
+1. 【嚴守上下文】所有數據與觀點必須基於提供的報告內容,絕不可隨意編造或引入外部未經證實的資訊。若報告中未提及,請誠實告知「報告中未涵蓋此細節」。
+2. 【綜合與對比】當多位分析師有不同觀點時(例如看漲與看跌分析師的辯論),請提煉出雙方核心論點,提供全方位的客觀對比,而不是只偏袒一方。
+3. 【結構化與排版】使用清晰的 Markdown 格式排版。善用列點、粗體標示關鍵數據,讓使用者能快速抓到重點。
+4. 【開門見山】直接切入要點回答問題,不需要以「根據提供的報告...」這類多餘的廢話開頭。保持自信且客觀專業的語氣。
+5. 【明確引用】在提及特定預測或論點時,盡可能指出是哪一位分析師或哪一份報告提到的(例如:「技術面報告指出...」)。
+6. 【語言要求】全程使用流暢、具備金融專業術語的繁體中文回答。
+
+以下是本次對話的基準報告內容:
+=========================================
+【標的】: {ticker}
+【分析日期】: {analysis_date}
+
+{reports_text}
+========================================="""
+
+SYSTEM_PROMPT_EN = """You are the Lead Financial Analysis Assistant for TradingAgentsX, possessing the expertise of a top-tier Wall Street analyst. Your task is to accurately and professionally answer user questions based on the provided reports from various specialized analysts (e.g., Fundamentals, Technicals, Sentiment, etc.).
+
+Guidelines and Rules:
+1. [Strict Adherence to Context] All data and opinions must be grounded strictly in the provided reports. Do not fabricate data or bring in unverified external information. If the reports do not contain the answer, honestly state, "The provided reports do not cover this detail."
+2. [Synthesis and Contrast] When different analysts hold opposing views (e.g., Bull vs. Bear debate), distill the core arguments of both sides to provide a comprehensive and objective comparison.
+3. [Structure and Readability] Use clear Markdown formatting. Utilize bullet points and bold text for key metrics so the user can quickly grasp key insights.
+4. [Get to the Point] Start your answer directly and confidently without filler introductions like "Based on the provided reports...".
+5. [Explicit Citations] When mentioning specific forecasts or arguments, clarify which analyst or report it originated from (e.g., "The Technical Analyst noted...").
+6. [Language Constraint] Ensure all responses are in highly professional, fluent English with appropriate financial terminology.
+
+Below are the baseline reports for this conversation:
+=========================================
+[Ticker]: {ticker}
+[Analysis Date]: {analysis_date}
+
+{reports_text}
+========================================="""
+
+
+def _flatten_reports(reports: Dict[str, Any]) -> str:
+    """Flatten all reports into a single text block for context."""
+    sections = []
+
+    REPORT_LABELS = {
+        "market_report": "Market Analyst Report",
+        "sentiment_report": "Social Media Analyst Report",
+        "news_report": "News Analyst Report",
+        "fundamentals_report": "Fundamentals Analyst Report",
+        "trader_investment_plan": "Trader Investment Plan",
+    }
+
+    for key, label in REPORT_LABELS.items():
+        content = reports.get(key)
+        if content and isinstance(content, str):
+            sections.append(f"## {label}\n{content}")
+
+    # Handle nested debate states
+    debate_keys = {
+        "investment_debate_state": {
+            "bull_history": "Bull Researcher",
+            "bear_history": "Bear Researcher",
+            "judge_decision": "Research Manager Decision",
+        },
+        "risk_debate_state": {
+            "risky_history": "Aggressive Analyst",
+            "safe_history": "Conservative Analyst",
+            "neutral_history": "Neutral Analyst",
+            "judge_decision": "Risk Manager Decision",
+        },
+    }
+
+    for state_key, sub_keys in debate_keys.items():
+        state = reports.get(state_key)
+        if isinstance(state, dict):
+            for sub_key, label in sub_keys.items():
+                content = state.get(sub_key)
+                if content and isinstance(content, str):
+                    sections.append(f"## {label}\n{content}")
+
+    return "\n\n".join(sections) if sections else "(No reports available)"
+
+
+async def chat_with_reports(
+    message: str,
+    reports: Dict[str, Any],
+    ticker: str,
+    analysis_date: str,
+    history: Optional[List[Dict[str, str]]],
+    model: str,
+    api_key: str,
+    base_url: str,
+    language: str = "zh-TW",
+) -> str:
+    """
+    Send a chat message about analysis reports to the LLM.
+
+    Args:
+        message: User's question
+        reports: Full analysis reports dict
+        ticker: Stock ticker
+        analysis_date: Analysis date string
+        history: Previous conversation messages [{role, content}]
+        model: LLM model name
+        api_key: User's API key
+        base_url: LLM API base URL
+        language: Response language
+
+    Returns:
+        Assistant's reply string
+    """
+    reports_text = _flatten_reports(reports)
+
+    # Truncate reports to avoid exceeding token limits
+    # ~4 chars per token, target max ~8000 tokens for context
+    MAX_REPORT_CHARS = 32000
+    original_len = len(reports_text)
+    if original_len > MAX_REPORT_CHARS:
+        reports_text = reports_text[:MAX_REPORT_CHARS] + "\n\n...(報告內容已截斷以符合模型限制)..."
+        logger.info(f"Reports truncated from {original_len} to {MAX_REPORT_CHARS} chars")
+
+    # Choose system prompt based on language
+    if language == "en":
+        system_prompt = SYSTEM_PROMPT_EN.format(
+            ticker=ticker,
+            analysis_date=analysis_date,
+            reports_text=reports_text,
+        )
+    else:
+        system_prompt = SYSTEM_PROMPT_ZH.format(
+            ticker=ticker,
+            analysis_date=analysis_date,
+            reports_text=reports_text,
+        )
+
+    # Build messages list
+    messages = [{"role": "system", "content": system_prompt}]
+
+    # Add conversation history (limit to last 6 messages to control token usage)
+    if history:
+        recent_history = history[-6:]
+        for msg in recent_history:
+            messages.append({
+                "role": msg.get("role", "user"),
+                "content": msg.get("content", ""),
+            })
+
+    # Add current user message
+    messages.append({"role": "user", "content": message})
+
+    logger.info(f"Chat request for {ticker}: model={model}, history_len={len(history) if history else 0}, system_prompt_len={len(system_prompt)}")
+
+    try:
+        # Call LLM via async OpenAI-compatible SDK
+        client = AsyncOpenAI(
+            api_key=api_key,
+            base_url=base_url,
+            timeout=60.0,
+        )
+
+        response = await client.chat.completions.create(
+            model=model,
+            messages=messages,
+            temperature=0.3,
+            max_tokens=2048,
+        )
+
+        reply = response.choices[0].message.content or ""
+        logger.info(f"Chat response for {ticker}: {len(reply)} chars")
+
+        return reply
+
+    except Exception as e:
+        error_msg = str(e)
+        logger.error(f"LLM call failed: {error_msg}", exc_info=True)
+
+        # Provide user-friendly error messages
+        if "maximum context length" in error_msg.lower() or "token" in error_msg.lower():
+            raise Exception("報告內容過長,超出模型 token 限制。請嘗試縮短問題或清除對話歷史後重試。")
+        elif "rate_limit" in error_msg.lower() or "429" in error_msg:
+            raise Exception("API 速率限制,請稍後再試。")
+        elif "401" in error_msg or "api_key" in error_msg.lower():
+            raise Exception("API Key 無效或已過期,請檢查設定。")
+        elif "timeout" in error_msg.lower():
+            raise Exception("請求超時,請稍後再試。")
+        else:
+            raise
diff --git a/frontend/app/analysis/results/page.tsx b/frontend/app/analysis/results/page.tsx
index 67a64971..6a4f61d8 100644
--- a/frontend/app/analysis/results/page.tsx
+++ b/frontend/app/analysis/results/page.tsx
@@ -143,8 +143,6 @@ export default function AnalysisResultsPage() {
     );
   }
 
-  const currentAnalyst = ANALYSTS.find(a => a.key === selectedAnalyst);
-  const currentReport = getNestedValue(analysisResult.reports, currentAnalyst?.reportKey || "");
 
   return (
@@ -253,16 +251,20 @@ export default function AnalysisResultsPage() { {/* 分析師報告 */} - {analyst.label} {t.results.report} - - {analyst.description} - +
+
+ {analyst.label} {t.results.report} + + {analyst.description} + +
+
- {currentReport ? ( + {getNestedValue(analysisResult.reports, analyst.reportKey) ? (
- {currentReport} + {getNestedValue(analysisResult.reports, analyst.reportKey)}
) : ( diff --git a/frontend/app/history/chat/page.tsx b/frontend/app/history/chat/page.tsx new file mode 100644 index 00000000..dcd3b791 --- /dev/null +++ b/frontend/app/history/chat/page.tsx @@ -0,0 +1,510 @@ +"use client"; + +import { useState, useEffect, useRef } from "react"; +import { useRouter, useSearchParams } from "next/navigation"; +import ReactMarkdown from "react-markdown"; +import remarkGfm from "remark-gfm"; +import { useLanguage } from "@/contexts/LanguageContext"; +import { useAuth } from "@/contexts/auth-context"; +import { getApiSettingsAsync } from "@/lib/storage"; +import { getBaseUrlForModel } from "@/lib/api-helpers"; +import { api } from "@/lib/api"; +import { getReportsByMarketType, type SavedReport } from "@/lib/reports-db"; +import { getCloudReports, isCloudSyncEnabled } from "@/lib/user-api"; + +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { + MessageCircle, + Send, + Loader2, + Bot, + User, + Sparkles, + AlertCircle, + Trash2, + ArrowLeft, + Settings2, +} from "lucide-react"; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from "@/components/ui/select"; + +interface ChatMessage { + role: "user" | "assistant"; + content: string; +} + +const AVAILABLE_MODELS = [ + { id: "auto", name: "🤖 自動選擇 (Auto)", provider: "auto" }, + { id: "gpt-4o", name: "GPT-4o", provider: "openai" }, + { id: "gpt-4o-mini", name: "GPT-4o Mini", provider: "openai" }, + { id: "o1-mini", name: "o1-mini", provider: "openai" }, + { id: "o3-mini", name: "o3-mini", provider: "openai" }, + { id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet", provider: "anthropic" }, + { id: "claude-3-5-haiku-20241022", name: "Claude 3.5 Haiku", provider: "anthropic" }, + { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", provider: "google" }, + { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", provider: "google" }, + { id: "grok-2-1212", name: "Grok 2", provider: "grok" }, + { id: "deepseek-chat", name: "DeepSeek Chat", provider: "deepseek" }, + { id: "deepseek-reasoner", name: "DeepSeek Reasoner", provider: "deepseek" }, + { id: "qwen-max", name: "Qwen Max", provider: "qwen" }, + { id: "custom", name: "⚙️ 其他 (自訂模型)", provider: "custom" }, +]; + +export default function HistoryChatPage() { + const router = useRouter(); + const searchParams = useSearchParams(); + const { t, locale } = useLanguage(); + const { isAuthenticated } = useAuth(); + + const [messages, setMessages] = useState([]); + const [input, setInput] = useState(""); + const [isLoading, setIsLoading] = useState(false); + const [error, setError] = useState(null); + const [report, setReport] = useState(null); + const [loadingReport, setLoadingReport] = useState(true); + + const [selectedModelId, setSelectedModelId] = useState("auto"); + const [customModel, setCustomModel] = useState(""); + + const messagesEndRef = useRef(null); + const inputRef = useRef(null); + + const ticker = searchParams.get("ticker"); + const dateStr = searchParams.get("date"); + const market = searchParams.get("market"); + + // Load the specific report + useEffect(() => { + const loadReport = async () => { + if (!ticker || !dateStr || !market) { + setLoadingReport(false); + return; + } + + try { + setLoadingReport(true); + // Try local DB first + const localObj = await getReportsByMarketType(market as any); + const match = localObj.find( + (r) => r.ticker === ticker && r.analysis_date === dateStr + ); + + if (match) { + setReport(match); + } else if (isAuthenticated && 
isCloudSyncEnabled()) {
+          // Fallback to cloud
+          const cloudReports = await getCloudReports();
+          const cloudMatch = cloudReports.find(
+            (r) =>
+              r.ticker === ticker &&
+              r.analysis_date === dateStr &&
+              r.market_type === market
+          );
+          if (cloudMatch) {
+            setReport({
+              id: parseInt(cloudMatch.id.replace(/-/g, "").slice(0, 8), 16),
+              ticker: cloudMatch.ticker,
+              market_type: cloudMatch.market_type as any,
+              analysis_date: cloudMatch.analysis_date,
+              saved_at: new Date(cloudMatch.created_at),
+              result: cloudMatch.result,
+              language: cloudMatch.language,
+            });
+          }
+        }
+      } catch (err) {
+        console.error("Failed to load report for chat:", err);
+      } finally {
+        setLoadingReport(false);
+      }
+    };
+
+    loadReport();
+  }, [ticker, dateStr, market, isAuthenticated]);
+
+  // Auto-scroll to bottom when messages change
+  useEffect(() => {
+    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+  }, [messages, isLoading]);
+
+  // Focus input when loaded
+  useEffect(() => {
+    if (!loadingReport && report) {
+      setTimeout(() => inputRef.current?.focus(), 200);
+    }
+  }, [loadingReport, report]);
+
+  const handleClearChat = () => {
+    setMessages([]);
+    setError(null);
+  };
+
+  const handleSend = async () => {
+    const trimmed = input.trim();
+    if (!trimmed || isLoading || !report) return;
+
+    setError(null);
+    const userMessage: ChatMessage = { role: "user", content: trimmed };
+    setMessages((prev) => [...prev, userMessage]);
+    setInput("");
+    setIsLoading(true);
+
+    try {
+      const settings = await getApiSettingsAsync();
+
+      let chatModel = "gpt-4o-mini";
+      let apiKey = "";
+      let baseUrl = "https://api.openai.com/v1";
+
+      const providers = {
+        openai: { key: settings.openai_api_key, defaultModel: "gpt-4o-mini" },
+        anthropic: { key: settings.anthropic_api_key, defaultModel: "claude-3-5-sonnet-20241022" },
+        google: { key: settings.google_api_key, defaultModel: "gemini-2.5-flash" },
+        grok: { key: settings.grok_api_key, defaultModel: "grok-2-1212" },
+        deepseek: { key: settings.deepseek_api_key, defaultModel: "deepseek-chat" },
+        qwen: { key: settings.qwen_api_key, defaultModel: "qwen-max" },
+      };
+
+      const activeModelId = selectedModelId === "custom" ? customModel.trim() : selectedModelId;
+
+      if (selectedModelId === "auto" || !activeModelId) {
+        // Auto logic: pick the first provider with a configured key
+        for (const [, providerData] of Object.entries(providers)) {
+          if (providerData.key && providerData.key.trim() !== "") {
+            apiKey = providerData.key;
+            chatModel = providerData.defaultModel;
+            baseUrl = getBaseUrlForModel(chatModel, settings.custom_base_url);
+            break;
+          }
+        }
+        // Fall back to the custom settings if no provider key was found
+        if (settings.custom_api_key && settings.custom_base_url && !apiKey) {
+          apiKey = settings.custom_api_key;
+          baseUrl = settings.custom_base_url;
+        }
+      } else {
+        chatModel = activeModelId;
+        const modelInfo = AVAILABLE_MODELS.find(m => m.id === selectedModelId);
+        const providerName = modelInfo ?
modelInfo.provider : "custom"; + + const matchedProvider = (providers as any)[providerName]; + + if (matchedProvider && matchedProvider.key) { + apiKey = matchedProvider.key; + baseUrl = getBaseUrlForModel(chatModel, settings.custom_base_url); + } else if (settings.custom_api_key) { + apiKey = settings.custom_api_key; + baseUrl = settings.custom_base_url || "https://api.openai.com/v1"; + } + } + + if (!apiKey) { + setError(t.chat?.noApiKey || "Please configure your API key in settings first."); + setIsLoading(false); + return; + } + + const history = messages.map((m) => ({ + role: m.role, + content: m.content, + })); + + const response = await api.sendChatMessage({ + message: trimmed, + reports: report.result.reports || {}, + ticker: report.ticker, + analysis_date: report.analysis_date, + history, + model: chatModel, + api_key: apiKey, + base_url: baseUrl, + language: locale as "en" | "zh-TW", + }); + + setMessages((prev) => [ + ...prev, + { role: "assistant", content: response.reply }, + ]); + } catch (err: any) { + console.error("Chat error:", err); + const errorMsg = + err?.response?.data?.detail || + err?.message || + (t.chat?.error || "Failed to get response. Please try again."); + setError(errorMsg); + } finally { + setIsLoading(false); + } + }; + + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + handleSend(); + } + }; + + if (loadingReport) { + return ( +
+ +

{t.history?.loading || "Loading..."}

+
+ ); + } + + if (!report) { + return ( +
+ +

+ Report not found. +

+ +
+ ); + } + + const contextLabel = t.chat?.allReports || "All Reports"; + + return ( +
+ {/* Header */} +
+
+
+ +
+

+ + + {t.chat?.title || "Report Chat"} — {report.ticker} + + + {contextLabel} + +

+

+ {t.history?.analysisDate || "Date"}: {report.analysis_date} +

+
+
+
+ {messages.length > 0 && ( + + )} +
+
+
+ + {/* Messages Area */} +
+
+ {/* Empty state */} + {messages.length === 0 && !isLoading && ( +
+
+ +
+
+

+ {t.chat?.emptyState || "Ask any question about this analysis report"} +

+

+ {t.chat?.emptyHint || 'e.g. "What are the main risk factors?"'} +

+
+ {/* Quick suggestions */} +
+ {(locale === "zh-TW" + ? [ + "主要的風險因素有哪些?", + "總結這份報告的重點", + "建議的進場策略是什麼?", + "看漲和看跌的觀點有何不同?", + ] + : [ + "What are the key risk factors?", + "Summarize this report", + "What's the recommended entry strategy?", + "How do bull and bear views differ?", + ] + ).map((suggestion) => ( + + ))} +
+
+ )} + + {/* Message list */} +
+ {messages.map((msg, i) => ( +
+ {/* Avatar */} +
+ {msg.role === "user" ? ( + + ) : ( + + )} +
+ + {/* Bubble */} +
+ {msg.role === "assistant" ? ( +
+ + {msg.content} + +
+ ) : ( +

{msg.content}

+ )} +
+
+ ))} + + {/* Loading indicator */} + {isLoading && ( +
+
+ +
+
+
+ + {t.chat?.thinking || "Thinking..."} +
+
+
+ )} + + {/* Error message */} + {error && ( +
+ + {error} +
+ )} + +
+
+
+
+ + {/* Input Bar */} +
+
+ {/* Model Selector */} +
+ + + {selectedModelId === "custom" && ( + setCustomModel(e.target.value)} + placeholder="輸入模型名稱 (e.g. gpt-4)" + className="h-8 w-[180px] text-xs rounded-full border-gray-200 dark:border-gray-700 bg-white dark:bg-gray-800" + /> + )} +
+ + {/* Text Input */} +
+ setInput(e.target.value)} + onKeyDown={handleKeyDown} + placeholder={t.chat?.placeholder || "Ask about this report..."} + disabled={isLoading} + className="flex-1 text-base rounded-full border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-800 focus-visible:ring-purple-500 h-12 md:h-14 px-6 shadow-sm" + /> + +
+
+
+

+ LLM can make mistakes. Please verify important information. +

+
+
+
+ ); +} diff --git a/frontend/app/history/page.tsx b/frontend/app/history/page.tsx index 586b4dc7..6a17f647 100644 --- a/frontend/app/history/page.tsx +++ b/frontend/app/history/page.tsx @@ -35,6 +35,7 @@ import { TrendingUp, FileText, Download, + MessageCircle, } from "lucide-react"; import { getReportsByMarketType, @@ -959,11 +960,11 @@ export default function HistoryPage() { ); })()} - + + + +
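
For reference, a minimal sketch of exercising the new POST /chat endpoint from a script (not part of the diff). It assumes the backend is running at http://localhost:8000 and that the router is mounted under /api — both are deployment-specific assumptions, so adjust the URL to match your setup. Note that api_key is the user's own LLM key forwarded by the frontend, not a server-side secret.

    import httpx

    payload = {
        "message": "What are the key risk factors?",
        "reports": {"market_report": "..."},  # reports dict from a saved analysis
        "ticker": "AAPL",                     # illustrative values
        "analysis_date": "2026-03-10",
        "history": [],                        # optional prior [{role, content}] turns
        "model": "gpt-4o-mini",
        "api_key": "sk-...",                  # the user's own LLM API key
        "base_url": "https://api.openai.com/v1",
        "language": "en",
    }

    # The backend forwards the reports plus the question to the user's LLM
    # and returns a ChatResponse with a single "reply" field.
    resp = httpx.post("http://localhost:8000/api/chat", json=payload, timeout=90.0)
    resp.raise_for_status()
    print(resp.json()["reply"])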