This commit is contained in:
MarkLo127 2026-03-10 17:00:32 +08:00
parent 6e9524c777
commit 92c2a84b83
13 changed files with 872 additions and 12 deletions

View File

@ -62,6 +62,7 @@ def main():
host=args.host,
port=args.port,
reload=reload,
reload_excludes=["frontend/*", "node_modules/*", "*.pyc", ".git/*"],
log_level="info",
)

View File

@ -15,6 +15,8 @@ from backend.app.models.schemas import (
TaskCreatedResponse,
TaskStatusResponse,
DownloadRequest,
ChatRequest,
ChatResponse,
)
from backend.app.services.trading_service import TradingService
from backend.app.services.task_manager import task_manager
@ -376,3 +378,42 @@ async def download_reports(request: DownloadRequest):
}
)
@router.post("/chat", response_model=ChatResponse)
async def chat_with_report(request: ChatRequest):
    """
    Chat with the analysis report using the user's LLM.

    Forwards the user's question together with the analysis reports
    (used as LLM context) to the chat service, and returns the
    assistant's answer. Any failure is surfaced as an HTTP 500.

    Args:
        request: Chat request with message, reports context, and LLM config

    Returns:
        ChatResponse: Assistant's reply

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    # Deferred import — the chat service is only needed by this endpoint.
    from backend.app.services.chat_service import chat_with_reports

    try:
        assistant_reply = await chat_with_reports(
            message=request.message,
            reports=request.reports,
            ticker=request.ticker,
            analysis_date=request.analysis_date,
            history=request.history,
            model=request.model,
            api_key=request.api_key,
            base_url=request.base_url,
            # Default the response language to Traditional Chinese.
            language=request.language or "zh-TW",
        )
        return ChatResponse(reply=assistant_reply)
    except Exception as e:
        logger.error(f"Chat failed: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Chat failed: {str(e)}"
        )

View File

@ -183,3 +183,32 @@ class DownloadRequest(BaseModel):
def uppercase_ticker(cls, v: str) -> str:
return v.strip().upper()
# Chat Schemas
class ChatRequest(BaseModel):
    """Request model for chatting about analysis reports.

    Carries the user's question, the full analysis reports used as LLM
    context, and the caller-supplied LLM connection settings (the client
    provides its own model/key/base-url for each request).
    """
    # The question; length is bounded to keep the prompt size reasonable.
    message: str = Field(..., description="User's question about the report", min_length=1, max_length=2000)
    # Raw reports dict as produced by the analysis pipeline; flattened
    # into the system prompt server-side.
    reports: Dict[str, Any] = Field(..., description="Full analysis reports dict")
    ticker: str = Field(..., description="Stock ticker symbol")
    analysis_date: str = Field(..., description="Analysis date")
    # Prior turns, oldest first; each item is {"role": ..., "content": ...}.
    history: Optional[List[Dict[str, str]]] = Field(
        default=None,
        description="Previous conversation messages [{role, content}]"
    )
    model: str = Field(..., description="LLM model name")
    # User's own key — forwarded to the OpenAI-compatible endpoint.
    api_key: str = Field(..., description="User's LLM API key", min_length=1)
    base_url: str = Field(
        default="https://api.openai.com/v1",
        description="LLM API base URL"
    )
    # Response language; zh-TW is the default throughout the app.
    language: Optional[Literal["en", "zh-TW"]] = Field(
        default="zh-TW",
        description="Response language"
    )
class ChatResponse(BaseModel):
    """Response model for chat"""
    # Assistant's answer; rendered as markdown by the frontend chat UI.
    reply: str = Field(..., description="Assistant's answer")

View File

@ -0,0 +1,194 @@
"""
Chat service for answering questions about analysis reports
Uses the user's LLM API key to call an OpenAI-compatible endpoint.
"""
import logging
from typing import Dict, Any, List, Optional
from openai import AsyncOpenAI
logger = logging.getLogger(__name__)
# Traditional-Chinese system prompt. Placeholders {ticker}, {analysis_date}
# and {reports_text} are filled via str.format() in chat_with_reports().
SYSTEM_PROMPT_ZH = """你是 TradingAgentsX 的首席金融分析助手,擁有華爾街頂級分析師的專業素養。你的任務是基於提供的多位專業分析師(如基本面、技術面、新聞情緒等)的報告,精準、專業地回答使用者的問題。
回答指南與規則
1. 嚴守上下文所有數據與觀點必須基於提供的報告內容絕不可隨意編造或引入外部未經證實的資訊若報告中未提及請誠實告知報告中未涵蓋此細節
2. 綜合與對比當多位分析師有不同觀點時例如看漲與看跌分析師的辯論請提煉出雙方核心論點提供全方位的客觀對比而不是只偏袒一方
3. 結構化與排版使用清晰的 Markdown 格式排版善用列點粗體標示關鍵數據讓使用者能快速抓到重點
4. 開門見山直接切入要點回答問題不需要以根據提供的報告...這類多餘的廢話開頭保持自信且客觀專業的語氣
5. 明確引用在提及特定預測或論點時盡可能指出是哪一位分析師或哪一份報告提到的例如技術面報告指出...
6. 語言要求全程使用流暢具備金融專業術語的繁體中文回答
以下是本次對話的基準報告內容
=========================================
標的: {ticker}
分析日期: {analysis_date}
{reports_text}
========================================="""
# English system prompt — mirrors SYSTEM_PROMPT_ZH rule-for-rule so the
# two languages enforce the same answering contract.
SYSTEM_PROMPT_EN = """You are the Lead Financial Analysis Assistant for TradingAgentsX, possessing the expertise of a top-tier Wall Street analyst. Your task is to accurately and professionally answer user questions based on the provided reports from various specialized analysts (e.g., Fundamentals, Technicals, Sentiment, etc.).
Guidelines and Rules:
1. [Strict Adherence to Context] All data and opinions must be grounded strictly in the provided reports. Do not fabricate data or bring in unverified external information. If the reports do not contain the answer, honestly state, "The provided reports do not cover this detail."
2. [Synthesis and Contrast] When different analysts hold opposing views (e.g., Bull vs. Bear debate), distill the core arguments of both sides to provide a comprehensive and objective comparison.
3. [Structure and Readability] Use clear Markdown formatting. Utilize bullet points and bold text for key metrics so the user can quickly grasp key insights.
4. [Get to the Point] Start your answer directly and confidently without filler introductions like "Based on the provided reports...".
5. [Explicit Citations] When mentioning specific forecasts or arguments, clarify which analyst or report it originated from (e.g., "The Technical Analyst noted...").
6. [Language Constraint] Ensure all responses are in highly professional, fluent English with appropriate financial terminology.
Below are the baseline reports for this conversation:
=========================================
[Ticker]: {ticker}
[Analysis Date]: {analysis_date}
{reports_text}
========================================="""
def _flatten_reports(reports: Dict[str, Any]) -> str:
"""Flatten all reports into a single text block for context."""
sections = []
REPORT_LABELS = {
"market_report": "Market Analyst Report",
"sentiment_report": "Social Media Analyst Report",
"news_report": "News Analyst Report",
"fundamentals_report": "Fundamentals Analyst Report",
"trader_investment_plan": "Trader Investment Plan",
}
for key, label in REPORT_LABELS.items():
content = reports.get(key)
if content and isinstance(content, str):
sections.append(f"## {label}\n{content}")
# Handle nested debate states
debate_keys = {
"investment_debate_state": {
"bull_history": "Bull Researcher",
"bear_history": "Bear Researcher",
"judge_decision": "Research Manager Decision",
},
"risk_debate_state": {
"risky_history": "Aggressive Analyst",
"safe_history": "Conservative Analyst",
"neutral_history": "Neutral Analyst",
"judge_decision": "Risk Manager Decision",
},
}
for state_key, sub_keys in debate_keys.items():
state = reports.get(state_key)
if isinstance(state, dict):
for sub_key, label in sub_keys.items():
content = state.get(sub_key)
if content and isinstance(content, str):
sections.append(f"## {label}\n{content}")
return "\n\n".join(sections) if sections else "(No reports available)"
async def chat_with_reports(
    message: str,
    reports: Dict[str, Any],
    ticker: str,
    analysis_date: str,
    history: Optional[List[Dict[str, str]]],
    model: str,
    api_key: str,
    base_url: str,
    language: str = "zh-TW",
) -> str:
    """
    Send a chat message about analysis reports to the LLM.

    Builds a system prompt that embeds the flattened reports, appends the
    recent conversation history and the new user message, then calls an
    OpenAI-compatible chat-completions endpoint using the caller's own key.

    Args:
        message: User's question
        reports: Full analysis reports dict
        ticker: Stock ticker
        analysis_date: Analysis date string
        history: Previous conversation messages [{role, content}]
        model: LLM model name
        api_key: User's API key
        base_url: LLM API base URL
        language: Response language ("en" uses the English prompt;
            anything else falls back to the zh-TW prompt)

    Returns:
        Assistant's reply string

    Raises:
        Exception: with a user-friendly (zh-TW) message for common API
            failures (token limit, rate limit, invalid key, timeout);
            other errors are re-raised unchanged.
    """
    reports_text = _flatten_reports(reports)

    # Truncate reports to avoid exceeding token limits
    # (~4 chars per token, target max ~8000 tokens for context).
    MAX_REPORT_CHARS = 32000
    # Bug fix: capture the length BEFORE truncating — the previous code
    # logged len() of the already-truncated string, so the "from" value
    # in the log was always wrong.
    original_length = len(reports_text)
    if original_length > MAX_REPORT_CHARS:
        reports_text = reports_text[:MAX_REPORT_CHARS] + "\n\n...(報告內容已截斷以符合模型限制)..."
        logger.info(f"Reports truncated from {original_length} to {MAX_REPORT_CHARS} chars")

    # Choose system prompt based on language (zh-TW is the default).
    if language == "en":
        system_prompt = SYSTEM_PROMPT_EN.format(
            ticker=ticker,
            analysis_date=analysis_date,
            reports_text=reports_text,
        )
    else:
        system_prompt = SYSTEM_PROMPT_ZH.format(
            ticker=ticker,
            analysis_date=analysis_date,
            reports_text=reports_text,
        )

    # Build messages list: system prompt, then history, then the new turn.
    messages = [{"role": "system", "content": system_prompt}]

    # Add conversation history (limit to last 6 messages to control token usage)
    if history:
        recent_history = history[-6:]
        for msg in recent_history:
            messages.append({
                "role": msg.get("role", "user"),
                "content": msg.get("content", ""),
            })

    # Add current user message
    messages.append({"role": "user", "content": message})

    logger.info(f"Chat request for {ticker}: model={model}, history_len={len(history) if history else 0}, system_prompt_len={len(system_prompt)}")

    try:
        # Call LLM via async OpenAI-compatible SDK
        client = AsyncOpenAI(
            api_key=api_key,
            base_url=base_url,
            timeout=60.0,
        )
        response = await client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=0.3,
            max_tokens=2048,
        )
        # content may be None for some providers/finish reasons; coalesce.
        reply = response.choices[0].message.content or ""
        logger.info(f"Chat response for {ticker}: {len(reply)} chars")
        return reply
    except Exception as e:
        error_msg = str(e)
        logger.error(f"LLM call failed: {error_msg}", exc_info=True)
        # Map common failures to user-friendly zh-TW messages.
        # NOTE(review): the bare "token" substring check is broad and may
        # also match unrelated errors mentioning "token" — confirm intended.
        if "maximum context length" in error_msg.lower() or "token" in error_msg.lower():
            raise Exception("報告內容過長,超出模型 token 限制。請嘗試縮短問題或清除對話歷史後重試。")
        elif "rate_limit" in error_msg.lower() or "429" in error_msg:
            raise Exception("API 速率限制,請稍後再試。")
        elif "401" in error_msg or "api_key" in error_msg.lower():
            raise Exception("API Key 無效或已過期,請檢查設定。")
        elif "timeout" in error_msg.lower():
            raise Exception("請求超時,請稍後再試。")
        else:
            raise

View File

@ -143,8 +143,6 @@ export default function AnalysisResultsPage() {
);
}
const currentAnalyst = ANALYSTS.find(a => a.key === selectedAnalyst);
const currentReport = getNestedValue(analysisResult.reports, currentAnalyst?.reportKey || "");
return (
<div className="min-h-screen bg-gradient-to-br from-purple-50/30 via-pink-50/20 to-purple-50/30 dark:from-gray-950 dark:via-purple-950/40 dark:to-gray-950">
@ -253,16 +251,20 @@ export default function AnalysisResultsPage() {
{/* 分析師報告 */}
<Card className="animate-scale-up hover-lift">
<CardHeader>
<CardTitle>{analyst.label} {t.results.report}</CardTitle>
<CardDescription>
{analyst.description}
</CardDescription>
<div className="flex flex-col sm:flex-row sm:items-center sm:justify-between gap-2">
<div>
<CardTitle>{analyst.label} {t.results.report}</CardTitle>
<CardDescription>
{analyst.description}
</CardDescription>
</div>
</div>
</CardHeader>
<CardContent>
{currentReport ? (
{getNestedValue(analysisResult.reports, analyst.reportKey) ? (
<div className="prose prose-sm max-w-none dark:prose-invert animate-fade-in">
<ReactMarkdown remarkPlugins={[remarkGfm]}>
{currentReport}
{getNestedValue(analysisResult.reports, analyst.reportKey)}
</ReactMarkdown>
</div>
) : (

View File

@ -0,0 +1,510 @@
"use client";
import { useState, useEffect, useRef } from "react";
import { useRouter, useSearchParams } from "next/navigation";
import ReactMarkdown from "react-markdown";
import remarkGfm from "remark-gfm";
import { useLanguage } from "@/contexts/LanguageContext";
import { useAuth } from "@/contexts/auth-context";
import { getApiSettingsAsync } from "@/lib/storage";
import { getBaseUrlForModel } from "@/lib/api-helpers";
import { api } from "@/lib/api";
import { getReportsByMarketType, type SavedReport } from "@/lib/reports-db";
import { getCloudReports, isCloudSyncEnabled } from "@/lib/user-api";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import {
MessageCircle,
Send,
Loader2,
Bot,
User,
Sparkles,
AlertCircle,
Trash2,
ArrowLeft,
Settings2,
} from "lucide-react";
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "@/components/ui/select";
// A single turn in the in-memory chat transcript.
interface ChatMessage {
  role: "user" | "assistant";
  content: string; // plain text for user turns, markdown for assistant turns
}
// Models offered in the chat model selector.
// "auto" picks the first provider with a configured API key;
// "custom" lets the user type an arbitrary model id, sent to an
// OpenAI-compatible endpoint. `provider` keys map into the `providers`
// lookup built in handleSend().
const AVAILABLE_MODELS = [
  { id: "auto", name: "🤖 自動選擇 (Auto)", provider: "auto" },
  { id: "gpt-4o", name: "GPT-4o", provider: "openai" },
  { id: "gpt-4o-mini", name: "GPT-4o Mini", provider: "openai" },
  { id: "o1-mini", name: "o1-mini", provider: "openai" },
  { id: "o3-mini", name: "o3-mini", provider: "openai" },
  { id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet", provider: "anthropic" },
  { id: "claude-3-5-haiku-20241022", name: "Claude 3.5 Haiku", provider: "anthropic" },
  { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", provider: "google" },
  { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", provider: "google" },
  { id: "grok-2-1212", name: "Grok 2", provider: "grok" },
  { id: "deepseek-chat", name: "DeepSeek Chat", provider: "deepseek" },
  { id: "deepseek-reasoner", name: "DeepSeek Reasoner", provider: "deepseek" },
  { id: "qwen-max", name: "Qwen Max", provider: "qwen" },
  { id: "custom", name: "⚙️ 其他 (自訂模型)", provider: "custom" },
];
// Chat page for asking questions about a previously saved analysis report.
// The target report is addressed via query params (?ticker=&date=&market=);
// the conversation lives only in component state and is lost on navigation.
export default function HistoryChatPage() {
  const router = useRouter();
  const searchParams = useSearchParams();
  const { t, locale } = useLanguage();
  const { isAuthenticated } = useAuth();
  const [messages, setMessages] = useState<ChatMessage[]>([]);
  const [input, setInput] = useState("");
  const [isLoading, setIsLoading] = useState(false);
  const [error, setError] = useState<string | null>(null);
  const [report, setReport] = useState<SavedReport | null>(null);
  const [loadingReport, setLoadingReport] = useState(true);
  const [selectedModelId, setSelectedModelId] = useState<string>("auto");
  const [customModel, setCustomModel] = useState<string>("");
  const messagesEndRef = useRef<HTMLDivElement>(null);
  const inputRef = useRef<HTMLInputElement>(null);
  const ticker = searchParams.get("ticker");
  const dateStr = searchParams.get("date");
  const market = searchParams.get("market");
  // Load the specific report: local DB first, then the cloud copy when the
  // user is signed in and cloud sync is enabled.
  useEffect(() => {
    const loadReport = async () => {
      if (!ticker || !dateStr || !market) {
        setLoadingReport(false);
        return;
      }
      try {
        setLoadingReport(true);
        // Try local DB first
        const localObj = await getReportsByMarketType(market as any);
        const match = localObj.find(
          (r) => r.ticker === ticker && r.analysis_date === dateStr
        );
        if (match) {
          setReport(match);
        } else if (isAuthenticated && isCloudSyncEnabled()) {
          // Fallback to cloud
          const cloudReports = await getCloudReports();
          const cloudMatch = cloudReports.find(
            (r) =>
              r.ticker === ticker &&
              r.analysis_date === dateStr &&
              r.market_type === market
          );
          if (cloudMatch) {
            setReport({
              // Derive a numeric id from the first 8 hex chars of the cloud
              // UUID, since local SavedReport ids are numbers.
              id: parseInt(cloudMatch.id.replace(/-/g, "").slice(0, 8), 16),
              ticker: cloudMatch.ticker,
              market_type: cloudMatch.market_type as any,
              analysis_date: cloudMatch.analysis_date,
              saved_at: new Date(cloudMatch.created_at),
              result: cloudMatch.result,
              language: cloudMatch.language,
            });
          }
        }
      } catch (err) {
        // Best-effort: a load failure just leaves `report` null, which
        // renders the "Report not found" screen below.
        console.error("Failed to load report for chat:", err);
      } finally {
        setLoadingReport(false);
      }
    };
    loadReport();
  }, [ticker, dateStr, market, isAuthenticated]);
  // Auto-scroll to bottom when messages change
  useEffect(() => {
    messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  }, [messages, isLoading]);
  // Focus input when loaded
  useEffect(() => {
    if (!loadingReport && report) {
      setTimeout(() => inputRef.current?.focus(), 200);
    }
  }, [loadingReport, report]);
  const handleClearChat = () => {
    setMessages([]);
    setError(null);
  };
  // Resolve which model / API key / base URL to use from the stored
  // settings, then send the question plus prior turns to the backend.
  const handleSend = async () => {
    const trimmed = input.trim();
    if (!trimmed || isLoading || !report) return;
    setError(null);
    const userMessage: ChatMessage = { role: "user", content: trimmed };
    setMessages((prev) => [...prev, userMessage]);
    setInput("");
    setIsLoading(true);
    try {
      const settings = await getApiSettingsAsync();
      let chatModel = "gpt-4o-mini";
      let apiKey = "";
      let baseUrl = "https://api.openai.com/v1";
      // Per-provider stored keys and the default model used in auto mode.
      const providers = {
        openai: { key: settings.openai_api_key, defaultModel: "gpt-4o-mini" },
        anthropic: { key: settings.anthropic_api_key, defaultModel: "claude-3-5-sonnet-20241022" },
        google: { key: settings.google_api_key, defaultModel: "gemini-2.5-flash" },
        grok: { key: settings.grok_api_key, defaultModel: "grok-2-1212" },
        deepseek: { key: settings.deepseek_api_key, defaultModel: "deepseek-chat" },
        qwen: { key: settings.qwen_api_key, defaultModel: "qwen-max" },
      };
      const activeModelId = selectedModelId === "custom" ? customModel.trim() : selectedModelId;
      if (selectedModelId === "auto" || !activeModelId) {
        // Auto logic: Pick first available
        for (const [providerName, providerData] of Object.entries(providers)) {
          if (providerData.key && providerData.key.trim() !== "") {
            apiKey = providerData.key;
            chatModel = providerData.defaultModel;
            baseUrl = getBaseUrlForModel(chatModel, settings.custom_base_url);
            break;
          }
        }
        // Custom settings override if configured
        if (settings.custom_api_key && settings.custom_base_url && !apiKey) {
          apiKey = settings.custom_api_key;
          baseUrl = settings.custom_base_url;
        }
      } else {
        // Explicit model chosen: use its provider's key when present,
        // otherwise fall back to the custom endpoint settings.
        chatModel = activeModelId;
        const modelInfo = AVAILABLE_MODELS.find(m => m.id === selectedModelId);
        const providerName = modelInfo ? modelInfo.provider : "custom";
        const matchedProvider = (providers as any)[providerName];
        if (matchedProvider && matchedProvider.key) {
          apiKey = matchedProvider.key;
          baseUrl = getBaseUrlForModel(chatModel, settings.custom_base_url);
        } else if (settings.custom_api_key) {
          apiKey = settings.custom_api_key;
          baseUrl = settings.custom_base_url || "https://api.openai.com/v1";
        }
      }
      if (!apiKey) {
        setError(t.chat?.noApiKey || "Please configure your API key in settings first.");
        setIsLoading(false);
        return;
      }
      // All prior turns are sent; the backend trims to the most recent ones.
      const history = messages.map((m) => ({
        role: m.role,
        content: m.content,
      }));
      const response = await api.sendChatMessage({
        message: trimmed,
        reports: report.result.reports || {},
        ticker: report.ticker,
        analysis_date: report.analysis_date,
        history,
        model: chatModel,
        api_key: apiKey,
        base_url: baseUrl,
        language: locale as "en" | "zh-TW",
      });
      setMessages((prev) => [
        ...prev,
        { role: "assistant", content: response.reply },
      ]);
    } catch (err: any) {
      console.error("Chat error:", err);
      // Prefer the backend's detail message, then the JS error message,
      // then the localized generic fallback.
      const errorMsg =
        err?.response?.data?.detail ||
        err?.message ||
        (t.chat?.error || "Failed to get response. Please try again.");
      setError(errorMsg);
    } finally {
      setIsLoading(false);
    }
  };
  // Enter sends the message (the input is single-line; Shift+Enter is
  // simply ignored here).
  const handleKeyDown = (e: React.KeyboardEvent) => {
    if (e.key === "Enter" && !e.shiftKey) {
      e.preventDefault();
      handleSend();
    }
  };
  // Loading state while the report is being looked up.
  if (loadingReport) {
    return (
      <div className="min-h-screen bg-gradient-to-br from-purple-50/30 via-pink-50/20 to-purple-50/30 dark:from-gray-950 dark:via-purple-950/40 dark:to-gray-950 flex flex-col items-center justify-center">
        <Loader2 className="h-10 w-10 animate-spin text-purple-600 mb-4" />
        <p className="text-gray-500">{t.history?.loading || "Loading..."}</p>
      </div>
    );
  }
  // No matching report locally or in the cloud.
  if (!report) {
    return (
      <div className="min-h-screen bg-gradient-to-br from-purple-50/30 via-pink-50/20 to-purple-50/30 dark:from-gray-950 dark:via-purple-950/40 dark:to-gray-950 flex flex-col items-center justify-center p-6 text-center">
        <AlertCircle className="h-16 w-16 text-gray-400 mb-4" />
        <p className="text-lg text-gray-600 dark:text-gray-300 mb-6">
          Report not found.
        </p>
        <Button onClick={() => router.push("/history")}>
          <ArrowLeft className="h-4 w-4 mr-2" />
          {t.history?.title || "Back to History"}
        </Button>
      </div>
    );
  }
  const contextLabel = t.chat?.allReports || "All Reports";
  return (
    <div className="h-screen flex flex-col bg-gradient-to-br from-purple-50/30 via-pink-50/20 to-purple-50/30 dark:from-gray-950 dark:via-purple-950/40 dark:to-gray-950">
      {/* Header */}
      <div className="flex-shrink-0 px-4 py-4 md:px-8 md:py-6 border-b border-gray-200 dark:border-gray-800 bg-white/50 dark:bg-gray-900/50 backdrop-blur-md sticky top-0 z-10 shadow-sm">
        <div className="max-w-5xl mx-auto flex items-center justify-between">
          <div className="flex items-center gap-4">
            <Button
              variant="ghost"
              size="icon"
              onClick={() => router.push("/history")}
              className="text-gray-600 dark:text-gray-400 hover:text-gray-900 dark:hover:text-white"
            >
              <ArrowLeft className="h-5 w-5" />
            </Button>
            <div>
              <h1 className="flex items-center gap-2 text-xl md:text-2xl font-bold text-gray-900 dark:text-white">
                <Sparkles className="h-6 w-6 text-purple-500" />
                <span>
                  {t.chat?.title || "Report Chat"} {report.ticker}
                </span>
                <span className="hidden sm:inline text-purple-600 dark:text-purple-400 text-sm md:text-base font-normal ml-2 bg-purple-100 dark:bg-purple-900/30 px-3 py-1 rounded-full">
                  {contextLabel}
                </span>
              </h1>
              <p className="text-sm text-gray-500 dark:text-gray-400 mt-1 ml-10">
                {t.history?.analysisDate || "Date"}: {report.analysis_date}
              </p>
            </div>
          </div>
          <div className="flex items-center gap-3">
            {messages.length > 0 && (
              <Button
                variant="outline"
                size="sm"
                onClick={handleClearChat}
                className="gap-2 text-red-600 border-red-200 hover:bg-red-50 dark:text-red-400 dark:border-red-900/50 dark:hover:bg-red-900/20"
                title={t.chat?.clearChat || "Clear chat"}
              >
                <Trash2 className="h-4 w-4" />
                <span className="hidden sm:inline">
                  {t.chat?.clearChat || "Clear chat"}
                </span>
              </Button>
            )}
          </div>
        </div>
      </div>
      {/* Messages Area */}
      <div className="flex-1 overflow-y-auto px-4 py-6 md:px-8 space-y-6">
        <div className="max-w-5xl mx-auto">
          {/* Empty state */}
          {messages.length === 0 && !isLoading && (
            <div className="flex flex-col items-center justify-center min-h-[50vh] text-center text-gray-400 dark:text-gray-500 gap-6">
              <div className="w-20 h-20 rounded-full bg-gradient-to-br from-purple-100 to-pink-100 dark:from-purple-900/30 dark:to-pink-900/30 flex items-center justify-center shadow-inner">
                <Bot className="h-10 w-10 text-purple-600 dark:text-purple-400" />
              </div>
              <div>
                <p className="text-xl font-medium text-gray-700 dark:text-gray-200">
                  {t.chat?.emptyState || "Ask any question about this analysis report"}
                </p>
                <p className="text-base mt-2 text-gray-500 dark:text-gray-400">
                  {t.chat?.emptyHint || 'e.g. "What are the main risk factors?"'}
                </p>
              </div>
              {/* Quick suggestions — clicking fills the input but does not send */}
              <div className="flex flex-wrap gap-3 mt-4 justify-center max-w-2xl px-4">
                {(locale === "zh-TW"
                  ? [
                      "主要的風險因素有哪些?",
                      "總結這份報告的重點",
                      "建議的進場策略是什麼?",
                      "看漲和看跌的觀點有何不同?",
                    ]
                  : [
                      "What are the key risk factors?",
                      "Summarize this report",
                      "What's the recommended entry strategy?",
                      "How do bull and bear views differ?",
                    ]
                ).map((suggestion) => (
                  <button
                    key={suggestion}
                    onClick={() => {
                      setInput(suggestion);
                      setTimeout(() => inputRef.current?.focus(), 50);
                    }}
                    className="px-4 py-2 text-sm rounded-full border border-purple-200 dark:border-purple-800 text-purple-700 dark:text-purple-300 bg-white/50 dark:bg-gray-800/50 hover:bg-purple-50 dark:hover:bg-purple-900/50 transition-all duration-200 shadow-sm hover:shadow"
                  >
                    {suggestion}
                  </button>
                ))}
              </div>
            </div>
          )}
          {/* Message list */}
          <div className="space-y-6 pb-4">
            {messages.map((msg, i) => (
              <div
                key={i}
                className={`flex gap-4 ${msg.role === "user" ? "flex-row-reverse" : "flex-row"}`}
              >
                {/* Avatar */}
                <div
                  className={`flex-shrink-0 w-10 h-10 rounded-full flex items-center justify-center text-white text-sm shadow-sm ${
                    msg.role === "user"
                      ? "bg-gradient-to-br from-blue-500 to-cyan-500"
                      : "bg-gradient-to-br from-purple-600 to-pink-600"
                  }`}
                >
                  {msg.role === "user" ? (
                    <User className="h-5 w-5" />
                  ) : (
                    <Bot className="h-5 w-5" />
                  )}
                </div>
                {/* Bubble — assistant turns render as markdown, user turns as plain text */}
                <div
                  className={`max-w-[85%] md:max-w-[75%] rounded-2xl px-5 py-4 text-base leading-relaxed shadow-sm ${
                    msg.role === "user"
                      ? "bg-gradient-to-r from-blue-500 to-cyan-500 text-white rounded-tr-sm"
                      : "bg-white dark:bg-gray-800 border border-gray-100 dark:border-gray-700 text-gray-800 dark:text-gray-200 rounded-tl-sm"
                  }`}
                >
                  {msg.role === "assistant" ? (
                    <div className="prose prose-sm md:prose-base dark:prose-invert max-w-none [&>*:first-child]:mt-0 [&>*:last-child]:mb-0 [&_table]:text-sm">
                      <ReactMarkdown remarkPlugins={[remarkGfm]}>
                        {msg.content}
                      </ReactMarkdown>
                    </div>
                  ) : (
                    <p className="whitespace-pre-wrap">{msg.content}</p>
                  )}
                </div>
              </div>
            ))}
            {/* Loading indicator */}
            {isLoading && (
              <div className="flex gap-4">
                <div className="flex-shrink-0 w-10 h-10 rounded-full flex items-center justify-center bg-gradient-to-br from-purple-600 to-pink-600 text-white shadow-sm">
                  <Bot className="h-5 w-5 text-white animate-pulse" />
                </div>
                <div className="bg-white dark:bg-gray-800 border border-gray-100 dark:border-gray-700 rounded-2xl rounded-tl-sm px-5 py-4 shadow-sm">
                  <div className="flex items-center gap-3 text-base text-gray-500 dark:text-gray-400 font-medium">
                    <Loader2 className="h-5 w-5 animate-spin text-purple-500" />
                    <span>{t.chat?.thinking || "Thinking..."}</span>
                  </div>
                </div>
              </div>
            )}
            {/* Error message */}
            {error && (
              <div className="flex items-start gap-3 text-red-600 dark:text-red-400 text-base bg-red-50 border border-red-100 dark:border-red-900 dark:bg-red-950/30 rounded-xl p-4 shadow-sm">
                <AlertCircle className="h-5 w-5 mt-0.5 flex-shrink-0" />
                <span>{error}</span>
              </div>
            )}
            <div ref={messagesEndRef} />
          </div>
        </div>
      </div>
      {/* Input Bar */}
      <div className="flex-shrink-0 border-t border-gray-200 dark:border-gray-800 px-4 py-4 md:px-8 md:py-6 bg-white/80 dark:bg-gray-900/80 backdrop-blur-lg">
        <div className="max-w-4xl mx-auto flex flex-col gap-3">
          {/* Model Selector */}
          <div className="flex flex-wrap items-center gap-2">
            <Select value={selectedModelId} onValueChange={setSelectedModelId}>
              <SelectTrigger className="w-fit min-w-[160px] h-8 text-xs bg-white dark:bg-gray-800 rounded-full border border-gray-200 dark:border-gray-700 hover:bg-gray-50 dark:hover:bg-gray-700/50 shadow-sm transition-colors">
                <div className="flex items-center gap-2 text-gray-700 dark:text-gray-300">
                  <Settings2 className="h-3 w-3 text-purple-500" />
                  <SelectValue placeholder="選擇模型" />
                </div>
              </SelectTrigger>
              <SelectContent className="max-h-[300px]">
                {AVAILABLE_MODELS.map((m) => (
                  <SelectItem key={m.id} value={m.id} className="cursor-pointer text-xs sm:text-sm">
                    {m.name}
                  </SelectItem>
                ))}
              </SelectContent>
            </Select>
            {/* Free-form model id, only shown for the "custom" choice */}
            {selectedModelId === "custom" && (
              <Input
                value={customModel}
                onChange={(e) => setCustomModel(e.target.value)}
                placeholder="輸入模型名稱 (e.g. gpt-4)"
                className="h-8 w-[180px] text-xs rounded-full border-gray-200 dark:border-gray-700 bg-white dark:bg-gray-800"
              />
            )}
          </div>
          {/* Text Input */}
          <div className="flex gap-3 md:gap-4">
            <Input
              ref={inputRef}
              value={input}
              onChange={(e) => setInput(e.target.value)}
              onKeyDown={handleKeyDown}
              placeholder={t.chat?.placeholder || "Ask about this report..."}
              disabled={isLoading}
              className="flex-1 text-base rounded-full border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-800 focus-visible:ring-purple-500 h-12 md:h-14 px-6 shadow-sm"
            />
            <Button
              onClick={handleSend}
              disabled={!input.trim() || isLoading}
              size="icon"
              className="rounded-full bg-gradient-to-r from-purple-600 to-pink-600 hover:from-purple-700 hover:to-pink-700 h-12 w-12 md:h-14 md:w-14 flex-shrink-0 shadow-md hover:shadow-lg transition-all"
            >
              {isLoading ? (
                <Loader2 className="h-6 w-6 animate-spin text-white" />
              ) : (
                <Send className="h-6 w-6 text-white ml-1" />
              )}
            </Button>
          </div>
        </div>
        <div className="max-w-4xl mx-auto mt-2 text-center">
          <p className="text-xs text-gray-400 dark:text-gray-500">
            LLM can make mistakes. Please verify important information.
          </p>
        </div>
      </div>
    </div>
  );
}

View File

@ -35,6 +35,7 @@ import {
TrendingUp,
FileText,
Download,
MessageCircle,
} from "lucide-react";
import {
getReportsByMarketType,
@ -959,11 +960,11 @@ export default function HistoryPage() {
);
})()}
</CardContent>
<CardFooter className="flex gap-2 flex-wrap">
<CardFooter className="grid grid-cols-2 gap-2">
<Button
variant="default"
size="sm"
className="flex-1 gap-1"
className="w-full gap-1"
onClick={() => handleViewReport(report)}
>
<Eye className="h-4 w-4" />
@ -972,7 +973,7 @@ export default function HistoryPage() {
<Button
variant="outline"
size="sm"
className="gap-1"
className="w-full gap-1"
onClick={() => handleDownloadPdf(report)}
disabled={downloadingId === report.id}
>
@ -988,10 +989,21 @@ export default function HistoryPage() {
</>
)}
</Button>
<Button
variant="outline"
size="sm"
className="col-span-2 w-full gap-2 text-purple-600 dark:text-purple-400 border-purple-200 dark:border-purple-800 hover:bg-purple-50 dark:hover:bg-purple-950/50"
onClick={() => router.push(`/history/chat?ticker=${report.ticker}&date=${report.analysis_date}&market=${report.market_type}`)}
>
<MessageCircle className="h-4 w-4" />
{t.chat?.allReports || "全部報告"}
</Button>
<Button
variant="destructive"
size="sm"
className="gap-1"
className="col-span-2 w-full gap-1"
onClick={() => handleDeleteClick(report)}
>
<Trash2 className="h-4 w-4" />

View File

@ -1,7 +1,15 @@
/**
* Footer component
*/
"use client";
import { usePathname } from "next/navigation";
export function Footer() {
const pathname = usePathname();
if (pathname === "/history/chat") return null;
return (
<footer className="border-t bg-gradient-to-r from-blue-50/50 via-purple-50/50 to-pink-50/50 dark:from-gray-900/50 dark:via-purple-900/20 dark:to-blue-900/20 backdrop-blur-sm">
<div className="container mx-auto px-4 py-6">

View File

@ -12,10 +12,14 @@ import { LanguageSwitcher } from "@/components/settings/LanguageSwitcher";
import { LoginButton } from "@/components/auth/login-button";
import { Button } from "@/components/ui/button";
import { useLanguage } from "@/contexts/LanguageContext";
import { usePathname } from "next/navigation";
export function Header() {
const [mobileMenuOpen, setMobileMenuOpen] = useState(false);
const { t } = useLanguage();
const pathname = usePathname();
if (pathname === "/history/chat") return null;
return (
<header className="border-b bg-gradient-to-r from-blue-500 to-pink-500 dark:from-blue-600 dark:to-purple-600 text-white pwa-safe-header">

View File

@ -10,6 +10,8 @@ import type {
Ticker,
TaskCreatedResponse,
TaskStatusResponse,
ChatMessageRequest,
ChatMessageResponse,
} from "./types";
const apiClient = axios.create({
@ -80,4 +82,15 @@ export const api = {
return { success: false, message: "Cleanup failed silently" };
}
},
/**
* Send a chat message about analysis reports
*/
async sendChatMessage(request: ChatMessageRequest): Promise<ChatMessageResponse> {
const response = await apiClient.post<ChatMessageResponse>(
"/api/chat",
request
);
return response.data;
},
};

View File

@ -626,6 +626,20 @@ export const en = {
tradingRiskTeam: "Trading & Risk Team",
members: "members",
},
  // Chat — UI strings for the report Q&A page (/history/chat)
  chat: {
    title: "Ask About Report",
    placeholder: "Ask about this analysis report...",
    send: "Send",
    thinking: "Thinking...",
    error: "Failed to get response. Please try again.",
    emptyState: "Ask any question about this analysis report",
    emptyHint: "e.g. \"What are the main risk factors?\"",
    noApiKey: "Please configure your API key in settings first.",
    allReports: "All Reports",
    clearChat: "Clear chat",
  },
};
export type TranslationKeys = typeof en;

View File

@ -554,4 +554,18 @@ export const zhTW = {
tradingRiskTeam: "交易與風險團隊",
members: "位",
},
  // Chat — UI strings for the report Q&A page (/history/chat);
  // keys must mirror en.chat exactly.
  chat: {
    title: "報告問答",
    placeholder: "詢問有關此分析報告的問題...",
    send: "發送",
    thinking: "思考中...",
    error: "回覆失敗,請稍後再試",
    emptyState: "對此分析報告提出任何問題",
    emptyHint: "例如:「主要的風險因素有哪些?」",
    noApiKey: "請先在設定中配置您的 API Key",
    allReports: "全部報告",
    clearChat: "清除對話",
  },
};

View File

@ -127,3 +127,21 @@ export interface TaskStatusResponse {
error?: string;
completed_at?: string;
}
// Chat Types

/**
 * Request body for POST /api/chat — mirrors the backend ChatRequest schema.
 */
export interface ChatMessageRequest {
  /** User's question (backend enforces 1–2000 chars). */
  message: string;
  /**
   * Full analysis reports dict, serialized as-is to the backend.
   * `unknown` instead of `any`: still accepts any value on assignment
   * (backward compatible) but prevents unchecked reads on this side.
   */
  reports: unknown;
  ticker: string;
  analysis_date: string;
  /** Prior conversation turns, oldest first. */
  history?: { role: string; content: string }[];
  model: string;
  /** User's LLM API key, forwarded for the backend's LLM call. */
  api_key: string;
  base_url: string;
  /** Response language; backend defaults to zh-TW when omitted. */
  language?: "en" | "zh-TW";
}
/** Response body from POST /api/chat. */
export interface ChatMessageResponse {
  /** Assistant's answer; rendered as markdown by the chat UI. */
  reply: string;
}