From d0c04d4048d139a6a622619d56b5370ebf1c5ab1 Mon Sep 17 00:00:00 2001
From: MarkLo127
Date: Tue, 10 Mar 2026 17:36:15 +0800
Subject: [PATCH]

---
 backend/test_chat.py | 30 ++++++++++++
 frontend/app/api/chat/route.ts | 51 +++++++++++++++++++
 frontend/app/history/chat/page.tsx | 79 ++++++++++++++++++++++--------
 3 files changed, 140 insertions(+), 20 deletions(-)
 create mode 100644 backend/test_chat.py
 create mode 100644 frontend/app/api/chat/route.ts

diff --git a/backend/test_chat.py b/backend/test_chat.py
new file mode 100644
index 00000000..8e283c67
--- /dev/null
+++ b/backend/test_chat.py
@@ -0,0 +1,30 @@
+import asyncio
+import httpx
+import json
+
+async def test():
+    # create a large dummy report (~600 KB: 100,000 copies of "dummy ")
+    large_reports = {"market": "dummy " * 100000}
+
+    payload = {
+        "message": "test context",
+        "reports": large_reports,
+        "ticker": "NVDA",
+        "analysis_date": "2025-01-01",
+        "model": "gpt-4o-mini",
+        "api_key": "dummy_key",
+        "base_url": "https://api.openai.com/v1",
+        "language": "zh-TW"
+    }
+
+    print("Payload size:", len(json.dumps(payload)))
+
+    async with httpx.AsyncClient() as client:
+        try:
+            resp = await client.post("http://localhost:8000/api/chat", json=payload, timeout=10)
+            print("Status:", resp.status_code)
+            print("Response:", resp.text[:200])
+        except Exception as e:
+            print("Failed:", e)
+
+asyncio.run(test())
diff --git a/frontend/app/api/chat/route.ts b/frontend/app/api/chat/route.ts
new file mode 100644
index 00000000..9f6b1794
--- /dev/null
+++ b/frontend/app/api/chat/route.ts
@@ -0,0 +1,51 @@
+import { NextResponse } from "next/server";
+
+export async function POST(req: Request) {
+  try {
+    const isDev = process.env.NODE_ENV === "development";
+    const backendUrl =
+      process.env.BACKEND_URL ||
+      (isDev ?
"http://localhost:8000" : "http://backend:8000");
+
+    // Read the complete body from the request
+    const bodyText = await req.text();
+
+    console.log(`[API Route] Proxying /api/chat to ${backendUrl}/api/chat (${bodyText.length} bytes)`);
+
+    // Use native fetch to proxy the request to the backend.
+    // This bypasses the Next.js next.config.ts rewrites http-proxy,
+    // which has known bugs with large POST bodies and timeouts in standalone mode.
+    const response = await fetch(`${backendUrl}/api/chat`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: bodyText,
+      // @ts-ignore - AbortSignal.timeout may be missing from the configured TS lib; the runtime check below guards the call
+      signal: AbortSignal.timeout ? AbortSignal.timeout(180_000) : undefined, // 3-minute request timeout
+    });
+
+    const data = await response.text();
+
+    if (!response.ok) {
+      console.error(`[API Route] Backend returned ${response.status}:`, data);
+      try {
+        const json = JSON.parse(data);
+        return NextResponse.json(json, { status: response.status });
+      } catch (e) {
+        return NextResponse.json(
+          { detail: `Backend error: ${response.status}` },
+          { status: response.status }
+        );
+      }
+    }
+
+    return NextResponse.json(JSON.parse(data));
+  } catch (error: any) {
+    console.error("[API Route] Proxy error:", error);
+    return NextResponse.json(
+      { detail: `Failed to connect to backend: ${error.message}` },
+      { status: 500 }
+    );
+  }
+}
diff --git a/frontend/app/history/chat/page.tsx b/frontend/app/history/chat/page.tsx
index 28eaa455..4464d23b 100644
--- a/frontend/app/history/chat/page.tsx
+++ b/frontend/app/history/chat/page.tsx
@@ -4,6 +4,7 @@ import { useState, useEffect, useRef, Suspense } from "react";
 import { useRouter, useSearchParams } from "next/navigation";
 import ReactMarkdown from "react-markdown";
 import remarkGfm from "remark-gfm";
+import Image from "next/image";
 import { useLanguage } from "@/contexts/LanguageContext";
 import { useAuth } from "@/contexts/auth-context";
 import { getApiSettingsAsync } from
"@/lib/storage"; @@ -40,20 +41,48 @@ interface ChatMessage { } const AVAILABLE_MODELS = [ - { id: "auto", name: "πŸ€– θ‡ͺ動選擇 (Auto)", provider: "auto" }, - { id: "gpt-4o", name: "GPT-4o", provider: "openai" }, - { id: "gpt-4o-mini", name: "GPT-4o Mini", provider: "openai" }, - { id: "o1-mini", name: "o1-mini", provider: "openai" }, - { id: "o3-mini", name: "o3-mini", provider: "openai" }, - { id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet", provider: "anthropic" }, - { id: "claude-3-5-haiku-20241022", name: "Claude 3.5 Haiku", provider: "anthropic" }, - { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", provider: "google" }, - { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", provider: "google" }, - { id: "grok-2-1212", name: "Grok 2", provider: "grok" }, - { id: "deepseek-chat", name: "DeepSeek Chat", provider: "deepseek" }, - { id: "deepseek-reasoner", name: "DeepSeek Reasoner", provider: "deepseek" }, - { id: "qwen-max", name: "Qwen Max", provider: "qwen" }, - { id: "custom", name: "βš™οΈ ε…Άδ»– (θ‡ͺθ¨‚ζ¨‘εž‹)", provider: "custom" }, + // OpenAI + { id: "gpt-5.2-2025-12-11", name: "GPT-5.2", provider: "openai", logo: "/logos/openai.svg" }, + { id: "gpt-5.1", name: "GPT-5.1", provider: "openai", logo: "/logos/openai.svg" }, + { id: "gpt-5-mini", name: "GPT-5 Mini", provider: "openai", logo: "/logos/openai.svg" }, + { id: "gpt-5-nano", name: "GPT-5 Nano", provider: "openai", logo: "/logos/openai.svg" }, + { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", provider: "openai", logo: "/logos/openai.svg" }, + { id: "gpt-4.1-nano", name: "GPT-4.1 Nano", provider: "openai", logo: "/logos/openai.svg" }, + { id: "o4-mini", name: "o4-mini", provider: "openai", logo: "/logos/openai.svg" }, + + // Anthropic + { id: "claude-sonnet-4-5-20250929", name: "Claude Sonnet 4.5", provider: "anthropic", logo: "/logos/claude-color.svg" }, + { id: "claude-haiku-4-5-20251001", name: "Claude Haiku 4.5", provider: "anthropic", logo: "/logos/claude-color.svg" }, + { id: 
"claude-sonnet-4-20250514", name: "Claude Sonnet 4", provider: "anthropic", logo: "/logos/claude-color.svg" }, + { id: "claude-3-haiku-20240307", name: "Claude 3 Haiku", provider: "anthropic", logo: "/logos/claude-color.svg" }, + + // Google + { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", provider: "google", logo: "/logos/gemini-color.svg" }, + { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", provider: "google", logo: "/logos/gemini-color.svg" }, + { id: "gemini-2.5-flash-lite", name: "Gemini 2.5 Flash Lite", provider: "google", logo: "/logos/gemini-color.svg" }, + { id: "gemini-2.0-flash", name: "Gemini 2.0 Flash", provider: "google", logo: "/logos/gemini-color.svg" }, + { id: "gemini-2.0-flash-lite", name: "Gemini 2.0 Flash Lite", provider: "google", logo: "/logos/gemini-color.svg" }, + + // Grok + { id: "grok-4-1-fast-reasoning", name: "Grok 4.1 Fast Reasoning", provider: "grok", logo: "/logos/grok.svg" }, + { id: "grok-4-1-fast-non-reasoning", name: "Grok 4.1 Fast Non Reasoning", provider: "grok", logo: "/logos/grok.svg" }, + { id: "grok-4-fast-reasoning", name: "Grok 4 Fast Reasoning", provider: "grok", logo: "/logos/grok.svg" }, + { id: "grok-4-fast-non-reasoning", name: "Grok 4 Fast Non Reasoning", provider: "grok", logo: "/logos/grok.svg" }, + { id: "grok-4-0709", name: "Grok 4", provider: "grok", logo: "/logos/grok.svg" }, + { id: "grok-3", name: "Grok 3", provider: "grok", logo: "/logos/grok.svg" }, + { id: "grok-3-mini", name: "Grok 3 Mini", provider: "grok", logo: "/logos/grok.svg" }, + + // DeepSeek + { id: "deepseek-reasoner", name: "DeepSeek Reasoner", provider: "deepseek", logo: "/logos/deepseek-color.svg" }, + { id: "deepseek-chat", name: "DeepSeek Chat", provider: "deepseek", logo: "/logos/deepseek-color.svg" }, + + // Qwen + { id: "qwen3-max", name: "Qwen 3 Max", provider: "qwen", logo: "/logos/qwen-color.svg" }, + { id: "qwen-plus", name: "Qwen Plus", provider: "qwen", logo: "/logos/qwen-color.svg" }, + { id: "qwen-flash", name: "Qwen 
Flash", provider: "qwen", logo: "/logos/qwen-color.svg" },
+
+  // Custom
+  { id: "custom", name: "Other (θ‡ͺθ¨‚ζ¨‘εž‹)", provider: "custom", logo: null },
 ];

 function HistoryChatContent() {
@@ -69,7 +98,8 @@ function HistoryChatContent() {
   const [report, setReport] = useState(null);
   const [loadingReport, setLoadingReport] = useState(true);

-  const [selectedModelId, setSelectedModelId] = useState("auto");
+  // Default to GPT-5 Mini
+  const [selectedModelId, setSelectedModelId] = useState("gpt-5-mini");
   const [customModel, setCustomModel] = useState("");

   const messagesEndRef = useRef(null);
@@ -173,8 +203,8 @@ function HistoryChatContent() {
     const activeModelId = selectedModelId === "custom" ? customModel.trim() : selectedModelId;

-    if (selectedModelId === "auto" || !activeModelId) {
-      // Auto logic: Pick first available
+    if (!activeModelId) {
+      // Fallback when no model id resolved ("custom" selected but left blank): use the first provider with a non-empty API key
       for (const [providerName, providerData] of Object.entries(providers)) {
         if (providerData.key && providerData.key.trim() !== "") {
           apiKey = providerData.key;
@@ -449,16 +479,25 @@ function HistoryChatContent() {

             {/* Model Selector */}