feat: fix prompt display, add event detail modal, run buttons, and node animations

Backend:
- Extract full prompt from all LLM messages (not just first)
- Add prompt/response fields to streamed event payloads
- Improve model name extraction with multiple fallback strategies
- Add run_portfolio and run_auto streaming methods
- Wire portfolio/auto in websocket router
- New tool_result event type for tool completion

Frontend:
- Add full event detail modal with tabs (Prompt, Response, Summary, Metrics)
- Show actual prompt content in drawer instead of "Prompting unknown..."
- Add Scan, Pipeline, Portfolio, Auto buttons to control panel
- Fix node animation: completed nodes never revert to running
- Handle tool_result type for marking tool nodes as done
- Drawer events have "Full Detail →" button to open modal

Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
Agent-Logs-Url: https://github.com/aguzererler/TradingAgents/sessions/7997c579-ab7e-4071-afd0-18703a8e5618
This commit is contained in:
copilot-swe-agent[bot] 2026-03-23 08:04:53 +00:00
parent 9d2dbf4a43
commit b08ce7199e
5 changed files with 391 additions and 53 deletions

View File

@ -38,7 +38,10 @@ async def websocket_endpoint(
stream_gen = engine.run_scan(run_id, params) stream_gen = engine.run_scan(run_id, params)
elif run_type == "pipeline": elif run_type == "pipeline":
stream_gen = engine.run_pipeline(run_id, params) stream_gen = engine.run_pipeline(run_id, params)
# Add other types as they are implemented in LangGraphEngine elif run_type == "portfolio":
stream_gen = engine.run_portfolio(run_id, params)
elif run_type == "auto":
stream_gen = engine.run_auto(run_id, params)
if stream_gen: if stream_gen:
async for payload in stream_gen: async for payload in stream_gen:

View File

@ -4,13 +4,17 @@ import time
from typing import Dict, Any, AsyncGenerator from typing import Dict, Any, AsyncGenerator
from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.graph.scanner_graph import ScannerGraph from tradingagents.graph.scanner_graph import ScannerGraph
from tradingagents.graph.portfolio_graph import PortfolioGraph
from tradingagents.default_config import DEFAULT_CONFIG from tradingagents.default_config import DEFAULT_CONFIG
logger = logging.getLogger("agent_os.engine") logger = logging.getLogger("agent_os.engine")
# Maximum characters of prompt/response content to include in streamed events # Maximum characters of prompt/response content to include in the short message
_MAX_CONTENT_LEN = 300 _MAX_CONTENT_LEN = 300
# Maximum characters of prompt/response for the full fields (generous limit)
_MAX_FULL_LEN = 50_000
class LangGraphEngine: class LangGraphEngine:
"""Orchestrates LangGraph pipeline executions and streams events.""" """Orchestrates LangGraph pipeline executions and streams events."""
@ -20,6 +24,8 @@ class LangGraphEngine:
self.active_runs: Dict[str, Dict[str, Any]] = {} self.active_runs: Dict[str, Dict[str, Any]] = {}
# Track node start times per run so we can compute latency # Track node start times per run so we can compute latency
self._node_start_times: Dict[str, Dict[str, float]] = {} self._node_start_times: Dict[str, Dict[str, float]] = {}
# Track the last prompt per node so we can attach it to result events
self._node_prompts: Dict[str, Dict[str, str]] = {}
# ------------------------------------------------------------------ # ------------------------------------------------------------------
# Run helpers # Run helpers
@ -55,6 +61,7 @@ class LangGraphEngine:
yield mapped yield mapped
self._node_start_times.pop(run_id, None) self._node_start_times.pop(run_id, None)
self._node_prompts.pop(run_id, None)
logger.info("Completed SCAN run=%s", run_id) logger.info("Completed SCAN run=%s", run_id)
async def run_pipeline( async def run_pipeline(
@ -88,8 +95,76 @@ class LangGraphEngine:
yield mapped yield mapped
self._node_start_times.pop(run_id, None) self._node_start_times.pop(run_id, None)
self._node_prompts.pop(run_id, None)
logger.info("Completed PIPELINE run=%s", run_id) logger.info("Completed PIPELINE run=%s", run_id)
async def run_portfolio(
    self, run_id: str, params: Dict[str, Any]
) -> AsyncGenerator[Dict[str, Any], None]:
    """Run the portfolio manager workflow and stream mapped events.

    Mirrors the other ``run_*`` streams: ``run_id`` keys the per-run
    bookkeeping dicts; ``params`` may carry ``date`` (defaults to today)
    and ``portfolio_id`` (defaults to ``main_portfolio``).
    """
    date = params.get("date", time.strftime("%Y-%m-%d"))
    portfolio_id = params.get("portfolio_id", "main_portfolio")

    logger.info(
        "Starting PORTFOLIO run=%s portfolio=%s date=%s",
        run_id, portfolio_id, date,
    )
    yield self._system_log(
        f"Starting portfolio manager for {portfolio_id} on {date}"
    )

    runner = PortfolioGraph(config=self.config)
    state = {
        "portfolio_id": portfolio_id,
        "scan_date": date,
        "messages": [],
    }

    # Reset latency bookkeeping for this run before streaming begins.
    self._node_start_times[run_id] = {}

    async for raw_event in runner.graph.astream_events(state, version="v2"):
        if (mapped := self._map_langgraph_event(run_id, raw_event)):
            yield mapped

    # Drop per-run caches so finished runs don't accumulate state.
    self._node_start_times.pop(run_id, None)
    self._node_prompts.pop(run_id, None)
    logger.info("Completed PORTFOLIO run=%s", run_id)
async def run_auto(
    self, run_id: str, params: Dict[str, Any]
) -> AsyncGenerator[Dict[str, Any], None]:
    """Run the full auto pipeline: scan → pipeline → portfolio.

    Chains the three existing streams under derived run ids
    (``<run_id>_scan`` / ``_pipeline`` / ``_portfolio``) and re-yields
    every event they produce, interleaved with phase log markers.
    """
    date = params.get("date", time.strftime("%Y-%m-%d"))
    logger.info("Starting AUTO run=%s date=%s", run_id, date)
    yield self._system_log(f"Starting full auto workflow for {date}")

    # Pipeline analysis currently falls back to a default ticker.
    ticker = params.get("ticker", "AAPL")

    # Creating the async generators here is lazy — each phase only
    # executes once it is iterated below, so ordering is preserved.
    phases = (
        ("Phase 1/3: Running market scan…",
         self.run_scan(f"{run_id}_scan", {"date": date})),
        (f"Phase 2/3: Running analysis pipeline for {ticker}",
         self.run_pipeline(f"{run_id}_pipeline", {"ticker": ticker, "date": date})),
        ("Phase 3/3: Running portfolio manager…",
         self.run_portfolio(f"{run_id}_portfolio", {"date": date, **params})),
    )

    for label, stream in phases:
        yield self._system_log(label)
        async for event in stream:
            yield event

    logger.info("Completed AUTO run=%s", run_id)
# ------------------------------------------------------------------ # ------------------------------------------------------------------
# Event mapping # Event mapping
# ------------------------------------------------------------------ # ------------------------------------------------------------------
@ -153,6 +228,51 @@ class LangGraphEngine:
content = getattr(first_item, "content", None) content = getattr(first_item, "content", None)
return str(content) if content is not None else str(first_item) return str(content) if content is not None else str(first_item)
def _extract_all_messages_content(self, messages: Any) -> str:
"""Extract text from ALL messages in a LangGraph messages payload.
Returns the concatenated content of every message so the user can
inspect the full prompt that was sent to the LLM.
"""
if not isinstance(messages, list) or not messages:
return ""
parts: list[str] = []
items = messages
# Handle list-of-lists
if isinstance(items[0], list):
items = items[0]
for msg in items:
content = getattr(msg, "content", None)
role = getattr(msg, "type", "unknown")
text = str(content) if content is not None else str(msg)
parts.append(f"[{role}] {text}")
return "\n\n".join(parts)
def _extract_model(self, event: Dict[str, Any]) -> str:
"""Best-effort extraction of the model name from a LangGraph event."""
data = event.get("data") or {}
# 1. invocation_params (standard LangChain)
inv = data.get("invocation_params") or {}
model = inv.get("model_name") or inv.get("model") or ""
if model:
return model
# 2. Serialized kwargs (OpenRouter / ChatOpenAI)
serialized = event.get("serialized") or data.get("serialized") or {}
kwargs = serialized.get("kwargs") or {}
model = kwargs.get("model_name") or kwargs.get("model") or ""
if model:
return model
# 3. metadata.ls_model_name (LangSmith tracing)
metadata = event.get("metadata") or {}
model = metadata.get("ls_model_name") or ""
if model:
return model
return "unknown"
def _map_langgraph_event( def _map_langgraph_event(
self, run_id: str, event: Dict[str, Any] self, run_id: str, event: Dict[str, Any]
) -> Dict[str, Any] | None: ) -> Dict[str, Any] | None:
@ -162,22 +282,24 @@ class LangGraphEngine:
node_name = self._extract_node_name(event) node_name = self._extract_node_name(event)
starts = self._node_start_times.get(run_id, {}) starts = self._node_start_times.get(run_id, {})
prompts = self._node_prompts.setdefault(run_id, {})
# ------ LLM start ------ # ------ LLM start ------
if kind == "on_chat_model_start": if kind == "on_chat_model_start":
starts[node_name] = time.monotonic() starts[node_name] = time.monotonic()
# Extract the prompt being sent to the LLM # Extract the full prompt being sent to the LLM
full_prompt = ""
prompt_snippet = "" prompt_snippet = ""
messages = (event.get("data") or {}).get("messages") messages = (event.get("data") or {}).get("messages")
if messages: if messages:
raw = self._first_message_content(messages) full_prompt = self._extract_all_messages_content(messages)
if raw: prompt_snippet = self._truncate(full_prompt.replace("\n", " "))
prompt_snippet = self._truncate(raw)
model = "unknown" # Remember the full prompt so we can attach it to the result event
inv_params = (event.get("data") or {}).get("invocation_params") or {} prompts[node_name] = full_prompt
model = inv_params.get("model_name") or inv_params.get("model") or "unknown"
model = self._extract_model(event)
logger.info( logger.info(
"LLM start node=%s model=%s run=%s", node_name, model, run_id "LLM start node=%s model=%s run=%s", node_name, model, run_id
@ -191,14 +313,17 @@ class LangGraphEngine:
"agent": node_name.upper(), "agent": node_name.upper(),
"message": f"Prompting {model}" "message": f"Prompting {model}"
+ (f" | {prompt_snippet}" if prompt_snippet else ""), + (f" | {prompt_snippet}" if prompt_snippet else ""),
"prompt": full_prompt,
"metrics": {"model": model}, "metrics": {"model": model},
} }
# ------ Tool call ------ # ------ Tool call ------
elif kind == "on_tool_start": elif kind == "on_tool_start":
full_input = ""
tool_input = "" tool_input = ""
inp = (event.get("data") or {}).get("input") inp = (event.get("data") or {}).get("input")
if inp: if inp:
full_input = str(inp)[:_MAX_FULL_LEN]
tool_input = self._truncate(str(inp)) tool_input = self._truncate(str(inp))
logger.info("Tool start tool=%s node=%s run=%s", name, node_name, run_id) logger.info("Tool start tool=%s node=%s run=%s", name, node_name, run_id)
@ -211,15 +336,19 @@ class LangGraphEngine:
"agent": node_name.upper(), "agent": node_name.upper(),
"message": f"▶ Tool: {name}" "message": f"▶ Tool: {name}"
+ (f" | {tool_input}" if tool_input else ""), + (f" | {tool_input}" if tool_input else ""),
"prompt": full_input,
"metrics": {}, "metrics": {},
} }
# ------ Tool result ------ # ------ Tool result ------
elif kind == "on_tool_end": elif kind == "on_tool_end":
full_output = ""
tool_output = "" tool_output = ""
out = (event.get("data") or {}).get("output") out = (event.get("data") or {}).get("output")
if out is not None: if out is not None:
tool_output = self._truncate(self._extract_content(out)) raw = self._extract_content(out)
full_output = raw[:_MAX_FULL_LEN]
tool_output = self._truncate(raw)
logger.info("Tool end tool=%s node=%s run=%s", name, node_name, run_id) logger.info("Tool end tool=%s node=%s run=%s", name, node_name, run_id)
@ -227,10 +356,11 @@ class LangGraphEngine:
"id": f"{event['run_id']}_tool_end", "id": f"{event['run_id']}_tool_end",
"node_id": f"tool_{name}", "node_id": f"tool_{name}",
"parent_node_id": node_name, "parent_node_id": node_name,
"type": "tool", "type": "tool_result",
"agent": node_name.upper(), "agent": node_name.upper(),
"message": f"✓ Tool result: {name}" "message": f"✓ Tool result: {name}"
+ (f" | {tool_output}" if tool_output else ""), + (f" | {tool_output}" if tool_output else ""),
"response": full_output,
"metrics": {}, "metrics": {},
} }
@ -240,21 +370,30 @@ class LangGraphEngine:
usage: Dict[str, Any] = {} usage: Dict[str, Any] = {}
model = "unknown" model = "unknown"
response_snippet = "" response_snippet = ""
full_response = ""
if output is not None: if output is not None:
if hasattr(output, "usage_metadata") and output.usage_metadata: if hasattr(output, "usage_metadata") and output.usage_metadata:
usage = output.usage_metadata usage = output.usage_metadata
if hasattr(output, "response_metadata") and output.response_metadata: if hasattr(output, "response_metadata") and output.response_metadata:
model = output.response_metadata.get("model_name", model) model = output.response_metadata.get("model_name") or output.response_metadata.get("model", model)
raw = self._extract_content(output) raw = self._extract_content(output)
if raw: if raw:
full_response = raw[:_MAX_FULL_LEN]
response_snippet = self._truncate(raw) response_snippet = self._truncate(raw)
# Fall back to event-level model extraction
if model == "unknown":
model = self._extract_model(event)
latency_ms = 0 latency_ms = 0
start_t = starts.pop(node_name, None) start_t = starts.pop(node_name, None)
if start_t is not None: if start_t is not None:
latency_ms = round((time.monotonic() - start_t) * 1000) latency_ms = round((time.monotonic() - start_t) * 1000)
# Retrieve the prompt that started this LLM call
matched_prompt = prompts.pop(node_name, "")
logger.info( logger.info(
"LLM end node=%s model=%s tokens_in=%s tokens_out=%s latency=%dms run=%s", "LLM end node=%s model=%s tokens_in=%s tokens_out=%s latency=%dms run=%s",
node_name, node_name,
@ -271,6 +410,8 @@ class LangGraphEngine:
"type": "result", "type": "result",
"agent": node_name.upper(), "agent": node_name.upper(),
"message": response_snippet or "Completed.", "message": response_snippet or "Completed.",
"prompt": matched_prompt,
"response": full_response,
"metrics": { "metrics": {
"model": model, "model": model,
"tokens_in": usage.get("input_tokens", 0), "tokens_in": usage.get("input_tokens", 0),
@ -292,3 +433,11 @@ class LangGraphEngine:
async def run_pipeline_background(self, run_id: str, params: Dict[str, Any]): async def run_pipeline_background(self, run_id: str, params: Dict[str, Any]):
async for _ in self.run_pipeline(run_id, params): async for _ in self.run_pipeline(run_id, params):
pass pass
async def run_portfolio_background(self, run_id: str, params: Dict[str, Any]) -> None:
    """Fire-and-forget wrapper: drain the portfolio stream, discarding events."""
    async for _ in self.run_portfolio(run_id, params):
        pass
async def run_auto_background(self, run_id: str, params: Dict[str, Any]) -> None:
    """Fire-and-forget wrapper: drain the full auto stream, discarding events."""
    async for _ in self.run_auto(run_id, params):
        pass

View File

@ -18,8 +18,19 @@ import {
Tag, Tag,
Code, Code,
Badge, Badge,
Modal,
ModalOverlay,
ModalContent,
ModalHeader,
ModalBody,
ModalCloseButton,
Tabs,
TabList,
TabPanels,
Tab,
TabPanel,
} from '@chakra-ui/react'; } from '@chakra-ui/react';
import { LayoutDashboard, Wallet, Settings, Play, Terminal as TerminalIcon, ChevronRight, Eye } from 'lucide-react'; import { LayoutDashboard, Wallet, Settings, Play, Terminal as TerminalIcon, ChevronRight, Eye, Search, BarChart3, Bot } from 'lucide-react';
import { MetricHeader } from './components/MetricHeader'; import { MetricHeader } from './components/MetricHeader';
import { AgentGraph } from './components/AgentGraph'; import { AgentGraph } from './components/AgentGraph';
import { useAgentStream, AgentEvent } from './hooks/useAgentStream'; import { useAgentStream, AgentEvent } from './hooks/useAgentStream';
@ -30,42 +41,131 @@ const API_BASE = 'http://127.0.0.1:8088/api';
/** Return the colour token for a given event type. */ /** Return the colour token for a given event type. */
const eventColor = (type: AgentEvent['type']): string => { const eventColor = (type: AgentEvent['type']): string => {
switch (type) { switch (type) {
case 'tool': return 'purple.400'; case 'tool': return 'purple.400';
case 'result': return 'green.400'; case 'tool_result': return 'purple.300';
case 'log': return 'yellow.300'; case 'result': return 'green.400';
default: return 'cyan.400'; case 'log': return 'yellow.300';
default: return 'cyan.400';
} }
}; };
/** Return a short label badge for the event type. */ /** Return a short label badge for the event type. */
const eventLabel = (type: AgentEvent['type']): string => { const eventLabel = (type: AgentEvent['type']): string => {
switch (type) { switch (type) {
case 'thought': return '💭'; case 'thought': return '💭';
case 'tool': return '🔧'; case 'tool': return '🔧';
case 'result': return '✅'; case 'tool_result': return '✅🔧';
case 'log': return ''; case 'result': return '✅';
default: return '●'; case 'log': return '';
default: return '●';
} }
}; };
/** Short summary for terminal — no inline prompts, just agent + type. */ /** Short summary for terminal — no inline prompts, just agent + type. */
const eventSummary = (evt: AgentEvent): string => { const eventSummary = (evt: AgentEvent): string => {
switch (evt.type) { switch (evt.type) {
case 'thought': return `Thinking… (${evt.metrics?.model || 'LLM'})`; case 'thought': return `Thinking… (${evt.metrics?.model || 'LLM'})`;
case 'tool': return evt.message.startsWith('✓') ? 'Tool result received' : `Tool call: ${evt.message.replace(/^▶ Tool: /, '').split(' | ')[0]}`; case 'tool': return evt.message.startsWith('✓') ? 'Tool result received' : `Tool call: ${evt.message.replace(/^▶ Tool: /, '').split(' | ')[0]}`;
case 'result': return 'Completed'; case 'tool_result': return `Tool done: ${evt.message.replace(/^✓ Tool result: /, '').split(' | ')[0]}`;
case 'log': return evt.message; case 'result': return 'Completed';
default: return evt.type; case 'log': return evt.message;
default: return evt.type;
} }
}; };
// ─── Detail drawer for a single event ───────────────────────────────── // ─── Full Event Detail Modal ─────────────────────────────────────────
const EventDetail: React.FC<{ event: AgentEvent }> = ({ event }) => ( const EventDetailModal: React.FC<{ event: AgentEvent | null; isOpen: boolean; onClose: () => void }> = ({ event, isOpen, onClose }) => {
if (!event) return null;
return (
<Modal isOpen={isOpen} onClose={onClose} size="4xl" scrollBehavior="inside">
<ModalOverlay backdropFilter="blur(6px)" />
<ModalContent bg="slate.900" color="white" maxH="85vh" border="1px solid" borderColor="whiteAlpha.200">
<ModalCloseButton />
<ModalHeader borderBottomWidth="1px" borderColor="whiteAlpha.100">
<HStack>
<Badge colorScheme={event.type === 'result' ? 'green' : event.type === 'tool' || event.type === 'tool_result' ? 'purple' : 'cyan'} fontSize="sm">
{event.type.toUpperCase()}
</Badge>
<Badge variant="outline" fontSize="sm">{event.agent}</Badge>
<Text fontSize="sm" color="whiteAlpha.400" fontWeight="normal">{event.timestamp}</Text>
</HStack>
</ModalHeader>
<ModalBody py={4}>
<Tabs variant="soft-rounded" colorScheme="cyan" size="sm">
<TabList mb={4}>
{event.prompt && <Tab>Prompt / Request</Tab>}
{(event.response || (event.type === 'result' && event.message)) && <Tab>Response</Tab>}
<Tab>Summary</Tab>
{event.metrics && <Tab>Metrics</Tab>}
</TabList>
<TabPanels>
{event.prompt && (
<TabPanel p={0}>
<Box bg="blackAlpha.500" p={4} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100" maxH="60vh" overflowY="auto">
<Text fontSize="xs" fontFamily="mono" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900">
{event.prompt}
</Text>
</Box>
</TabPanel>
)}
{(event.response || (event.type === 'result' && event.message)) && (
<TabPanel p={0}>
<Box bg="blackAlpha.500" p={4} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100" maxH="60vh" overflowY="auto">
<Text fontSize="xs" fontFamily="mono" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900">
{event.response || event.message}
</Text>
</Box>
</TabPanel>
)}
<TabPanel p={0}>
<Box bg="blackAlpha.500" p={4} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100">
<Text fontSize="sm" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900">
{event.message}
</Text>
</Box>
</TabPanel>
{event.metrics && (
<TabPanel p={0}>
<VStack align="stretch" spacing={3}>
{event.metrics.model && event.metrics.model !== 'unknown' && (
<HStack><Text fontSize="sm" color="whiteAlpha.600" minW="80px">Model:</Text><Code colorScheme="blue" fontSize="sm">{event.metrics.model}</Code></HStack>
)}
{event.metrics.tokens_in != null && event.metrics.tokens_in > 0 && (
<HStack><Text fontSize="sm" color="whiteAlpha.600" minW="80px">Tokens In:</Text><Code>{event.metrics.tokens_in}</Code></HStack>
)}
{event.metrics.tokens_out != null && event.metrics.tokens_out > 0 && (
<HStack><Text fontSize="sm" color="whiteAlpha.600" minW="80px">Tokens Out:</Text><Code>{event.metrics.tokens_out}</Code></HStack>
)}
{event.metrics.latency_ms != null && event.metrics.latency_ms > 0 && (
<HStack><Text fontSize="sm" color="whiteAlpha.600" minW="80px">Latency:</Text><Code>{event.metrics.latency_ms}ms</Code></HStack>
)}
{event.node_id && (
<HStack><Text fontSize="sm" color="whiteAlpha.600" minW="80px">Node ID:</Text><Code fontSize="xs">{event.node_id}</Code></HStack>
)}
</VStack>
</TabPanel>
)}
</TabPanels>
</Tabs>
</ModalBody>
</ModalContent>
</Modal>
);
};
// ─── Detail card for a single event in the drawer ─────────────────────
const EventDetail: React.FC<{ event: AgentEvent; onOpenModal?: (evt: AgentEvent) => void }> = ({ event, onOpenModal }) => (
<VStack align="stretch" spacing={4}> <VStack align="stretch" spacing={4}>
<HStack> <HStack>
<Badge colorScheme="cyan">{event.type.toUpperCase()}</Badge> <Badge colorScheme={event.type === 'result' ? 'green' : event.type === 'tool' || event.type === 'tool_result' ? 'purple' : 'cyan'}>{event.type.toUpperCase()}</Badge>
<Badge variant="outline">{event.agent}</Badge> <Badge variant="outline">{event.agent}</Badge>
<Text fontSize="xs" color="whiteAlpha.400">{event.timestamp}</Text> <Text fontSize="xs" color="whiteAlpha.400">{event.timestamp}</Text>
{onOpenModal && (
<Button size="xs" variant="ghost" colorScheme="cyan" ml="auto" onClick={() => onOpenModal(event)}>
Full Detail
</Button>
)}
</HStack> </HStack>
{event.metrics?.model && event.metrics.model !== 'unknown' && ( {event.metrics?.model && event.metrics.model !== 'unknown' && (
@ -79,7 +179,7 @@ const EventDetail: React.FC<{ event: AgentEvent }> = ({ event }) => (
<Box> <Box>
<Text fontSize="xs" fontWeight="bold" color="whiteAlpha.600" mb={1}>Metrics</Text> <Text fontSize="xs" fontWeight="bold" color="whiteAlpha.600" mb={1}>Metrics</Text>
<HStack spacing={4} fontSize="sm"> <HStack spacing={4} fontSize="sm">
{event.metrics.tokens_in != null && ( {event.metrics.tokens_in != null && event.metrics.tokens_in > 0 && (
<Text>Tokens: <Code>{event.metrics.tokens_in}</Code> in / <Code>{event.metrics.tokens_out}</Code> out</Text> <Text>Tokens: <Code>{event.metrics.tokens_in}</Code> in / <Code>{event.metrics.tokens_out}</Code> out</Text>
)} )}
{event.metrics.latency_ms != null && event.metrics.latency_ms > 0 && ( {event.metrics.latency_ms != null && event.metrics.latency_ms > 0 && (
@ -89,16 +189,41 @@ const EventDetail: React.FC<{ event: AgentEvent }> = ({ event }) => (
</Box> </Box>
)} )}
<Box> {/* Show prompt if available */}
<Text fontSize="xs" fontWeight="bold" color="whiteAlpha.600" mb={1}> {event.prompt && (
{event.type === 'thought' ? 'Request / Prompt' : event.type === 'result' ? 'Response' : 'Message'} <Box>
</Text> <Text fontSize="xs" fontWeight="bold" color="whiteAlpha.600" mb={1}>Request / Prompt</Text>
<Box bg="blackAlpha.500" p={3} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100" maxH="300px" overflowY="auto"> <Box bg="blackAlpha.500" p={3} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100" maxH="200px" overflowY="auto">
<Text fontSize="xs" fontFamily="mono" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900"> <Text fontSize="xs" fontFamily="mono" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900">
{event.message} {event.prompt.length > 1000 ? event.prompt.substring(0, 1000) + '…' : event.prompt}
</Text> </Text>
</Box>
</Box> </Box>
</Box> )}
{/* Show response if available (result events) */}
{event.response && (
<Box>
<Text fontSize="xs" fontWeight="bold" color="whiteAlpha.600" mb={1}>Response</Text>
<Box bg="blackAlpha.500" p={3} borderRadius="md" border="1px solid" borderColor="green.900" maxH="200px" overflowY="auto">
<Text fontSize="xs" fontFamily="mono" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900">
{event.response.length > 1000 ? event.response.substring(0, 1000) + '…' : event.response}
</Text>
</Box>
</Box>
)}
{/* Fallback: show message if no prompt/response */}
{!event.prompt && !event.response && (
<Box>
<Text fontSize="xs" fontWeight="bold" color="whiteAlpha.600" mb={1}>Message</Text>
<Box bg="blackAlpha.500" p={3} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100" maxH="300px" overflowY="auto">
<Text fontSize="xs" fontFamily="mono" whiteSpace="pre-wrap" wordBreak="break-word" color="whiteAlpha.900">
{event.message}
</Text>
</Box>
</Box>
)}
{event.node_id && ( {event.node_id && (
<Box> <Box>
@ -110,7 +235,7 @@ const EventDetail: React.FC<{ event: AgentEvent }> = ({ event }) => (
); );
// ─── Detail drawer showing all events for a given graph node ────────── // ─── Detail drawer showing all events for a given graph node ──────────
const NodeEventsDetail: React.FC<{ nodeId: string; events: AgentEvent[] }> = ({ nodeId, events }) => { const NodeEventsDetail: React.FC<{ nodeId: string; events: AgentEvent[]; onOpenModal: (evt: AgentEvent) => void }> = ({ nodeId, events, onOpenModal }) => {
const nodeEvents = useMemo( const nodeEvents = useMemo(
() => events.filter((e) => e.node_id === nodeId), () => events.filter((e) => e.node_id === nodeId),
[events, nodeId], [events, nodeId],
@ -124,7 +249,7 @@ const NodeEventsDetail: React.FC<{ nodeId: string; events: AgentEvent[] }> = ({
<VStack align="stretch" spacing={4}> <VStack align="stretch" spacing={4}>
{nodeEvents.map((evt) => ( {nodeEvents.map((evt) => (
<Box key={evt.id} bg="whiteAlpha.50" p={3} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100"> <Box key={evt.id} bg="whiteAlpha.50" p={3} borderRadius="md" border="1px solid" borderColor="whiteAlpha.100">
<EventDetail event={evt} /> <EventDetail event={evt} onOpenModal={onOpenModal} />
</Box> </Box>
))} ))}
</VStack> </VStack>
@ -138,6 +263,10 @@ export const Dashboard: React.FC = () => {
const { events, status, clearEvents } = useAgentStream(activeRunId); const { events, status, clearEvents } = useAgentStream(activeRunId);
const { isOpen, onOpen, onClose } = useDisclosure(); const { isOpen, onOpen, onClose } = useDisclosure();
// Event detail modal state
const { isOpen: isModalOpen, onOpen: onModalOpen, onClose: onModalClose } = useDisclosure();
const [modalEvent, setModalEvent] = useState<AgentEvent | null>(null);
// What's shown in the drawer: either a single event or all events for a node // What's shown in the drawer: either a single event or all events for a node
const [drawerMode, setDrawerMode] = useState<'event' | 'node'>('event'); const [drawerMode, setDrawerMode] = useState<'event' | 'node'>('event');
const [selectedEvent, setSelectedEvent] = useState<AgentEvent | null>(null); const [selectedEvent, setSelectedEvent] = useState<AgentEvent | null>(null);
@ -150,8 +279,10 @@ export const Dashboard: React.FC = () => {
terminalEndRef.current?.scrollIntoView({ behavior: 'smooth' }); terminalEndRef.current?.scrollIntoView({ behavior: 'smooth' });
}, [events.length]); }, [events.length]);
const isRunning = isTriggering || status === 'streaming' || status === 'connecting';
const startRun = async (type: string) => { const startRun = async (type: string) => {
if (isTriggering || status === 'streaming' || status === 'connecting') return; if (isRunning) return;
setIsTriggering(true); setIsTriggering(true);
try { try {
@ -168,6 +299,12 @@ export const Dashboard: React.FC = () => {
} }
}; };
/** Open the full-screen event detail modal */
const openModal = useCallback((evt: AgentEvent) => {
setModalEvent(evt);
onModalOpen();
}, [onModalOpen]);
/** Open the drawer for a single event (terminal click). */ /** Open the drawer for a single event (terminal click). */
const openEventDetail = useCallback((evt: AgentEvent) => { const openEventDetail = useCallback((evt: AgentEvent) => {
setDrawerMode('event'); setDrawerMode('event');
@ -211,16 +348,50 @@ export const Dashboard: React.FC = () => {
<AgentGraph events={events} onNodeClick={openNodeDetail} /> <AgentGraph events={events} onNodeClick={openNodeDetail} />
{/* Floating Control Panel */} {/* Floating Control Panel */}
<HStack position="absolute" top={4} left={4} bg="blackAlpha.800" p={2} borderRadius="lg" backdropFilter="blur(10px)" border="1px solid" borderColor="whiteAlpha.200" spacing={3}> <HStack position="absolute" top={4} left={4} bg="blackAlpha.800" p={2} borderRadius="lg" backdropFilter="blur(10px)" border="1px solid" borderColor="whiteAlpha.200" spacing={2} flexWrap="wrap">
<Button <Button
size="sm" size="sm"
leftIcon={<Play size={14} />} leftIcon={<Search size={14} />}
colorScheme="cyan" colorScheme="cyan"
variant="solid" variant="solid"
onClick={() => startRun('scan')} onClick={() => startRun('scan')}
isLoading={isTriggering || status === 'connecting' || status === 'streaming'} isLoading={isRunning}
loadingText="Running…"
> >
Start Market Scan Scan
</Button>
<Button
size="sm"
leftIcon={<BarChart3 size={14} />}
colorScheme="blue"
variant="solid"
onClick={() => startRun('pipeline')}
isLoading={isRunning}
loadingText="Running…"
>
Pipeline
</Button>
<Button
size="sm"
leftIcon={<Wallet size={14} />}
colorScheme="purple"
variant="solid"
onClick={() => startRun('portfolio')}
isLoading={isRunning}
loadingText="Running…"
>
Portfolio
</Button>
<Button
size="sm"
leftIcon={<Bot size={14} />}
colorScheme="green"
variant="solid"
onClick={() => startRun('auto')}
isLoading={isRunning}
loadingText="Running…"
>
Auto
</Button> </Button>
<Divider orientation="vertical" h="20px" /> <Divider orientation="vertical" h="20px" />
<Tag size="sm" colorScheme={status === 'streaming' ? 'green' : status === 'completed' ? 'blue' : 'gray'}> <Tag size="sm" colorScheme={status === 'streaming' ? 'green' : status === 'completed' ? 'blue' : 'gray'}>
@ -290,14 +461,17 @@ export const Dashboard: React.FC = () => {
</DrawerHeader> </DrawerHeader>
<DrawerBody py={4}> <DrawerBody py={4}>
{drawerMode === 'event' && selectedEvent && ( {drawerMode === 'event' && selectedEvent && (
<EventDetail event={selectedEvent} /> <EventDetail event={selectedEvent} onOpenModal={openModal} />
)} )}
{drawerMode === 'node' && selectedNodeId && ( {drawerMode === 'node' && selectedNodeId && (
<NodeEventsDetail nodeId={selectedNodeId} events={events} /> <NodeEventsDetail nodeId={selectedNodeId} events={events} onOpenModal={openModal} />
)} )}
</DrawerBody> </DrawerBody>
</DrawerContent> </DrawerContent>
</Drawer> </Drawer>
{/* Full event detail modal */}
<EventDetailModal event={modalEvent} isOpen={isModalOpen} onClose={onModalClose} />
</Flex> </Flex>
); );
}; };

View File

@ -127,6 +127,9 @@ export const AgentGraph: React.FC<AgentGraphProps> = ({ events, onNodeClick }) =
for (const evt of newEvents) { for (const evt of newEvents) {
if (!evt.node_id || evt.node_id === '__system__') continue; if (!evt.node_id || evt.node_id === '__system__') continue;
// Determine if this event means the node is completed
const isCompleted = evt.type === 'result' || evt.type === 'tool_result';
if (!seenNodeIds.current.has(evt.node_id)) { if (!seenNodeIds.current.has(evt.node_id)) {
// New node — create it // New node — create it
seenNodeIds.current.add(evt.node_id); seenNodeIds.current.add(evt.node_id);
@ -138,7 +141,7 @@ export const AgentGraph: React.FC<AgentGraphProps> = ({ events, onNodeClick }) =
position: { x: 250, y: nodeCount.current * 150 + 50 }, position: { x: 250, y: nodeCount.current * 150 + 50 },
data: { data: {
agent: evt.agent, agent: evt.agent,
status: evt.type === 'result' ? 'completed' : 'running', status: isCompleted ? 'completed' : 'running',
metrics: evt.metrics, metrics: evt.metrics,
}, },
}); });
@ -159,8 +162,11 @@ export const AgentGraph: React.FC<AgentGraphProps> = ({ events, onNodeClick }) =
} }
} else { } else {
// Existing node — queue a status/metrics update // Existing node — queue a status/metrics update
// Never revert a completed node back to running
const prev = updatedNodeData.get(evt.node_id);
const currentlyCompleted = prev?.status === 'completed';
updatedNodeData.set(evt.node_id, { updatedNodeData.set(evt.node_id, {
status: evt.type === 'result' ? 'completed' : 'running', status: currentlyCompleted || isCompleted ? 'completed' : 'running',
metrics: evt.metrics, metrics: evt.metrics,
}); });
} }
@ -178,9 +184,11 @@ export const AgentGraph: React.FC<AgentGraphProps> = ({ events, onNodeClick }) =
prev.map((n) => { prev.map((n) => {
const patch = updatedNodeData.get(n.id); const patch = updatedNodeData.get(n.id);
if (!patch) return n; if (!patch) return n;
// Never revert a completed node back to running
const finalStatus = n.data.status === 'completed' ? 'completed' : patch.status;
return { return {
...n, ...n,
data: { ...n.data, ...patch, metrics: patch.metrics ?? n.data.metrics }, data: { ...n.data, ...patch, status: finalStatus, metrics: patch.metrics ?? n.data.metrics },
}; };
}), }),
); );

View File

@ -5,8 +5,12 @@ export interface AgentEvent {
timestamp: string; timestamp: string;
agent: string; agent: string;
tier: 'quick' | 'mid' | 'deep'; tier: 'quick' | 'mid' | 'deep';
type: 'thought' | 'tool' | 'result' | 'log' | 'system'; type: 'thought' | 'tool' | 'tool_result' | 'result' | 'log' | 'system';
message: string; message: string;
/** Full prompt text (available on thought & result events). */
prompt?: string;
/** Full response text (available on result & tool_result events). */
response?: string;
node_id?: string; node_id?: string;
parent_node_id?: string; parent_node_id?: string;
metrics?: { metrics?: {