refactor(chat): execution graph optimize (#873)

Co-authored-by: Haze <hazeone@users.noreply.github.com>
This commit is contained in:
Haze
2026-04-19 19:36:33 +08:00
committed by GitHub
Unverified
parent 2f03aa1fad
commit 1b2dccee6e
24 changed files with 1444 additions and 536 deletions

View File

@@ -13,8 +13,6 @@
"noLogs": "(No logs available yet)", "noLogs": "(No logs available yet)",
"toolbar": { "toolbar": {
"refresh": "Refresh chat", "refresh": "Refresh chat",
"showThinking": "Show thinking",
"hideThinking": "Hide thinking",
"currentAgent": "Talking to {{agent}}" "currentAgent": "Talking to {{agent}}"
}, },
"taskPanel": { "taskPanel": {
@@ -34,19 +32,12 @@
} }
}, },
"executionGraph": { "executionGraph": {
"eyebrow": "Conversation Run",
"title": "Execution Graph", "title": "Execution Graph",
"status": {
"active": "Active",
"latest": "Latest",
"previous": "Previous"
},
"branchLabel": "branch", "branchLabel": "branch",
"userTrigger": "User Trigger", "thinkingLabel": "Thinking",
"userTriggerHint": "Triggered by the user message above",
"agentRun": "{{agent}} execution", "agentRun": "{{agent}} execution",
"agentReply": "Assistant Reply", "collapsedSummary": "{{toolCount}} tool calls · {{processCount}} process messages",
"agentReplyHint": "Resolved in the assistant reply below" "collapseAction": "Collapse execution graph"
}, },
"composer": { "composer": {
"attachFiles": "Attach files", "attachFiles": "Attach files",

View File

@@ -13,8 +13,6 @@
"noLogs": "(ログはまだありません)", "noLogs": "(ログはまだありません)",
"toolbar": { "toolbar": {
"refresh": "チャットを更新", "refresh": "チャットを更新",
"showThinking": "思考を表示",
"hideThinking": "思考を非表示",
"currentAgent": "現在の会話相手: {{agent}}" "currentAgent": "現在の会話相手: {{agent}}"
}, },
"taskPanel": { "taskPanel": {
@@ -34,19 +32,12 @@
} }
}, },
"executionGraph": { "executionGraph": {
"eyebrow": "会話実行",
"title": "実行グラフ", "title": "実行グラフ",
"status": {
"active": "進行中",
"latest": "直近",
"previous": "履歴"
},
"branchLabel": "branch", "branchLabel": "branch",
"userTrigger": "ユーザー入力", "thinkingLabel": "考え中",
"userTriggerHint": "上のユーザーメッセージがトリガーです",
"agentRun": "{{agent}} の実行", "agentRun": "{{agent}} の実行",
"agentReply": "アシスタント返信", "collapsedSummary": "ツール呼び出し {{toolCount}} 件 · プロセスメッセージ {{processCount}} 件",
"agentReplyHint": "結果は下のアシスタント返信に反映されます" "collapseAction": "実行グラフを折りたたむ"
}, },
"composer": { "composer": {
"attachFiles": "ファイルを添付", "attachFiles": "ファイルを添付",

View File

@@ -13,8 +13,6 @@
"noLogs": "(Журналы ещё недоступны)", "noLogs": "(Журналы ещё недоступны)",
"toolbar": { "toolbar": {
"refresh": "Обновить чат", "refresh": "Обновить чат",
"showThinking": "Показать размышления",
"hideThinking": "Скрыть размышления",
"currentAgent": "Общение с {{agent}}" "currentAgent": "Общение с {{agent}}"
}, },
"taskPanel": { "taskPanel": {
@@ -34,19 +32,12 @@
} }
}, },
"executionGraph": { "executionGraph": {
"eyebrow": "Выполнение в чате",
"title": "Граф выполнения", "title": "Граф выполнения",
"status": {
"active": "Активно",
"latest": "Последнее",
"previous": "Предыдущее"
},
"branchLabel": "ветвь", "branchLabel": "ветвь",
"userTrigger": "Триггер пользователя", "thinkingLabel": "Думаю",
"userTriggerHint": "Запущен пользовательским сообщением выше",
"agentRun": "Выполнение {{agent}}", "agentRun": "Выполнение {{agent}}",
"agentReply": "Ответ ассистента", "collapsedSummary": "Вызовов инструментов: {{toolCount}} · Промежуточных сообщений: {{processCount}}",
"agentReplyHint": "Разрешено в ответе ассистента ниже" "collapseAction": "Свернуть граф выполнения"
}, },
"composer": { "composer": {
"attachFiles": "Прикрепить файлы", "attachFiles": "Прикрепить файлы",

View File

@@ -13,8 +13,6 @@
"noLogs": "(暂无日志)", "noLogs": "(暂无日志)",
"toolbar": { "toolbar": {
"refresh": "刷新聊天", "refresh": "刷新聊天",
"showThinking": "显示思考过程",
"hideThinking": "隐藏思考过程",
"currentAgent": "当前对话对象:{{agent}}" "currentAgent": "当前对话对象:{{agent}}"
}, },
"taskPanel": { "taskPanel": {
@@ -34,19 +32,12 @@
} }
}, },
"executionGraph": { "executionGraph": {
"eyebrow": "对话执行",
"title": "执行关系图", "title": "执行关系图",
"status": {
"active": "执行中",
"latest": "最近一次",
"previous": "历史"
},
"branchLabel": "分支", "branchLabel": "分支",
"userTrigger": "用户触发", "thinkingLabel": "思考中",
"userTriggerHint": "对应上方这条用户消息",
"agentRun": "{{agent}} 执行", "agentRun": "{{agent}} 执行",
"agentReply": "助手回复", "collapsedSummary": "{{toolCount}} 个工具调用,{{processCount}} 条过程消息",
"agentReplyHint": "结果体现在下方这条助手回复里" "collapseAction": "收起执行关系图"
}, },
"composer": { "composer": {
"attachFiles": "添加文件", "attachFiles": "添加文件",

View File

@@ -114,7 +114,7 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
useEffect(() => { useEffect(() => {
if (textareaRef.current) { if (textareaRef.current) {
textareaRef.current.style.height = 'auto'; textareaRef.current.style.height = 'auto';
textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 200)}px`; textareaRef.current.style.height = `${Math.min(textareaRef.current.scrollHeight, 240)}px`;
} }
}, [input]); }, [input]);
@@ -407,33 +407,54 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
</div> </div>
)} )}
{/* Input Row */} {/* Input Container */}
<div className={`relative bg-white dark:bg-card rounded-[28px] shadow-sm border p-1.5 transition-all ${dragOver ? 'border-primary ring-1 ring-primary' : 'border-black/10 dark:border-white/10'}`}> <div className={`relative bg-white dark:bg-card rounded-2xl shadow-sm border px-3 pt-2.5 pb-1.5 transition-all ${dragOver ? 'border-primary ring-1 ring-primary' : 'border-black/10 dark:border-white/10'}`}>
{selectedTarget && ( {selectedTarget && (
<div className="px-2.5 pt-2 pb-1"> <div className="pb-1.5">
<button <button
type="button" type="button"
onClick={() => setTargetAgentId(null)} onClick={() => setTargetAgentId(null)}
className="inline-flex items-center gap-1.5 rounded-full border border-primary/20 bg-primary/5 px-3 py-1 text-[13px] font-medium text-foreground transition-colors hover:bg-primary/10" className="inline-flex items-center gap-1.5 rounded-lg border border-primary/20 bg-primary/5 px-2.5 py-1 text-[13px] font-medium text-foreground transition-colors hover:bg-primary/10"
title={t('composer.clearTarget')} title={t('composer.clearTarget')}
> >
<span>{t('composer.targetChip', { agent: selectedTarget.name })}</span> <span>{t('composer.targetChip', { agent: selectedTarget.name })}</span>
<X className="h-3.5 w-3.5 text-muted-foreground" /> <X className="h-3 w-3 text-muted-foreground" />
</button> </button>
</div> </div>
)} )}
<div className="flex items-end gap-1.5"> {/* Text Row — flush-left */}
<Textarea
ref={textareaRef}
value={input}
onChange={(e) => setInput(e.target.value)}
onKeyDown={handleKeyDown}
onCompositionStart={() => {
isComposingRef.current = true;
}}
onCompositionEnd={() => {
isComposingRef.current = false;
}}
onPaste={handlePaste}
placeholder={disabled ? t('composer.gatewayDisconnectedPlaceholder') : ''}
disabled={disabled}
data-testid="chat-composer-input"
className="min-h-[48px] max-h-[240px] resize-none border-0 focus-visible:ring-0 focus-visible:ring-offset-0 shadow-none bg-transparent p-0 text-[15px] placeholder:text-muted-foreground/60 leading-relaxed"
rows={1}
/>
{/* Action Row — icons on their own line */}
<div className="mt-1.5 flex items-center gap-1">
{/* Attach Button */} {/* Attach Button */}
<Button <Button
variant="ghost" variant="ghost"
size="icon" size="icon"
className="shrink-0 h-10 w-10 rounded-full text-muted-foreground hover:bg-black/5 dark:hover:bg-white/10 hover:text-foreground transition-colors" className="shrink-0 h-8 w-8 rounded-lg text-muted-foreground hover:bg-black/5 dark:hover:bg-white/10 hover:text-foreground transition-colors"
onClick={pickFiles} onClick={pickFiles}
disabled={disabled || sending} disabled={disabled || sending}
title={t('composer.attachFiles')} title={t('composer.attachFiles')}
> >
<Paperclip className="h-4 w-4" /> <Paperclip className="h-3.5 w-3.5" />
</Button> </Button>
{showAgentPicker && ( {showAgentPicker && (
@@ -442,14 +463,14 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
variant="ghost" variant="ghost"
size="icon" size="icon"
className={cn( className={cn(
'h-10 w-10 rounded-full text-muted-foreground hover:bg-black/5 dark:hover:bg-white/10 hover:text-foreground transition-colors', 'h-8 w-8 rounded-lg text-muted-foreground hover:bg-black/5 dark:hover:bg-white/10 hover:text-foreground transition-colors',
(pickerOpen || selectedTarget) && 'bg-primary/10 text-primary hover:bg-primary/20' (pickerOpen || selectedTarget) && 'bg-primary/10 text-primary hover:bg-primary/20'
)} )}
onClick={() => setPickerOpen((open) => !open)} onClick={() => setPickerOpen((open) => !open)}
disabled={disabled || sending} disabled={disabled || sending}
title={t('composer.pickAgent')} title={t('composer.pickAgent')}
> >
<AtSign className="h-4 w-4" /> <AtSign className="h-3.5 w-3.5" />
</Button> </Button>
{pickerOpen && ( {pickerOpen && (
<div className="absolute left-0 bottom-full z-20 mb-2 w-72 overflow-hidden rounded-2xl border border-black/10 bg-white p-1.5 shadow-xl dark:border-white/10 dark:bg-card"> <div className="absolute left-0 bottom-full z-20 mb-2 w-72 overflow-hidden rounded-2xl border border-black/10 bg-white p-1.5 shadow-xl dark:border-white/10 dark:bg-card">
@@ -475,35 +496,13 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
</div> </div>
)} )}
{/* Textarea */} {/* Send Button — pushed to the right */}
<div className="flex-1 relative">
<Textarea
ref={textareaRef}
value={input}
onChange={(e) => setInput(e.target.value)}
onKeyDown={handleKeyDown}
onCompositionStart={() => {
isComposingRef.current = true;
}}
onCompositionEnd={() => {
isComposingRef.current = false;
}}
onPaste={handlePaste}
placeholder={disabled ? t('composer.gatewayDisconnectedPlaceholder') : ''}
disabled={disabled}
data-testid="chat-composer-input"
className="min-h-[40px] max-h-[200px] resize-none border-0 focus-visible:ring-0 focus-visible:ring-offset-0 shadow-none bg-transparent py-2.5 px-2 text-[15px] placeholder:text-muted-foreground/60 leading-relaxed"
rows={1}
/>
</div>
{/* Send Button */}
<Button <Button
onClick={sending ? handleStop : handleSend} onClick={sending ? handleStop : handleSend}
disabled={sending ? !canStop : !canSend} disabled={sending ? !canStop : !canSend}
size="icon" size="icon"
data-testid="chat-composer-send" data-testid="chat-composer-send"
className={`shrink-0 h-10 w-10 rounded-full transition-colors ${ className={`ml-auto shrink-0 h-8 w-8 rounded-lg transition-colors ${
(sending || canSend) (sending || canSend)
? 'bg-black/5 dark:bg-white/10 text-foreground hover:bg-black/10 dark:hover:bg-white/20' ? 'bg-black/5 dark:bg-white/10 text-foreground hover:bg-black/10 dark:hover:bg-white/20'
: 'text-muted-foreground/50 hover:bg-transparent bg-transparent' : 'text-muted-foreground/50 hover:bg-transparent bg-transparent'
@@ -512,9 +511,9 @@ export function ChatInput({ onSend, onStop, disabled = false, sending = false, i
title={sending ? t('composer.stop') : t('composer.send')} title={sending ? t('composer.stop') : t('composer.send')}
> >
{sending ? ( {sending ? (
<Square className="h-4 w-4" fill="currentColor" /> <Square className="h-3.5 w-3.5" fill="currentColor" />
) : ( ) : (
<SendHorizontal className="h-[18px] w-[18px]" strokeWidth={2} /> <SendHorizontal className="h-4 w-4" strokeWidth={2} />
)} )}
</Button> </Button>
</div> </div>

View File

@@ -16,9 +16,17 @@ import { extractText, extractThinking, extractImages, extractToolUse, formatTime
interface ChatMessageProps { interface ChatMessageProps {
message: RawMessage; message: RawMessage;
showThinking: boolean; textOverride?: string;
suppressToolCards?: boolean; suppressToolCards?: boolean;
suppressProcessAttachments?: boolean; suppressProcessAttachments?: boolean;
/**
* When true, hides the assistant text bubble (and any thinking block that
* would be shown above it). Used when the message's text is being folded
* into an ExecutionGraphCard as a narration step, to prevent the same text
* from appearing both inside the graph and as an orphan bubble in the chat
* stream.
*/
suppressAssistantText?: boolean;
isStreaming?: boolean; isStreaming?: boolean;
streamingTools?: Array<{ streamingTools?: Array<{
id?: string; id?: string;
@@ -41,21 +49,27 @@ function imageSrc(img: ExtractedImage): string | null {
export const ChatMessage = memo(function ChatMessage({ export const ChatMessage = memo(function ChatMessage({
message, message,
showThinking, textOverride,
suppressToolCards = false, suppressToolCards = false,
suppressProcessAttachments = false, suppressProcessAttachments = false,
suppressAssistantText = false,
isStreaming = false, isStreaming = false,
streamingTools = [], streamingTools = [],
}: ChatMessageProps) { }: ChatMessageProps) {
const isUser = message.role === 'user'; const isUser = message.role === 'user';
const role = typeof message.role === 'string' ? message.role.toLowerCase() : ''; const role = typeof message.role === 'string' ? message.role.toLowerCase() : '';
const isToolResult = role === 'toolresult' || role === 'tool_result'; const isToolResult = role === 'toolresult' || role === 'tool_result';
const text = extractText(message); const text = textOverride ?? extractText(message);
const hasText = text.trim().length > 0; // When text is folded into an ExecutionGraphCard, treat the message as
const thinking = extractThinking(message); // having no text for rendering purposes. Keeping this behind a flag (vs
// blanking `text` outright) lets future hover affordances still read the
// original content without surfacing the bubble.
const hideAssistantText = suppressAssistantText && !isUser;
const hasText = !hideAssistantText && text.trim().length > 0;
const visibleThinkingRaw = extractThinking(message);
const visibleThinking = hideAssistantText ? null : visibleThinkingRaw;
const images = extractImages(message); const images = extractImages(message);
const tools = extractToolUse(message); const tools = extractToolUse(message);
const visibleThinking = showThinking ? thinking : null;
const visibleTools = suppressToolCards ? [] : tools; const visibleTools = suppressToolCards ? [] : tools;
const shouldHideProcessAttachments = suppressProcessAttachments const shouldHideProcessAttachments = suppressProcessAttachments
&& (hasText || !!visibleThinking || images.length > 0 || visibleTools.length > 0); && (hasText || !!visibleThinking || images.length > 0 || visibleTools.length > 0);

View File

@@ -1,10 +1,10 @@
/** /**
* Chat Toolbar * Chat Toolbar
* Session selector, new session, refresh, and thinking toggle. * Session selector, new session, and refresh.
* Rendered in the Header when on the Chat page. * Rendered in the Header when on the Chat page.
*/ */
import { useMemo } from 'react'; import { useMemo } from 'react';
import { RefreshCw, Brain, Bot } from 'lucide-react'; import { RefreshCw, Bot } from 'lucide-react';
import { Button } from '@/components/ui/button'; import { Button } from '@/components/ui/button';
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'; import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip';
import { useChatStore } from '@/stores/chat'; import { useChatStore } from '@/stores/chat';
@@ -15,8 +15,6 @@ import { useTranslation } from 'react-i18next';
export function ChatToolbar() { export function ChatToolbar() {
const refresh = useChatStore((s) => s.refresh); const refresh = useChatStore((s) => s.refresh);
const loading = useChatStore((s) => s.loading); const loading = useChatStore((s) => s.loading);
const showThinking = useChatStore((s) => s.showThinking);
const toggleThinking = useChatStore((s) => s.toggleThinking);
const currentAgentId = useChatStore((s) => s.currentAgentId); const currentAgentId = useChatStore((s) => s.currentAgentId);
const agents = useAgentsStore((s) => s.agents); const agents = useAgentsStore((s) => s.agents);
const { t } = useTranslation('chat'); const { t } = useTranslation('chat');
@@ -48,26 +46,6 @@ export function ChatToolbar() {
<p>{t('toolbar.refresh')}</p> <p>{t('toolbar.refresh')}</p>
</TooltipContent> </TooltipContent>
</Tooltip> </Tooltip>
{/* Thinking Toggle */}
<Tooltip>
<TooltipTrigger asChild>
<Button
variant="ghost"
size="icon"
className={cn(
'h-8 w-8',
showThinking && 'bg-primary/10 text-primary',
)}
onClick={toggleThinking}
>
<Brain className="h-4 w-4" />
</Button>
</TooltipTrigger>
<TooltipContent>
<p>{showThinking ? t('toolbar.hideThinking') : t('toolbar.showThinking')}</p>
</TooltipContent>
</Tooltip>
</div> </div>
); );
} }

View File

@@ -1,16 +1,32 @@
import { useState } from 'react'; import { useState } from 'react';
import { ArrowDown, ArrowUp, Bot, CheckCircle2, ChevronDown, ChevronRight, CircleDashed, GitBranch, Sparkles, Wrench, XCircle } from 'lucide-react'; import { CheckCircle2, ChevronDown, ChevronRight, CircleDashed, GitBranch, MessageSquare, Wrench, XCircle } from 'lucide-react';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import { cn } from '@/lib/utils'; import { cn } from '@/lib/utils';
import type { TaskStep } from './task-visualization'; import type { TaskStep } from './task-visualization';
interface ExecutionGraphCardProps { interface ExecutionGraphCardProps {
agentLabel: string; agentLabel: string;
sessionLabel: string;
steps: TaskStep[]; steps: TaskStep[];
active: boolean; active: boolean;
onJumpToTrigger?: () => void; /**
onJumpToReply?: () => void; * When provided, the card becomes fully controlled: the parent owns the
* expand state (e.g. to persist across remounts) and toggling goes through
* `onExpandedChange`. When omitted, the card manages its own local state.
*/
expanded?: boolean;
onExpandedChange?: (expanded: boolean) => void;
}
const TOOL_ROW_EXTRA_INDENT_PX = 8;
function AnimatedDots({ className }: { className?: string }) {
return (
<span className={cn('flex items-center gap-0.5 leading-none text-muted-foreground', className)} aria-hidden="true">
<span className="inline-block animate-bounce [animation-delay:0ms]">.</span>
<span className="inline-block animate-bounce [animation-delay:150ms]">.</span>
<span className="inline-block animate-bounce [animation-delay:300ms]">.</span>
</span>
);
} }
function GraphStatusIcon({ status }: { status: TaskStep['status'] }) { function GraphStatusIcon({ status }: { status: TaskStep['status'] }) {
@@ -23,45 +39,107 @@ function StepDetailCard({ step }: { step: TaskStep }) {
const { t } = useTranslation('chat'); const { t } = useTranslation('chat');
const [expanded, setExpanded] = useState(false); const [expanded, setExpanded] = useState(false);
const hasDetail = !!step.detail; const hasDetail = !!step.detail;
// Narration steps (intermediate pure-text assistant messages folded from
// the chat stream) are rendered without a label/status pill: the message
// text IS the primary content.
const isNarration = step.kind === 'message';
const isTool = step.kind === 'tool';
const isThinking = step.kind === 'thinking';
const showRunningDots = isTool && step.status === 'running';
const hideStatusText = isTool && step.status === 'completed';
const detailPreview = step.detail?.replace(/\s+/g, ' ').trim();
const canExpand = hasDetail;
const usePlainExpandedDetail = isTool || isThinking;
const displayLabel = isThinking ? t('executionGraph.thinkingLabel') : step.label;
return ( return (
<div className="min-w-0 flex-1 rounded-xl border border-black/10 bg-white/40 px-3 py-2 dark:border-white/10 dark:bg-white/[0.03]"> <div
className={cn(
'min-w-0 flex-1 text-muted-foreground',
isTool || isNarration
? 'px-0 py-0'
: 'rounded-xl border border-black/10 bg-white/40 px-3 py-2 dark:border-white/10 dark:bg-white/[0.03]',
)}
>
<button <button
type="button" type="button"
className={cn('flex w-full items-start gap-2 text-left', hasDetail ? 'cursor-pointer' : 'cursor-default')} className={cn(
'flex w-full gap-2 text-left',
isTool ? 'items-center' : 'items-start',
canExpand ? 'cursor-pointer' : 'cursor-default',
)}
onClick={() => { onClick={() => {
if (!hasDetail) return; if (!canExpand) return;
setExpanded((value) => !value); setExpanded((value) => !value);
}} }}
> >
<div className="min-w-0 flex-1"> <div className="min-w-0 flex-1">
<div className="flex items-center gap-2"> {!isNarration && (
<p className="text-sm font-medium text-foreground">{step.label}</p> <div className="flex min-w-0 items-center gap-2">
<span className="rounded-full bg-black/5 px-2 py-0.5 text-[10px] font-medium uppercase tracking-wide text-muted-foreground dark:bg-white/10"> <p className="shrink-0 text-sm font-medium text-muted-foreground">{displayLabel}</p>
{t(`taskPanel.stepStatus.${step.status}`)} {isTool && detailPreview && !expanded && (
</span> <p className="min-w-0 truncate text-[12px] leading-4 text-muted-foreground/80">
{step.depth > 1 && ( {detailPreview}
<span className="rounded-full bg-primary/10 px-2 py-0.5 text-[10px] font-medium uppercase tracking-wide text-primary"> </p>
{t('executionGraph.branchLabel')} )}
</span> {!hideStatusText && !showRunningDots && (
)} <span className="rounded-full bg-black/5 px-2 py-0.5 text-[10px] font-medium uppercase tracking-wide text-muted-foreground dark:bg-white/10">
</div> {t(`taskPanel.stepStatus.${step.status}`)}
{step.detail && !expanded && ( </span>
<p className="mt-1 text-[12px] leading-5 text-muted-foreground line-clamp-2">{step.detail}</p> )}
{showRunningDots && (
<AnimatedDots className="text-[14px]" />
)}
{step.depth > 1 && (
<span className="rounded-full bg-black/5 px-2 py-0.5 text-[10px] font-medium uppercase tracking-wide text-muted-foreground dark:bg-white/10">
{t('executionGraph.branchLabel')}
</span>
)}
</div>
)}
{step.detail && !expanded && !isTool && (
<p
className={cn(
'text-muted-foreground',
isThinking
? 'mt-0.5 text-[12px] leading-5 line-clamp-1'
: isNarration
? 'text-[13px] leading-6 text-muted-foreground line-clamp-2'
: 'mt-0.5 text-[12px] leading-5 line-clamp-2',
)}
>
{step.detail}
</p>
)} )}
</div> </div>
{hasDetail && ( {canExpand && (
<span className="mt-0.5 shrink-0 text-muted-foreground"> <span className="mt-0.5 shrink-0 text-muted-foreground">
{expanded ? <ChevronDown className="h-4 w-4" /> : <ChevronRight className="h-4 w-4" />} {expanded ? <ChevronDown className="h-4 w-4" /> : <ChevronRight className="h-4 w-4" />}
</span> </span>
)} )}
</button> </button>
{step.detail && expanded && ( {step.detail && expanded && canExpand && (
usePlainExpandedDetail ? (
<pre
className={cn(
'mt-0.5 whitespace-pre-wrap text-[12px] leading-5 text-muted-foreground',
isTool ? 'break-all' : 'break-words',
)}
>
{step.detail}
</pre>
) : (
<div className="mt-3 rounded-lg border border-black/10 bg-black/[0.03] px-3 py-2 dark:border-white/10 dark:bg-white/[0.03]"> <div className="mt-3 rounded-lg border border-black/10 bg-black/[0.03] px-3 py-2 dark:border-white/10 dark:bg-white/[0.03]">
<pre className="whitespace-pre-wrap break-all text-[12px] leading-5 text-muted-foreground"> <pre
className={cn(
'whitespace-pre-wrap text-[12px] leading-5',
isNarration ? 'text-muted-foreground' : 'break-all text-muted-foreground',
)}
>
{step.detail} {step.detail}
</pre> </pre>
</div> </div>
)
)} )}
</div> </div>
); );
@@ -69,118 +147,147 @@ function StepDetailCard({ step }: { step: TaskStep }) {
export function ExecutionGraphCard({ export function ExecutionGraphCard({
agentLabel, agentLabel,
sessionLabel,
steps, steps,
active, active,
onJumpToTrigger, expanded: controlledExpanded,
onJumpToReply, onExpandedChange,
}: ExecutionGraphCardProps) { }: ExecutionGraphCardProps) {
const { t } = useTranslation('chat'); const { t } = useTranslation('chat');
// Active runs should stay expanded by default so the user can follow the
// execution live. Once the run completes, the default state returns to
// collapsed. Explicit user toggles remain controlled by the parent override.
const [uncontrolledExpanded, setUncontrolledExpanded] = useState(active);
const [prevActive, setPrevActive] = useState(active);
if (prevActive !== active) {
setPrevActive(active);
if (controlledExpanded == null && uncontrolledExpanded !== active) {
setUncontrolledExpanded(active);
}
}
const isControlled = controlledExpanded != null;
const expanded = isControlled ? controlledExpanded : uncontrolledExpanded;
const setExpanded = (next: boolean) => {
if (!isControlled) setUncontrolledExpanded(next);
onExpandedChange?.(next);
};
const toolCount = steps.filter((step) => step.kind === 'tool').length;
const processCount = steps.length - toolCount;
const shouldShowTrailingThinking = active;
if (!expanded) {
return (
<button
type="button"
data-testid="chat-execution-graph"
data-collapsed="true"
onClick={() => setExpanded(true)}
className="group flex w-full items-center gap-2 rounded-lg px-2 py-1.5 text-left text-[12px] text-muted-foreground transition-colors hover:bg-black/5 hover:text-muted-foreground dark:hover:bg-white/5"
>
<ChevronRight className="h-3.5 w-3.5 shrink-0 transition-transform group-hover:translate-x-0.5" />
<span className="truncate">
{t('executionGraph.collapsedSummary', { toolCount, processCount })}
</span>
</button>
);
}
return ( return (
<div <div
data-testid="chat-execution-graph" data-testid="chat-execution-graph"
className="w-full rounded-2xl border border-black/10 bg-[#f5f1e8]/70 px-4 py-4 shadow-sm dark:border-white/10 dark:bg-white/[0.04]" data-collapsed="false"
className="w-full px-0 py-0 text-muted-foreground"
> >
<div className="flex items-start justify-between gap-3"> <button
<div> type="button"
<p className="text-[11px] font-semibold uppercase tracking-[0.18em] text-muted-foreground/70"> data-testid="chat-execution-graph-collapse"
{t('executionGraph.eyebrow')} onClick={() => setExpanded(false)}
</p> className="group flex w-full items-center gap-2 rounded-lg px-2 py-1.5 text-left text-[12px] text-muted-foreground transition-colors hover:bg-black/5 hover:text-muted-foreground dark:hover:bg-white/5"
<h3 className="mt-1 text-base font-semibold text-foreground">{t('executionGraph.title')}</h3> aria-label={t('executionGraph.collapseAction')}
<p className="mt-1 text-[12px] text-muted-foreground"> title={t('executionGraph.collapseAction')}
{agentLabel} · {sessionLabel} >
</p> <ChevronRight className="h-3.5 w-3.5 shrink-0 rotate-90" />
</div> <span className="truncate">{t('executionGraph.title')}</span>
<span </button>
className={cn(
'rounded-full px-2.5 py-1 text-[11px] font-medium',
active ? 'bg-primary/10 text-primary' : 'bg-black/5 text-foreground/70 dark:bg-white/10 dark:text-foreground/70',
)}
>
{active ? t('executionGraph.status.active') : t('executionGraph.status.previous')}
</span>
</div>
<div className="mt-4 space-y-3"> <div className="mt-0 px-0 py-0">
<button <div className="mt-0.5 flex items-center gap-0.5" style={{ marginLeft: `${TOOL_ROW_EXTRA_INDENT_PX}px` }}>
type="button" <div className="flex w-6 shrink-0 justify-center">
data-testid="chat-execution-jump-trigger" <div className="flex h-6 w-6 items-center justify-center text-muted-foreground">
onClick={onJumpToTrigger} <GitBranch className="h-3.5 w-3.5 shrink-0 text-muted-foreground" />
className="flex items-center gap-2 text-[12px] text-muted-foreground hover:text-foreground transition-colors"
>
<ArrowUp className="h-3.5 w-3.5" />
<span>{t('executionGraph.userTriggerHint')}</span>
</button>
<div className="pl-4">
<div className="ml-4 h-4 w-px bg-border" />
</div>
<div className="flex gap-3">
<div className="flex w-8 shrink-0 justify-center">
<div className="flex h-8 w-8 items-center justify-center rounded-full bg-primary/10 text-primary">
<Bot className="h-4 w-4" />
</div> </div>
</div> </div>
<div className="min-w-0 flex-1 rounded-xl border border-primary/15 bg-primary/5 px-3 py-2"> <div className="min-w-0 flex-1">
<div className="flex items-center gap-2 text-sm font-medium text-foreground"> <span className="truncate text-sm font-medium text-muted-foreground">
<GitBranch className="h-4 w-4 text-primary" /> {t('executionGraph.agentRun', { agent: agentLabel })}
<span>{t('executionGraph.agentRun', { agent: agentLabel })}</span> </span>
</div>
</div> </div>
</div> </div>
{steps.map((step, index) => ( {steps.map((step) => {
<div key={step.id}> const alignedIndentOffset = (
step.kind === 'tool'
|| step.kind === 'message'
|| step.kind === 'thinking'
) ? TOOL_ROW_EXTRA_INDENT_PX : 0;
const rowMarginLeft = (Math.max(step.depth - 1, 0) * 24) + alignedIndentOffset;
return (
<div key={step.id} className="mt-0.5">
<div <div
className="pl-4" className="pl-3"
style={{ marginLeft: `${Math.max(step.depth - 1, 0) * 24}px` }} style={{ marginLeft: `${rowMarginLeft}px` }}
> >
<div className="ml-4 h-4 w-px bg-border" /> <div className="ml-3 h-1 w-px bg-border" />
</div> </div>
<div <div
className="flex gap-3" className="flex items-start gap-0.5"
data-testid="chat-execution-step" data-testid="chat-execution-step"
style={{ marginLeft: `${Math.max(step.depth - 1, 0) * 24}px` }} style={{ marginLeft: `${rowMarginLeft}px` }}
> >
<div className="flex w-8 shrink-0 justify-center"> <div className="flex w-6 shrink-0 justify-center">
<div className="relative flex items-center justify-center"> <div className="relative flex items-center justify-center">
{step.depth > 1 && ( {step.depth > 1 && (
<div className="absolute -left-4 top-1/2 h-px w-4 -translate-y-1/2 bg-border" /> <div className="absolute -left-3 top-1/2 h-px w-3 -translate-y-1/2 bg-border" />
)} )}
<div <div
className={cn( className={cn(
'flex h-8 w-8 items-center justify-center rounded-full', 'flex h-6 w-6 items-center justify-center text-muted-foreground',
step.status === 'running' && 'bg-primary/10 text-primary',
step.status === 'completed' && 'bg-emerald-500/10 text-emerald-600 dark:text-emerald-400',
step.status === 'error' && 'bg-destructive/10 text-destructive',
)} )}
> >
{step.kind === 'thinking' ? <Sparkles className="h-4 w-4" /> : step.kind === 'tool' ? <Wrench className="h-4 w-4" /> : <GraphStatusIcon status={step.status} />} {step.kind === 'thinking'
? <AnimatedDots className="text-[14px]" />
: step.kind === 'tool'
? <Wrench className="h-3.5 w-3.5" />
: step.kind === 'message'
? <MessageSquare className="h-3.5 w-3.5" />
: <GraphStatusIcon status={step.status} />}
</div> </div>
</div> </div>
</div> </div>
<StepDetailCard step={step} /> <StepDetailCard step={step} />
</div> </div>
{index === steps.length - 1 && (
<>
<div className="pl-4">
<div className="ml-4 h-4 w-px bg-border" />
</div>
<button
type="button"
data-testid="chat-execution-jump-reply"
onClick={onJumpToReply}
className="flex items-center gap-2 pl-11 text-[12px] text-muted-foreground hover:text-foreground transition-colors"
>
<ArrowDown className="h-3.5 w-3.5" />
<span>{t('executionGraph.agentReplyHint')}</span>
</button>
</>
)}
</div> </div>
))} )})}
{shouldShowTrailingThinking && (
<div className="mt-0.5">
<div className="pl-3" style={{ marginLeft: `${TOOL_ROW_EXTRA_INDENT_PX}px` }}>
<div className="ml-3 h-1 w-px bg-border" />
</div>
<div
className="flex items-center gap-0.5"
data-testid="chat-execution-step-thinking-trailing"
style={{ marginLeft: `${TOOL_ROW_EXTRA_INDENT_PX}px` }}
>
<div className="w-6 shrink-0" />
<div className="min-w-0 flex-1 text-sm text-muted-foreground">
<span className="font-medium">{t('executionGraph.thinkingLabel')}</span>
<AnimatedDots className="ml-1 inline-flex text-[14px]" />
</div>
</div>
</div>
)}
</div> </div>
</div> </div>
); );

View File

@@ -4,7 +4,7 @@
* via gateway:rpc IPC. Session selector, thinking toggle, and refresh * via gateway:rpc IPC. Session selector, thinking toggle, and refresh
* are in the toolbar; messages render with markdown + streaming. * are in the toolbar; messages render with markdown + streaming.
*/ */
import { useEffect, useState } from 'react'; import { useEffect, useMemo, useState } from 'react';
import { AlertCircle, Loader2, Sparkles } from 'lucide-react'; import { AlertCircle, Loader2, Sparkles } from 'lucide-react';
import { useChatStore, type RawMessage } from '@/stores/chat'; import { useChatStore, type RawMessage } from '@/stores/chat';
import { useGatewayStore } from '@/stores/gateway'; import { useGatewayStore } from '@/stores/gateway';
@@ -15,13 +15,46 @@ import { ChatMessage } from './ChatMessage';
import { ChatInput } from './ChatInput'; import { ChatInput } from './ChatInput';
import { ExecutionGraphCard } from './ExecutionGraphCard'; import { ExecutionGraphCard } from './ExecutionGraphCard';
import { ChatToolbar } from './ChatToolbar'; import { ChatToolbar } from './ChatToolbar';
import { extractImages, extractText, extractThinking, extractToolUse } from './message-utils'; import { extractImages, extractText, extractThinking, extractToolUse, stripProcessMessagePrefix } from './message-utils';
import { deriveTaskSteps, parseSubagentCompletionInfo } from './task-visualization'; import { deriveTaskSteps, findReplyMessageIndex, parseSubagentCompletionInfo, type TaskStep } from './task-visualization';
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
import { cn } from '@/lib/utils'; import { cn } from '@/lib/utils';
import { useStickToBottomInstant } from '@/hooks/use-stick-to-bottom-instant'; import { useStickToBottomInstant } from '@/hooks/use-stick-to-bottom-instant';
import { useMinLoading } from '@/hooks/use-min-loading'; import { useMinLoading } from '@/hooks/use-min-loading';
type GraphStepCacheEntry = {
steps: ReturnType<typeof deriveTaskSteps>;
agentLabel: string;
sessionLabel: string;
segmentEnd: number;
replyIndex: number | null;
triggerIndex: number;
};
type UserRunCard = {
triggerIndex: number;
replyIndex: number | null;
active: boolean;
agentLabel: string;
sessionLabel: string;
segmentEnd: number;
steps: TaskStep[];
messageStepTexts: string[];
streamingReplyText: string | null;
};
function getPrimaryMessageStepTexts(steps: TaskStep[]): string[] {
return steps
.filter((step) => step.kind === 'message' && step.parentId === 'agent-run' && !!step.detail)
.map((step) => step.detail!);
}
// Keep the last non-empty execution-graph snapshot per session/run outside
// React state so `loadHistory` refreshes can still fall back to the previous
// steps without tripping React's set-state-in-effect lint rule.
const graphStepCacheStore = new Map<string, Record<string, GraphStepCacheEntry>>();
const streamingTimestampStore = new Map<string, number>();
export function Chat() { export function Chat() {
const { t } = useTranslation('chat'); const { t } = useTranslation('chat');
const gatewayStatus = useGatewayStore((s) => s.status); const gatewayStatus = useGatewayStore((s) => s.status);
@@ -34,7 +67,6 @@ export function Chat() {
const loading = useChatStore((s) => s.loading); const loading = useChatStore((s) => s.loading);
const sending = useChatStore((s) => s.sending); const sending = useChatStore((s) => s.sending);
const error = useChatStore((s) => s.error); const error = useChatStore((s) => s.error);
const showThinking = useChatStore((s) => s.showThinking);
const streamingMessage = useChatStore((s) => s.streamingMessage); const streamingMessage = useChatStore((s) => s.streamingMessage);
const streamingTools = useChatStore((s) => s.streamingTools); const streamingTools = useChatStore((s) => s.streamingTools);
const pendingFinal = useChatStore((s) => s.pendingFinal); const pendingFinal = useChatStore((s) => s.pendingFinal);
@@ -46,8 +78,14 @@ export function Chat() {
const cleanupEmptySession = useChatStore((s) => s.cleanupEmptySession); const cleanupEmptySession = useChatStore((s) => s.cleanupEmptySession);
const [childTranscripts, setChildTranscripts] = useState<Record<string, RawMessage[]>>({}); const [childTranscripts, setChildTranscripts] = useState<Record<string, RawMessage[]>>({});
// Persistent per-run override for the Execution Graph's expanded/collapsed
const [streamingTimestamp, setStreamingTimestamp] = useState<number>(0); // state. Keyed by a stable run id (trigger message id, or a fallback of
// `${sessionKey}:${triggerIdx}`) so user toggles survive the `loadHistory`
// refresh that runs after every final event — otherwise the card would
// remount and reset. `undefined` values mean "user hasn't toggled, let the
// card pick a default from its own `active` prop."
const [graphExpandedOverrides, setGraphExpandedOverrides] = useState<Record<string, boolean>>({});
const graphStepCache: Record<string, GraphStepCacheEntry> = graphStepCacheStore.get(currentSessionKey) ?? {};
const minLoading = useMinLoading(loading && messages.length > 0); const minLoading = useMinLoading(loading && messages.length > 0);
const { contentRef, scrollRef } = useStickToBottomInstant(currentSessionKey); const { contentRef, scrollRef } = useStickToBottomInstant(currentSessionKey);
@@ -117,30 +155,33 @@ export function Chat() {
}; };
}, [messages, childTranscripts]); }, [messages, childTranscripts]);
// Update timestamp when sending starts
useEffect(() => {
if (sending && streamingTimestamp === 0) {
// eslint-disable-next-line react-hooks/set-state-in-effect
setStreamingTimestamp(Date.now() / 1000);
} else if (!sending && streamingTimestamp !== 0) {
setStreamingTimestamp(0);
}
}, [sending, streamingTimestamp]);
// Gateway not running block has been completely removed so the UI always renders.
const streamMsg = streamingMessage && typeof streamingMessage === 'object' const streamMsg = streamingMessage && typeof streamingMessage === 'object'
? streamingMessage as unknown as { role?: string; content?: unknown; timestamp?: number } ? streamingMessage as unknown as { role?: string; content?: unknown; timestamp?: number }
: null; : null;
const streamTimestamp = typeof streamMsg?.timestamp === 'number' ? streamMsg.timestamp : 0;
useEffect(() => {
if (!sending) {
streamingTimestampStore.delete(currentSessionKey);
return;
}
if (!streamingTimestampStore.has(currentSessionKey)) {
streamingTimestampStore.set(currentSessionKey, streamTimestamp || Date.now() / 1000);
}
}, [currentSessionKey, sending, streamTimestamp]);
const streamingTimestamp = sending
? (streamingTimestampStore.get(currentSessionKey) ?? streamTimestamp)
: 0;
const streamText = streamMsg ? extractText(streamMsg) : (typeof streamingMessage === 'string' ? streamingMessage : ''); const streamText = streamMsg ? extractText(streamMsg) : (typeof streamingMessage === 'string' ? streamingMessage : '');
const hasStreamText = streamText.trim().length > 0; const hasStreamText = streamText.trim().length > 0;
const streamThinking = streamMsg ? extractThinking(streamMsg) : null; const streamThinking = streamMsg ? extractThinking(streamMsg) : null;
const hasStreamThinking = showThinking && !!streamThinking && streamThinking.trim().length > 0; const hasStreamThinking = !!streamThinking && streamThinking.trim().length > 0;
const streamTools = streamMsg ? extractToolUse(streamMsg) : []; const streamTools = streamMsg ? extractToolUse(streamMsg) : [];
const hasStreamTools = streamTools.length > 0; const hasStreamTools = streamTools.length > 0;
const streamImages = streamMsg ? extractImages(streamMsg) : []; const streamImages = streamMsg ? extractImages(streamMsg) : [];
const hasStreamImages = streamImages.length > 0; const hasStreamImages = streamImages.length > 0;
const hasStreamToolStatus = streamingTools.length > 0; const hasStreamToolStatus = streamingTools.length > 0;
const hasRunningStreamToolStatus = streamingTools.some((tool) => tool.status === 'running');
const shouldRenderStreaming = sending && (hasStreamText || hasStreamThinking || hasStreamTools || hasStreamImages || hasStreamToolStatus); const shouldRenderStreaming = sending && (hasStreamText || hasStreamThinking || hasStreamTools || hasStreamImages || hasStreamToolStatus);
const hasAnyStreamContent = hasStreamText || hasStreamThinking || hasStreamTools || hasStreamImages || hasStreamToolStatus; const hasAnyStreamContent = hasStreamText || hasStreamThinking || hasStreamTools || hasStreamImages || hasStreamToolStatus;
@@ -155,76 +196,236 @@ export function Chat() {
} }
} }
const userRunCards = messages.flatMap((message, idx) => { // Indices of intermediate assistant process messages that are represented
// in the ExecutionGraphCard (narration text and/or thinking). We suppress
// them from the chat stream so they don't appear duplicated below the graph.
const foldedNarrationIndices = new Set<number>();
const userRunCards: UserRunCard[] = messages.flatMap((message, idx) => {
if (message.role !== 'user' || subagentCompletionInfos[idx]) return []; if (message.role !== 'user' || subagentCompletionInfos[idx]) return [];
const runKey = message.id
? `msg-${message.id}`
: `${currentSessionKey}:trigger-${idx}`;
const nextUserIndex = nextUserMessageIndexes[idx]; const nextUserIndex = nextUserMessageIndexes[idx];
const segmentEnd = nextUserIndex === -1 ? messages.length : nextUserIndex; const segmentEnd = nextUserIndex === -1 ? messages.length : nextUserIndex;
const segmentMessages = messages.slice(idx + 1, segmentEnd); const segmentMessages = messages.slice(idx + 1, segmentEnd);
const replyIndexOffset = segmentMessages.findIndex((candidate) => candidate.role === 'assistant');
const replyIndex = replyIndexOffset === -1 ? null : idx + 1 + replyIndexOffset;
const completionInfos = subagentCompletionInfos const completionInfos = subagentCompletionInfos
.slice(idx + 1, segmentEnd) .slice(idx + 1, segmentEnd)
.filter((value): value is NonNullable<typeof value> => value != null); .filter((value): value is NonNullable<typeof value> => value != null);
const isLatestOpenRun = nextUserIndex === -1 && (sending || pendingFinal || hasAnyStreamContent); const isLatestOpenRun = nextUserIndex === -1 && (sending || pendingFinal || hasAnyStreamContent);
let steps = deriveTaskSteps({ const replyIndexOffset = findReplyMessageIndex(segmentMessages, isLatestOpenRun);
messages: segmentMessages, const replyIndex = replyIndexOffset === -1 ? null : idx + 1 + replyIndexOffset;
streamingMessage: isLatestOpenRun ? streamingMessage : null,
streamingTools: isLatestOpenRun ? streamingTools : [],
sending: isLatestOpenRun ? sending : false,
pendingFinal: isLatestOpenRun ? pendingFinal : false,
showThinking,
});
for (const completion of completionInfos) { const buildSteps = (omitLastStreamingMessageSegment: boolean): TaskStep[] => {
const childMessages = childTranscripts[completion.sessionId]; let builtSteps = deriveTaskSteps({
if (!childMessages || childMessages.length === 0) continue; messages: segmentMessages,
const branchRootId = `subagent:${completion.sessionId}`; streamingMessage: isLatestOpenRun ? streamingMessage : null,
const childSteps = deriveTaskSteps({ streamingTools: isLatestOpenRun ? streamingTools : [],
messages: childMessages, omitLastStreamingMessageSegment: isLatestOpenRun ? omitLastStreamingMessageSegment : false,
streamingMessage: null, });
streamingTools: [],
sending: false,
pendingFinal: false,
showThinking,
}).map((step) => ({
...step,
id: `${completion.sessionId}:${step.id}`,
depth: step.depth + 1,
parentId: branchRootId,
}));
steps = [ for (const completion of completionInfos) {
...steps, const childMessages = childTranscripts[completion.sessionId];
{ if (!childMessages || childMessages.length === 0) continue;
id: branchRootId, const branchRootId = `subagent:${completion.sessionId}`;
label: `${completion.agentId} subagent`, const childSteps = deriveTaskSteps({
status: 'completed', messages: childMessages,
kind: 'system' as const, streamingMessage: null,
detail: completion.sessionKey, streamingTools: [],
depth: 1, }).map((step) => ({
parentId: 'agent-run', ...step,
}, id: `${completion.sessionId}:${step.id}`,
...childSteps, depth: step.depth + 1,
]; parentId: branchRootId,
}));
builtSteps = [
...builtSteps,
{
id: branchRootId,
label: `${completion.agentId} subagent`,
status: 'completed',
kind: 'system' as const,
detail: completion.sessionKey,
depth: 1,
parentId: 'agent-run',
},
...childSteps,
];
}
return builtSteps;
};
const rawStreamingReplyCandidate = isLatestOpenRun
&& pendingFinal
&& (hasStreamText || hasStreamImages)
&& streamTools.length === 0
&& !hasRunningStreamToolStatus;
let steps = buildSteps(rawStreamingReplyCandidate);
let streamingReplyText: string | null = null;
if (rawStreamingReplyCandidate) {
const trimmedReplyText = stripProcessMessagePrefix(streamText, getPrimaryMessageStepTexts(steps));
const hasReplyText = trimmedReplyText.trim().length > 0;
if (hasReplyText || hasStreamImages) {
streamingReplyText = trimmedReplyText;
} else {
steps = buildSteps(false);
}
} }
if (steps.length === 0) return [];
const segmentAgentId = currentAgentId; const segmentAgentId = currentAgentId;
const segmentAgentLabel = agents.find((agent) => agent.id === segmentAgentId)?.name || segmentAgentId; const segmentAgentLabel = agents.find((agent) => agent.id === segmentAgentId)?.name || segmentAgentId;
const segmentSessionLabel = sessionLabels[currentSessionKey] || currentSessionKey; const segmentSessionLabel = sessionLabels[currentSessionKey] || currentSessionKey;
if (steps.length === 0) {
if (isLatestOpenRun && streamingReplyText == null) {
return [{
triggerIndex: idx,
replyIndex,
active: true,
agentLabel: segmentAgentLabel,
sessionLabel: segmentSessionLabel,
segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
steps: [],
messageStepTexts: [],
streamingReplyText: null,
}];
}
const cached = graphStepCache[runKey];
if (!cached) return [];
return [{
triggerIndex: idx,
replyIndex: cached.replyIndex,
active: false,
agentLabel: cached.agentLabel,
sessionLabel: cached.sessionLabel,
segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
steps: cached.steps,
messageStepTexts: getPrimaryMessageStepTexts(cached.steps),
streamingReplyText: null,
}];
}
// Mark intermediate assistant messages whose process output should be folded into
// the ExecutionGraphCard. We fold the text regardless of whether the
// message ALSO carries tool calls (mixed `text + toolCall` messages are
// common — e.g. "waiting for the page to load…" followed by a `wait`
// tool call). This prevents orphan narration bubbles from leaking into
// the chat stream once the graph is collapsed.
//
// When the run is still streaming (`isLatestOpenRun`) the final reply is
// not yet part of `segmentMessages`, so every assistant message in the
// segment counts as intermediate. For completed runs, we preserve the
// final reply bubble by skipping the message that `findReplyMessageIndex`
// identifies as the answer.
const segmentReplyOffset = findReplyMessageIndex(segmentMessages, isLatestOpenRun);
for (let offset = 0; offset < segmentMessages.length; offset += 1) {
if (offset === segmentReplyOffset) continue;
const candidate = segmentMessages[offset];
if (!candidate || candidate.role !== 'assistant') continue;
const hasNarrationText = extractText(candidate).trim().length > 0;
const hasThinking = !!extractThinking(candidate);
if (!hasNarrationText && !hasThinking) continue;
foldedNarrationIndices.add(idx + 1 + offset);
}
return [{ return [{
triggerIndex: idx, triggerIndex: idx,
replyIndex, replyIndex,
active: isLatestOpenRun, active: isLatestOpenRun && streamingReplyText == null,
agentLabel: segmentAgentLabel, agentLabel: segmentAgentLabel,
sessionLabel: segmentSessionLabel, sessionLabel: segmentSessionLabel,
segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1, segmentEnd: nextUserIndex === -1 ? messages.length - 1 : nextUserIndex - 1,
steps, steps,
messageStepTexts: getPrimaryMessageStepTexts(steps),
streamingReplyText,
}]; }];
}); });
const hasActiveExecutionGraph = userRunCards.some((card) => card.active);
const replyTextOverrides = new Map<number, string>();
for (const card of userRunCards) {
if (card.replyIndex == null) continue;
const replyMessage = messages[card.replyIndex];
if (!replyMessage || replyMessage.role !== 'assistant') continue;
const fullReplyText = extractText(replyMessage);
const trimmedReplyText = stripProcessMessagePrefix(fullReplyText, card.messageStepTexts);
if (trimmedReplyText !== fullReplyText) {
replyTextOverrides.set(card.replyIndex, trimmedReplyText);
}
}
const streamingReplyText = userRunCards.find((card) => card.streamingReplyText != null)?.streamingReplyText ?? null;
// Derive the set of run keys that should be auto-collapsed (run finished
// streaming or has a reply override) during render instead of in an effect,
// so we don't violate react-hooks/set-state-in-effect. Explicit user toggles
// still win via `graphExpandedOverrides` and are merged in at the call site.
const autoCollapsedRunKeys = useMemo(() => {
const keys = new Set<string>();
for (const card of userRunCards) {
const shouldCollapse = card.streamingReplyText != null
|| (card.replyIndex != null && replyTextOverrides.has(card.replyIndex));
if (!shouldCollapse) continue;
const triggerMsg = messages[card.triggerIndex];
const runKey = triggerMsg?.id
? `msg-${triggerMsg.id}`
: `${currentSessionKey}:trigger-${card.triggerIndex}`;
keys.add(runKey);
}
return keys;
}, [currentSessionKey, messages, replyTextOverrides, userRunCards]);
useEffect(() => {
if (userRunCards.length === 0) return;
const current = graphStepCacheStore.get(currentSessionKey) ?? {};
let changed = false;
const next = { ...current };
for (const card of userRunCards) {
if (card.steps.length === 0) continue;
const triggerMsg = messages[card.triggerIndex];
const runKey = triggerMsg?.id
? `msg-${triggerMsg.id}`
: `${currentSessionKey}:trigger-${card.triggerIndex}`;
const existing = current[runKey];
const sameSteps = !!existing
&& existing.steps.length === card.steps.length
&& existing.steps.every((step, index) => {
const nextStep = card.steps[index];
return nextStep
&& step.id === nextStep.id
&& step.label === nextStep.label
&& step.status === nextStep.status
&& step.kind === nextStep.kind
&& step.detail === nextStep.detail
&& step.depth === nextStep.depth
&& step.parentId === nextStep.parentId;
});
if (
sameSteps
&& existing?.agentLabel === card.agentLabel
&& existing?.sessionLabel === card.sessionLabel
&& existing?.segmentEnd === card.segmentEnd
&& existing?.replyIndex === card.replyIndex
&& existing?.triggerIndex === card.triggerIndex
) {
continue;
}
next[runKey] = {
steps: card.steps,
agentLabel: card.agentLabel,
sessionLabel: card.sessionLabel,
segmentEnd: card.segmentEnd,
replyIndex: card.replyIndex,
triggerIndex: card.triggerIndex,
};
changed = true;
}
if (changed) {
graphStepCacheStore.set(currentSessionKey, next);
}
}, [userRunCards, messages, currentSessionKey]);
return ( return (
<div className={cn("relative flex min-h-0 flex-col -m-6 transition-colors duration-500 dark:bg-background")} style={{ height: 'calc(100vh - 2.5rem)' }}> <div className={cn("relative flex min-h-0 flex-col -m-6 transition-colors duration-500 dark:bg-background")} style={{ height: 'calc(100vh - 2.5rem)' }}>
@@ -237,12 +438,19 @@ export function Chat() {
<div className="min-h-0 flex-1 overflow-hidden px-4 py-4"> <div className="min-h-0 flex-1 overflow-hidden px-4 py-4">
<div className="mx-auto flex h-full min-h-0 max-w-6xl flex-col gap-4 lg:flex-row lg:items-stretch"> <div className="mx-auto flex h-full min-h-0 max-w-6xl flex-col gap-4 lg:flex-row lg:items-stretch">
<div ref={scrollRef} className="min-h-0 min-w-0 flex-1 overflow-y-auto"> <div ref={scrollRef} className="min-h-0 min-w-0 flex-1 overflow-y-auto">
<div ref={contentRef} className="max-w-4xl space-y-4"> <div
ref={contentRef}
className={cn(
"space-y-4 transition-all duration-300",
isEmpty ? "mx-auto w-full max-w-3xl" : "max-w-4xl",
)}
>
{isEmpty ? ( {isEmpty ? (
<WelcomeScreen /> <WelcomeScreen />
) : ( ) : (
<> <>
{messages.map((msg, idx) => { {messages.map((msg, idx) => {
if (foldedNarrationIndices.has(idx)) return null;
const suppressToolCards = userRunCards.some((card) => const suppressToolCards = userRunCards.some((card) =>
idx > card.triggerIndex && idx <= card.segmentEnd, idx > card.triggerIndex && idx <= card.segmentEnd,
); );
@@ -255,40 +463,42 @@ export function Chat() {
> >
<ChatMessage <ChatMessage
message={msg} message={msg}
showThinking={showThinking} textOverride={replyTextOverrides.get(idx)}
suppressToolCards={suppressToolCards} suppressToolCards={suppressToolCards}
suppressProcessAttachments={suppressToolCards} suppressProcessAttachments={suppressToolCards}
/> />
{userRunCards {userRunCards
.filter((card) => card.triggerIndex === idx) .filter((card) => card.triggerIndex === idx)
.map((card) => ( .map((card) => {
<ExecutionGraphCard const triggerMsg = messages[card.triggerIndex];
key={`graph-${idx}`} const runKey = triggerMsg?.id
agentLabel={card.agentLabel} ? `msg-${triggerMsg.id}`
sessionLabel={card.sessionLabel} : `${currentSessionKey}:trigger-${card.triggerIndex}`;
steps={card.steps} const userOverride = graphExpandedOverrides[runKey];
active={card.active} const expanded = userOverride != null
onJumpToTrigger={() => { ? userOverride
document.getElementById(`chat-message-${card.triggerIndex}`)?.scrollIntoView({ : autoCollapsedRunKeys.has(runKey)
behavior: 'smooth', ? false
block: 'center', : undefined;
}); return (
}} <ExecutionGraphCard
onJumpToReply={() => { key={`graph-${runKey}`}
if (card.replyIndex == null) return; agentLabel={card.agentLabel}
document.getElementById(`chat-message-${card.replyIndex}`)?.scrollIntoView({ steps={card.steps}
behavior: 'smooth', active={card.active}
block: 'center', expanded={expanded}
}); onExpandedChange={(next) =>
}} setGraphExpandedOverrides((prev) => ({ ...prev, [runKey]: next }))
/> }
))} />
);
})}
</div> </div>
); );
})} })}
{/* Streaming message */} {/* Streaming message */}
{shouldRenderStreaming && ( {shouldRenderStreaming && !hasActiveExecutionGraph && (
<ChatMessage <ChatMessage
message={(streamMsg message={(streamMsg
? { ? {
@@ -302,19 +512,19 @@ export function Chat() {
content: streamText, content: streamText,
timestamp: streamingTimestamp, timestamp: streamingTimestamp,
}) as RawMessage} }) as RawMessage}
showThinking={showThinking} textOverride={streamingReplyText ?? undefined}
isStreaming isStreaming
streamingTools={streamingTools} streamingTools={streamingReplyText != null ? [] : streamingTools}
/> />
)} )}
{/* Activity indicator: waiting for next AI turn after tool execution */} {/* Activity indicator: waiting for next AI turn after tool execution */}
{sending && pendingFinal && !shouldRenderStreaming && ( {sending && pendingFinal && !shouldRenderStreaming && !hasActiveExecutionGraph && (
<ActivityIndicator phase="tool_processing" /> <ActivityIndicator phase="tool_processing" />
)} )}
{/* Typing indicator when sending but no stream content yet */} {/* Typing indicator when sending but no stream content yet */}
{sending && !pendingFinal && !hasAnyStreamContent && ( {sending && !pendingFinal && !hasAnyStreamContent && !hasActiveExecutionGraph && (
<TypingIndicator /> <TypingIndicator />
)} )}
</> </>

View File

@@ -63,6 +63,52 @@ function compactProgressiveParts(parts: string[]): string[] {
return compacted; return compacted;
} }
function splitProgressiveParts(parts: string[]): string[] {
const segments: string[] = [];
let previous = '';
for (const part of parts) {
const current = normalizeProgressiveText(part);
if (!current) continue;
if (!previous) {
segments.push(current);
previous = current;
continue;
}
if (current === previous || previous.startsWith(current)) {
continue;
}
if (current.startsWith(previous)) {
const incremental = current.slice(previous.length).trim();
if (incremental) {
segments.push(incremental);
}
previous = current;
continue;
}
segments.push(current);
previous = current;
}
return segments;
}
function escapeRegExp(value: string): string {
return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
function consumeLeadingSegment(text: string, segment: string): number {
const tokens = segment.trim().split(/\s+/).filter(Boolean);
if (tokens.length === 0) return 0;
const pattern = new RegExp(`^\\s*${tokens.map(escapeRegExp).join('\\s+')}\\s*`, 'u');
const match = text.match(pattern);
return match ? match[0].length : 0;
}
/** /**
* Extract displayable text from a message's content field. * Extract displayable text from a message's content field.
* Handles both string content and array-of-blocks content. * Handles both string content and array-of-blocks content.
@@ -102,6 +148,37 @@ export function extractText(message: RawMessage | unknown): string {
return result; return result;
} }
export function extractTextSegments(message: RawMessage | unknown): string[] {
if (!message || typeof message !== 'object') return [];
const msg = message as Record<string, unknown>;
const content = msg.content;
const isUser = msg.role === 'user';
let segments: string[] = [];
if (typeof content === 'string') {
const cleaned = content.trim();
segments = cleaned ? [cleaned] : [];
} else if (Array.isArray(content)) {
const parts: string[] = [];
for (const block of content as ContentBlock[]) {
if (block.type === 'text' && block.text && block.text.trim()) {
parts.push(block.text);
}
}
segments = splitProgressiveParts(parts);
} else if (typeof msg.text === 'string') {
const cleaned = msg.text.trim();
segments = cleaned ? [cleaned] : [];
}
if (!isUser) return segments;
return segments
.map((segment) => cleanUserText(segment))
.filter((segment) => segment.length > 0);
}
/** /**
* Extract thinking/reasoning content from a message. * Extract thinking/reasoning content from a message.
* Returns null if no thinking content found. * Returns null if no thinking content found.
@@ -127,6 +204,43 @@ export function extractThinking(message: RawMessage | unknown): string | null {
return combined.length > 0 ? combined : null; return combined.length > 0 ? combined : null;
} }
export function extractThinkingSegments(message: RawMessage | unknown): string[] {
if (!message || typeof message !== 'object') return [];
const msg = message as Record<string, unknown>;
const content = msg.content;
if (!Array.isArray(content)) return [];
const parts: string[] = [];
for (const block of content as ContentBlock[]) {
if (block.type === 'thinking' && block.thinking) {
const cleaned = block.thinking.trim();
if (cleaned) {
parts.push(cleaned);
}
}
}
return splitProgressiveParts(parts);
}
export function stripProcessMessagePrefix(text: string, processSegments: string[]): string {
let remaining = text;
let strippedAny = false;
for (const segment of processSegments) {
const normalizedSegment = normalizeProgressiveText(segment);
if (!normalizedSegment) continue;
const consumed = consumeLeadingSegment(remaining, normalizedSegment);
if (consumed === 0) break;
remaining = remaining.slice(consumed);
strippedAny = true;
}
const trimmed = remaining.trimStart();
return strippedAny && trimmed ? trimmed : text;
}
/** /**
* Extract media file references from Gateway-formatted user message text. * Extract media file references from Gateway-formatted user message text.
* Returns array of { filePath, mimeType } from [media attached: path (mime) | path] patterns. * Returns array of { filePath, mimeType } from [media attached: path (mime) | path] patterns.

View File

@@ -1,4 +1,4 @@
import { extractThinking, extractToolUse } from './message-utils'; import { extractText, extractTextSegments, extractThinkingSegments, extractToolUse } from './message-utils';
import type { RawMessage, ToolStatus } from '@/stores/chat'; import type { RawMessage, ToolStatus } from '@/stores/chat';
export type TaskStepStatus = 'running' | 'completed' | 'error'; export type TaskStepStatus = 'running' | 'completed' | 'error';
@@ -7,21 +7,45 @@ export interface TaskStep {
id: string; id: string;
label: string; label: string;
status: TaskStepStatus; status: TaskStepStatus;
kind: 'thinking' | 'tool' | 'system'; kind: 'thinking' | 'tool' | 'system' | 'message';
detail?: string; detail?: string;
depth: number; depth: number;
parentId?: string; parentId?: string;
} }
const MAX_TASK_STEPS = 8; /**
* Detects the index of the "final reply" assistant message in a run segment.
*
* The reply is the last assistant message that carries non-empty text
* content, regardless of whether it ALSO carries tool calls. (Mixed
* `text + toolCall` replies are rare but real — the model can emit a parting
* text block alongside a final tool call. Treating such a message as the
* reply avoids mis-protecting an earlier narration as the "answer" and
* leaking the actual last text into the fold.)
*
* When this returns a non-negative index, the caller should avoid folding
* that message's text into the graph (it is the answer the user sees in the
* chat stream). When the run is still active (streaming) the final reply is
* produced via `streamingMessage` instead, so callers pass
* `hasStreamingReply = true` to skip protection and let every assistant-with-
* text message in history be folded into the graph as narration.
*/
export function findReplyMessageIndex(messages: RawMessage[], hasStreamingReply: boolean): number {
if (hasStreamingReply) return -1;
for (let idx = messages.length - 1; idx >= 0; idx -= 1) {
const message = messages[idx];
if (!message || message.role !== 'assistant') continue;
if (extractText(message).trim().length === 0) continue;
return idx;
}
return -1;
}
interface DeriveTaskStepsInput { interface DeriveTaskStepsInput {
messages: RawMessage[]; messages: RawMessage[];
streamingMessage: unknown | null; streamingMessage: unknown | null;
streamingTools: ToolStatus[]; streamingTools: ToolStatus[];
sending: boolean; omitLastStreamingMessageSegment?: boolean;
pendingFinal: boolean;
showThinking: boolean;
} }
export interface SubagentCompletionInfo { export interface SubagentCompletionInfo {
@@ -128,7 +152,7 @@ function attachTopology(steps: TaskStep[]): TaskStep[] {
continue; continue;
} }
if (step.kind === 'thinking') { if (step.kind === 'thinking' || step.kind === 'message') {
withTopology.push({ withTopology.push({
...step, ...step,
depth: activeBranchNodeId ? 3 : 1, depth: activeBranchNodeId ? 3 : 1,
@@ -157,13 +181,37 @@ function attachTopology(steps: TaskStep[]): TaskStep[] {
return withTopology; return withTopology;
} }
function appendDetailSegments(
segments: string[],
options: {
idPrefix: string;
label: string;
kind: Extract<TaskStep['kind'], 'thinking' | 'message'>;
running: boolean;
upsertStep: (step: TaskStep) => void;
},
): void {
const normalizedSegments = segments
.map((segment) => normalizeText(segment))
.filter((segment): segment is string => !!segment);
normalizedSegments.forEach((detail, index) => {
options.upsertStep({
id: `${options.idPrefix}-${index}`,
label: options.label,
status: options.running && index === normalizedSegments.length - 1 ? 'running' : 'completed',
kind: options.kind,
detail,
depth: 1,
});
});
}
export function deriveTaskSteps({ export function deriveTaskSteps({
messages, messages,
streamingMessage, streamingMessage,
streamingTools, streamingTools,
sending, omitLastStreamingMessageSegment = false,
pendingFinal,
showThinking,
}: DeriveTaskStepsInput): TaskStep[] { }: DeriveTaskStepsInput): TaskStep[] {
const steps: TaskStep[] = []; const steps: TaskStep[] = [];
const stepIndexById = new Map<string, number>(); const stepIndexById = new Map<string, number>();
@@ -187,30 +235,44 @@ export function deriveTaskSteps({
? streamingMessage as RawMessage ? streamingMessage as RawMessage
: null; : null;
const relevantAssistantMessages = messages.filter((message) => { // The final answer the user sees as a chat bubble. We avoid folding it into
if (!message || message.role !== 'assistant') return false; // the graph to prevent duplication. When a run is still streaming, the
if (extractToolUse(message).length > 0) return true; // reply lives in `streamingMessage`, so every pure-text assistant message in
return showThinking && !!extractThinking(message); // `messages` is treated as intermediate narration.
}); const replyIndex = findReplyMessageIndex(messages, streamMessage != null);
for (const [messageIndex, assistantMessage] of relevantAssistantMessages.entries()) { for (const [messageIndex, message] of messages.entries()) {
if (showThinking) { if (!message || message.role !== 'assistant') continue;
const thinking = extractThinking(assistantMessage);
if (thinking) {
upsertStep({
id: `history-thinking-${assistantMessage.id || messageIndex}`,
label: 'Thinking',
status: 'completed',
kind: 'thinking',
detail: normalizeText(thinking),
depth: 1,
});
}
}
extractToolUse(assistantMessage).forEach((tool, index) => { appendDetailSegments(extractThinkingSegments(message), {
idPrefix: `history-thinking-${message.id || messageIndex}`,
label: 'Thinking',
kind: 'thinking',
running: false,
upsertStep,
});
const toolUses = extractToolUse(message);
// Fold any intermediate assistant text into the graph as a narration
// step — including text that lives on a mixed `text + toolCall` message.
// The narration step is emitted BEFORE the tool steps so the graph
// preserves the original ordering (the assistant "thinks out loud" and
// then invokes the tool).
const narrationSegments = extractTextSegments(message);
const graphNarrationSegments = messageIndex === replyIndex
? narrationSegments.slice(0, -1)
: narrationSegments;
appendDetailSegments(graphNarrationSegments, {
idPrefix: `history-message-${message.id || messageIndex}`,
label: 'Message',
kind: 'message',
running: false,
upsertStep,
});
toolUses.forEach((tool, index) => {
upsertStep({ upsertStep({
id: tool.id || makeToolId(`history-tool-${assistantMessage.id || messageIndex}`, tool.name, index), id: tool.id || makeToolId(`history-tool-${message.id || messageIndex}`, tool.name, index),
label: tool.name, label: tool.name,
status: 'completed', status: 'completed',
kind: 'tool', kind: 'tool',
@@ -220,18 +282,29 @@ export function deriveTaskSteps({
}); });
} }
if (streamMessage && showThinking) { if (streamMessage) {
const thinking = extractThinking(streamMessage); appendDetailSegments(extractThinkingSegments(streamMessage), {
if (thinking) { idPrefix: 'stream-thinking',
upsertStep({ label: 'Thinking',
id: 'stream-thinking', kind: 'thinking',
label: 'Thinking', running: true,
status: 'running', upsertStep,
kind: 'thinking', });
detail: normalizeText(thinking),
depth: 1, // Stream-time narration should also appear in the execution graph so that
}); // intermediate process output stays in P1 instead of leaking into the
} // assistant reply area.
const streamNarrationSegments = extractTextSegments(streamMessage);
const graphStreamNarrationSegments = omitLastStreamingMessageSegment
? streamNarrationSegments.slice(0, -1)
: streamNarrationSegments;
appendDetailSegments(graphStreamNarrationSegments, {
idPrefix: 'stream-message',
label: 'Message',
kind: 'message',
running: !omitLastStreamingMessageSegment,
upsertStep,
});
} }
const activeToolIds = new Set<string>(); const activeToolIds = new Set<string>();
@@ -267,28 +340,5 @@ export function deriveTaskSteps({
}); });
} }
if (sending && pendingFinal) { return attachTopology(steps);
upsertStep({
id: 'system-finalizing',
label: 'Finalizing answer',
status: 'running',
kind: 'system',
detail: 'Waiting for the assistant to finish this run.',
depth: 1,
});
} else if (sending && steps.length === 0) {
upsertStep({
id: 'system-preparing',
label: 'Preparing run',
status: 'running',
kind: 'system',
detail: 'Waiting for the first streaming update.',
depth: 1,
});
}
const withTopology = attachTopology(steps);
return withTopology.length > MAX_TASK_STEPS
? withTopology.slice(-MAX_TASK_STEPS)
: withTopology;
} }

View File

@@ -1,6 +1,6 @@
/** /**
* Chat State Store * Chat State Store
* Manages chat messages, sessions, streaming, and thinking state. * Manages chat messages, sessions, and streaming state.
* Communicates with OpenClaw Gateway via renderer WebSocket RPC. * Communicates with OpenClaw Gateway via renderer WebSocket RPC.
*/ */
import { create } from 'zustand'; import { create } from 'zustand';
@@ -93,6 +93,13 @@ function buildChatEventDedupeKey(eventState: string, event: Record<string, unkno
const runId = event.runId != null ? String(event.runId) : ''; const runId = event.runId != null ? String(event.runId) : '';
const sessionKey = event.sessionKey != null ? String(event.sessionKey) : ''; const sessionKey = event.sessionKey != null ? String(event.sessionKey) : '';
const seq = event.seq != null ? String(event.seq) : ''; const seq = event.seq != null ? String(event.seq) : '';
// Some gateways emit multiple `delta` updates without a monotonically
// increasing `seq`. Deduping those by just `runId + sessionKey + state`
// collapses legitimate stream progression, so only seq-backed deltas are
// safe to dedupe generically.
if (eventState === 'delta' && !seq) {
return null;
}
if (runId || sessionKey || seq || eventState) { if (runId || sessionKey || seq || eventState) {
return [runId, sessionKey, seq, eventState].join('|'); return [runId, sessionKey, seq, eventState].join('|');
} }
@@ -204,45 +211,7 @@ function compactProgressiveTextParts(parts: string[]): string[] {
} }
function normalizeLiveContentBlocks(content: ContentBlock[]): ContentBlock[] { function normalizeLiveContentBlocks(content: ContentBlock[]): ContentBlock[] {
const normalized: ContentBlock[] = []; return content.map((block) => ({ ...block }));
let textBuffer: string[] = [];
let thinkingBuffer: string[] = [];
const flushTextBuffer = () => {
for (const part of compactProgressiveTextParts(textBuffer)) {
normalized.push({ type: 'text', text: part });
}
textBuffer = [];
};
const flushThinkingBuffer = () => {
for (const part of compactProgressiveTextParts(thinkingBuffer)) {
normalized.push({ type: 'thinking', thinking: part });
}
thinkingBuffer = [];
};
for (const block of content) {
if (block.type === 'text' && block.text) {
textBuffer.push(block.text);
continue;
}
if (block.type === 'thinking' && block.thinking) {
thinkingBuffer.push(block.thinking);
continue;
}
flushTextBuffer();
flushThinkingBuffer();
normalized.push(block);
}
flushTextBuffer();
flushThinkingBuffer();
return normalized;
} }
function normalizeStreamingMessage(message: unknown): unknown { function normalizeStreamingMessage(message: unknown): unknown {
@@ -1199,7 +1168,6 @@ export const useChatStore = create<ChatState>((set, get) => ({
sessionLabels: {}, sessionLabels: {},
sessionLastActivity: {}, sessionLastActivity: {},
showThinking: true,
thinkingLevel: null, thinkingLevel: null,
// ── Load sessions via sessions.list ── // ── Load sessions via sessions.list ──
@@ -2269,10 +2237,6 @@ export const useChatStore = create<ChatState>((set, get) => ({
} }
}, },
// ── Toggle thinking visibility ──
toggleThinking: () => set((s) => ({ showThinking: !s.showThinking })),
// ── Refresh: reload history + sessions ── // ── Refresh: reload history + sessions ──
refresh: async () => { refresh: async () => {

View File

@@ -109,44 +109,7 @@ function compactProgressiveTextParts(parts: string[]): string[] {
} }
function normalizeLiveContentBlocks(content: ContentBlock[]): ContentBlock[] { function normalizeLiveContentBlocks(content: ContentBlock[]): ContentBlock[] {
const normalized: ContentBlock[] = []; return content.map((block) => ({ ...block }));
let textBuffer: string[] = [];
let thinkingBuffer: string[] = [];
const flushTextBuffer = () => {
for (const part of compactProgressiveTextParts(textBuffer)) {
normalized.push({ type: 'text', text: part });
}
textBuffer = [];
};
const flushThinkingBuffer = () => {
for (const part of compactProgressiveTextParts(thinkingBuffer)) {
normalized.push({ type: 'thinking', thinking: part });
}
thinkingBuffer = [];
};
for (const block of content) {
if (block.type === 'text' && block.text) {
textBuffer.push(block.text);
continue;
}
if (block.type === 'thinking' && block.thinking) {
thinkingBuffer.push(block.thinking);
continue;
}
flushTextBuffer();
flushThinkingBuffer();
normalized.push(block);
}
flushTextBuffer();
flushThinkingBuffer();
return normalized;
} }
function normalizeStreamingMessage(message: unknown): unknown { function normalizeStreamingMessage(message: unknown): unknown {

View File

@@ -21,7 +21,6 @@ export const initialChatState: Pick<
| 'currentAgentId' | 'currentAgentId'
| 'sessionLabels' | 'sessionLabels'
| 'sessionLastActivity' | 'sessionLastActivity'
| 'showThinking'
| 'thinkingLevel' | 'thinkingLevel'
> = { > = {
messages: [], messages: [],
@@ -43,7 +42,6 @@ export const initialChatState: Pick<
sessionLabels: {}, sessionLabels: {},
sessionLastActivity: {}, sessionLastActivity: {},
showThinking: true,
thinkingLevel: null, thinkingLevel: null,
}; };
@@ -61,7 +59,6 @@ export function createChatActions(
| 'sendMessage' | 'sendMessage'
| 'abortRun' | 'abortRun'
| 'handleChatEvent' | 'handleChatEvent'
| 'toggleThinking'
| 'refresh' | 'refresh'
| 'clearError' | 'clearError'
> { > {

View File

@@ -1,9 +1,7 @@
import type { ChatGet, ChatSet, RuntimeActions } from './store-api'; import type { ChatGet, ChatSet, RuntimeActions } from './store-api';
export function createRuntimeUiActions(set: ChatSet, get: ChatGet): Pick<RuntimeActions, 'toggleThinking' | 'refresh' | 'clearError'> { export function createRuntimeUiActions(set: ChatSet, get: ChatGet): Pick<RuntimeActions, 'refresh' | 'clearError'> {
return { return {
toggleThinking: () => set((s) => ({ showThinking: !s.showThinking })),
// ── Refresh: reload history + sessions ── // ── Refresh: reload history + sessions ──
refresh: async () => { refresh: async () => {

View File

@@ -14,5 +14,5 @@ export type SessionHistoryActions = Pick<
export type RuntimeActions = Pick< export type RuntimeActions = Pick<
ChatState, ChatState,
'sendMessage' | 'abortRun' | 'handleChatEvent' | 'toggleThinking' | 'refresh' | 'clearError' 'sendMessage' | 'abortRun' | 'handleChatEvent' | 'refresh' | 'clearError'
>; >;

View File

@@ -85,7 +85,6 @@ export interface ChatState {
sessionLastActivity: Record<string, number>; sessionLastActivity: Record<string, number>;
// Thinking // Thinking
showThinking: boolean;
thinkingLevel: string | null; thinkingLevel: string | null;
// Actions // Actions
@@ -108,7 +107,6 @@ export interface ChatState {
) => Promise<void>; ) => Promise<void>;
abortRun: () => Promise<void>; abortRun: () => Promise<void>;
handleChatEvent: (event: Record<string, unknown>) => void; handleChatEvent: (event: Record<string, unknown>) => void;
toggleThinking: () => void;
refresh: () => Promise<void>; refresh: () => Promise<void>;
clearError: () => void; clearError: () => void;
} }

View File

@@ -148,6 +148,38 @@ const childTranscriptMessages = [
}, },
]; ];
// Fixtures for the "in-flight run" scenario: the user prompt already exists in
// persisted history before any streaming events arrive over the gateway.
const inFlightPrompt = 'Open browser, search for tech news, and take a screenshot';
const seededInFlightHistory = [
  {
    role: 'user',
    content: [{ type: 'text', text: inFlightPrompt }],
    timestamp: Date.now(),
  },
];

// Fixtures for the "long run" scenario: nine intermediate process messages
// followed by one final reply whose text is the concatenation of every process
// segment plus the closing summary sentence.
const longRunPrompt = 'Inspect the workspace and summarize the result';
const longRunProcessSegments = Array.from({ length: 9 }, (_, index) => `Checked source ${index + 1}.`);
const longRunSummary = 'Here is the summary.';
// The folded reply repeats all process segments before the summary; the UI is
// expected to strip that prefix so only the summary shows in the chat bubble.
const longRunReplyText = `${longRunProcessSegments.join(' ')} ${longRunSummary}`;
const longRunHistory = [
  {
    role: 'user',
    content: [{ type: 'text', text: longRunPrompt }],
    timestamp: Date.now(),
  },
  // One assistant message per process segment (intermediate narration turns).
  ...longRunProcessSegments.map((segment, index) => ({
    role: 'assistant',
    id: `long-run-step-${index + 1}`,
    content: [{ type: 'text', text: segment }],
    timestamp: Date.now(),
  })),
  // The final assistant reply carrying the full folded text.
  {
    role: 'assistant',
    id: 'long-run-final',
    content: [{ type: 'text', text: longRunReplyText }],
    timestamp: Date.now(),
  },
];
test.describe('ClawX chat execution graph', () => { test.describe('ClawX chat execution graph', () => {
test('renders internal yield status and linked subagent branch from mocked IPC', async ({ launchElectronApp }) => { test('renders internal yield status and linked subagent branch from mocked IPC', async ({ launchElectronApp }) => {
const app = await launchElectronApp({ skipSetup: true }); const app = await launchElectronApp({ skipSetup: true });
@@ -222,6 +254,12 @@ test.describe('ClawX chat execution graph', () => {
} }
await expect(page.getByTestId('main-layout')).toBeVisible(); await expect(page.getByTestId('main-layout')).toBeVisible();
await expect(page.getByTestId('chat-execution-graph')).toBeVisible({ timeout: 30_000 }); await expect(page.getByTestId('chat-execution-graph')).toBeVisible({ timeout: 30_000 });
// Completed runs auto-collapse into a single-line summary button. Expand
// it first so the underlying step details are rendered.
const graph = page.getByTestId('chat-execution-graph');
if ((await graph.getAttribute('data-collapsed')) === 'true') {
await graph.click();
}
await expect( await expect(
page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('sessions_yield', { exact: true }), page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('sessions_yield', { exact: true }),
).toBeVisible(); ).toBeVisible();
@@ -229,6 +267,9 @@ test.describe('ClawX chat execution graph', () => {
await expect( await expect(
page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('exec', { exact: true }), page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('exec', { exact: true }),
).toBeVisible(); ).toBeVisible();
const execRow = page.locator('[data-testid="chat-execution-step"]').filter({ hasText: 'exec' }).first();
await execRow.click();
await expect(execRow.locator('pre')).toBeVisible();
await expect(page.locator('[data-testid="chat-execution-graph"]').getByText('I asked coder to break down the core blocks of ~/Velaria uncommitted changes; will give you the conclusion when it returns.')).toBeVisible(); await expect(page.locator('[data-testid="chat-execution-graph"]').getByText('I asked coder to break down the core blocks of ~/Velaria uncommitted changes; will give you the conclusion when it returns.')).toBeVisible();
await expect(page.getByText('CHECKLIST.md')).toHaveCount(0); await expect(page.getByText('CHECKLIST.md')).toHaveCount(0);
} finally { } finally {
@@ -252,7 +293,7 @@ test.describe('ClawX chat execution graph', () => {
[stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 200 }])]: { [stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 200 }])]: {
success: true, success: true,
result: { result: {
messages: [], messages: seededInFlightHistory,
}, },
}, },
}, },
@@ -281,9 +322,16 @@ test.describe('ClawX chat execution graph', () => {
await app.evaluate(async ({ app: _app }) => { await app.evaluate(async ({ app: _app }) => {
const { ipcMain } = process.mainModule!.require('electron') as typeof import('electron'); const { ipcMain } = process.mainModule!.require('electron') as typeof import('electron');
const sendPayloads: Array<{ message?: string; sessionKey?: string }> = []; (globalThis as typeof globalThis & { __chatExecutionHistory?: unknown[] }).__chatExecutionHistory = [
{
role: 'user',
content: [{ type: 'text', text: 'Open browser, search for tech news, and take a screenshot' }],
timestamp: Date.now(),
},
];
ipcMain.removeHandler('gateway:rpc'); ipcMain.removeHandler('gateway:rpc');
ipcMain.handle('gateway:rpc', async (_event: unknown, method: string, payload: unknown) => { ipcMain.handle('gateway:rpc', async (_event: unknown, method: string, payload: unknown) => {
void payload;
if (method === 'sessions.list') { if (method === 'sessions.list') {
return { return {
success: true, success: true,
@@ -295,22 +343,16 @@ test.describe('ClawX chat execution graph', () => {
if (method === 'chat.history') { if (method === 'chat.history') {
return { return {
success: true, success: true,
result: { messages: [] }, result: {
}; messages: (
} (globalThis as typeof globalThis & { __chatExecutionHistory?: unknown[] }).__chatExecutionHistory
if (method === 'chat.send') { ?? seededInFlightHistory
if (payload && typeof payload === 'object') { ),
const p = payload as { message?: string; sessionKey?: string }; },
sendPayloads.push({ message: p.message, sessionKey: p.sessionKey });
}
return {
success: true,
result: { runId: 'mock-run' },
}; };
} }
return { success: true, result: {} }; return { success: true, result: {} };
}); });
(globalThis as typeof globalThis & { __clawxSendPayloads?: Array<{ message?: string; sessionKey?: string }> }).__clawxSendPayloads = sendPayloads;
}); });
const page = await getStableWindow(app); const page = await getStableWindow(app);
@@ -323,18 +365,24 @@ test.describe('ClawX chat execution graph', () => {
} }
await expect(page.getByTestId('main-layout')).toBeVisible(); await expect(page.getByTestId('main-layout')).toBeVisible();
await page.getByTestId('chat-composer-input').fill('Open browser, search for tech news, and take a screenshot'); await expect(page.getByText(inFlightPrompt)).toHaveCount(1);
await page.getByTestId('chat-composer-send').click();
await expect(page.getByText('Open browser, search for tech news, and take a screenshot')).toHaveCount(1); await app.evaluate(async ({ BrowserWindow }) => {
await expect.poll(async () => { const win = BrowserWindow.getAllWindows()[0];
return await app.evaluate(() => { win?.webContents.send('gateway:notification', {
const sendPayloads = (globalThis as typeof globalThis & { method: 'agent',
__clawxSendPayloads?: Array<{ message?: string; sessionKey?: string }>; params: {
}).__clawxSendPayloads || []; runId: 'mock-run',
return sendPayloads.length; sessionKey: 'agent:main:main',
state: 'started',
},
}); });
}).toBe(1); });
await expect(page.locator('[data-testid="chat-execution-graph"]')).toHaveAttribute('data-collapsed', 'false');
await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"]')).toBeVisible();
await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"] [aria-hidden="true"]')).toHaveCount(1);
await expect(page.locator('[data-testid^="chat-message-"]')).toHaveCount(1);
await app.evaluate(async ({ BrowserWindow }) => { await app.evaluate(async ({ BrowserWindow }) => {
const win = BrowserWindow.getAllWindows()[0]; const win = BrowserWindow.getAllWindows()[0];
@@ -359,15 +407,143 @@ test.describe('ClawX chat execution graph', () => {
}); });
}); });
await expect(page.getByText('Open browser, search for tech news, and take a screenshot')).toHaveCount(1); await expect(page.getByText(inFlightPrompt)).toHaveCount(1);
await expect(page.getByText(/^thinking 1 2 3$/)).toHaveCount(1); // Intermediate process output should be rendered in the execution graph
await expect(page.getByText(/^thinking 1 2$/)).toHaveCount(0); // only, not as a streaming assistant chat bubble.
await expect(page.getByText(/^thinking 1$/)).toHaveCount(0); await expect(page.locator('[data-testid^="chat-message-"]')).toHaveCount(1);
await expect(page.getByText(/^1 2 3$/)).toHaveCount(1); await expect(page.locator('[data-testid="chat-execution-graph"]')).toHaveAttribute('data-collapsed', 'false');
await expect(page.getByText(/^1 2$/)).toHaveCount(0); await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"]')).toBeVisible();
await expect(page.getByText(/^1$/)).toHaveCount(0); await expect(page.locator('[data-testid="chat-execution-step-thinking-trailing"] [aria-hidden="true"]')).toHaveCount(1);
await expect(page.locator('[data-testid="chat-execution-graph"] [data-testid="chat-execution-step"]').getByText('Thinking', { exact: true })).toHaveCount(3);
const firstChatBubble = page.locator('[data-testid^="chat-message-"] > div').first();
await expect(firstChatBubble.getByText(/^1 2 3$/)).toHaveCount(0);
await app.evaluate(async ({ BrowserWindow }) => {
(globalThis as typeof globalThis & { __chatExecutionHistory?: unknown[] }).__chatExecutionHistory = [
{
role: 'user',
content: [{ type: 'text', text: 'Open browser, search for tech news, and take a screenshot' }],
timestamp: Date.now(),
},
{
role: 'assistant',
content: [{
type: 'toolCall',
id: 'browser-start-call',
name: 'browser',
arguments: { action: 'start' },
}],
timestamp: Date.now(),
},
{
role: 'assistant',
content: [{
type: 'toolCall',
id: 'browser-open-call',
name: 'browser',
arguments: { action: 'open', targetUrl: 'https://x.com/home' },
}],
timestamp: Date.now(),
},
{
role: 'assistant',
id: 'final-response',
content: [{ type: 'text', text: 'Done.' }],
timestamp: Date.now(),
},
];
const win = BrowserWindow.getAllWindows()[0];
win?.webContents.send('gateway:notification', {
method: 'agent',
params: {
runId: 'mock-run',
sessionKey: 'agent:main:main',
state: 'final',
message: {
role: 'assistant',
id: 'final-response',
content: [{ type: 'text', text: 'Done.' }],
timestamp: Date.now(),
},
},
});
});
await expect(page.getByText('Done.')).toBeVisible();
await expect(page.locator('[data-testid="chat-execution-graph"]')).toHaveAttribute('data-collapsed', 'true');
} finally { } finally {
await closeElectronApp(app); await closeElectronApp(app);
} }
}); });
// Verifies two behaviors for a fully-completed long run loaded from history:
// (1) the execution graph auto-collapses and its summary reports accurate
// tool-call / process-message counts, and (2) the chat bubble shows only the
// reply's summary suffix, never the folded process-prefix text.
test('preserves long execution history counts and strips the full folded reply prefix', async ({ launchElectronApp }) => {
  const app = await launchElectronApp({ skipSetup: true });
  try {
    // Mock every IPC surface the renderer touches: gateway RPC responses for
    // session listing and chat history, plus the host-API HTTP endpoints.
    await installIpcMocks(app, {
      gatewayStatus: { state: 'running', port: 18789, pid: 12345 },
      gatewayRpc: {
        [stableStringify(['sessions.list', {}])]: {
          success: true,
          result: {
            sessions: [{ key: PROJECT_MANAGER_SESSION_KEY, displayName: 'main' }],
          },
        },
        // Both the initial (limit 200) and the expanded (limit 1000) history
        // fetches return the same pre-seeded long-run transcript.
        [stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 200 }])]: {
          success: true,
          result: {
            messages: longRunHistory,
          },
        },
        [stableStringify(['chat.history', { sessionKey: PROJECT_MANAGER_SESSION_KEY, limit: 1000 }])]: {
          success: true,
          result: {
            messages: longRunHistory,
          },
        },
      },
      hostApi: {
        [stableStringify(['/api/gateway/status', 'GET'])]: {
          ok: true,
          data: {
            status: 200,
            ok: true,
            json: { state: 'running', port: 18789, pid: 12345 },
          },
        },
        [stableStringify(['/api/agents', 'GET'])]: {
          ok: true,
          data: {
            status: 200,
            ok: true,
            json: {
              success: true,
              agents: [{ id: 'main', name: 'main' }],
            },
          },
        },
      },
    });

    const page = await getStableWindow(app);
    try {
      await page.reload();
    } catch (error) {
      // NOTE(review): reload appears to race app startup on file:// URLs;
      // only that specific failure is tolerated (same pattern as the other
      // specs in this describe block).
      if (!String(error).includes('ERR_FILE_NOT_FOUND')) {
        throw error;
      }
    }
    await expect(page.getByTestId('main-layout')).toBeVisible();
    await expect(page.getByTestId('chat-execution-graph')).toBeVisible({ timeout: 30_000 });
    // A completed run renders collapsed, with a one-line summary of counts.
    await expect(page.getByTestId('chat-execution-graph')).toHaveAttribute('data-collapsed', 'true');
    await expect(page.getByTestId('chat-execution-graph')).toContainText('0 tool calls');
    await expect(page.getByTestId('chat-execution-graph')).toContainText('9 process messages');
    // Only the summary suffix appears as chat text; the folded full reply
    // (process prefix + summary) must not be rendered verbatim anywhere.
    await expect(page.getByText(longRunSummary, { exact: true })).toBeVisible();
    await expect(page.getByText(longRunReplyText, { exact: true })).toHaveCount(0);
  } finally {
    await closeElectronApp(app);
  }
});
}); });

View File

@@ -0,0 +1,140 @@
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { extractText } from '@/pages/Chat/message-utils';

// vi.hoisted guarantees these mock handles exist before vitest hoists and
// evaluates the vi.mock factories below, which close over them.
const { gatewayRpcMock, hostApiFetchMock, agentsState } = vi.hoisted(() => ({
  gatewayRpcMock: vi.fn(),
  hostApiFetchMock: vi.fn(),
  agentsState: {
    agents: [] as Array<Record<string, unknown>>,
  },
}));

// The chat store reads gateway status and issues RPCs through this store's
// getState(); route rpc calls into the shared mock so tests can inspect them.
vi.mock('@/stores/gateway', () => ({
  useGatewayStore: {
    getState: () => ({
      status: { state: 'running', port: 18789 },
      rpc: gatewayRpcMock,
    }),
  },
}));

// Agents store is consumed via getState() as well; tests reset agentsState
// per run in beforeEach.
vi.mock('@/stores/agents', () => ({
  useAgentsStore: {
    getState: () => agentsState,
  },
}));

vi.mock('@/lib/host-api', () => ({
  hostApiFetch: (...args: unknown[]) => hostApiFetchMock(...args),
}));
describe('chat event dedupe', () => {
  // Factory for the store state every scenario starts from. A factory (rather
  // than a shared literal) ensures each test gets fresh array/object
  // references, so no state can leak between cases. `pendingFinal` is the only
  // field the scenarios differ on, supplied per call.
  const makeSeedState = (pendingFinal: boolean) => ({
    currentSessionKey: 'agent:main:main',
    currentAgentId: 'main',
    sessions: [{ key: 'agent:main:main' }],
    messages: [],
    sessionLabels: {},
    sessionLastActivity: {},
    sending: false,
    activeRunId: null,
    streamingText: '',
    streamingMessage: null,
    streamingTools: [],
    pendingFinal,
    lastUserMessageAt: null,
    pendingToolImages: [],
    error: null,
    loading: false,
    thinkingLevel: null,
  });

  beforeEach(() => {
    vi.resetModules();
    window.localStorage.clear();
    gatewayRpcMock.mockReset();
    hostApiFetchMock.mockReset();
    agentsState.agents = [];
  });

  it('keeps processing delta events without seq for the same run', async () => {
    const { useChatStore } = await import('@/stores/chat');
    useChatStore.setState(makeSeedState(true));

    // Builds a seq-less delta event for the same run/stream message id.
    const deltaWithoutSeq = (content: Array<{ type: string; text: string }>) => ({
      state: 'delta',
      runId: 'run-no-seq',
      sessionKey: 'agent:main:main',
      message: {
        role: 'assistant',
        id: 'reply-stream',
        content,
      },
    });

    useChatStore.getState().handleChatEvent(
      deltaWithoutSeq([{ type: 'text', text: 'Checked X.' }]),
    );
    // The follow-up delta extends the stream; without a seq it must NOT be
    // deduped away.
    useChatStore.getState().handleChatEvent(
      deltaWithoutSeq([
        { type: 'text', text: 'Checked X.' },
        { type: 'text', text: 'Checked X. Here is the summary.' },
      ]),
    );

    expect(extractText(useChatStore.getState().streamingMessage)).toBe('Checked X. Here is the summary.');
  });

  it('still dedupes repeated delta events when seq matches', async () => {
    const { useChatStore } = await import('@/stores/chat');
    useChatStore.setState(makeSeedState(false));

    // Builds a delta event carrying the same seq for the same run.
    const deltaWithSeq = (text: string) => ({
      state: 'delta',
      runId: 'run-with-seq',
      sessionKey: 'agent:main:main',
      seq: 3,
      message: {
        role: 'assistant',
        id: 'reply-stream',
        content: [{ type: 'text', text }],
      },
    });

    useChatStore.getState().handleChatEvent(deltaWithSeq('first version'));
    // Identical seq → the second payload must be ignored as a duplicate.
    useChatStore.getState().handleChatEvent(deltaWithSeq('duplicate version should be ignored'));

    expect(extractText(useChatStore.getState().streamingMessage)).toBe('first version');
  });
});

View File

@@ -23,7 +23,6 @@ describe('ChatMessage attachment dedupe', () => {
render( render(
<ChatMessage <ChatMessage
message={message} message={message}
showThinking={false}
suppressProcessAttachments suppressProcessAttachments
/>, />,
); );

View File

@@ -0,0 +1,180 @@
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { render, screen, waitFor } from '@testing-library/react';

const hostApiFetchMock = vi.fn();

// vi.hoisted makes these state objects available inside the hoisted vi.mock
// factories below.
const { gatewayState, agentsState } = vi.hoisted(() => ({
  gatewayState: {
    status: { state: 'running', port: 18789 },
  },
  agentsState: {
    agents: [{ id: 'main', name: 'main' }] as Array<Record<string, unknown>>,
    fetchAgents: vi.fn(),
  },
}));

// These stores are consumed as selector hooks by the Chat page, so each mock
// applies the selector against the static state object above.
vi.mock('@/stores/gateway', () => ({
  useGatewayStore: (selector: (state: typeof gatewayState) => unknown) => selector(gatewayState),
}));

vi.mock('@/stores/agents', () => ({
  useAgentsStore: (selector: (state: typeof agentsState) => unknown) => selector(agentsState),
}));

vi.mock('@/lib/host-api', () => ({
  hostApiFetch: (...args: unknown[]) => hostApiFetchMock(...args),
}));

// Minimal i18n: return deterministic strings for the keys the execution graph
// renders, so assertions can match plain text instead of translation keys.
vi.mock('react-i18next', () => ({
  useTranslation: () => ({
    t: (key: string, params?: Record<string, unknown>) => {
      if (key === 'executionGraph.collapsedSummary') {
        return `collapsed ${String(params?.toolCount ?? '')} ${String(params?.processCount ?? '')}`.trim();
      }
      if (key === 'executionGraph.agentRun') {
        return `Main execution`;
      }
      if (key === 'executionGraph.title') {
        return 'Execution Graph';
      }
      if (key === 'executionGraph.collapseAction') {
        return 'Collapse';
      }
      if (key === 'executionGraph.thinkingLabel') {
        return 'Thinking';
      }
      if (key.startsWith('taskPanel.stepStatus.')) {
        return key.split('.').at(-1) ?? key;
      }
      return key;
    },
  }),
}));

// Neutralize scroll/loading hooks and heavy child components not under test.
vi.mock('@/hooks/use-stick-to-bottom-instant', () => ({
  useStickToBottomInstant: () => ({
    contentRef: { current: null },
    scrollRef: { current: null },
  }),
}));

vi.mock('@/hooks/use-min-loading', () => ({
  useMinLoading: () => false,
}));

vi.mock('@/pages/Chat/ChatToolbar', () => ({
  ChatToolbar: () => null,
}));

vi.mock('@/pages/Chat/ChatInput', () => ({
  ChatInput: () => null,
}));
describe('Chat execution graph lifecycle', () => {
  beforeEach(async () => {
    vi.resetModules();
    hostApiFetchMock.mockReset();
    hostApiFetchMock.mockResolvedValue({ success: true, messages: [] });
    agentsState.fetchAgents.mockReset();

    // Seed a live run: one user turn, a mixed text+tool assistant turn in
    // history, and a streaming reply whose second segment extends the first.
    const { useChatStore } = await import('@/stores/chat');
    useChatStore.setState({
      messages: [
        {
          role: 'user',
          content: 'Check semiconductor chatter',
        },
        {
          role: 'assistant',
          id: 'tool-turn',
          content: [
            { type: 'text', text: 'Checked X.' },
            { type: 'tool_use', id: 'browser-search', name: 'browser', input: { action: 'search', query: 'semiconductor' } },
          ],
        },
      ],
      loading: false,
      error: null,
      sending: true,
      activeRunId: 'run-live',
      streamingText: '',
      streamingMessage: {
        role: 'assistant',
        id: 'final-stream',
        content: [
          { type: 'text', text: 'Checked X.' },
          { type: 'text', text: 'Checked X. Here is the summary.' },
        ],
      },
      streamingTools: [
        {
          toolCallId: 'browser-search',
          name: 'browser',
          status: 'completed',
          updatedAt: Date.now(),
        },
      ],
      pendingFinal: true,
      lastUserMessageAt: Date.now(),
      pendingToolImages: [],
      sessions: [{ key: 'agent:main:main' }],
      currentSessionKey: 'agent:main:main',
      currentAgentId: 'main',
      sessionLabels: {},
      sessionLastActivity: {},
      thinkingLevel: null,
    });
  });

  it('collapses execution once the reply starts streaming and keeps only the reply suffix in the bubble', async () => {
    const { Chat } = await import('@/pages/Chat/index');
    render(<Chat />);

    await waitFor(() => {
      expect(screen.getByTestId('chat-execution-graph')).toHaveAttribute('data-collapsed', 'true');
    });
    // The bubble shows only the new suffix; the folded full text (prefix plus
    // summary) must not be rendered.
    expect(screen.getByText('Here is the summary.')).toBeInTheDocument();
    expect(screen.queryByText('Checked X. Here is the summary.')).not.toBeInTheDocument();
  });

  it('renders the execution graph immediately for an active run before any stream content arrives', async () => {
    // Override the seeded state: the run has started (activeRunId set) but no
    // streaming content or tools have arrived yet.
    const { useChatStore } = await import('@/stores/chat');
    useChatStore.setState({
      messages: [
        {
          role: 'user',
          content: 'Check semiconductor chatter',
        },
      ],
      loading: false,
      error: null,
      sending: true,
      activeRunId: 'run-starting',
      streamingText: '',
      streamingMessage: null,
      streamingTools: [],
      pendingFinal: false,
      lastUserMessageAt: Date.now(),
      pendingToolImages: [],
      sessions: [{ key: 'agent:main:main' }],
      currentSessionKey: 'agent:main:main',
      currentAgentId: 'main',
      sessionLabels: {},
      sessionLastActivity: {},
      thinkingLevel: null,
    });

    const { Chat } = await import('@/pages/Chat/index');
    render(<Chat />);

    await waitFor(() => {
      expect(screen.getByTestId('chat-execution-graph')).toHaveAttribute('data-collapsed', 'false');
    });
    // While waiting for the first streaming update, a trailing "Thinking"
    // placeholder step is shown in the expanded graph.
    expect(screen.getByTestId('chat-execution-step-thinking-trailing')).toBeInTheDocument();
    expect(screen.getAllByText('Thinking').length).toBeGreaterThan(0);
  });
});

View File

@@ -63,7 +63,6 @@ describe('useChatStore startup history retry', () => {
error: null, error: null,
loading: false, loading: false,
thinkingLevel: null, thinkingLevel: null,
showThinking: true,
}); });
gatewayRpcMock gatewayRpcMock
@@ -115,7 +114,6 @@ describe('useChatStore startup history retry', () => {
error: null, error: null,
loading: false, loading: false,
thinkingLevel: null, thinkingLevel: null,
showThinking: true,
}); });
gatewayRpcMock gatewayRpcMock
@@ -162,7 +160,6 @@ describe('useChatStore startup history retry', () => {
error: null, error: null,
loading: false, loading: false,
thinkingLevel: null, thinkingLevel: null,
showThinking: true,
}); });
let resolveFirstAttempt: ((value: { messages: Array<{ role: string; content: string; timestamp: number }> }) => void) | null = null; let resolveFirstAttempt: ((value: { messages: Array<{ role: string; content: string; timestamp: number }> }) => void) | null = null;
@@ -242,7 +239,6 @@ describe('useChatStore startup history retry', () => {
error: null, error: null,
loading: false, loading: false,
thinkingLevel: null, thinkingLevel: null,
showThinking: true,
}); });
gatewayRpcMock.mockImplementationOnce(async () => { gatewayRpcMock.mockImplementationOnce(async () => {

View File

@@ -104,7 +104,6 @@ describe('chat target routing', () => {
error: null, error: null,
loading: false, loading: false,
thinkingLevel: null, thinkingLevel: null,
showThinking: true,
}); });
await useChatStore.getState().sendMessage('Hello direct agent', undefined, 'research'); await useChatStore.getState().sendMessage('Hello direct agent', undefined, 'research');
@@ -148,7 +147,6 @@ describe('chat target routing', () => {
error: null, error: null,
loading: false, loading: false,
thinkingLevel: null, thinkingLevel: null,
showThinking: true,
}); });
await useChatStore.getState().sendMessage( await useChatStore.getState().sendMessage(

View File

@@ -1,5 +1,6 @@
import { describe, expect, it } from 'vitest'; import { describe, expect, it } from 'vitest';
import { deriveTaskSteps, parseSubagentCompletionInfo } from '@/pages/Chat/task-visualization'; import { deriveTaskSteps, parseSubagentCompletionInfo } from '@/pages/Chat/task-visualization';
import { stripProcessMessagePrefix } from '@/pages/Chat/message-utils';
import type { RawMessage, ToolStatus } from '@/stores/chat'; import type { RawMessage, ToolStatus } from '@/stores/chat';
describe('deriveTaskSteps', () => { describe('deriveTaskSteps', () => {
@@ -23,14 +24,11 @@ describe('deriveTaskSteps', () => {
], ],
}, },
streamingTools, streamingTools,
sending: true,
pendingFinal: false,
showThinking: true,
}); });
expect(steps).toEqual([ expect(steps).toEqual([
expect.objectContaining({ expect.objectContaining({
id: 'stream-thinking', id: 'stream-thinking-0',
label: 'Thinking', label: 'Thinking',
status: 'running', status: 'running',
kind: 'thinking', kind: 'thinking',
@@ -69,9 +67,6 @@ describe('deriveTaskSteps', () => {
summary: 'Scanning files', summary: 'Scanning files',
}, },
], ],
sending: true,
pendingFinal: false,
showThinking: false,
}); });
expect(steps).toEqual([ expect(steps).toEqual([
@@ -111,9 +106,6 @@ describe('deriveTaskSteps', () => {
summary: 'Permission denied', summary: 'Permission denied',
}, },
], ],
sending: true,
pendingFinal: false,
showThinking: false,
}); });
expect(steps).toEqual([ expect(steps).toEqual([
@@ -127,7 +119,7 @@ describe('deriveTaskSteps', () => {
]); ]);
}); });
it('keeps the newest running step when the execution graph exceeds the max length', () => { it('keeps all steps when the execution graph exceeds the previous max length', () => {
const messages: RawMessage[] = Array.from({ length: 9 }, (_, index) => ({ const messages: RawMessage[] = Array.from({ length: 9 }, (_, index) => ({
role: 'assistant', role: 'assistant',
id: `assistant-${index}`, id: `assistant-${index}`,
@@ -153,12 +145,14 @@ describe('deriveTaskSteps', () => {
summary: 'Scanning current workspace', summary: 'Scanning current workspace',
}, },
], ],
sending: true,
pendingFinal: false,
showThinking: false,
}); });
expect(steps).toHaveLength(8); expect(steps).toHaveLength(10);
expect(steps[0]).toEqual(expect.objectContaining({
id: 'tool-0',
label: 'read_0',
status: 'completed',
}));
expect(steps.at(-1)).toEqual(expect.objectContaining({ expect(steps.at(-1)).toEqual(expect.objectContaining({
id: 'tool-live', id: 'tool-live',
label: 'grep_live', label: 'grep_live',
@@ -182,14 +176,11 @@ describe('deriveTaskSteps', () => {
messages, messages,
streamingMessage: null, streamingMessage: null,
streamingTools: [], streamingTools: [],
sending: false,
pendingFinal: false,
showThinking: true,
}); });
expect(steps).toEqual([ expect(steps).toEqual([
expect.objectContaining({ expect.objectContaining({
id: 'history-thinking-assistant-1', id: 'history-thinking-assistant-1-0',
label: 'Thinking', label: 'Thinking',
status: 'completed', status: 'completed',
kind: 'thinking', kind: 'thinking',
@@ -203,31 +194,106 @@ describe('deriveTaskSteps', () => {
]); ]);
}); });
it('collapses cumulative streaming thinking details into the newest version', () => { it('splits cumulative streaming thinking into separate execution steps', () => {
const steps = deriveTaskSteps({ const steps = deriveTaskSteps({
messages: [], messages: [],
streamingMessage: { streamingMessage: {
role: 'assistant', role: 'assistant',
content: [ content: [
{ type: 'thinking', thinking: 'thinking 1' }, { type: 'thinking', thinking: 'Reviewing X.' },
{ type: 'thinking', thinking: 'thinking 1 2' }, { type: 'thinking', thinking: 'Reviewing X. Comparing Y.' },
{ type: 'thinking', thinking: 'thinking 1 2 3' }, { type: 'thinking', thinking: 'Reviewing X. Comparing Y. Drafting answer.' },
], ],
}, },
streamingTools: [], streamingTools: [],
sending: true,
pendingFinal: false,
showThinking: true,
}); });
expect(steps).toEqual([ expect(steps).toEqual([
expect.objectContaining({ expect.objectContaining({
id: 'stream-thinking', id: 'stream-thinking-0',
detail: 'thinking 1 2 3', detail: 'Reviewing X.',
status: 'completed',
}),
expect.objectContaining({
id: 'stream-thinking-1',
detail: 'Comparing Y.',
status: 'completed',
}),
expect.objectContaining({
id: 'stream-thinking-2',
detail: 'Drafting answer.',
status: 'running',
}), }),
]); ]);
}); });
it('keeps earlier reply segments in the graph when the last streaming segment is rendered separately', () => {
const steps = deriveTaskSteps({
messages: [],
streamingMessage: {
role: 'assistant',
content: [
{ type: 'text', text: 'Checked X.' },
{ type: 'text', text: 'Checked X. Checked Snowball.' },
{ type: 'text', text: 'Checked X. Checked Snowball. Here is the summary.' },
],
},
streamingTools: [],
omitLastStreamingMessageSegment: true,
});
expect(steps).toEqual([
expect.objectContaining({
id: 'stream-message-0',
detail: 'Checked X.',
status: 'completed',
}),
expect.objectContaining({
id: 'stream-message-1',
detail: 'Checked Snowball.',
status: 'completed',
}),
]);
});
it('folds earlier reply segments into the graph but leaves the final answer for the chat bubble', () => {
const steps = deriveTaskSteps({
messages: [
{
role: 'assistant',
id: 'assistant-reply',
content: [
{ type: 'text', text: 'Checked X.' },
{ type: 'text', text: 'Checked X. Checked Snowball.' },
{ type: 'text', text: 'Checked X. Checked Snowball. Here is the summary.' },
],
},
],
streamingMessage: null,
streamingTools: [],
});
expect(steps).toEqual([
expect.objectContaining({
id: 'history-message-assistant-reply-0',
detail: 'Checked X.',
status: 'completed',
}),
expect.objectContaining({
id: 'history-message-assistant-reply-1',
detail: 'Checked Snowball.',
status: 'completed',
}),
]);
});
it('strips folded process narration from the final reply text', () => {
expect(stripProcessMessagePrefix(
'Checked X. Checked Snowball. Here is the summary.',
['Checked X.', 'Checked Snowball.'],
)).toBe('Here is the summary.');
});
it('builds a branch for spawned subagents', () => { it('builds a branch for spawned subagents', () => {
const messages: RawMessage[] = [ const messages: RawMessage[] = [
{ {
@@ -254,9 +320,6 @@ describe('deriveTaskSteps', () => {
messages, messages,
streamingMessage: null, streamingMessage: null,
streamingTools: [], streamingTools: [],
sending: false,
pendingFinal: false,
showThinking: true,
}); });
expect(steps).toEqual([ expect(steps).toEqual([